| query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
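Each row pairs a natural-language query with the code it describes, a list of hard-negative code snippets, and a metadata dict whose `objective.triplet` entry names the columns that play the anchor, positive, and negative roles. The following is a minimal sketch of how one such row might be expanded into (anchor, positive, negative) training triplets; it assumes the rows are available as plain Python dicts, and the abbreviated `example_row` and the `iter_triplets` helper are illustrative only, not part of the dataset.

```python
from typing import Dict, Iterator, Tuple


def iter_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    """Expand one dataset row into (anchor, positive, negative) triplets.

    The metadata's objective.triplet entry lists the column names acting as
    anchor / positive / negatives, e.g. ["query", "document", "negatives"].
    """
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]
        positive = row[positive_col]
        for negative in row[negatives_col]:
            yield anchor, positive, negative


# Abbreviated, hypothetical example row for illustration only.
example_row = {
    "query": "Returns the request for the existing Firewall resource.",
    "document": "def _GetGetRequest(self, client, resource_reference): ...",
    "negatives": ["def ex_get_firewall(self, name): ...", "def ListFunc(self): ..."],
    "metadata": {"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}},
}

for anchor, positive, negative in iter_triplets(example_row):
    print(anchor[:40], "|", positive[:40], "|", negative[:40])
```

Each expanded triplet could then be fed to whatever triplet or contrastive loss the embedding-training setup uses.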
Returns the request for the existing Firewall resource.
|
def _GetGetRequest(self, client, resource_reference):
    return (client.apitools_client.firewalls, 'Get',
            client.messages.ComputeFirewallsGetRequest(
                firewall=resource_reference.Name(),
                project=resource_reference.project))
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallDeviceArgs']]]]] = None,\n disabled: Optional[pulumi.Input[bool]] = None,\n inbound_policy: Optional[pulumi.Input[str]] = None,\n inbounds: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallInboundArgs']]]]] = None,\n label: Optional[pulumi.Input[str]] = None,\n linodes: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n outbound_policy: Optional[pulumi.Input[str]] = None,\n outbounds: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallOutboundArgs']]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Firewall':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _FirewallState.__new__(_FirewallState)\n\n __props__.__dict__[\"devices\"] = devices\n __props__.__dict__[\"disabled\"] = disabled\n __props__.__dict__[\"inbound_policy\"] = inbound_policy\n __props__.__dict__[\"inbounds\"] = inbounds\n __props__.__dict__[\"label\"] = label\n __props__.__dict__[\"linodes\"] = linodes\n __props__.__dict__[\"outbound_policy\"] = outbound_policy\n __props__.__dict__[\"outbounds\"] = outbounds\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n return Firewall(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n end_ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n start_ip_address: Optional[pulumi.Input[str]] = None,\n synapse_workspace_id: Optional[pulumi.Input[str]] = None) -> 'FirewallRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _FirewallRuleState.__new__(_FirewallRuleState)\n\n __props__.__dict__[\"end_ip_address\"] = end_ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"start_ip_address\"] = start_ip_address\n __props__.__dict__[\"synapse_workspace_id\"] = synapse_workspace_id\n return FirewallRule(resource_name, opts=opts, __props__=__props__)",
"def ex_get_firewall(self, name):\r\n request = '/global/firewalls/%s' % (name)\r\n response = self.connection.request(request, method='GET').object\r\n return self._to_firewall(response)",
"def inboundRequest(self, request):\n return self._inboundRequestEndpoint.app.resource()",
"def ex_update_firewall(self, firewall):\r\n firewall_data = {}\r\n firewall_data['name'] = firewall.name\r\n firewall_data['allowed'] = firewall.allowed\r\n firewall_data['network'] = firewall.network.extra['selfLink']\r\n if firewall.source_ranges:\r\n firewall_data['sourceRanges'] = firewall.source_ranges\r\n if firewall.source_tags:\r\n firewall_data['sourceTags'] = firewall.source_tags\r\n if firewall.target_tags:\r\n firewall_data['targetTags'] = firewall.target_tags\r\n if firewall.extra['description']:\r\n firewall_data['description'] = firewall.extra['description']\r\n\r\n request = '/global/firewalls/%s' % (firewall.name)\r\n\r\n self.connection.async_request(request, method='PUT',\r\n data=firewall_data)\r\n\r\n return self.ex_get_firewall(firewall.name)",
"def Handle(self, firewall_name):\n firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',\n firewall_name)\n\n firewall_request = self.api.firewalls.get(\n project=firewall_context['project'],\n firewall=firewall_context['firewall'])\n\n return firewall_request.execute()",
"def single_fw(name, mgmt_ip, mgmt_network, mgmt_interface='0', dns=None):\n result = Layer3Firewall.create(name, mgmt_ip, mgmt_network,\n mgmt_interface=mgmt_interface,\n domain_server_address=dns)\n return result",
"def firewall(action=None, name=None, pod=None,\n vlan=None, anycast=None, mac=None):\n base_url = '%s/firewalls' % (server)\n r = None\n if action == 'list':\n r = call('get', '%s' % base_url)\n elif action == 'create':\n if not pod:\n print 'Missing pod to create'\n sys.exit(1)\n zone_id = fetch_id('zones', pod)\n r = call('post', '%s/firewalls' % (server),\n data=json.dumps({'name': name, 'zone_id': zone_id,\n 'mac': mac})\n )\n elif action == 'delete':\n r = call('delete', '%s/%s' % (\n base_url, fetch_id('firewalls', name))\n )\n elif action == 'disable':\n r = call('post', '%s/firewalls/disable' % (server),\n data=json.dumps({'name': name})\n )\n elif action == 'enable':\n r = call('post', '%s/firewalls/enable' % (server),\n data=json.dumps({'name': name})\n )\n elif action == 'sync':\n r = call('post', '%s/firewalls/sync' % (server),\n data=json.dumps({'name': name})\n )\n elif action == 'zonesync':\n r = call('post', '%s/firewalls/zonesync' % (server),\n data=json.dumps({'name': name})\n )\n elif action == 'info':\n r = call('get', '%s/by-name/%s' % (base_url, name))\n elif action == 'anycast_attach':\n if not anycast:\n print 'Missing anycast to attach'\n sys.exit(1)\n\n anycast_id = fetch_id('anycasts', anycast)\n r = call('post', '%s/%s/anycasts' %\n (base_url, fetch_id('firewalls', name)),\n data=json.dumps({'anycast_id': anycast_id})\n )\n elif action == 'anycast_detach':\n if not anycast:\n print 'Missing anycast to attach'\n sys.exit(1)\n\n r = call('delete', '%s/%s/anycasts/%s' % (\n base_url,\n fetch_id('firewalls', name),\n fetch_id('anycasts', anycast))\n )\n elif action == 'vlan_list':\n r = call('get', '%s/vlans/list-by-firewall/%s' %\n (server, fetch_id('firewalls', name))\n )\n elif action == 'anycast_list':\n r = call('get', '%s/anycasts/list-by-firewall/%s' %\n (server, fetch_id('firewalls', name))\n )\n else:\n baker.usage(sys._getframe().f_code.co_name)\n sys.exit(1)\n pretty_output(r)",
"def Handle(self, firewall_name):\n if not self._flags.allowed:\n raise gcutil_errors.CommandError(\n 'You must specify at least one rule through --allowed.')\n\n firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',\n firewall_name)\n\n firewall_resource = {\n 'kind': self._GetResourceApiKind('firewall'),\n 'name': firewall_context['firewall'],\n 'description': self._flags.description,\n }\n\n if self._flags.network is not None:\n firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(\n 'networks', self._flags.network)\n\n if (not self._flags.allowed_ip_sources and\n not self._flags.allowed_tag_sources):\n self._flags.allowed_ip_sources.append('0.0.0.0/0')\n\n try:\n firewall_rules = FirewallRules(self._flags.allowed,\n self._flags.allowed_ip_sources)\n firewall_rules.SetTags(self._flags.allowed_tag_sources,\n self._flags.target_tags)\n firewall_rules.AddToFirewall(firewall_resource)\n firewall_request = self.api.firewalls.insert(\n project=firewall_context['project'], body=firewall_resource)\n return firewall_request.execute()\n except ValueError, e:\n raise gcutil_errors.CommandError(e)",
"def op(self, cmd=None, vsys=None, xml=False, cmd_xml=True, extra_qs=None, retry_on_peer=False):\n if vsys is None:\n vsys = self.vsys\n return super(Firewall, self).op(cmd, vsys, xml, cmd_xml, extra_qs, retry_on_peer)",
"def get_subnet_request_factory(self):\n return ipam_req.SubnetRequestFactory",
"def create_firewall_rule(self, **attrs):\n return self._create(_firewall_rule.FirewallRule, **attrs)",
"def service_resource(self):\n\n return self.gce_project.service.firewalls()",
"def ListFunc(self):\n return self.api.firewalls.list",
"def create_firewall_rule(self, starting_ip = None, ending_ip = None, **options):\n print(\"Creating Server Firewall Rule...\")\n try:\n assert starting_ip and ending_ip\n except AssertionError:\n starting_ip = ending_ip = \"\"\n import requests\n ip = requests.get('https://checkip.amazonaws.com').text.strip()\n print(f\"Starting IP and ending IP is not given, using current IP of the system which is {ip}\")\n subnet = \".\".join(ip.split('.')[0:2])\n starting_ip = subnet + \".0.0\"\n ending_ip = subnet + \".255.255\"\n print(f\"Creating firewall rule with starting IP: {starting_ip} and ending IP: {ending_ip}\")\n try:\n firewall_rule = self.sql_client.firewall_rules.create_or_update(\n self.default_RG,\n self.sql_credentials.server_name,\n f\"firewall_from_{starting_ip}_to_{ending_ip}\",\n f\"{starting_ip}\",\n f\"{ending_ip}\"\n )\n except Exception as err:\n print(f\"Error creating firewall rule, info -> {err.args[0]}\")\n finally:\n print(f\"Firewall rule created, info -> {firewall_rule}\")\n return firewall_rule",
"def get_firewall_rule(self, firewall_rule):\n return self._get(_firewall_rule.FirewallRule, firewall_rule)",
"def clone(self):\n return ResourceRequest(self.name, self.type, self.force_validate,\n self.save_state, **self.kwargs)",
"def ex_create_firewall(self, name, allowed, network='default',\r\n source_ranges=None, source_tags=None,\r\n target_tags=None):\r\n firewall_data = {}\r\n if not hasattr(network, 'name'):\r\n nw = self.ex_get_network(network)\r\n else:\r\n nw = network\r\n\r\n firewall_data['name'] = name\r\n firewall_data['allowed'] = allowed\r\n firewall_data['network'] = nw.extra['selfLink']\r\n firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0']\r\n if source_tags is not None:\r\n firewall_data['sourceTags'] = source_tags\r\n if target_tags is not None:\r\n firewall_data['targetTags'] = target_tags\r\n\r\n request = '/global/firewalls'\r\n\r\n self.connection.async_request(request, method='POST',\r\n data=firewall_data)\r\n return self.ex_get_firewall(name)",
"def get_resource_request(self, resource_name: str) -> Optional[str]:\n\n if not self.resources or not self.resources.requests:\n return None\n return self.resources.requests.get(resource_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a Hello. If 'name' was passed as an argument, greet that name; otherwise a random name is retrieved from the model.
|
def get(self):
    if self.args['name'] == '':
        name = self.model.get_random_name()
    else:
        name = self.args['name']
    return self.jsonify({'message': 'Hello {:s}!'.format(name)})
|
[
"def say_hello(name):\n return 'Hello, {} '.format(name)",
"def hello(self, message, args):\n if args.favorite_number is None:\n return \"Hello {name}\".format(name=args.name)\n else:\n return \"Hello {name}, I hear your favorite number is {number}\".format(\n name=args.name, number=args.favorite_number\n )",
"def say_hello(self, message, args):\n if args.favorite_number is None:\n return f'Hello {args.name}.'\n else:\n return f'Hello {args.name}, I hear your favorite number is {args.favorite_number}.'",
"def test_hello_arg_with_name(self):\n result = subprocess.run(\n [\"klickbrick\", \"hello\", \"--name\", \"Ole\"], capture_output=True)\n assert b\"Hello Ole\" in result.stdout",
"def greet_user(self):\n print(\"\\nHello, \" + self.full_name.title() + \"!\")",
"def hello_name(name=\"\"):\n print(f\"hello {name}\")",
"async def say_hi(self, to):\n name = to\n if to == 'me':\n name = self._message.author.name\n return f'Hello {name}, how are you?'",
"def greeting() -> str:\n return 'hello'",
"def whoami():\n\n msg = Message(flask.request.form)\n sender = msg.sender\n # load names from pickle file\n nameFound = db.findName(sender)\n\n if nameFound:\n print('You are {0}.'.format(nameFound))\n return 'You are {0}.'.format(nameFound)\n else:\n return 'I do not know you. Set your name with !name'",
"def Hello(s, goRun=False):\n\t_hi.hi_Hello(s, goRun)",
"def greet_user(self):\n print(\"Greetings \" + self.f_name.title() + \" \" + self.l_name.title() + \" we hope you enjoy your stay with us!\")",
"def describe_game(name):\n # meaning, if we do not already have this user's name,\n # then they are a new player and we need to get their name\n if name != \"\":\n print( \"\\nThank you for playing again, {}!\".format(name) )\n else:\n while True:\n if name == \"\":\n name = input( \"\\nWhat is your name? \\n>>> \".capitalize() )\n if name != \"\":\n print(\"\\nWelcome, {}!\".format(name) )\n print(\"\\nIn this game, you will be greeted \\n by several different people.\\n\")\n print(\"but at the end of the game your fate \\n will be sealed by your actions.\")\n break\n return name",
"def get_greetings() -> str:\n return \"Hello.\"",
"def greeting(self):\n #############################################################################\n # TODO: Write a short greeting message #\n #############################################################################\n\n greeting_message = \"Hi my name's Celeste! If you're ready to find your next favorite movie, tell me about some movie you've seen.\"\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return greeting_message",
"def ask():\r\n print('What is your name? ')\r\n name = input('Name: ')\r\n return name",
"def test_get_model_by_name(self):\n pass",
"async def memegen(self, ctx, name=\"\", *fields):\n if len(fields) == 0:\n return await ctx.send(\"Controleer je argumenten.\")\n\n # Get the meme info that corresponds to this name\n result: memes.Meme = memes.getMeme(name)\n\n # No meme found\n if result is None:\n return await ctx.send(\"Deze meme staat niet in de database.\")\n\n # Convert to list to support item assignment\n fields = list(fields)\n\n generated = generate(result, fields)\n\n # If the request was successful, remove the message calling it\n if generated[\"success\"]:\n await self.utilsCog.removeMessage(ctx.message)\n\n # Send the meme's url or the error message\n await ctx.send(generated[\"message\"])",
"async def _name(self, ctx, new_name: str):\n mother = ctx.message.author\n if common.has_mon(str(mother)):\n common.user_data['players'][str(mother)]['mon']['name'] = new_name\n await self.bot.say(\"Congratulations, {0}, your mon has been named {1}!\".format(mother.mention, new_name))\n else:\n await self.bot.say(\"{0}, you have no mon. You need to hatch an egg first.\".format(mother.mention))",
"def hiB(ctx, name='SomeDefaultName'):\n print(\"Hi %s!\" % name )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper code to compute average word length of a name
|
def average_word_length(self, name):
    return np.mean([len(word) for word in name.split()])
|
[
"def get_avg_word_length(lyrics):\n\n\tlyrics = lyrics.translate(str.maketrans('','',string.punctuation))\n\treturn round(sum([len(word) for word in lyrics.split()]) / len(lyrics.split()),2)",
"def get_average_word_length(self):\n\n if self.word_count_list is None:\n self.tokenize_documents()\n\n return self.word_count_list.apply(lambda x: np.average([len(w) for w in x]))",
"def average_word_length(sentence_in):\n\tsum = 0.0\n\tcount = 0\n\tfor word in sentence_in.split(sep=\" \"):\n\t\tsum += len(word)\n\t\tcount += 1\n\treturn (sum / count)",
"def get_mean_word_length(self):\n if not self.words:\n return False\n redacted_words = purge_punctuation_etc(self.words)\n redacted_words = redacted_words.replace(\" \", \"\")\n total_letters = len(redacted_words)\n result = total_letters/self.word_count\n return result",
"def get_avg_wrdlen(tokens):\n if len(tokens) < 2:\n return -1\n num = len(tokens)\n count = 0\n for word in tokens:\n count += len(word)\n avg_wrdlen = float(count)/float(num)\n avg_wrdlen = avg_wrdlen\n if avg_wrdlen < 0: avg_wrdlen = 0\n return avg_wrdlen",
"def title_length(data):\n if data.title is None:\n return None\n avg_len = 0\n for i in range(len(data.title)):\n avg_len += len(data.title[i])\n return avg_len / len(data.title)",
"def average_words(statuses):\n total_words = sum([len(s.split()) for s in statuses])\n return 1.0 * total_words / len(statuses)",
"def get_name_length(name):\n\n return len(name)",
"def get_average_headline_len(self):\n\t\taverage = 0\n\t\tfor d in range(len(self.stories)):\n\t\t\theadline = self.stories[d][0].split(' ')\n\t\t\tlength = len(headline)\n\t\t\taverage += length\n\t\taverage = (float(average) / float(len(self.stories)))\n\t\treturn int(average)",
"def getAlphaRatio(word):\n\tlength = len(word)\n\talpha = 0.0\n\tfor letter in word:\n\t\tif letter.isalpha():\n\t\t\talpha += 1.0\n\t#print \"ALPHA\", word, alpha/length\n\treturn alpha/length",
"def average_len(records):\n count = 0\n total_Length = 0\n for i in records:\n count = count + 1\n total_Length = total_Length + len(i.seq)\n average = total_Length/count\n return average",
"def getWordScore(word, n):\n score = 0\n for letter in word:\n score += SCRABBLE_LETTER_VALUES[letter]\n if len(word) == n:\n score = score * len(word) + 50\n else:\n score = score * len(word)\n return score",
"def count_letters(words: list[str]) -> list[int]:\n \n len_of_words = []\n letter_counts = []\n \n for word in words:\n len_of_words.append(len(word))\n \n letter_counts.append(min(len_of_words))\n letter_counts.append(max(len_of_words))\n letter_counts.append(my_mean(len_of_words))\n \n return letter_counts;",
"def getWordScore(word, n):\r\n score=0\r\n for letter in word:\r\n score=score + SCRABBLE_LETTER_VALUES[letter]\r\n if len(word)<n:\r\n score=score*len(word)\r\n else:\r\n score=score*len(word) + 50\r\n return score",
"def abstract_length(data):\n if data.abstract is None:\n return None\n avg_len = 0\n for i in range(len(data.abstract)):\n avg_len += len(data.abstract[i])\n return avg_len / len(data.abstract)",
"def calc_wordlen_avg(start, end, dict1):\n lst = []\n for yr in range(int(start), int(end) + 1):\n sum_letters = 0\n total_words = 0\n for key in dict1:\n for entry in dict1[key]:\n if entry.year == yr:\n sum_letters += (len(key) * entry.occ)\n total_words += entry.occ\n lst.append(Wordlen(\n year=int(yr),\n avg=float(sum_letters/total_words),\n ))\n return lst",
"def _calculate_average_field_lengths(self):\n accumulator = defaultdict(int)\n documents_with_field = defaultdict(int)\n\n for field_ref, length in self.field_lengths.items():\n _field_ref = FieldRef.from_string(field_ref)\n field = _field_ref.field_name\n\n documents_with_field[field] += 1\n accumulator[field] += length\n\n for field_name in self._fields:\n accumulator[field_name] /= documents_with_field[field_name]\n\n self.average_field_length = accumulator",
"def avg_occ(word, all_texts, text_lens):\n n_occs = []\n for text in all_texts:\n n_occs.append(np.mean([i+1 for i in\n range(len(text)) if word in ' '.join(text[i])]))\n return np.array(n_occs)/text_lens",
"def name_score():\n\n names = read_file()\n total = 0\n for i in xrange(len(names)):\n score = 0\n for letter in names[i]:\n if letter != '\"':\n score += (ord(letter) - 64)\n score = score * (i+1)\n total += score\n return total",
"def _score_by_len(self, lst):\n words = []\n score = 0\n if isinstance(lst, tuple):\n words = [lst[1]]\n else:\n for each in lst:\n words.append(each[1])\n\n for word in words:\n if word in UNIGRAM_COUNTS:\n score = score + len(word)\n else:\n score = score + len(word)\n\n return score"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Enter the Case_Number from which you want to extract the year. This function takes the first four digits from the entered data.
|
def Year(Case_Number):
    for x in Case_Number:
        return Case_Number[:4]
|
[
"def get_year(msg):\n year = input(msg)\n if re.match(\"[1-3][0-9]{3}\", year) and len(year) == 4:\n return year\n else:\n print(\"Enter correct year!\")\n return get_year(msg)",
"def get_year(text):\n # type: (str) -> int\n year = re.search(r\"\\d{4}\", text)\n return int(year.group()) if year else 0",
"def getYear():",
"def extract_year(title: str):\n year = -1\n match = regex.search('\\((\\d{4})\\)$', title.strip())\n if match:\n year = int(match.group(1).strip())\n title = title[:match.start()].strip()\n return title, year",
"def extract_year(string: str) -> int:\n expr = r\"(?:19|20)\\d{2}\"\n matches = re.findall(expr, string)\n if matches:\n year = matches[0]\n else:\n raise Exception(\"The string does not have any valid year.\")\n\n return int(year)",
"def parse_year(raw):\n years = re.findall('[0-9]{4}', raw)\n\n if years:\n return int(years[0])",
"def _get_year_from_file(file_name):\n with open_input_file(file_name) as fh:\n year = None\n read_year = False\n for line in fh:\n if line.startswith('FH_TITLE:'):\n pass\n elif line.startswith('FH_DATE:'):\n read_year = True\n elif line.startswith('FH_'):\n return \"9999\" if year is None else year\n elif read_year:\n # skip empty lines (shouldn't be there though)\n if not line.strip():\n continue\n year = line.strip()[:4]\n return year\n # make sure we never return None\n return '9999'",
"def _getYearFromDesc(desc):\n year = desc.split(\"\\n\")[2]\n return int(year[-5:-1])",
"def _parse_year(key):\n\n base_str = key\n year = re.search('_[1-2][0-9]{3}$', key, flags=re.IGNORECASE)\n if year is not None:\n base_str = key.replace(year.group(0), '')\n year = int(year.group(0).lstrip('_'))\n\n # unlikely to be a year before 1800 or after 2200\n if year < 1800 or year > 2200:\n year = None\n base_str = key\n\n return year, base_str",
"def get_year(ax_id):\n modern_ax_id = re.compile(r\"([0-9]{2})([0-9]{2})\\.([0-9]+)\")\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = \"20\" + search_modern[1]\n else:\n old_ax_id = re.compile(r\"([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)\")\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = \"19\" + search_old[2]\n else:\n year = \"20\" + search_old[2]\n return year",
"def get_filename_year(filename):\n new_filename = filename\n filename_year = None\n matches = re.findall(\"\\s\\(\\d+\\)\", new_filename)\n if not matches:\n matches = re.findall(\"\\s\\d+\", new_filename)\n if matches: \n match = matches[-1] # last match\n now = datetime.datetime.now() \n year_string = str(match)\n year = int(year_string.replace(\"(\", \"\").replace(\")\", \"\"))\n if new_filename.endswith(year_string):\n if year > 1945 and year <= now.year: \n filename_year = str(year)\n new_filename = filename.replace(year_string, \"\") \n return new_filename, filename_year",
"def get_year(ax_id):\n modern_ax_id = re.compile(r'([0-9]{2})([0-9]{2})\\.([0-9]+)')\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = '20' + search_modern[1]\n else:\n old_ax_id = re.compile(r'([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)')\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = '19' + search_old[2]\n else:\n year = '20' + search_old[2]\n return year",
"def two_to_four_digit_year(year):\n if year > 99:\n return year\n elif year > 50:\n # 85 -> 1985\n return year + scoring.REFERENCE_YEAR - 100\n else:\n # 15 -> 2015\n return year + scoring.REFERENCE_YEAR",
"def extract_year(some_data):\n some_data['FiscalYear'] = pd.DatetimeIndex(some_data['Date']).year\n some_data['FiscalYear'] = some_data['FiscalYear'].fillna(0).astype(np.int64)\n return some_data",
"def model_year(self):\n if self._edid[0x10] == 255:\n return self._edid[0x11] + 1990\n else:\n return None",
"def getYearfromYeartxt(self):\n File = open(os.path.join(self.PATH[\"ABM_Path\"], \"year.txt\"), \"r\") \n y = int(File.readline()) # current year\n y_start = int(File.readline().split()[1]) \n y_end = int(File.readline().split()[1]) \n File.close()\n \n self.StartYear = y_start\n self.EndYear = y_end\n \n return y",
"def get_year_born():\n birth_year = int(input(\"What year were you born?\"))\n return birth_year",
"def getYear(period):\n match = re.search(r'^Run([0-9]+)', period)\n if match is None:\n raise RuntimeError('Unable to extract year from the datataking period = \"{}\"'.format(period))\n return int(match.group(1))",
"def get_singlename_year(name, sex, year):\n\n #identify filename for that year\n filename = \"names/yob\" + str(year) + \".txt\"\n\n #open the file and search for that name and sex, extracting the number\n with open(filename) as f:\n text = f.read()\n pat = name + \",\" + sex + \",(\\d+)\"\n result = re.findall(pat, text) \n\n if result == []:\n number = 0 #set number to zero if name isn't on the list\n else:\n number = int(result[0])\n return number #returns the number for that name in that year"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a dictionary of boolean arrays for each sampling point. These are ultimately derived from shape files, but if possible this function will load a pickled dictionary instead.
|
def get_drn_samp_pts_dict(recalc=False):
    pickle_path = "{}/inputs/pickeled_files/drn_samp_pts.p".format(sdp)
    if os.path.exists(pickle_path) and not recalc:
        drn_con_samp_pts = pickle.load(open(pickle_path))
        return drn_con_samp_pts

    # load all shapefiles in base_shp_path
    base_shp_path = "{}/inputs/shp_files/drain_catchments2/*.shp".format(sdp)
    temp_lst = glob.glob(base_shp_path)
    temp_kys = [os.path.basename(e).split('.')[0] for e in temp_lst]
    shp_dict = dict(zip(temp_kys, temp_lst))
    drn_con_samp_pts = {}
    for loc in shp_dict.keys():
        temp = shape_file_to_model_array(shp_dict[loc], 'elv', alltouched=True)
        temp2 = np.zeros(temp.shape, dtype=bool)
        temp2[np.isfinite(temp)] = True
        temp2[np.isnan(temp)] = False
        drn_con_samp_pts[loc] = temp2
    pickle.dump(drn_con_samp_pts, open(pickle_path, mode='w'))
    return drn_con_samp_pts
|
[
"def import_sample_map_array(self):\n if self.sample_map_array is None:\n if self.sample_map.file_name == \"null\":\n self.sample_map_array = np.ones((self.sample_map.x_size, self.sample_map.y_size))\n else:\n self.sample_map.open()\n if self.uses_spatial_sampling:\n self.sample_map_array = self.sample_map.data\n else:\n self.sample_map_array = np.ma.masked_where(self.sample_map.data >= 0.5, self.sample_map.data).mask\n self.sample_map.data = None",
"def load_samples(self):\n self.samples = {}\n self.samplens = {}\n for note in self.sampdict.keys():\n rate_in, wavobj = wavfile.read(self.sampdict[note])\n # If it doesn't match the required rate, resample and re-write\n if rate_in != self.samprate:\n wavobj = utils.resample(rate_in, self.samprate, wavobj)\n # force to mono\n wavdat = np.mean(wavobj.data, axis=1)\n # remove DC term \n dc = wavdat.mean()\n wavdat -= dc\n wavdat /= abs(wavdat).max()\n samps = range(wavdat.size)\n self.samples[note] = interp1d(samps, wavdat,\n bounds_error=False,\n fill_value = (0.,0.),\n assume_sorted=True)\n self.samplens[note] = wavdat.size",
"def get_dict_of_scenes(self, dir_path):\n label_dir_path = dir_path + '/label'\n label_image_files_path = sorted(glob.glob(label_dir_path + '/*.png'))\n label_image_files_name = {x[len(label_dir_path)+1:-4] for x in label_image_files_path}\n\n pcd_dir_path = dir_path + '/pcd'\n pcd_image_files_path = sorted(glob.glob(pcd_dir_path + '/*.pcd'))\n pcd_image_files_name = {x[len(pcd_dir_path)+1:-4] for x in pcd_image_files_path}\n\n dict_of_np_array_dict = {}\n for cur_file_name in label_image_files_name:\n if cur_file_name in pcd_image_files_name:\n np_array_dict = {}\n np_array_dict['label'] = imageio.imread(label_dir_path+'/'+cur_file_name+'.png')\n\n temp_pcd = o3d.io.read_point_cloud(pcd_dir_path+'/'+cur_file_name+'.pcd', remove_nan_points=False)\n np_array_dict['rgb'] = self.get_rgb_from_pcd(temp_pcd)\n np_array_dict['xyz'] = self.get_xyz_from_pcd(temp_pcd)\n np_array_dict['depth'] = np_array_dict['xyz'][:,:,2]\n\n dict_of_np_array_dict[dir_path+'/'+cur_file_name] = np_array_dict\n return dict_of_np_array_dict",
"def loadData(fname):\r\n (grating, params, lines, meta) = pickle.load(open(fname, \"r\"))\r\n return grating, params, lines, meta",
"def data_dict():\n\n files = glob.glob('*.bpp')\n files.sort()\n\n ssr = [] # stress ratio list\n for f in range(len(files)):\n tmpfile = open(files[f], 'r')\n header = tmpfile.readline(); tmpfile.close()\n sratio = [int(header.split(',')[1]),\n int(header.split(',')[2])]\n #Only unique set of stress ratio is found\n # and put into ssr among '*.bpp' files\n if any(ssr[i]==sratio for i in range(len(ssr))):pass\n else: ssr.append(sratio) \n pass\n print \"total sratio as below\"\n print ssr\n ssr.sort() # [[1,2],[1,4],... ]\n #raw_input()\n #print 'sratio: %i %i'%(sratio[0], sratio[1])\n\n \n #recieve data and sorting based on their stress ratio\n ## dictionary generator\n ## yield stresses are saved\n ## according to the stress ratios\n YSS_dict = {}\n for i in range(len(ssr)):\n YSS_dict['ratio%i'%i]= dict()\n YSS_dict['ratio%i'%i]['sratio'] = ssr[i]\n pass\n\n ## loop over each of '*.bpp' files in the folder\n for f in range(len(files)):\n dat = dict()\n \n raw_data = np.loadtxt(\n files[f], dtype='float',\n skiprows=1).transpose()\n \n tmpfile = open(files[f], 'r')\n header = tmpfile.readline(); tmpfile.close()\n header = header.split(',')[1::]\n sratio = map(int, [header[0],header[1]])\n\n ## add keys to the dictionary\n dat['sratio'] = sratio\n dat['filename'] = files[f]\n dat['mxw'] = max(raw_data[6])\n dat['work'] = raw_data[6].copy()\n \n dat['xe'] = raw_data[0]\n dat['ye'] = raw_data[1] \n dat['xs'] = raw_data[2]\n dat['ys'] = raw_data[3]\n dat['ed'] = raw_data[9]\n\n ## loop over each of the yield stress dictionary\n for i in range(len(YSS_dict)):\n ## if encouters the same stress ratio\n if YSS_dict[\n 'ratio%i'%i]['sratio']==dat['sratio']:\n j = 0\n while True:\n if YSS_dict[\n 'ratio%i'%i].has_key('data_%i'%j):\n ## if yss alread has the data\n j = j + 1\n pass\n else:\n ## or it is first inciden to encouter\n ## the data, add dat dictionary\n ## to YSS_dict dictionary\n YSS_dict[\n 'ratio%i'%i]['data_%i'%j] = dat\n break\n pass\n pass\n pass\n pass\n ## in YSS_dict dictionary, there is a 'ratio' attribute\n ## under which the corresponding data reside.\n return YSS_dict",
"def load(self, file_path, map_location=None,\n check_attributes=['base_signal', 'base_representation',\n 'loss_function'],\n **pickle_load_args):\n tmp_dict = torch.load(file_path, pickle_module=dill, **pickle_load_args)\n for k in check_attributes:\n if not hasattr(self, k):\n raise Exception(\"All values of `check_attributes` should be attributes set at\"\n f\" initialization, but got attr {k}!\")\n if isinstance(getattr(self, k), torch.Tensor):\n # there are two ways this can fail -- the first is if they're\n # the same shape but different values and the second (in the\n # except block) are if they're different shapes.\n try:\n if not torch.allclose(getattr(self, k).to(tmp_dict[k].device), tmp_dict[k]):\n raise Exception(f\"Saved and initialized {k} are different! Initialized: {getattr(self, k)}\"\n f\", Saved: {tmp_dict[k]}, difference: {getattr(self, k) - tmp_dict[k]}\")\n except RuntimeError:\n raise Exception(f\"Attribute {k} have different shapes in saved and initialized versions!\"\n f\" Initialized: {getattr(self, k).shape}, Saved: {tmp_dict[k].shape}\")\n elif k == 'loss_function':\n # it is very difficult to check python callables for equality\n # so, to get around that, we instead call the two loss\n # functions on the same set of representations and images, and\n # compare the outputs. loss functions, as we've defined them,\n # take the base and synthesized representation and image (the\n # image is present in case you want to e.g., penalize pixel\n # values outside some range); we have the base signal and\n # representation and generate random tensors to use as the\n # \"synthesized one\"\n img = torch.rand_like(self.base_signal)\n rep = torch.rand_like(self.base_representation)\n saved_loss = tmp_dict[k](rep, self.base_representation, img,\n self.base_signal)\n init_loss = self.loss_function(rep, self.base_representation,\n img, self.base_signal)\n if not torch.allclose(saved_loss, init_loss):\n raise Exception(\"Saved and initialized loss_function are different! On base and random \"\n f\"representation got: Initialized: {init_loss}\"\n f\", Saved: {saved_loss}, difference: {init_loss-saved_loss}\")\n else:\n if getattr(self, k) != tmp_dict[k]:\n raise Exception(f\"Saved and initialized {k} are different! Self: {getattr(self, k)}\"\n f\", Saved: {tmp_dict[k]}\")\n for k, v in tmp_dict.items():\n setattr(self, k, v)\n self.to(device=map_location)",
"def load_dataset():\n d = {}\n files = glob.glob(os.path.join(paths.data_path, \"elev*\", \"L*.wav\"), recursive=True)\n for f in files:\n az, elev, data = to_coords(f)\n x = d.get(elev, {})\n x[az] = data\n d[elev] = x\n assert len(d) > 2, \"Must have at least 2 elevations\"\n print(f\"Have {len(d)} elevations\")\n return d",
"def samples_processed(self) -> dict[float, float]:\n return self._samples_processed",
"def get_samples():\n samples: dict([str,list]) = dict()\n for genre in os.listdir(samples_directory):\n samples[genre] = []\n for file in os.listdir(f\"{samples_directory}/{genre}\"):\n if file.endswith(sound_file_format):\n samples[genre].append(f\"{samples_directory}/{genre}/{file}\") \n return samples",
"def load_data(self):\n dat = {}\n if os.path.isfile(self.CONFIG_FILE):\n with open(self.CONFIG_FILE) as f:\n loaded = cPickle.load(f)\n dat.update(loaded)\n return dat",
"def initialize_analyzed_features_dict(all_features_dict1, all_features_dict2):\n\n # all_features_dict = pickle.load(open(all_features_dict_path, 'rb'))\n analyzed_features_dict = dict()\n popular_features1 = get_popular_features(all_features_dict1)\n popular_features2 = get_popular_features(all_features_dict2)\n\n for feature in popular_features1:\n analyzed_features_dict[feature] = [0]*4\n for feature in popular_features2:\n if feature not in analyzed_features_dict:\n analyzed_features_dict[feature] = [0]*4\n return analyzed_features_dict",
"def dict_of_arrays_and_scalar_from_h5_serial(filename):\n with h5py.File(filename, 'r') as fid:\n f_dict = {}\n for kk in list(fid.keys()):\n f_dict[kk] = np.array(fid[kk]).copy()\n if f_dict[kk].shape == ():\n f_dict[kk] = f_dict[kk].tolist()\n fid.close()\n return f_dict",
"def _load_data(self, filename):\n\n # Load the pickled data-file.\n data = self._unpickle(filename)\n\n # Get the raw images.\n raw_images = data[b'data']\n\n # Get the class-numbers for each image. Convert to numpy-array.\n cls = np.array(data[b'labels'])\n #pdb.set_trace()\n\n # Convert the images.\n images = self._convert_images(raw_images)\n\n return images, cls",
"def Point_Srcs(clustername,dir_str,num_src):\n DoG_diction={}\n filenames_arr=get_filenames(clustername)\n Point_src_diction={}\n for i in filenames_arr:\n full_name_str=dir_str+i\n array_data=np.loadtxt(full_name_str)\n DoG_diction[i]=array_data\n arr_ps_x,arr_ps_y=findPointSources(array_data,clustername,num_src)\n Point_src_diction[i]=[arr_ps_x,arr_ps_y]\n return DoG_diction,Point_src_diction",
"def _collect_flags(project):\n acc = {}\n for s in project.samples:\n fs = fetch_sample_flags(project, s)\n if fs:\n acc[s.name] = fs\n return acc",
"def generate_picked_dataset():\n\tfor batch_name in os.listdir(ORIGINALS_PATH):\n\t\tif batch_name in os.listdir(settings.PICKED_ORIGINALS_PATH):\n\t\t\tprint(\"{} done already!\".format(batch_name))\n\t\t\tcontinue\n\t\n\t\toriginal_imgs = np.load(ORIGINALS_PATH + batch_name)\n\t\tedgemaps = np.load(EDGEMAPS_PATH + batch_name)\n\n\t\tassert original_imgs.shape[0] == edgemaps.shape[0]\n\n\t\tnum_imgs = original_imgs.shape[0]\n\t\tpicked_indices = np.random.choice(num_imgs, NUM_IMAGES_PICKED, replace=False)\n\t\toriginal_imgs_picked = original_imgs[picked_indices].transpose(0, 2, 3, 1)\n\t\tedgemaps_picked = edgemaps[picked_indices]\n\n\t\tnp.save(settings.PICKED_ORIGINALS_PATH+batch_name, original_imgs_picked)\n\t\tnp.save(settings.PICKED_EDGEMAPS_PATH+batch_name, edgemaps_picked)\n\n\t\tprint(\"{} done!\".format(batch_name))",
"def getSamples(self):\n samples = {}\n for entry in self.values:\n \n key1 = entry.split(':')[0]\n key2 = entry.split(':')[1]\n value = self.values[entry]\n \n # groove entries\n if key1 == 'groove':\n self.__dict__[key2] = value\n \n # sample entries\n else:\n path = entry.split(':')[0]\n var = entry.split(':')[1]\n if not path in samples:\n samples[path] = {}\n samples[path][var] = self.values[entry]\n \n return samples",
"def load_np_pickles_in_directory(path, regex=r'.*.(npy|npc)'):\n result = {}\n for filename in os.listdir(path):\n if re.match(regex, filename):\n # Get rid of file extensions and (keypoints|descriptors) annotations.\n key = filename.split('.')[0].split('_')[0]\n result[key] = np.load(path + filename, allow_pickle=True)\n\n return result",
"def load_data(self):\n # path = self.choose_path()\n # os.chdir(path)\n file_name = self.chosen_data\n path = os.path.join(os.path.abspath('.'), file_name)\n gps = np.loadtxt(path, delimiter=\",\")\n\n # Get three coordinates of GPS and timestamp\n tstamp = gps[:, 0]\n lat = gps[:, 3]\n lng = gps[:, 4]\n alt = gps[:, 5]\n # Store three coordinates into a dictionary\n # Transform radius to angle\n gps_dictionary = {}\n gps_dictionary['tstamp'] = tstamp\n gps_dictionary['lat'] = lat * 180 / np.pi\n gps_dictionary['lng'] = lng * 180 / np.pi\n gps_dictionary['alt'] = alt\n\n return gps_dictionary"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Makes a request to the token endpoint by sending the `post_data` parameters using the 'application/x-www-form-urlencoded' format.
|
def _post_request(self, post_data, extras={}):
    url = reverse('oidc_provider:token')
    request = self.factory.post(
        url,
        data=urlencode(post_data),
        content_type='application/x-www-form-urlencoded',
        **extras)
    response = TokenView.as_view()(request)
    return response
|
[
"def post(self, query_string, data=None):\n if data:\n data[\"__FORM_TOKEN\"] = self._get_form_token()\n return self.session.post(self.base_url + query_string, data=data)",
"def test_create_token_exchange_using_post(self):\n pass",
"def _request_token(self):\n response = self._post_token_request()\n self._parse_server_response(response)",
"def send_post_request(url, data):\n post_data = {\n 'data': data\n }\n return requests.post(url, data=post_data)",
"def smart_post(conn, url, data):\n request = conn.get(url, verify=False)\n data['csrfmiddlewaretoken'] = get_csrf_token(request)\n logging.debug('csrf=' + data['csrfmiddlewaretoken'])\n post_request = conn.post(url, data=data, headers={'referer': url}, verify=False)\n if post_request.status_code == 302:\n raise WebException(\"Login failed\")\n return post_request",
"def make_oauth_request(method_name: str, data: dict):\n url = create_bot_oauth_url(method_name)\n timeout = current_app.config[\"YANDEX_OAUTH_API_TIMEOUT\"]\n id = environ[\"YANDEX_OAUTH_API_APP_ID\"]\n password = environ[\"YANDEX_OAUTH_API_APP_PASSWORD\"]\n\n return request(\n raise_for_status=False,\n content_type=\"json\",\n method=\"POST\",\n url=url,\n data=data,\n timeout=timeout,\n auth=HTTPBasicAuth(id, password),\n allow_redirects=False,\n verify=True\n )",
"def do_post(self,data=None,params={}):\n if data and params:\n raise ValueError('Either data or params can be submitted to be the POST body, but not both.')\n \n post_data = json.dumps(data) if data else params\n \n response = requests.post('%s/%s.json' % (self.service_url,self.descriptor['slug']),\n data=post_data,\n auth=(self.user,self.password))\n \n return self.process_response(response)",
"def _authorise_request_token_with_login(self):\n self._request_obj(\n self._urls[\"validate_with_login\"],\n method=\"POST\",\n json={\n \"username\": self.username,\n \"password\": self.password,\n \"request_token\": self.request_token,\n }\n )",
"def _post_request(self, url, data=None, headers={'Content-type': 'application/json'}, retries=1):\n try:\n r = requests.post(url, data, headers)\n except requests.ConnectionError:\n logger.error('Connection error: {}'.format(url))\n raise NotConnected\n content = json.loads(r.content)\n if content['error_code'] != 0:\n if content['error_code'] == -20651:\n if retries > 0:\n logger.warning('Token expired, getting new token'.format(retries))\n self.initialize_token()\n content = self._post_request('{}?token={}'.format(self.tplink_url, self.token),\n data, headers, retries-1)\n else:\n logger.warning('Token expired, out of retries')\n raise TokenError\n else:\n logger.error('TPLink Error: code {}, msg: {}'.format(content['error_code'], content['msg']))\n if content['error_code'] == -20571:\n raise DeviceNotConnected\n if content['error_code'] == -20601:\n raise LoginError\n if content['error_code'] == -20104:\n raise InvalidRequest\n if content['error_code'] == -20105:\n raise InvalidRequest('One or more parameter has wrong type')\n # handling retries\n if content['error_code'] != 0:\n raise InternalError\n return content",
"def create_restricted_data_token(self, **kwargs) -> ApiResponse:\n \n return self._request(kwargs.pop('path'), data=kwargs)",
"def test_request_post(self):\n r = self.base._request('/post', 'POST', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/post')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')\n self.assertEqual(r['form']['foo'], 'bar')",
"def _post(self, data=None, url_name=None, url_args=None,\r\n url_kwargs=None, get_kwargs=None, url=None, *args, **kwargs):\r\n url = url or self._url(url_name, url_args, url_kwargs, get_kwargs)\r\n data = self.post_data if data is None else data\r\n return self.client.post(path=url, data=data, *args, **kwargs)",
"def _request_token(self):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n # required credentials when request for a token.\n data = {\n 'grant_type': 'password',\n 'client_id': self._config.auth['client_id'],\n 'client_secret': self._config.auth['client_secret'],\n 'username': self._config.auth['username'],\n 'password': '{}{}'.format(\n self._config.auth['password'], self._config.auth['security_token']\n ),\n 'response_type': 'code',\n 'redirect_uri': self._SALESFORCE_TOKEN_URL\n }\n success, response = self._make_post_request(\n self._SALESFORCE_TOKEN_URL, headers, data, False\n )\n\n if not (success and response):\n return False\n\n if not (response.get('access_token') and response.get('instance_url')):\n LOGGER.error('Response invalid generating headers for service \\'%s\\'',\n self._type())\n return False\n\n bearer = 'Bearer {}'.format(response.get('access_token'))\n self._auth_headers = {\n 'Content-Type': 'application/json',\n 'Authorization': bearer\n }\n self._instance_url = response.get('instance_url')\n LOGGER.debug('Successfully obtain OAuth token and instance URL')\n return True",
"def send_post(self, url, data):\n if data:\n payload = {x: str(y).encode(\"utf-8\") for x,y in data.items()}\n else:\n payload = None\n\n r = requests.post(url, data=payload)\n return r.text",
"def make_request(route=None, url_args=None, post_args={}):\n if not os.path.isfile('~/.teleserver/credentials.json'):\n raise Exception('NO CREDENTIALS! Please first log in.')\n with open('~/.teleserver/credentials.json', 'r') as secret_file:\n secrets = json.load(secret_file)\n token = secrets['token']\n server = secrets['server']\n if not route:\n return {'message': 'None route specified', 'rc': 1}\n url = f'https://{server}:8080/{route}'\n if url_args:\n url += '?'\n for argname in url_args:\n url += f'{argname}={url_args[argname]}&'\n else:\n url = url[:-1]\n post_args['token'] = token\n response = requests.post(url=url, data=post_args)\n return response.json()",
"def post_form_data_request():\n url = \"http://httpbin.org/post\"\n payload = {'key1': 'value1', 'key2': 'value2'}\n r = requests.post(url, data=payload)\n print(r.text) # see how data goes into 'form'\n\n \"\"\"\n {\n \"args\": {},\n \"data\": \"\",\n \"files\": {},\n \"form\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n },\n \"headers\": {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Length\": \"23\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Host\": \"httpbin.org\",\n \"User-Agent\": \"python-requests/2.5.3 CPython/2.7.9 Darwin/14.1.0\"\n },\n \"json\": null,\n \"origin\": \"74.71.230.126\",\n \"url\": \"http://httpbin.org/post\"\n }\n \"\"\"\n\n # If you want to send data that is not form-encoded, pass in a string\n payload = 'This is a test'\n r = requests.post(url, data=payload)\n print(r.text) # see how it goes to 'data' instead of 'form'\n\n \"\"\"\n {\n \"args\": {},\n \"data\": \"This is a test\",\n \"files\": {},\n \"form\": {},\n \"headers\": {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Length\": \"14\",\n \"Host\": \"httpbin.org\",\n \"User-Agent\": \"python-requests/2.5.3 CPython/2.7.9 Darwin/14.1.0\"\n },\n \"json\": null,\n \"origin\": \"74.71.230.126\",\n \"url\": \"http://httpbin.org/post\"\n }\n \"\"\"",
"def request_token_url():\n return _BASE_URL_V1 % 'oauth/request_token/'",
"def post_token():\n token_name = request.args.get('token_name')\n token = db.session.query(Tokens).filter(Tokens.token_name == token_name).first()\n if token:\n response = Response(json.dumps({\"Response\": \"Token name already used\"}), status=409,\n mimetype='application/json')\n return response\n else:\n tokens = Tokens(\n token_name=token_name\n )\n tokens.save()\n response = Response(json.dumps({\"Response\": \"Created Token\"}), status=201, mimetype='application/json')\n return response",
"def request_a_ride(self, data, token):\n res = self.client().post(\n '/request',\n data=json.dumps(data),\n content_type='application/json',\n headers=dict(Authorization=\"Bearer \" + token)\n )\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Scope is ignored for token responses to the auth code grant type. This means that the scopes requested in the authorize request are the ones returned.
|
def test_scope_is_ignored_for_auth_code(self):
    SIGKEYS = self._get_keys()
    for code_scope in [['openid'], ['openid', 'email'], ['openid', 'profile']]:
        code = self._create_code(code_scope)
        post_data = self._auth_code_post_data(
            code=code.code, scope=code_scope)
        response = self._post_request(post_data)
        response_dic = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response.status_code, 200)
        id_token = JWS().verify_compact(response_dic['id_token'].encode('utf-8'), SIGKEYS)
        if 'email' in code_scope:
            self.assertIn('email', id_token)
            self.assertIn('email_verified', id_token)
        else:
            self.assertNotIn('email', id_token)
        if 'profile' in code_scope:
            self.assertIn('given_name', id_token)
        else:
            self.assertNotIn('given_name', id_token)
|
[
"def scope(self) -> dict:\n scope = self._auth_token.scope\n if not isinstance(scope, dict):\n raise ValueError(\"Token's scope claim must be of type 'dict'\")\n if \"admin\" not in scope or \"spotify\" not in scope:\n raise ValueError(\"'admin' and 'spotify' must be in token's scope\")\n if not isinstance(scope[\"admin\"], bool) or not isinstance(\n scope[\"spotify\"], bool\n ):\n raise ValueError(\"'admin' and 'spotify' claims in scope must be booleans\")\n return scope",
"def require_scope(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n global _user\n if _user.get('scope') is not None:\n result = f(*args, **kwargs)\n _user = None\n return result\n else:\n if _logger:\n _logger.warning('API call with no scope provided. Endpoint: %s\\tToken: %s' % (request.path,\n _user.get('token')))\n return app.response_class(response=_dumps({\"_status\": \"ERR\", \"_error\": {\"code\": 403, \"message\":\n \"A token scope is required and your token does not have one. If this is not your fault, contact \"\n \"the API developer.\"}}), status=403, mimetype='application/json')\n return wrapped",
"def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n msg = \"Scopes in the request: {}\".format(request.scopes)\n current_app.logger.debug(msg)\n associations = {\n \"scopes\": request.scopes,\n \"redirect_uri\": request.redirect_uri,\n \"client_id\": client_id,\n \"state\": request.state,\n \"user\": request.user,\n }\n\n cache.set(code[\"code\"], associations, timeout=10 * 60)\n\n return",
"def finish_oauth(self, code):\n r = requests.post(\n self._login_uri(\"/oauth/token\"),\n data={\n \"code\": code,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n },\n )\n\n if r.status_code != 200:\n raise ApiError(\n \"OAuth token exchange failed\",\n status=r.status_code,\n json=r.json(),\n )\n\n token = r.json()[\"access_token\"]\n scopes = OAuthScopes.parse(r.json()[\"scopes\"])\n expiry = datetime.now() + timedelta(seconds=r.json()[\"expires_in\"])\n refresh_token = r.json()[\"refresh_token\"]\n\n return token, scopes, expiry, refresh_token",
"def get_scopes(self):\n if not self.authenticated:\n raise ValueError(\"Must authenticate client first.\")\n\n scope = self.client['scope']\n return scope.split()",
"def auth_code(code):\n\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n # Use hardcoded redirect_uri to show auth code in browser\n # Note: redirect_uri is not used in this case, but still required\n flow.redirect_uri = 'urn:ietf:wg:oauth:2.0:oob'\n\n flow.fetch_token(code=code)\n\n return flow.credentials",
"def get_auth_code_params(self):\n return {\n \"client_id\": self.client_id,\n \"response_type\": \"code\",\n \"redirect_uri\": self.redirect_uri,\n \"scope\": \"user-modify-playback-state\",\n \"state\": 123\n }",
"def get_access_information(self, code):\n if self.config.grant_type == 'password':\n data = {'grant_type': 'password',\n 'username': self.config.user,\n 'password': self.config.pswd}\n else:\n data = {'code': code, 'grant_type': 'authorization_code',\n 'redirect_uri': self.redirect_uri}\n retval = self._handle_oauth_request(data)\n return {'access_token': retval['access_token'],\n 'refresh_token': retval.get('refresh_token'),\n 'scope': set(retval['scope'].split(' '))}",
"def get_token_scopes(access_token):\n token = Token.find(access_token)\n if not token:\n logger.debug(\"Access token %r not found\", access_token)\n raise auth_services.NotAuthorizedException(detail=\"Invalid token\")\n logger.debug(\"Found a token: %r\", token)\n\n # only if the token has been issued to a user\n # the user has to be automatically logged in\n logger.debug(\"The token user: %r\", token.user)\n if token.user:\n auth_services.login_user(token.user)\n # store the current client\n g.oauth2client = token.client\n # if the client is a Registry, store it on the current session\n from lifemonitor.api.models import WorkflowRegistry\n registry = WorkflowRegistry.find_by_client_id(token.client.client_id)\n logger.debug(\"Token issued to a WorkflowRegistry: %r\", registry is not None)\n if registry:\n auth_services.login_registry(registry)\n return {\n \"scope\": token.scope\n }",
"def extra_authorize_data(self) -> dict[str, Any]:\n return {\"scope\": \"basic devices_read\"}",
"def authorize(self):\n\t\treturn redirect(self.service.get_authorize_url(\n\t\t\tscope='email',\n\t\t\tresponse_type='code',\n\t\t\tredirect_uri= current_ip+\"callback\"\n\t\t\t# facebook will send the parameter to this page.\n\t\t\t)\n\t\t)",
"def get_oauth_scopes(self, **kwargs):\n\n all_params = ['accept_language']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_oauth_scopes\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/oauth/scopes'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n if 'accept_language' in params:\n header_params['Accept-Language'] = params['accept_language']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OAuthScopeListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n token_scopes = unverified_claims[\"scp\"]\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False",
"def test_post_grant_authorization_code_no_uris(self):\n self._test_post_redirect_uri_grant_combination(\n redirect_uris='',\n grant_type=Application.GRANT_AUTHORIZATION_CODE,\n is_valid=False,\n )",
"def create_authorization_code(self, application, user, scope):\n code = AccessToken(\n app_id=application.id,\n type='authorization_code',\n token=uuid.uuid4().hex[:35],\n date_insert=datetime.datetime.now(),\n id_user=user.id,\n expiration_date=datetime.datetime.now() + datetime.timedelta(weeks=2),\n is_enable=1,\n scopes=scope\n )\n try:\n session.begin()\n session.add(code)\n session.commit()\n except Exception as e:\n session.rollback()\n session.flush()\n return HttpResponse(500).error(ErrorCode.DB_ERROR, e)\n return code",
"def exchange_code_for_token(code):\n url = '{domain}/oauth2/token'.format(domain=cognito_config.domain)\n if cognito_config.client_secret:\n authorization_string = cognito_config.client_id + ':' + cognito_config.client_secret\n authorization = 'Basic ' + base64.b64encode(authorization_string.encode('utf-8')).decode('utf-8')\n headers = {'Content-type': 'application/x-www-form-urlencoded', 'Authorization': authorization}\n else:\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {'grant_type': 'authorization_code',\n 'client_id': cognito_config.client_id,\n 'redirect_uri': cognito_config.redirect_uri,\n 'code': code\n }\n try:\n response = requests.post(url=url, headers=headers, data=data)\n tokens = json.loads(response.text)\n except requests.exceptions.HTTPError as e:\n raise AuthorizationExchangeError(str(e)) from e\n # check token expiry\n published_time = datetime(*eut.parsedate(response.headers['Date'])[:6])\n expiry = tokens.pop('expires_in')\n expiry_time = published_time + timedelta(int(expiry))\n if datetime.now() > expiry_time:\n raise AuthorizationExchangeError(\"Request is expired\")\n # check token type bearer\n token_type = tokens.pop('token_type')\n if token_type != 'Bearer':\n raise AuthorizationExchangeError(\"Invalid token type\")\n return tokens",
"async def fetch_scopes(self):\n\n data = await self.http.fetch_scopes()\n return data",
"def require_oauth(*scopes):\n def wrapper(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n logger.info('Check authorization')\n if request.method == 'GET' or request.method == 'DELETE':\n access_token = request.args['access_token']\n logger.info(access_token)\n else:\n access_token = request.get_json()['access_token_']\n logger.info(access_token)\n if access_token is not None:\n scopes_ = list(scopes)\n token_object = Token.query.join(Client, Token.client_id == Client.client_id)\\\n .add_columns(Token._scopes, Token.access_token, Client.service_name)\\\n .filter(Token.access_token == access_token).first()\n\n if token_object:\n logger.info('Valid token')\n if all(x in token_object._scopes for x in scopes_):\n logger.info('Valid authorization')\n kwargs['app_name'] = token_object.service_name\n return f(*args, **kwargs)\n return abort(403)\n return decorated\n return wrapper",
"def validate_any_scope(request: Request, scopes: SecurityScopes):\n req_scopes = request.auth.scopes\n if not any(scope in req_scopes for scope in scopes.scopes):\n raise HTTPException(status.HTTP_403_FORBIDDEN)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extending the scope in a refresh token request is not allowed. Try to get a refresh token with "profile" in the scope even though the scope originally authorized in the authorization code request was only ['openid', 'email'].
|
def test_refresh_token_invalid_scope(self):
    self.do_refresh_token_check(scope=['openid', 'profile'])
|
[
"def test_refresh_token_narrowed_scope(self):\n self.do_refresh_token_check(scope=['openid'])",
"def refresh_access_information(self, refresh_token):\n if self.config.grant_type == 'password':\n data = {'grant_type': 'password',\n 'username': self.config.user,\n 'password': self.config.pswd}\n else:\n data = {'grant_type': 'refresh_token',\n 'redirect_uri': self.redirect_uri,\n 'refresh_token': refresh_token}\n retval = self._handle_oauth_request(data)\n return {'access_token': retval['access_token'],\n 'refresh_token': refresh_token,\n 'scope': set(retval['scope'].split(' '))}",
"def get_original_scopes(self, refresh_token, request, *args, **kwargs):\r\n log.debug('Obtaining scope of refreshed token.')\r\n tok = self._tokengetter(refresh_token=refresh_token)\r\n return tok.scopes",
"def refresh():\n client_id = \"287290951141-dl34gtgp8tvnanm809utk7if4klj0upg.apps.googleusercontent.com\"\n client_secret = \"V5ihqrK506ISAzYFH7V9SRfR\"\n r = requests.post(\"https://www.googleapis.com/oauth2/v3/token\",\n data = {\"client_id\":client_id, \"client_secret\":client_secret,\n \"refresh_token\":\"1/HCZswI4mR3ibVUirYLtQXlIgRlU2RYEbTP8p1kFIwkFIgOrJDtdun6zK6XiATCKT\",\n \"grant_type\":\"refresh_token\"})\n print(r.text)\n raw_cred = r.text\n json_cred = json.loads(r.text)\n my_dir = os.path.dirname(__file__)\n pickle_file_path = os.path.join(my_dir, 'saved_cred.p')\n pickle.dump(raw_cred, open(pickle_file_path, 'wb'))\n # cred = AccessTokenCredentials(json_cred['access_token'], 'SD-NUC/1.0') # For use with google storage library\n return raw_cred",
"async def refresh_grant(session: ClientSession, token_uri, refresh_token, client_id, client_secret):\n body = {\n 'grant_type': _REFRESH_GRANT_TYPE,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n }\n\n response_data = await _token_endpoint_request(session, token_uri, body)\n\n try:\n access_token = response_data['access_token']\n except KeyError:\n raise exceptions.RefreshError(\n 'No access token in response.', response_data)\n\n refresh_token = response_data.get('refresh_token', refresh_token)\n expiry = _parse_expiry(response_data)\n\n return access_token, refresh_token, expiry, response_data",
"def refresh_access_token():\n client = Client(sm.access_token)\n auth_dict = client.refresh_access_token(\n client_id=sm.client_id,\n client_secret=sm.client_secret,\n refresh_token=sm.refresh_token)\n logger.debug('Auth Dict: %s', auth_dict)\n\n # Save the dict back to Secret Manager\n sm.set_auth_dict(auth_dict)",
"def refresh_token():\n\n enc_token = jwt_helper.get_token_from_cookie(cookies=request.cookies, key='refToken')\n __, jwt_content = jwt_helper.decode(token=enc_token, token_type='refresh')\n\n # check_jti()\n subject = jwt_content['sub']\n refresh_token, access_token = jwt_helper.gen_tokens(subject)\n resp = jwt_helper.make_token_response(access_token, refresh_token)\n return resp",
"def _renew(self, data):\n self.created_at = datetime.utcnow()\n if data is None:\n return\n \n self.access_token = data['access_token']\n self.refresh_token = data.get('refresh_token', '')\n self.expires_in = data['expires_in']\n scopes = self.scopes\n scopes.clear()\n for scope in data['scope'].split():\n try:\n scopes.add(SCOPES[scope])\n except KeyError:\n pass",
"def google_token_request(self, auth_code=None):\n # Build request parameters. Order doesn't seem to matter, hence using dict.\n token_request_data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n }\n if auth_code is None:\n # Use existing refresh token to get new access token.\n token_request_data['refresh_token'] = self.refresh_token\n token_request_data['grant_type'] = 'refresh_token'\n else:\n # Request new access and refresh token.\n token_request_data['code'] = auth_code\n token_request_data['grant_type'] = 'authorization_code'\n # 'urn:ietf:wg:oauth:2.0:oob' signals to the Google Authorization\n # Server that the authorization code should be returned in the\n # title bar of the browser, with the page text prompting the user\n # to copy the code and paste it in the application.\n token_request_data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob'\n token_request_data['access_type'] = 'offline'\n\n # Make token request to Google.\n oauth2_token_request_url = 'https://www.googleapis.com/oauth2/v4/token'\n resp = requests.post(oauth2_token_request_url, data=token_request_data)\n # If request is successful then Google returns values as a JSON array\n values = resp.json()\n self.access_token = values['access_token']\n if auth_code: # Need to save value of new refresh token\n self.refresh_token = values['refresh_token']\n self.token_expiry = dt.datetime.now() + dt.timedelta(seconds=int(values['expires_in']))\n logging.info('Access token expires on %s', self.token_expiry.strftime(\"%Y/%m/%d %H:%M\"))",
"def get_token(request, refresh=False):\n api_url = \"https://ssl.reddit.com/api/v1/access_token\"\n is_expired = request.session.get(\"expires\", 0) < int(unixtime())\n headers = settings.OAUTH_REDDIT_BASE_HEADERS\n client_auth = requests.auth.HTTPBasicAuth(\n settings.OAUTH_REDDIT_CLIENT_ID, settings.OAUTH_REDDIT_CLIENT_SECRET\n )\n\n if is_expired and request.GET.get(\"code\", None):\n # Received an access code to get a new access_token. Use\n # this above anything else.\n post_data = {\n \"grant_type\": \"authorization_code\",\n \"code\": request.GET.get(\"code\"),\n \"redirect_uri\": settings.OAUTH_REDDIT_REDIRECT_URI,\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"refresh_token\"] = t.get(\"refresh_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"Initial access_token acquired.\")\n\n elif (refresh or is_expired) and request.session.get(\"refresh_token\", False):\n\n # The previous access_token is expired, use refresh_token to\n # get a new one.\n post_data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": request.session.get(\"refresh_token\"),\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"New access_token acquired.\")\n else:\n if settings.DEBUG:\n if request.session.get(\"access_token\", False):\n print(\"Re-using cached access_token.\")\n else:\n print(\"No access_token found anywhere!\")\n\n # If there is an access_token now, return it. Or wipe session vals.\n if request.session.get(\"access_token\", False):\n return request.session.get(\"access_token\")\n else:\n request.session[\"access_token\"] = None\n request.session[\"refresh_token\"] = None\n request.session[\"token_type\"] = None\n request.session[\"expires\"] = 0\n request.session[\"scope\"] = None\n return False",
"def get_refresh_token_data(self):\n return {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token\n }",
"def _do_token_refresh(self, request):\n # type: (Mapping[str, str]) -> oic.oic.message.AccessTokenResponse\n token_request = RefreshAccessTokenRequest().from_dict(request)\n try:\n token_request.verify()\n except MessageException as e:\n raise InvalidTokenRequest(str(e), token_request) from e\n\n response = AccessTokenResponse()\n\n access_token, refresh_token = self.authz_state.use_refresh_token(token_request['refresh_token'],\n scope=token_request.get('scope'))\n self._add_access_token_to_response(response, access_token)\n if refresh_token:\n response['refresh_token'] = refresh_token\n\n return response",
"def refresh_token():\n global SESSION_ID\n if SESSION_ID:\n logger.info(\"Session ID is not none, so will not attempt to authenticate.\")\n else:\n logger.info(\"Session ID is none, so will need to authorize.\")\n SESSION_ID = authorize()\n return",
"def save_bearer_token(self, token, request, *args, **kwargs):\n if request.refresh_token:\n # remove used refresh token\n try:\n RefreshToken.objects.get(token=request.refresh_token).revoke()\n except RefreshToken.DoesNotExist:\n assert() # TODO though being here would be very strange, at least log the error\n\n expires = timezone.now() + timedelta(seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)\n if request.grant_type == 'client_credentials':\n request.user = None\n\n # TODO: get user from phone number in request, there should be some\n # secure system to get user from phone number\n data_dict = get_request_body_dict(request)\n phone = str(data_dict['phone'])\n account_object = get_object('account', 'phone', phone)\n user_object = get_object('user', 'id', account_object.user_id)\n\n access_token = AccessToken(\n user=user_object,\n scope=token['scope'],\n expires=expires,\n token=token['access_token'],\n application=request.client)\n access_token.save()\n\n if 'refresh_token' in token:\n refresh_token = RefreshToken(\n user=user_object,\n token=token['refresh_token'],\n application=request.client,\n access_token=access_token\n )\n refresh_token.save()\n\n # TODO check out a more reliable way to communicate expire time to oauthlib\n token['expires_in'] = oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS",
"def finish_oauth(self, code):\n r = requests.post(\n self._login_uri(\"/oauth/token\"),\n data={\n \"code\": code,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n },\n )\n\n if r.status_code != 200:\n raise ApiError(\n \"OAuth token exchange failed\",\n status=r.status_code,\n json=r.json(),\n )\n\n token = r.json()[\"access_token\"]\n scopes = OAuthScopes.parse(r.json()[\"scopes\"])\n expiry = datetime.now() + timedelta(seconds=r.json()[\"expires_in\"])\n refresh_token = r.json()[\"refresh_token\"]\n\n return token, scopes, expiry, refresh_token",
"def refresh_token(self, token_info):\r\n if 'refresh_token' not in token_info:\r\n return self.get_new_token()\r\n refresh_request = {'refresh_token': token_info['refresh_token'],\r\n 'client_id': self.user_id,\r\n 'client_secret': self.key,\r\n 'grant_type': 'refresh_token'}\r\n\r\n new_token = self._token_request(refresh_request)\r\n if 'refresh_token' not in new_token:\r\n new_token['refresh_token'] = token_info['refresh_token']\r\n return new_token",
"def refresh_access_token(user):\n refresh_token = user.refresh_key\n\n payload = {\n \"grant_type\": \"refresh_token\",\n \"client_id\": settings.oauth2_id,\n \"redirect_uri\": settings.oauth2_uri,\n \"client_secret\": settings.oauth2_key,\n \"refresh_token\": refresh_token,\n }\n response = requests.post(settings.BASE_URL + \"login/oauth2/token\", data=payload)\n\n try:\n response.raise_for_status()\n except HTTPError:\n app.logger.exception(\"Failed refresh. Probably bad refresh token.\")\n return {\"access_token\": None, \"expiration_date\": None}\n\n try:\n response_json = response.json()\n except ValueError:\n app.logger.exception(\n \"Unable to load JSON response of refresh. Possibly bad refresh token.\"\n )\n return {\"access_token\": None, \"expiration_date\": None}\n\n if \"access_token\" not in response_json:\n app.logger.warning(\n (\n \"Access token not in json. Bad api key or refresh token.\\n\"\n \"URL: {}\\n\"\n \"Status Code: {}\\n\"\n \"Payload: {}\\n\"\n \"Session: {}\"\n ).format(response.url, response.status_code, payload, session)\n )\n return {\"access_token\": None, \"expiration_date\": None}\n\n api_key = response_json[\"access_token\"]\n app.logger.info(\"New access token created\\n User: {0}\".format(user.user_id))\n\n if \"expires_in\" not in response_json:\n app.logger.warning(\n (\n \"expires_in not in json. Bad api key or refresh token.\\n\"\n \"URL: {}\\n\"\n \"Status Code: {}\\n\"\n \"Payload: {}\\n\"\n \"Session: {}\"\n ).format(response.url, response.status_code, payload, session)\n )\n return {\"access_token\": None, \"expiration_date\": None}\n\n current_time = int(time.time())\n new_expiration_date = current_time + response_json[\"expires_in\"]\n\n try:\n # Update expiration date in db\n user.expires_in = new_expiration_date\n db.session.commit()\n except Exception:\n readable_expires_in = time.strftime(\n \"%a, %d %b %Y %H:%M:%S\", time.localtime(user.expires_in)\n )\n readable_new_expiration = time.strftime(\n \"%a, %d %b %Y %H:%M:%S\", time.localtime(new_expiration_date)\n )\n app.logger.error(\n (\n \"Error in updating user's expiration time in the db:\\n\"\n \"session: {}\\n\"\n \"DB expires_in: {}\\n\"\n \"new_expiration_date: {}\"\n ).format(session, readable_expires_in, readable_new_expiration)\n )\n return {\"access_token\": None, \"expiration_date\": None}\n\n return {\"access_token\": api_key, \"expiration_date\": new_expiration_date}",
"def error_while_refreshing_access_token():\n return \"Error while refreshing access token: 'access_token'\"",
"def access_token(self):\n if not self._access_token:\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n payload = urllib.urlencode({\n 'grant_type': 'refresh_token',\n 'client_id': OAUTH_CLIENT_ID,\n 'refresh_token': self.refresh_token\n })\n\n request = urllib2.Request(OAUTH_URL, headers=headers, data=payload)\n request.get_method = lambda: 'POST'\n\n try:\n response = urllib2.urlopen(request)\n data = json.load(response)\n self._access_token = data['access_token']\n except urllib2.HTTPError:\n # the refresh token has expired or become invalid\n self._refresh_token = None\n self.get_oauth_tokens()\n\n return self._access_token"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Narrowing the scope in a refresh token request is allowed. Try to refresh the token with just "openid" in the scope even though the originally authorized scope in the authorization code request was ['openid', 'email'].
|
def test_refresh_token_narrowed_scope(self):
self.do_refresh_token_check(scope=['openid'])
|
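Using the hypothetical validate_refresh_scope helper sketched earlier, the narrowing case is a plain subset check, so the reduced scope is issued exactly as requested:

# Narrowing: the requested subset of the original grant is returned unchanged.
issued = validate_refresh_scope(['openid'], ['openid', 'email'])
assert issued == {'openid'}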
[
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def get_original_scopes(self, refresh_token, request, *args, **kwargs):\r\n log.debug('Obtaining scope of refreshed token.')\r\n tok = self._tokengetter(refresh_token=refresh_token)\r\n return tok.scopes",
"def test_scope_is_ignored_for_auth_code(self):\n SIGKEYS = self._get_keys()\n for code_scope in [['openid'], ['openid', 'email'], ['openid', 'profile']]:\n code = self._create_code(code_scope)\n\n post_data = self._auth_code_post_data(\n code=code.code, scope=code_scope)\n\n response = self._post_request(post_data)\n response_dic = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(response.status_code, 200)\n\n id_token = JWS().verify_compact(response_dic['id_token'].encode('utf-8'), SIGKEYS)\n\n if 'email' in code_scope:\n self.assertIn('email', id_token)\n self.assertIn('email_verified', id_token)\n else:\n self.assertNotIn('email', id_token)\n\n if 'profile' in code_scope:\n self.assertIn('given_name', id_token)\n else:\n self.assertNotIn('given_name', id_token)",
"def require_scope(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n global _user\n if _user.get('scope') is not None:\n result = f(*args, **kwargs)\n _user = None\n return result\n else:\n if _logger:\n _logger.warning('API call with no scope provided. Endpoint: %s\\tToken: %s' % (request.path,\n _user.get('token')))\n return app.response_class(response=_dumps({\"_status\": \"ERR\", \"_error\": {\"code\": 403, \"message\":\n \"A token scope is required and your token does not have one. If this is not your fault, contact \"\n \"the API developer.\"}}), status=403, mimetype='application/json')\n return wrapped",
"def scope(self) -> dict:\n scope = self._auth_token.scope\n if not isinstance(scope, dict):\n raise ValueError(\"Token's scope claim must be of type 'dict'\")\n if \"admin\" not in scope or \"spotify\" not in scope:\n raise ValueError(\"'admin' and 'spotify' must be in token's scope\")\n if not isinstance(scope[\"admin\"], bool) or not isinstance(\n scope[\"spotify\"], bool\n ):\n raise ValueError(\"'admin' and 'spotify' claims in scope must be booleans\")\n return scope",
"def require_oauth(*scopes):\n def wrapper(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n logger.info('Check authorization')\n if request.method == 'GET' or request.method == 'DELETE':\n access_token = request.args['access_token']\n logger.info(access_token)\n else:\n access_token = request.get_json()['access_token_']\n logger.info(access_token)\n if access_token is not None:\n scopes_ = list(scopes)\n token_object = Token.query.join(Client, Token.client_id == Client.client_id)\\\n .add_columns(Token._scopes, Token.access_token, Client.service_name)\\\n .filter(Token.access_token == access_token).first()\n\n if token_object:\n logger.info('Valid token')\n if all(x in token_object._scopes for x in scopes_):\n logger.info('Valid authorization')\n kwargs['app_name'] = token_object.service_name\n return f(*args, **kwargs)\n return abort(403)\n return decorated\n return wrapper",
"def ensure_globus_authorized(func):\n @functools.wraps(func)\n def do_reauth(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except globus_sdk.AuthAPIError as e:\n # older versions of the globus_sdk use raw_text, which is now deprecated\n response_text = getattr(e, \"text\", \"\") or getattr(e, \"raw_text\", \"\")\n if e.http_status == 400 and \"invalid_grant\" in response_text:\n print(\"Globus login has expired.\")\n get_refresh_token_authorizer(force_reauth=True)\n return func(*args, **kwargs)\n\n return do_reauth",
"def refresh_token():\n global SESSION_ID\n if SESSION_ID:\n logger.info(\"Session ID is not none, so will not attempt to authenticate.\")\n else:\n logger.info(\"Session ID is none, so will need to authorize.\")\n SESSION_ID = authorize()\n return",
"def client_authentication_required(self, request, *args, **kwargs):\r\n if request.grant_type == 'password':\r\n return True\r\n auth_required = ('authorization_code', 'refresh_token')\r\n return 'Authorization' in request.headers and\\\r\n request.grant_type in auth_required",
"def authorize_via_token(self) -> None:\n if not self.config[\"refresh_token\"]:\n log.warning(\"refresh_token_missing\")\n raise MissingRefreshToken\n\n self.validate_refresh_token()",
"def refresh_access_information(self, refresh_token):\n if self.config.grant_type == 'password':\n data = {'grant_type': 'password',\n 'username': self.config.user,\n 'password': self.config.pswd}\n else:\n data = {'grant_type': 'refresh_token',\n 'redirect_uri': self.redirect_uri,\n 'refresh_token': refresh_token}\n retval = self._handle_oauth_request(data)\n return {'access_token': retval['access_token'],\n 'refresh_token': refresh_token,\n 'scope': set(retval['scope'].split(' '))}",
"def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n token_scopes = unverified_claims[\"scp\"]\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False",
"def refresh_token():\n\n enc_token = jwt_helper.get_token_from_cookie(cookies=request.cookies, key='refToken')\n __, jwt_content = jwt_helper.decode(token=enc_token, token_type='refresh')\n\n # check_jti()\n subject = jwt_content['sub']\n refresh_token, access_token = jwt_helper.gen_tokens(subject)\n resp = jwt_helper.make_token_response(access_token, refresh_token)\n return resp",
"def _renew(self, data):\n self.created_at = datetime.utcnow()\n if data is None:\n return\n \n self.access_token = data['access_token']\n self.refresh_token = data.get('refresh_token', '')\n self.expires_in = data['expires_in']\n scopes = self.scopes\n scopes.clear()\n for scope in data['scope'].split():\n try:\n scopes.add(SCOPES[scope])\n except KeyError:\n pass",
"async def refresh_grant(session: ClientSession, token_uri, refresh_token, client_id, client_secret):\n body = {\n 'grant_type': _REFRESH_GRANT_TYPE,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n }\n\n response_data = await _token_endpoint_request(session, token_uri, body)\n\n try:\n access_token = response_data['access_token']\n except KeyError:\n raise exceptions.RefreshError(\n 'No access token in response.', response_data)\n\n refresh_token = response_data.get('refresh_token', refresh_token)\n expiry = _parse_expiry(response_data)\n\n return access_token, refresh_token, expiry, response_data",
"def get_token(request, refresh=False):\n api_url = \"https://ssl.reddit.com/api/v1/access_token\"\n is_expired = request.session.get(\"expires\", 0) < int(unixtime())\n headers = settings.OAUTH_REDDIT_BASE_HEADERS\n client_auth = requests.auth.HTTPBasicAuth(\n settings.OAUTH_REDDIT_CLIENT_ID, settings.OAUTH_REDDIT_CLIENT_SECRET\n )\n\n if is_expired and request.GET.get(\"code\", None):\n # Received an access code to get a new access_token. Use\n # this above anything else.\n post_data = {\n \"grant_type\": \"authorization_code\",\n \"code\": request.GET.get(\"code\"),\n \"redirect_uri\": settings.OAUTH_REDDIT_REDIRECT_URI,\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"refresh_token\"] = t.get(\"refresh_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"Initial access_token acquired.\")\n\n elif (refresh or is_expired) and request.session.get(\"refresh_token\", False):\n\n # The previous access_token is expired, use refresh_token to\n # get a new one.\n post_data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": request.session.get(\"refresh_token\"),\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"New access_token acquired.\")\n else:\n if settings.DEBUG:\n if request.session.get(\"access_token\", False):\n print(\"Re-using cached access_token.\")\n else:\n print(\"No access_token found anywhere!\")\n\n # If there is an access_token now, return it. Or wipe session vals.\n if request.session.get(\"access_token\", False):\n return request.session.get(\"access_token\")\n else:\n request.session[\"access_token\"] = None\n request.session[\"refresh_token\"] = None\n request.session[\"token_type\"] = None\n request.session[\"expires\"] = 0\n request.session[\"scope\"] = None\n return False",
"def google_token_request(self, auth_code=None):\n # Build request parameters. Order doesn't seem to matter, hence using dict.\n token_request_data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n }\n if auth_code is None:\n # Use existing refresh token to get new access token.\n token_request_data['refresh_token'] = self.refresh_token\n token_request_data['grant_type'] = 'refresh_token'\n else:\n # Request new access and refresh token.\n token_request_data['code'] = auth_code\n token_request_data['grant_type'] = 'authorization_code'\n # 'urn:ietf:wg:oauth:2.0:oob' signals to the Google Authorization\n # Server that the authorization code should be returned in the\n # title bar of the browser, with the page text prompting the user\n # to copy the code and paste it in the application.\n token_request_data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob'\n token_request_data['access_type'] = 'offline'\n\n # Make token request to Google.\n oauth2_token_request_url = 'https://www.googleapis.com/oauth2/v4/token'\n resp = requests.post(oauth2_token_request_url, data=token_request_data)\n # If request is successful then Google returns values as a JSON array\n values = resp.json()\n self.access_token = values['access_token']\n if auth_code: # Need to save value of new refresh token\n self.refresh_token = values['refresh_token']\n self.token_expiry = dt.datetime.now() + dt.timedelta(seconds=int(values['expires_in']))\n logging.info('Access token expires on %s', self.token_expiry.strftime(\"%Y/%m/%d %H:%M\"))",
"def test_additional_idtoken_processing_hook_scope_available(self):\n id_token = self._request_id_token_with_scope(\n ['openid', 'email', 'profile', 'dummy'])\n self.assertEqual(\n id_token.get('scope_of_token_passed_to_processing_hook'),\n ['openid', 'email', 'profile', 'dummy'])",
"def jwt_refresh_token_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_refresh_token_in_request()\n return fn(*args, **kwargs)\n return wrapper"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The authorization server supports including the client credentials in the request body using the `client_id` and `client_secret` parameters.
|
def test_client_authentication(self):
code = self._create_code()
# Test a valid request to the token endpoint.
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
self.assertNotIn(
'invalid_client',
response.content.decode('utf-8'),
msg='Client authentication fails using request-body credentials.')
# Now, test with an invalid client_id.
invalid_data = post_data.copy()
invalid_data['client_id'] = self.client.client_id * 2 # Fake id.
# Create another grant code.
code = self._create_code()
invalid_data['code'] = code.code
response = self._post_request(invalid_data)
self.assertIn(
'invalid_client',
response.content.decode('utf-8'),
msg='Client authentication success with an invalid "client_id".')
# Now, test using HTTP Basic Authentication method.
basicauth_data = post_data.copy()
# Create another grant code.
code = self._create_code()
basicauth_data['code'] = code.code
del basicauth_data['client_id']
del basicauth_data['client_secret']
response = self._post_request(basicauth_data, self._password_grant_auth_header())
self.assertNotIn(
'invalid_client',
response.content.decode('utf-8'),
msg='Client authentication fails using HTTP Basic Auth.')
|
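A brief sketch of the two client-authentication styles the test above exercises against a token endpoint, per RFC 6749: credentials in the form-encoded request body versus an HTTP Basic Authorization header. The endpoint URL, client_id, client_secret, code and redirect_uri below are placeholders, not values from the tested project.

import base64
import requests

TOKEN_URL = 'https://provider.example.com/openid/token'   # hypothetical endpoint
CLIENT_ID, CLIENT_SECRET = 'my-client-id', 'my-client-secret'

common = {
    'grant_type': 'authorization_code',
    'code': 'the-authorization-code',
    'redirect_uri': 'https://client.example.com/callback',
}

# 1) client_secret_post: credentials travel in the request body.
body_auth = dict(common, client_id=CLIENT_ID, client_secret=CLIENT_SECRET)
response = requests.post(TOKEN_URL, data=body_auth)

# 2) client_secret_basic: credentials travel in an HTTP Basic Authorization header.
basic = base64.b64encode(f'{CLIENT_ID}:{CLIENT_SECRET}'.encode()).decode()
response = requests.post(TOKEN_URL, data=common,
                         headers={'Authorization': 'Basic ' + basic})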
[
"def _authenticate_client(self, client, secret):\n credentials = str(base64.b64encode(str.join(':', [client, secret]).encode('utf-8')))[2:-1]\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Cache-Control': 'no-cache',\n 'Authorization': 'Basic ' + credentials\n }\n params = {\n 'client_id': client,\n 'grant_type': 'client_credentials'\n }\n uri = self.uri + '/oauth/token'\n\n logging.debug(\"URI=\" + str(uri))\n logging.debug(\"HEADERS=\" + str(headers))\n logging.debug(\"BODY=\" + str(params))\n\n response = requests.post(uri, headers=headers, params=params)\n if response.status_code == 200:\n logging.debug(\"RESPONSE=\" + str(response.json()))\n return response.json()\n else:\n logging.warn(\"Failed to authenticate as %s\" % (client))\n response.raise_for_status()",
"def authenticate_client(client_secret):\n global CONNECTED_CLIENTS\n \n CONNECTED_CLIENTS.append(client_secret)\n return defer.succeed(client_secret)",
"def authenticate_client(self):\n client_params = self.parse_basic_auth_header()\n if client_params:\n # authenticate the client if client authentication is included\n client_id, client_secret = client_params\n client = self.get_and_validate_client(client_id)\n # Client secrets are stored as hash.\n hashed = client.client_secret\n if (\n bcrypt.hashpw(\n client_secret.encode(\"utf-8\"), hashed.encode(\"utf-8\")\n ).decode(\"utf-8\")\n != hashed\n ):\n raise InvalidClientError(uri=self.uri)\n\n return client\n\n # require client authentication for confidential clients or for any\n # client that was issued client credentials (or with other\n # authentication requirements)\n client_id = self.params.get(\"client_id\")\n client = self.get_and_validate_client(client_id)\n if client.check_client_type(\"confidential\") or client.client_secret:\n raise UnauthorizedClientError(uri=self.uri)\n\n return client",
"def do_client_authentication(client_id, client_secret):\n client = globus_sdk.ConfidentialAppAuthClient(\n client_id,\n client_secret,\n )\n token_response = client.oauth2_client_credentials_tokens()\n return (token_response.by_resource_server\n ['transfer.api.globus.org']['access_token']\n )",
"def get_client_credentials(self):\n if self.client_id == None or self.client_secret == None:\n raise Exception(\"You must set client_id and client_secret.\")\n else: \n client_creds = f\"{self.client_id}:{self.client_secret}\"\n client_creds_b64 = base64.b64encode(client_creds.encode())\n return client_creds_b64.decode()",
"def put_oauth_client(self, client_id, body, **kwargs):\n\n all_params = ['client_id', 'body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method put_oauth_client\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'client_id' is set\n if ('client_id' not in params) or (params['client_id'] is None):\n raise ValueError(\"Missing the required parameter `client_id` when calling `put_oauth_client`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `put_oauth_client`\")\n\n\n resource_path = '/api/v2/oauth/clients/{clientId}'.replace('{format}', 'json')\n path_params = {}\n if 'client_id' in params:\n path_params['clientId'] = params['client_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OAuthClient',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):\n pass",
"def client_authentication_required(self, request, *args, **kwargs):\r\n if request.grant_type == 'password':\r\n return True\r\n auth_required = ('authorization_code', 'refresh_token')\r\n return 'Authorization' in request.headers and\\\r\n request.grant_type in auth_required",
"def extra_authorize_data(self) -> dict[str, Any]:\n return {\"client_secret\": self.client_secret}",
"async def create_client_credentials(\n current_user: User = Depends(get_current_active_user),\n):\n\n credentials = ClientCredentialsCreateIn()\n new = (\n await OAuth2Client.create(**credentials.dict(), owner_id=current_user.id)\n ).to_dict()\n new[\"client_secret\"] = credentials.client_secret\n return new",
"def get_oauth_client(self, client_id, **kwargs):\n\n all_params = ['client_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_oauth_client\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'client_id' is set\n if ('client_id' not in params) or (params['client_id'] is None):\n raise ValueError(\"Missing the required parameter `client_id` when calling `get_oauth_client`\")\n\n\n resource_path = '/api/v2/oauth/clients/{clientId}'.replace('{format}', 'json')\n path_params = {}\n if 'client_id' in params:\n path_params['clientId'] = params['client_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OAuthClient',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_client_secret_authorizer(self):\n client = globus_sdk.ConfidentialAppAuthClient(self.CLIENT_ID, self.CLIENT_SECRET)\n token_response = client.oauth2_client_credentials_tokens()\n\n # the useful values that you want at the end of this\n globus_transfer_data = token_response.by_resource_server['transfer.api.globus.org']\n globus_transfer_token = globus_transfer_data['access_token']\n\n return globus_sdk.AccessTokenAuthorizer(globus_transfer_token)",
"def authenticated_client(client, user) -> Client:\n client.force_login(user)\n return client",
"def auth_client(self):\n auth_url = f\"{self.url}/auth\"\n response = requests.post(auth_url, json={\"username\": self.user, \"password\": self.pwd})\n if response.status_code != 200:\n print(\"couldn't authenticate\")\n return None\n auth_token = response.json()[\"access_token\"]\n return auth_token",
"def create_auth_client(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.auth_token)\n return client",
"def _authorise(self):\n redirect_uri = f'http://localhost:8080/callback'\n token_request_data = {\n 'grant_type': 'authorization_code',\n 'code': self._access_token,\n 'redirect_uri': redirect_uri\n }\n\n if self._access_token is None:\n data = {\n 'client_id': self._client_id,\n 'response_type': 'code',\n 'redirect_uri': redirect_uri,\n 'scope': 'user-library-read playlist-read-private playlist-modify-public playlist-modify-private'\n }\n query_string = urlencode(data, doseq=True)\n\n print('No existing authorisation code found, use the link below to authorise this application.')\n print(f'https://accounts.spotify.com/authorize?{query_string}')\n\n auth_server = AuthorisationServer()\n token_request_data['code'] = auth_server.start()\n\n token_request_response = requests.post('https://accounts.spotify.com/api/token',\n data=token_request_data,\n auth=(self._client_id, self._client_secret))\n\n if token_request_response.status_code == 200:\n response_data = token_request_response.json()\n self._db_client.set_value('access_token', response_data['access_token'])\n self._db_client.set_value('refresh_token', response_data['refresh_token'])\n self._access_token = response_data['access_token']",
"def test_post_grant_client_credentials_uris(self):\n self._test_post_redirect_uri_grant_combination(\n redirect_uris='http://example.com',\n grant_type=Application.GRANT_CLIENT_CREDENTIALS,\n is_valid=True,\n )",
"def test_set_client_id(self):\n auth = (\"id000000000000000000\", \"secret99999999999999\")\n self.instance.set_client_id(*auth)\n assert self.session.params[\"client_id\"] == auth[0]\n assert self.session.params[\"client_secret\"] == auth[1]",
"def __init__(self, client_id, client_secret, access_token=None,\n refresh_token=None, expires_at=None, refresh_cb=None,\n redirect_uri=None, **kwargs):\n self.client = FitBarkOauth2Client(\n client_id,\n client_secret,\n access_token=access_token,\n refresh_token=refresh_token,\n expires_at=expires_at,\n refresh_cb=refresh_cb,\n redirect_uri=redirect_uri,\n **kwargs\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If present in the Authentication Request, Authorization Servers MUST include a nonce Claim in the ID Token with the Claim Value being the nonce value sent in the Authentication Request. If the client does not supply a nonce parameter, the nonce Claim SHOULD NOT be included in the `id_token`.
|
def test_access_token_contains_nonce(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertEqual(id_token.get('nonce'), FAKE_NONCE)
# Client does not supply a nonce parameter.
code.nonce = ''
code.save()
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertEqual(id_token.get('nonce'), None)
|
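A minimal sketch of the server-side rule the nonce test checks: echo the nonce from the Authentication Request into the ID Token claims, and omit the claim entirely when the client sent none. The function name is hypothetical.

def build_id_token_claims(base_claims, request_nonce):
    claims = dict(base_claims)
    if request_nonce:
        claims['nonce'] = request_nonce   # echo exactly what the client sent
    return claims

# build_id_token_claims({'sub': '123'}, 'n-0S6_WzA2Mj')  -> includes the 'nonce' claim
# build_id_token_claims({'sub': '123'}, '')              -> no 'nonce' claim at all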
[
"def getNonce(self):\n return self[Header.PARAM_NONCE] if Header.PARAM_NONCE in self else None",
"def getCNonce(self):\n return self.getParameter(AuthenticationHeader.PARAM_CNONCE)",
"def auth_oidc_req(self):\n\n current_time = time.time()\n if self.token_json and self.token_time:\n if current_time - self.token_time < 30:\n print(\"Warning: token was requested less than 30 seconds ago. Will not renew this time.\")\n return\n \n self.token_time = current_time\n token_req_data = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'audience': self.audience\n }\n ret = requests.post(cern_api_url, data=token_req_data, verify=self.cert_verify, proxies=self.proxies)\n if ret.status_code!=200:\n raise Exception(\"Unable to acquire OAuth token: \" + ret.content.decode())\n\n self.token_json = json.loads(ret.content)\n self.token_headers = {'Authorization':'Bearer ' + self.token_json[\"access_token\"], 'content-type':'application/json'}",
"def setNonce(self, nonce):\n self[Header.PARAM_NONCE] = nonce",
"def _validate_id_token_data(token_data):\n aud = token_data.get(\"aud\")\n if not aud or aud != settings.COGNITO_USER_LOGIN_CLIENT_ID:\n raise exceptions.AuthenticationFailed(\"Invalid id token\")",
"def getNonce(self) -> int:\n return self.blockHeader.nonce",
"def test_incompatibleNonce(self):\n credentialFactory = FakeDigestCredentialFactory('md5', 'test realm')\n\n d = credentialFactory.getChallenge(clientAddress)\n\n def _test(challenge):\n badNonceOpaque = credentialFactory.generateOpaque(\n '1234567890',\n clientAddress.host)\n\n self.assertRaises(\n error.LoginFailed,\n credentialFactory.verifyOpaque,\n badNonceOpaque,\n challenge['nonce'],\n clientAddress.host)\n\n self.assertRaises(\n error.LoginFailed,\n credentialFactory.verifyOpaque,\n badNonceOpaque,\n '',\n clientAddress.host)\n return d.addCallback(_test)",
"def setNonce(self, nonce: int):\n self.blockHeader.nonce = nonce",
"def set_nonce(self):\n \n nonce = None\n while True:\n nonce = '{:.10f}'.format(time.time() * 1000).split('.')[0]\n nonce = int(nonce)\n nonce += int(1000 * round(random.uniform(1, 10), 4))\n nonce = str(nonce)\n \n if self.nonce_is_unique(nonce):# and nonce != self.last_nonce or self.last_nonce == None:\n break\n \n self.last_nonce = nonce",
"def generate_nonce():\n return get_random_vector(EBConsts.FRESHNESS_NONCE_LEN)",
"def _generate_attest_nonce(auth_data_raw, client_data_raw):\n\n # Hash the client data string\n client_digest = hashes.Hash(hashes.SHA256())\n client_digest.update(client_data_raw)\n\n # Concatenate with auth data\n concat = auth_data_raw + client_digest.finalize()\n\n # Generate nonce\n nonce_digest = hashes.Hash(hashes.SHA256())\n nonce_digest.update(concat)\n nonce = nonce_digest.finalize()\n\n return nonce",
"def create_id_token(token, user, aud, nonce='', at_hash='', request=None, scope=None):\n if scope is None:\n scope = []\n sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)\n\n expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')\n\n # Convert datetimes into timestamps.\n now = int(time.time())\n iat_time = now\n exp_time = int(now + expires_in)\n user_auth_time = user.last_login or user.date_joined\n auth_time = int(dateformat.format(user_auth_time, 'U'))\n\n dic = {\n 'iss': get_issuer(request=request),\n 'sub': sub,\n 'aud': str(aud),\n 'exp': exp_time,\n 'iat': iat_time,\n 'auth_time': auth_time,\n }\n\n if nonce:\n dic['nonce'] = str(nonce)\n\n if at_hash:\n dic['at_hash'] = at_hash\n\n # Inlude (or not) user standard claims in the id_token.\n if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):\n if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):\n custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)\n claims = custom_claims.create_response_dic()\n else:\n claims = StandardScopeClaims(token).create_response_dic()\n dic.update(claims)\n\n dic = run_processing_hook(\n dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',\n user=user, token=token, request=request)\n\n return dic",
"def nonce_is_unique(self, nonce):\n \n if self.client:\n nonce_record = self.client.db.query(sql_get_unique_nonce % (self.client.exchange, str(nonce)), describe = True)[0]\n \n if nonce_record['count'] == 0:\n self.client.db.query(sql_set_unique_nonce % (self.client.exchange, str(nonce)))\n return True\n \n else:\n return False\n \n else:\n raise Exception(\"Client was not initialized in \" + self.__class__.__name__)",
"def authToken(self, requestId, timestamp, format='request'):\n certificate = self.account['cert'][self.account['mode']]\n data = str(requestId) + timestamp if format == 'request' else timestamp + str(requestId)\n return base64.b64encode(hmac.new(certificate, data, sha256).digest())",
"def generate_nonce() -> str:\n b64_str = base64.b64encode(os.urandom(NONCE_LENGTH))\n return b64_str.decode().rstrip('=')",
"def increment_nonce(self):\r\n self.nonce_int += 1\r\n self.nonce = hexlify(self.nonce_int.to_bytes(4, byteorder='little', signed=False)).decode()",
"def test_noNonce(self):\n\n e = self.assertRaises(error.LoginFailed,\n self.credentialFactory.decode,\n 'realm=\"Test\",username=\"Foo\",opaque=\"bar\"',\n _trivial_GET)\n self.assertEquals(str(e), \"Invalid response, no nonce given.\")",
"def setCNonce(self, cNonce):\n self[Header.PARAM_CNONCE] = cNonce",
"def nonce_api_call(self):\n params = {}\n params['method'] = \"getInfo\"\n params['nonce'] = 1\n params = urllib.urlencode(params)\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Key\": self.api_key,\n \"Sign\": self.getSignature(params)}\n conn = httplib.HTTPSConnection(\"btc-e.com\")\n conn.request(\"POST\", \"/tapi\", params, headers)\n response = conn.getresponse()\n data = json.load(response)\n res = str(data['error'])\n if str.__contains__(res, \"you should send\"):\n newNonce = res.split(\"you should send:\", 1)[1]\n return int(newNonce)\n else:\n exit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If an access_token is included in the token response, the id_token SHOULD contain an at_hash claim.
|
def test_id_token_contains_at_hash(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertTrue(id_token.get('at_hash'))
|
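A sketch of how the at_hash value is derived for an RS256-signed ID Token (so SHA-256 is the matching hash function): base64url-encode the left-most half of the hash of the access token's ASCII bytes. This mirrors the claim the test asserts on, but the helper itself is illustrative.

import base64
import hashlib

def calculate_at_hash(access_token):
    digest = hashlib.sha256(access_token.encode('ascii')).digest()
    left_half = digest[:len(digest) // 2]
    return base64.urlsafe_b64encode(left_half).rstrip(b'=').decode('ascii')

# Whenever an access_token is issued alongside the id_token, the id_token is
# expected to carry this value in its 'at_hash' claim.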
[
"def authed():\n\n return 'access_token' in session",
"def _validate_at_hash(claims, access_token, algorithm):\n if \"at_hash\" not in claims:\n return\n\n if not access_token:\n msg = \"No access_token provided to compare against at_hash claim.\"\n raise JWTClaimsError(msg)\n\n try:\n expected_hash = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm])\n except (TypeError, ValueError):\n msg = \"Unable to calculate at_hash to verify against token claims.\"\n raise JWTClaimsError(msg)\n\n if claims[\"at_hash\"] != expected_hash:\n raise JWTClaimsError(\"at_hash claim does not match access_token.\")",
"def test_access_token_contains_nonce(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('nonce'), FAKE_NONCE)\n\n # Client does not supply a nonce parameter.\n code.nonce = ''\n code.save()\n\n response = self._post_request(post_data)\n response_dic = json.loads(response.content.decode('utf-8'))\n\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('nonce'), None)",
"def test_full_update_access_token(self):\n pass",
"def extra_data(self, user, uid, response, details):\n return response.get('access_token', '')",
"def _add_access_token_to_response(self, response, access_token):\n # type: (oic.message.AccessTokenResponse, se_leg_op.access_token.AccessToken) -> None\n response['access_token'] = access_token.value\n response['token_type'] = access_token.type\n response['expires_in'] = access_token.expires_in",
"def valid(self, token_id):",
"def validate_access_token(self, client_key, token, request):\r\n log.debug('Validate access token %r for %r',\r\n token, client_key)\r\n tok = request.access_token or self._tokengetter(\r\n client_key=client_key,\r\n token=token,\r\n )\r\n if tok:\r\n request.access_token = tok\r\n return True\r\n return False",
"def validate_access_token(self, client_key, token, request):\n log.debug('Validate access token %r for %r',\n token, client_key)\n tok = request.access_token or self._tokengetter(\n client_key=client_key,\n token=token,\n )\n if tok:\n request.access_token = tok\n return True\n return False",
"def _validate_access_token(self, header, payload, **options):\n\n if not header or not payload or not payload.get(self.USER_IDENTITY_HOLDER) or \\\n payload.get('type') != TokenTypeEnum.ACCESS:\n raise InvalidAccessTokenError(_('Provided access token is invalid.'))\n\n generator = payload.get(self.AUTHENTICATOR_HOLDER)\n if generator != self.name:\n raise InvalidTokenAuthenticatorError(_('This access token is generated using '\n 'another authenticator with name [{name}].')\n .format(name=generator))",
"def test_remove_access_token(self):\n pass",
"def has_access_token(self):\n if self.get_access_token():\n return True\n return False",
"def _validate_id_token_data(token_data):\n aud = token_data.get(\"aud\")\n if not aud or aud != settings.COGNITO_USER_LOGIN_CLIENT_ID:\n raise exceptions.AuthenticationFailed(\"Invalid id token\")",
"def get_access_token():\n return _access_token",
"def hash_apitoken(accessToken, appSecret): # pragma: no cover\n tokenraw = accessToken + appSecret\n return hashlib.sha256(tokenraw.encode('utf-8')).hexdigest()",
"def _consume_single_use_token(at):\n # this update operation is expected to be atomic\n was_unused = facade.models.SingleUseAuthToken.objects.filter(\n id=at.id, used=False).update(used=True)\n return was_unused == 1",
"def set_access_token(access_token):\n global _access_token\n _access_token = access_token",
"def test_google_id_token_not_linked(oauth_test_client):\n data = {\"confirm\": \"yes\"}\n oauth_test_client.authorize(data=data)\n tokens = oauth_test_client.token()\n id_token = jwt.decode(\n tokens.id_token, options={\"verify_signature\": False}, algorithms=[\"RS256\"]\n )\n assert id_token[\"context\"][\"user\"].get(\"google\") is None",
"def test_request_another_access_token(self):\r\n request_token = self._obtain_request_token()\r\n self._request_authorization(request_token)\r\n request_token = self._update_token_from_db(request_token)\r\n self._obtain_access_token(request_token)\r\n\r\n parameters = self._make_access_token_parameters(request_token)\r\n response = self.c.get(\"/oauth/access_token/\", parameters)\r\n self.assertEqual(response.status_code, 400)\r\n self.assertEqual(response.content, 'Invalid request token.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We MUST validate the signature of the ID Token according to JWS using the algorithm specified in the alg Header Parameter of the JOSE Header.
|
def test_idtoken_sign_validation(self):
SIGKEYS = self._get_keys()
RSAKEYS = [k for k in SIGKEYS if k.kty == 'RSA']
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
JWS().verify_compact(response_dic['id_token'].encode('utf-8'), RSAKEYS)
|
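A sketch of client-side ID Token signature validation using PyJWT rather than the jwkest-based JWS helper used in the test above; the public key, client_id and allow-list are assumptions. The algorithm is read from the JOSE header, but only accepted if it appears on an explicit allow-list, which honors the alg Header Parameter while rejecting 'none' or downgraded algorithms.

import jwt  # PyJWT

ALLOWED_ALGS = ['RS256']

def verify_id_token(id_token, rsa_public_key, client_id):
    header = jwt.get_unverified_header(id_token)
    alg = header.get('alg')
    if alg not in ALLOWED_ALGS:
        raise ValueError('Unexpected ID Token signing algorithm: %r' % alg)
    # Verifies the signature and the standard exp/aud claims; raises on failure.
    return jwt.decode(id_token, rsa_public_key, algorithms=[alg], audience=client_id)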
[
"def _jwt_sign(self, header, payload, algorithm=jws.ALGORITHMS.RS256):\n secret = crypto.dump_privatekey(crypto.FILETYPE_PEM, self.p12.get_privatekey()).decode(\"utf-8\")\n return jws.sign(payload,\n key=secret,\n headers=header,\n algorithm=algorithm)",
"def check_sign_signature_algorithm(self, source):\n # Additionnal. XML coherence checks\n signed_info = source['Signature']['SignedInfo']\n\n # Signature algorithm\n sig = signed_info['SignatureMethod@Algorithm']\n if self.sig_ns_map[self.dcp.schema] != sig:\n self.error(\n \"Invalid Signature Algorithm, expected {} but got {}\".format(\n self.sig_ns_map[self.dcp.schema], sig))",
"def test_signature():\n with pytest.raises(InvalidJwtJson):\n signature({}, \"\")\n\n jwt_json = {\n HEADER: {\"typ\": \"JWT\", \"alg\": \"none\"},\n PAYLOAD: {\"login\": \"az\"},\n SIGNATURE: \"\",\n }\n new_jwt = signature(jwt_json, \"\")\n assert new_jwt == jwt\n\n jwt_json_test = jwt_to_json(jwt_rsa)\n jwt_json_test = change_alg(jwt_json_test, \"HS256\")\n new_jwt = signature(jwt_json_test, open(path).read())\n assert new_jwt == jwt_hs256\n\n new_jwt_json = jwt_json\n new_jwt_json[HEADER][\"alg\"] = \"unknowAlg\"\n with pytest.raises(UnknownAlg):\n signature(new_jwt_json, \"\")",
"def validate_id_nonce_signature(\n cls,\n *,\n id_nonce: IDNonce,\n ephemeral_public_key: bytes,\n signature: bytes,\n public_key: bytes,\n ) -> None:\n ...",
"def validate_algorithm(self, algorithm):\n # type: (Text) -> None\n if not algorithm.endswith(self.java_name):\n raise InvalidAlgorithmError(\n 'Requested algorithm \"{requested}\" is not compatible with signature \"{actual}\"'.format(\n requested=algorithm, actual=self.java_name\n )\n )",
"def test_verify_jwt_with_none_algorithm(self):\n verifier = self._setup_jwt_auth_verifier(self._public_key_pem)\n private_key_ret = atlassian_jwt_auth.key.StaticPrivateKeyRetriever(\n self._example_key_id, self._private_key_pem.decode())\n jwt_signer = NoneAlgorithmJwtAuthSigner(\n issuer=self._example_issuer,\n private_key_retriever=private_key_ret,\n )\n for algorithm in ['none', 'None', 'nOne', 'nonE', 'NONE']:\n if algorithm != 'none':\n jwt.register_algorithm(\n algorithm, jwt.algorithms.NoneAlgorithm())\n jwt_token = jwt_signer.generate_jwt(\n self._example_aud, alg_header=algorithm)\n if algorithm != 'none':\n jwt.unregister_algorithm(algorithm)\n jwt_headers = jwt.get_unverified_header(jwt_token)\n self.assertEqual(jwt_headers['alg'], algorithm)\n with self.assertRaises(jwt.exceptions.InvalidAlgorithmError):\n verifier.verify_jwt(jwt_token, self._example_aud)",
"def valid(self, token_id):",
"def _verify_crypto_keys(self):\n if self._private_key is not None and self._public_key is not None:\n # both keys provided, let's make sure these keys were generated correctly\n token = jwt.encode({\"some\": \"payload\"}, self._private_key, algorithm=self._algorithm)\n try:\n jwt.decode(token, self._public_key, algorithms=[self._algorithm])\n except jwt.PyJWTError as exc:\n logger.info(\"JWT Signer key verification failed with error: {err}\", err=repr(exc))\n raise InvalidJWTCryptoKeysException(\"private key and public key do not match!\") from exc\n # save jwk\n self._jwk: PyJWK = PyJWK.from_json(self.get_jwk(), algorithm=self._algorithm)\n elif (self._private_key != self._public_key) and (self._private_key is None or self._public_key is None):\n raise ValueError(\"JWT Signer not valid, only one of private key / public key pair was provided!\")\n elif self._private_key is None and self._public_key is None:\n # valid situation, running in dev mode and api security is off\n self._disable()\n else:\n raise ValueError(\"Invalid JWT Signer input!\")",
"def validate_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover",
"def _check_signature(self, request, key):\n supercls = super(SagradaAuthenticationPolicy, self)\n try:\n return supercls._check_signature(request, key)\n except HTTPUnauthorized:\n log_cef(\"Authentication Failed: invalid MAC signature\", 5,\n request.environ, request.registry.settings,\n \"\", signature=AUTH_FAILURE)\n raise",
"def token_should_verify(self, r):\n expect(r).to_be_instance_of(tuple)\n header, claims = r\n expect(header).to_equal({\n u'alg': u'RS256',\n u'typ': u'JWT'\n })\n expect(claims).to_equal({\n u'iss': u'761326798069-r5mljlln1rd4lrbhg75efgigp36m78j5@developer.gserviceaccount.com',\n u'scope': u'https://www.googleapis.com/auth/prediction',\n u'aud': u'https://accounts.google.com/o/oauth2/token',\n u'exp': 1328554385,\n u'iat': 1328550785\n })",
"def _check_signature(self, request, key):\n supercls = super(TokenServerAuthenticationPolicy, self)\n try:\n return supercls._check_signature(request, key)\n except HTTPUnauthorized:\n logger.warn(\"Authentication Failed: invalid hawk signature\")\n raise",
"def test_sign_and_verify(self):\n algos = {'sha1':'', \n 'ripemd160':'',\n 'md5':''}\n\n if m2.OPENSSL_VERSION_NUMBER >= 0x90800F:\n algos['sha224'] = ''\n algos['sha256'] = ''\n algos['sha384'] = '' \n algos['sha512'] = '' \n\n message = \"This is the message string\"\n digest = sha.sha(message).digest()\n rsa = RSA.load_key(self.privkey)\n rsa2 = RSA.load_pub_key(self.pubkey)\n for algo in algos.keys():\n signature = rsa.sign(digest, algo)\n #assert signature == algos[algo], 'mismatched signature with algorithm %s: signature=%s' % (algo, signature)\n verify = rsa2.verify(digest, signature, algo) \n assert verify == 1, 'verification failed with algorithm %s' % algo",
"def check_certif_signature_algorithm(self, cert, index):\n # 10. Signature Algorithm\n signature_algorithm = cert.get_signature_algorithm().decode(\"utf-8\")\n expected = self.certif_sig_algorithm_map[self.dcp.schema]\n\n if signature_algorithm not in expected:\n self.error(\"Invalid Signature Algorithm, expected {} but got {}\"\n .format(expected, signature_algorithm))",
"def test_signature_validity(curve, generator, Msg, Qx, Qy, R, S, expectedVerification):\n pubk = Public_key(generator, ellipticcurve.Point(curve, Qx, Qy))\n verificationRes = pubk.verifies(digest_integer(Msg), Signature(R, S))\n assert verificationRes == expectedVerification, \"Signature verification failed\"",
"def get_jwt_verify_keys(self, jwt, **kwargs):\n\n allow_missing_kid = kwargs.get(\"allow_missing_kid\", False)\n\n _key_type = \"\"\n if jwt.headers.get(\"alg\"):\n _key_type = jws_alg2keytype(jwt.headers[\"alg\"])\n\n _kid = jwt.headers.get(\"kid\", \"\")\n nki = kwargs.get(\"no_kid_issuer\", {})\n\n _payload = jwt.payload()\n\n _iss = _payload.get(\"iss\") or kwargs.get(\"iss\") or \"\"\n\n if not _iss:\n _iss = kwargs.get(\"issuer\")\n\n if _iss:\n # First extend the key jar iff allowed\n if \"jku\" in jwt.headers and _iss:\n if not self.find(jwt.headers[\"jku\"], _iss):\n # This is really questionable\n try:\n if kwargs[\"trusting\"]:\n self.add_url(_iss, jwt.headers[\"jku\"])\n except KeyError:\n pass\n\n keys = self._add_key([], _iss, \"sig\", _key_type, _kid, nki, allow_missing_kid)\n\n if _key_type == \"oct\":\n keys.extend(self.get(key_use=\"sig\", issuer_id=\"\", key_type=_key_type))\n else:\n # No issuer, just use all keys I have\n keys = self.get(key_use=\"sig\", issuer_id=\"\", key_type=_key_type)\n\n # Only want the appropriate keys.\n keys = [k for k in keys if k.appropriate_for(\"verify\")]\n return keys",
"def verify_signature(request_body, signature, hmac_key):\n computed = hmac.new(hmac_key, request_body, hashlib.sha1)\n if not hmac.compare_digest(computed.hexdigest(), signature.encode('ascii', 'ignore')):\n raise SignatureError('Computed signature does not match request signature.')",
"def check_sign_transform_algorithm(self, source):\n signed_info = source['Signature']['SignedInfo']\n # Transform alogrithm\n trans = signed_info['Reference']['Transforms']['Transform@Algorithm']\n if trans != DCP_SETTINGS['xmluri']['enveloped_sig']:\n self.error(\"Invalid transform method\")",
"def verify_self_signed_signature(statement):\n\n payload = unverified_entity_statement(statement)\n keyjar = KeyJar()\n if payload['iss'] not in keyjar:\n keyjar.import_jwks(payload['jwks'], payload['iss'])\n\n _jwt = JWT(key_jar=keyjar)\n _val = _jwt.unpack(statement)\n return _val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test a custom sub generator function configured via the OIDC_IDTOKEN_SUB_GENERATOR setting.
|
def test_custom_sub_generator(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertEqual(id_token.get('sub'), self.user.email)
|
[
"def testConfiguredModuleGeneratorID(self):\n self._testConfiguredPromiseViaAlarm(\"promise_slapos_module_id_generator\")",
"def test_init(self, monkeypatch, setup, sub_generator, p, super_episode_length, expected_sel):\n # setup test scenario\n self._sub_generator = sub_generator\n # call function to test\n test_object = SwitchedReferenceGenerator(sub_generator, p=p, super_episode_length=super_episode_length)\n # verify the expected results\n assert len(test_object._sub_generators) == len(sub_generator), 'unexpected number of sub generators'\n assert test_object._current_episode_length == 0, 'The current episode length is not 0.'\n assert test_object._super_episode_length == expected_sel, 'super episode length is not as expected'\n assert test_object._current_ref_generator in sub_generator\n assert test_object._sub_generators == list(sub_generator), 'Other sub generators than expected'",
"def test_organization_id_get(self):\n pass",
"def test_generate_token_service_account(self):\n pass",
"def test_template_permission_sets_id_get(self):\n pass",
"def test_setIDFunction(self):\n value = object()\n previous = util.setIDFunction(value)\n result = util.setIDFunction(previous)\n self.assertIdentical(value, result)",
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def __get_sublist__(self, sublist=None):\n if sublist is None:\n return self.id_list\n elif type(sublist) == int:\n return numpy.random.permutation(self.id_list)[0:sublist]\n else:\n return sublist",
"def test_custom_global_generator_multiple():\n c = TestClient()\n for num in range(3):\n generator = textwrap.dedent(f\"\"\"\n class MyGenerator{num}:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n def generate(self):\n self._conanfile.output.info(f\"MyGenerator{num}!!\")\n \"\"\")\n save(os.path.join(c.cache.custom_generators_path, f\"mygen{num}.py\"), generator)\n conanfile = textwrap.dedent(\"\"\"\n [requires]\n pkg/0.1\n\n [generators]\n MyGenerator0\n MyGenerator1\n MyGenerator2\n \"\"\")\n c.save({\"pkg/conanfile.py\": GenConanfile(\"pkg\", \"0.1\"),\n \"conanfile.txt\": conanfile})\n c.run(\"create pkg\")\n c.run(\"install .\")\n assert \"conanfile.txt: Generator 'MyGenerator0' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator1' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator2' calling 'generate()'\" in c.out\n assert \"conanfile.txt: MyGenerator0!!\" in c.out\n assert \"conanfile.txt: MyGenerator1!!\" in c.out\n assert \"conanfile.txt: MyGenerator2!!\" in c.out",
"def test_storage_method_generate_id_returns_new_id(self):\n input_list = []\n input_list_2 = ['item_1', 'item_2']\n \n output = self.storage.generate_id(input_list)\n output_2 = self.storage.generate_id(input_list_2)\n\n self.assertEqual(1, output)\n self.assertEqual(3, output_2)",
"def set_generate_ID(self, value):\n self.__generate_new_IDS = value",
"def test_get_teams_id(self):\n pass",
"def test_gen_url_6():\n for _ in range(10):\n subdomain = gen_alphanumeric()\n result = gen_url(subdomain=subdomain)\n assert result\n\n # Breakdown the generated URL\n scheme_breakdown = result.split('//')\n domain = scheme_breakdown[1].split('.')\n assert domain[0] == subdomain",
"def test_mousegenes_id_get(self):\n pass",
"def test_team_builder_config_product_groups_id_get(self):\n pass",
"def test_return_special_token_ids(self):\n msg = 'Must return special token ids.'\n examples = (\n (\n ['[bos]', '[eos]', '[pad]', '[unk]'],\n [0, 1, 2, 3],\n ),\n (\n ['[bos]'],\n [0],\n ),\n (\n ['[eos]'],\n [1],\n ),\n (\n ['[pad]'],\n [2],\n ),\n (\n ['[unk]'],\n [3],\n ),\n )\n\n for tokens, ans_token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertEqual(\n tokenizer.convert_tokens_to_ids(tokens=tokens),\n ans_token_ids,\n msg=msg\n )",
"def test_administrativeregions_id_get(self):\n pass",
"def test_get_groups_id(self):\n pass",
"def test_musicals_id_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test custom function for setting OIDC_IDTOKEN_PROCESSING_HOOK.
|
def test_additional_idtoken_processing_hook(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)
self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)
|
[
"def test_additional_idtoken_processing_hook_one_element_in_list(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def test_additional_idtoken_processing_hook_kwargs(self):\n id_token = self._request_id_token_with_scope(['openid', 'profile'])\n kwargs_passed = id_token.get('kwargs_passed_to_processing_hook')\n assert kwargs_passed\n self.assertTrue(kwargs_passed.get('token').startswith(\n '<Token: Some Client -'))\n self.assertEqual(kwargs_passed.get('request'),\n \"<WSGIRequest: POST '/openid/token'>\")\n self.assertEqual(set(kwargs_passed.keys()), {'token', 'request'})",
"def test_additional_idtoken_processing_hook_scope_available(self):\n id_token = self._request_id_token_with_scope(\n ['openid', 'email', 'profile', 'dummy'])\n self.assertEqual(\n id_token.get('scope_of_token_passed_to_processing_hook'),\n ['openid', 'email', 'profile', 'dummy'])",
"def test_processor_callback(self):\n dummy_processor = lambda x: x\n t = Tag(\"mudmud\", processor=dummy_processor)\n self.assertEqual(t.processor, dummy_processor)",
"def valid(self, token_id):",
"def test_impersonate_token(self):\n pass",
"def test_setIDFunction(self):\n value = object()\n previous = util.setIDFunction(value)\n result = util.setIDFunction(previous)\n self.assertIdentical(value, result)",
"def test_can_set_callback(self):\n test_object = CrossValidation(['fake'], ['fake'], 1, 1)\n\n fake_callback_object = mock.MagicMock()\n\n test_object.register_callback(POST_ITERATION_CALLBACK,\n fake_callback_object)\n post_iteration_callbacks = test_object.callbacks[POST_ITERATION_CALLBACK]\n self.assertTrue(post_iteration_callbacks)\n self.assertTrue(fake_callback_object in post_iteration_callbacks)",
"def test_gate(self, token_context):\n _, user, token = token_context\n session[\"user_id\"] = user.id\n\n @gate()\n def fn():\n return \"ok\"\n\n assert fn() == \"ok\"\n assert g.current_user == user\n assert g.auth_token == token",
"def test_process_request_adds_id(self):\n rim = RequestIdMiddleware()\n request = mock.MagicMock()\n\n rim.process_request(request)\n\n self.assertIsInstance(request.id, uuid.UUID)",
"def test_return_special_token_ids(self):\n msg = 'Must return special token ids.'\n examples = (\n (\n ['[bos]', '[eos]', '[pad]', '[unk]'],\n [0, 1, 2, 3],\n ),\n (\n ['[bos]'],\n [0],\n ),\n (\n ['[eos]'],\n [1],\n ),\n (\n ['[pad]'],\n [2],\n ),\n (\n ['[unk]'],\n [3],\n ),\n )\n\n for tokens, ans_token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertEqual(\n tokenizer.convert_tokens_to_ids(tokens=tokens),\n ans_token_ids,\n msg=msg\n )",
"def dispatch_hook(key, hooks, hook_data, **kwargs):\n ...",
"def test_post_review_feature_flag_change_request(self):\n pass",
"def setup_mock(self):\n idinfo_mock = {'name': 'Temp Temp', 'email': 'tempt3699@gmail.com', }\n utils.verify_id_token = MagicMock(return_value=idinfo_mock)",
"def test_post_apply_feature_flag_change_request(self):\n pass",
"def test_post_feature_flag_change_request(self):\n pass",
"def test_that_token_to_id_is_correct(self):\n token_to_id = CodeClassifier.map_tokens_to_ids(self.data, 0)\n expected_tokens = [\n 'and', 'UNK', '%', 'for', ')', '(', '+', 'V', 'else', '==', '0',\n '3', '5', '1000', 'in', 'print', ':', '=', 'or', '+=', 'if']\n self.assertListEqual(token_to_id.keys(), expected_tokens)",
"def myhook(self, args):\n print(\"myhook spec\")",
"def make_session_run_hook():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test custom function for setting OIDC_IDTOKEN_PROCESSING_HOOK.
|
def test_additional_idtoken_processing_hook_one_element_in_list(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()
self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)
self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)
|
[
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def test_additional_idtoken_processing_hook_kwargs(self):\n id_token = self._request_id_token_with_scope(['openid', 'profile'])\n kwargs_passed = id_token.get('kwargs_passed_to_processing_hook')\n assert kwargs_passed\n self.assertTrue(kwargs_passed.get('token').startswith(\n '<Token: Some Client -'))\n self.assertEqual(kwargs_passed.get('request'),\n \"<WSGIRequest: POST '/openid/token'>\")\n self.assertEqual(set(kwargs_passed.keys()), {'token', 'request'})",
"def test_additional_idtoken_processing_hook_scope_available(self):\n id_token = self._request_id_token_with_scope(\n ['openid', 'email', 'profile', 'dummy'])\n self.assertEqual(\n id_token.get('scope_of_token_passed_to_processing_hook'),\n ['openid', 'email', 'profile', 'dummy'])",
"def test_processor_callback(self):\n dummy_processor = lambda x: x\n t = Tag(\"mudmud\", processor=dummy_processor)\n self.assertEqual(t.processor, dummy_processor)",
"def valid(self, token_id):",
"def test_impersonate_token(self):\n pass",
"def test_setIDFunction(self):\n value = object()\n previous = util.setIDFunction(value)\n result = util.setIDFunction(previous)\n self.assertIdentical(value, result)",
"def test_can_set_callback(self):\n test_object = CrossValidation(['fake'], ['fake'], 1, 1)\n\n fake_callback_object = mock.MagicMock()\n\n test_object.register_callback(POST_ITERATION_CALLBACK,\n fake_callback_object)\n post_iteration_callbacks = test_object.callbacks[POST_ITERATION_CALLBACK]\n self.assertTrue(post_iteration_callbacks)\n self.assertTrue(fake_callback_object in post_iteration_callbacks)",
"def test_gate(self, token_context):\n _, user, token = token_context\n session[\"user_id\"] = user.id\n\n @gate()\n def fn():\n return \"ok\"\n\n assert fn() == \"ok\"\n assert g.current_user == user\n assert g.auth_token == token",
"def test_process_request_adds_id(self):\n rim = RequestIdMiddleware()\n request = mock.MagicMock()\n\n rim.process_request(request)\n\n self.assertIsInstance(request.id, uuid.UUID)",
"def test_return_special_token_ids(self):\n msg = 'Must return special token ids.'\n examples = (\n (\n ['[bos]', '[eos]', '[pad]', '[unk]'],\n [0, 1, 2, 3],\n ),\n (\n ['[bos]'],\n [0],\n ),\n (\n ['[eos]'],\n [1],\n ),\n (\n ['[pad]'],\n [2],\n ),\n (\n ['[unk]'],\n [3],\n ),\n )\n\n for tokens, ans_token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertEqual(\n tokenizer.convert_tokens_to_ids(tokens=tokens),\n ans_token_ids,\n msg=msg\n )",
"def dispatch_hook(key, hooks, hook_data, **kwargs):\n ...",
"def test_post_review_feature_flag_change_request(self):\n pass",
"def setup_mock(self):\n idinfo_mock = {'name': 'Temp Temp', 'email': 'tempt3699@gmail.com', }\n utils.verify_id_token = MagicMock(return_value=idinfo_mock)",
"def test_post_apply_feature_flag_change_request(self):\n pass",
"def test_post_feature_flag_change_request(self):\n pass",
"def test_that_token_to_id_is_correct(self):\n token_to_id = CodeClassifier.map_tokens_to_ids(self.data, 0)\n expected_tokens = [\n 'and', 'UNK', '%', 'for', ')', '(', '+', 'V', 'else', '==', '0',\n '3', '5', '1000', 'in', 'print', ':', '=', 'or', '+=', 'if']\n self.assertListEqual(token_to_id.keys(), expected_tokens)",
"def myhook(self, args):\n print(\"myhook spec\")",
"def make_session_run_hook():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the scope is available in OIDC_IDTOKEN_PROCESSING_HOOK.
|
def test_additional_idtoken_processing_hook_scope_available(self):
id_token = self._request_id_token_with_scope(
['openid', 'email', 'profile', 'dummy'])
self.assertEqual(
id_token.get('scope_of_token_passed_to_processing_hook'),
['openid', 'email', 'profile', 'dummy'])
|
[
"def test_scope_is_ignored_for_auth_code(self):\n SIGKEYS = self._get_keys()\n for code_scope in [['openid'], ['openid', 'email'], ['openid', 'profile']]:\n code = self._create_code(code_scope)\n\n post_data = self._auth_code_post_data(\n code=code.code, scope=code_scope)\n\n response = self._post_request(post_data)\n response_dic = json.loads(response.content.decode('utf-8'))\n\n self.assertEqual(response.status_code, 200)\n\n id_token = JWS().verify_compact(response_dic['id_token'].encode('utf-8'), SIGKEYS)\n\n if 'email' in code_scope:\n self.assertIn('email', id_token)\n self.assertIn('email_verified', id_token)\n else:\n self.assertNotIn('email', id_token)\n\n if 'profile' in code_scope:\n self.assertIn('given_name', id_token)\n else:\n self.assertNotIn('given_name', id_token)",
"def test_refresh_token_narrowed_scope(self):\n self.do_refresh_token_check(scope=['openid'])",
"def _on_initializing(**kwargs):\n from djblets.webapi.oauth2_scopes import enable_web_api_scopes\n\n enable_web_api_scopes()",
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def test_additional_idtoken_processing_hook_kwargs(self):\n id_token = self._request_id_token_with_scope(['openid', 'profile'])\n kwargs_passed = id_token.get('kwargs_passed_to_processing_hook')\n assert kwargs_passed\n self.assertTrue(kwargs_passed.get('token').startswith(\n '<Token: Some Client -'))\n self.assertEqual(kwargs_passed.get('request'),\n \"<WSGIRequest: POST '/openid/token'>\")\n self.assertEqual(set(kwargs_passed.keys()), {'token', 'request'})",
"def valid(self, token_id):",
"def require_scope(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n global _user\n if _user.get('scope') is not None:\n result = f(*args, **kwargs)\n _user = None\n return result\n else:\n if _logger:\n _logger.warning('API call with no scope provided. Endpoint: %s\\tToken: %s' % (request.path,\n _user.get('token')))\n return app.response_class(response=_dumps({\"_status\": \"ERR\", \"_error\": {\"code\": 403, \"message\":\n \"A token scope is required and your token does not have one. If this is not your fault, contact \"\n \"the API developer.\"}}), status=403, mimetype='application/json')\n return wrapped",
"def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n token_scopes = unverified_claims[\"scp\"]\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False",
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def __get_token_scope(self):\n print(self.df_scopes['scope'])\n while True:\n try:\n user_input = int(input('What is your token\\'s scope? >> '))\n except ValueError:\n print('Please enter an int. Try again.')\n continue\n if user_input in self.df_scopes.index:\n break\n self.scope = self.df_scopes['scope'][user_input]",
"def check_oidc_token_status(self):\n self.setup_client(auth_setup=True)\n\n oidc_util = OIDCAuthenticationUtils()\n if self.oidc_token:\n token = self.oidc_token\n else:\n status, token = oidc_util.load_token(self.oidc_token_file)\n if not status:\n logging.error(\"Token %s cannot be loaded: status: %s, error: %s\" % (self.oidc_token_file, status, token))\n return\n token = token['id_token']\n\n status, token_info = oidc_util.get_token_info(token)\n if status:\n if self.oidc_token:\n logging.info(\"ID token: %s\" % self.oidc_token)\n else:\n logging.info(\"Token path: %s\" % self.oidc_token_file)\n for k in token_info:\n logging.info(\"Token %s: %s\" % (k, token_info[k]))\n else:\n logging.error(\"Failed to parse token information: %s\" % str(token_info))",
"async def test_multiple_scopes():\n\n with pytest.raises(ValueError):\n await AzurePowerShellCredential().get_token(\"one scope\", \"and another\")",
"def test_google_id_token_not_linked(oauth_test_client):\n data = {\"confirm\": \"yes\"}\n oauth_test_client.authorize(data=data)\n tokens = oauth_test_client.token()\n id_token = jwt.decode(\n tokens.id_token, options={\"verify_signature\": False}, algorithms=[\"RS256\"]\n )\n assert id_token[\"context\"][\"user\"].get(\"google\") is None",
"def _CheckGoogScopeCalls(self, start_token):\n\n def IsScopeToken(token):\n return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and\n token.string == 'goog.scope')\n\n # Find all the goog.scope tokens in the file\n scope_tokens = [t for t in start_token if IsScopeToken(t)]\n\n for token in scope_tokens:\n scope_context = token.metadata.context\n\n if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and\n scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):\n self._MaybeReportError(\n error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,\n 'goog.scope call not in global scope', token))\n\n # There should be only one goog.scope reference. Register errors for\n # every instance after the first.\n for token in scope_tokens[1:]:\n self._MaybeReportError(\n error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,\n 'More than one goog.scope call in file.', token))",
"def test_context(self):\n\n testflow.step(\"Listing context of authz/authn\")\n for extension in ['internal-authz', 'internal-authn']:\n rc, out = self.info_cli.run('context', extension_name=extension)\n logger.info('Extension context : %s', out)\n\n assert rc, 'Failed to run info context'\n assert extension in out, (\n 'Extension \"%s\" was not found in context' % extension\n )",
"def valid_in_scope(scoped, scope, exact=True):\n return in_ucs(scoped) or has_scope(scoped, scope, exact)",
"def test_impersonate_token(self):\n pass",
"def testAddProductScope(self):\n add_product_scope.main(client, self.__class__.campaign_id)",
"def test_context_in_local_store(self):\n tenant = '012345'\n self.state.request.headers['X-Project-ID'] = tenant\n\n self.hook.before(self.state)\n self.assertIsNotNone(context.get_current())\n self.assertIsInstance(\n context.get_current(), hooks.context.PoppyRequestContext\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test correct kwargs are passed to OIDC_IDTOKEN_PROCESSING_HOOK.
|
def test_additional_idtoken_processing_hook_kwargs(self):
id_token = self._request_id_token_with_scope(['openid', 'profile'])
kwargs_passed = id_token.get('kwargs_passed_to_processing_hook')
assert kwargs_passed
self.assertTrue(kwargs_passed.get('token').startswith(
'<Token: Some Client -'))
self.assertEqual(kwargs_passed.get('request'),
"<WSGIRequest: POST '/openid/token'>")
self.assertEqual(set(kwargs_passed.keys()), {'token', 'request'})
|
[
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def test_additional_idtoken_processing_hook_scope_available(self):\n id_token = self._request_id_token_with_scope(\n ['openid', 'email', 'profile', 'dummy'])\n self.assertEqual(\n id_token.get('scope_of_token_passed_to_processing_hook'),\n ['openid', 'email', 'profile', 'dummy'])",
"def test_additional_idtoken_processing_hook_one_element_in_list(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(response_dic['id_token'].encode('utf-8')).payload()\n\n self.assertEqual(id_token.get('test_idtoken_processing_hook'), FAKE_RANDOM_STRING)\n self.assertEqual(id_token.get('test_idtoken_processing_hook_user_email'), self.user.email)",
"def valid(self, token_id):",
"def _validate_id_token_data(token_data):\n aud = token_data.get(\"aud\")\n if not aud or aud != settings.COGNITO_USER_LOGIN_CLIENT_ID:\n raise exceptions.AuthenticationFailed(\"Invalid id token\")",
"def test_sensitive_function_keyword_arguments(self):\n with self.settings(DEBUG=True):\n self.verify_unsafe_response(sensitive_kwargs_function_caller)\n self.verify_unsafe_email(sensitive_kwargs_function_caller)\n\n with self.settings(DEBUG=False):\n self.verify_safe_response(\n sensitive_kwargs_function_caller, check_for_POST_params=False\n )\n self.verify_safe_email(\n sensitive_kwargs_function_caller, check_for_POST_params=False\n )",
"def test_decorated_logic_func_passes_arg_to_func(self):\n response = self.client.get('/test/', query_string={'annotation_id': 1})\n expected = {\n 'annotation_id': 1,\n 'name': None,\n 'pos_arg': 55,\n }\n self.assertEqual(expected, response.json)",
"def test_should_handlers_contains_kwargs(self):\n self.assertEqual(self.robot.handlers[0]['kwargs'],\n {'regex': '^goodbye', 'room': '^@random'})",
"def ikHandleCtx(*args, **kwargs):\n\n pass",
"def test_KeywordArgsAreIncluded(self):\n def function_with_kwargs(argument1='test1', argument2='test2'):\n return\n #---\n self.local_register.RPCFunction(function_with_kwargs)\n\n defined_args = self.server_stub.definitions[self.module]['function_with_kwargs']['args']['defined']['kw']\n assert defined_args == {'argument1': 'test1', 'argument2': 'test2'}",
"def checkExpParam(expId):",
"def test_unknown_kwargs(self):\n bad_kwargs = {\"bad_arg\": \"bad_val\"}\n with self.assertRaises(akismet.UnknownArgumentError):\n self._mock_request(\n \"comment_check\", akismet.Akismet.COMMENT_CHECK_URL, \"false\", bad_kwargs\n )",
"def test_return_special_token_ids(self):\n msg = 'Must return special token ids.'\n examples = (\n (\n ['[bos]', '[eos]', '[pad]', '[unk]'],\n [0, 1, 2, 3],\n ),\n (\n ['[bos]'],\n [0],\n ),\n (\n ['[eos]'],\n [1],\n ),\n (\n ['[pad]'],\n [2],\n ),\n (\n ['[unk]'],\n [3],\n ),\n )\n\n for tokens, ans_token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertEqual(\n tokenizer.convert_tokens_to_ids(tokens=tokens),\n ans_token_ids,\n msg=msg\n )",
"def test_URL_kwargs(self):\n self.request_method_test('matchdict')",
"def test_iomanager_kwargs_collected(self):\n iomanager_kwargs = dict(\n required=object(),\n optional=object(),\n unlimited=object(),\n returns=object(),\n )\n view_kwargs = dict(\n predicate=object()\n )\n decorator_kwargs = iomanager_kwargs.copy()\n decorator_kwargs.update(view_kwargs)\n \n @api_view(**decorator_kwargs)\n def view_callable():\n pass\n \n assert view_callable.view_kwargs == view_kwargs",
"def test_sensitive_function_arguments(self):\n with self.settings(DEBUG=True):\n self.verify_unsafe_response(sensitive_args_function_caller)\n self.verify_unsafe_email(sensitive_args_function_caller)\n\n with self.settings(DEBUG=False):\n self.verify_safe_response(\n sensitive_args_function_caller, check_for_POST_params=False\n )\n self.verify_safe_email(\n sensitive_args_function_caller, check_for_POST_params=False\n )",
"def kwargsfunc(**kwargs):",
"def _handle_args(self, *args):\n pass",
"def step_check_args_chain(context: dict) -> None:\n calls = context.mocked_function.call_args_list\n last_call = calls[-1]\n\n args = last_call[0]\n kwargs = last_call[1]\n\n context.expected_kwargs.update({\"context\": kwargs[\"context\"]})\n\n assert args == context.expected_args\n assert kwargs == context.expected_kwargs\n assert kwargs[\"context\"].get_state() == context.initial_state.get_state()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test Proof Key for Code Exchange by OAuth Public Clients.
|
def test_pkce_parameters(self):
code = create_code(user=self.user, client=self.client,
scope=['openid', 'email'], nonce=FAKE_NONCE, is_authentication=True,
code_challenge=FAKE_CODE_CHALLENGE, code_challenge_method='S256')
code.save()
post_data = self._auth_code_post_data(code=code.code)
# Add parameters.
post_data['code_verifier'] = FAKE_CODE_VERIFIER
response = self._post_request(post_data)
json.loads(response.content.decode('utf-8'))
|
[
"def test_init_key():\n key = 'test_key'\n\n client = GiftbitClient(api_key=key)\n\n assert client.api_key == key",
"def test_retrieve_iceberg_license_key_contents(self):\n pass",
"def test_approve_service_key(self):\n pass",
"def test_api_key(self):\n api_key = 'testapikey'\n zc = Zencoder(api_key=api_key)\n self.assertEquals(zc.api_key, api_key)",
"def test_get_service_key(self):\n pass",
"def test_013(self):\n\n HEADING()\n result = run(\"cm key get testkey\")\n print (result)\n assert \"OK.\" in result",
"def test_fetch_public_keys(mocked_requests_get):\n assert fetch_public_keys(APP) is not None",
"def test_azure_service_api_keypair_get(self):\n pass",
"def test_key(self, client, loggin_user): # noqa: F811\n user, token, session = loggin_user\n\n response = client.get(\"/auth/key\", **payload(token=token))\n expect_success(response, {\"key\": session.get(\"les_key\")}, code=200)",
"def test_user_can_create_api_key(self):\n self.client.login(username=\"alice@example.org\", password=\"password\")\n api_key = self.alice.profile.api_key\n self.assertEqual(api_key, 'abc') # Assert that api key created\n\n form = {\"revoke_api_key\": \"\"}\n # Try and revoke the api key\n self.client.post(\"/accounts/profile/\", form)\n self.alice.profile.refresh_from_db()\n api_key = self.alice.profile.api_key # Should return None\n self.assertEqual(\"\", api_key)\n\n #// CREATE AN API KEY AFTER REVOKING IT\n\n form = {\"create_api_key\": \"\"}\n self.client.post(\"/accounts/profile/\", form)\n self.alice.profile.refresh_from_db()\n\n api_key = self.alice.profile.api_key # should return a new api key\n assert api_key",
"def test_access_key_secret_plaintext(self):\n api_key = ApiKey.generate(self.session_researcher)\n \n secret_key = api_key.access_key_secret_plaintext\n self.assertTrue(secret_key)\n self.assertIsNone(api_key.access_key_secret_plaintext)",
"def test_361_private_party(app, order):\n app.refresh_page()\n app.booking.select_event(order)\n app.booking.fill_out_customer_info(order)\n app.booking.select_payment_method(order)\n app.booking.verify_payment_table(order)\n app.booking.submit_successful_booking()\n app.calendar.select_event(order)\n app.calendar.verify_event_manifest(order)\n app.calendar.verify_event_status(status=\"Pending\")",
"def _authorization_code_pkce(self, known, client):\n if self._client_secret is not None:\n logger.warning(\"Ignoring 'client_secret', preferring PKCE.\")\n\n logger.info(\"Getting authorization code using PKCE\")\n\n verifier = self._code_verifier()\n\n challenge = self._code_challenge(verifier)\n\n auth_response = self._get_authorization_response(\n client,\n known[\"authorization_endpoint\"],\n code_challenge=challenge,\n code_challenge_method=\"S256\",\n )\n\n logger.info(\"Got Authorization response\")\n\n return auth_response, verifier",
"def test_generate(self):\n current_count = ApiKey.objects.count()\n api_key = ApiKey.generate(self.session_researcher)\n \n self.assertEqual(ApiKey.objects.count(), current_count + 1)\n self.assertTrue(api_key.access_key_id)\n self.assertTrue(api_key.access_key_secret)\n self.assertEqual(api_key.researcher, self.session_researcher)\n \n # Check that the secret key is accessible\n secret_key = api_key.access_key_secret_plaintext\n self.assertTrue(secret_key)\n \n # Check that the secret key is valid\n self.assertIs(api_key.proposed_secret_key_is_valid(secret_key), True)",
"def api_checkkey():\n config = GitReceiveConfig.load(g.cintf.db)\n key = parse_public_key(request.args['key'])\n if not key[:2] in [k[:2] for k in config.auth_keys]:\n return 'unauthorized'\n return 'ok'",
"def test_apikey(self):\n njt = NJTransitAPI()\n assert njt.apikey\n\n njt.apikey = 'xyz'\n assert njt.apikey == 'xyz'\n assert njt.username",
"def test_create_service_key(self):\n pass",
"def test_api_v3_epics_epic_public_id_get(self):\n pass",
"def test_case_clarify_main_keyring(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\tfrom piaplib.pku import utils as utils\n\t\t\tif utils.__name__ is None:\n\t\t\t\traise ImportError(\"Failed to import utils\")\n\t\t\timport piaplib.keyring.__main__\n\t\t\ttemp_msg = None\n\t\t\ttest_args = []\n\t\t\tif sys.platform.startswith(\"linux\") or True:\n\t\t\t\ttemp_msg = str(\"\"\"U2FsdGVkX1/MMOdV6OYwAURQQg9b9K1AoVA0OCcanG9FjHk7gHk=\"\"\")\n\t\t\t\ttest_args = [\n\t\t\t\t\tstr(\"clarify\"),\n\t\t\t\t\tstr(\"--unpack\"),\n\t\t\t\t\tstr(\"--msg='{}'\").format(temp_msg),\n\t\t\t\t\tstr(\"-K=testkeyneedstobelong\")\n\t\t\t\t]\n\t\t\telse:\n\t\t\t\ttemp_msg = str(\n\t\t\t\t\t\"\"\"U2FsdGVkX1/beHoH2ziXWcMFpb3fzzPxQqdeU1tO5UVoBUEnow8T9g==\"\"\"\n\t\t\t\t)\n\t\t\t\ttest_args = [\n\t\t\t\t\tstr(\"clarify\"),\n\t\t\t\t\tstr(\"--unpack\"),\n\t\t\t\t\tstr(\"--msg={}\").format(str(temp_msg)),\n\t\t\t\t\tstr(\"-K=testkeyneedstobelong\")\n\t\t\t\t]\n\t\t\tprint(str(\"... test: piaplib.keyring.__main__({})\").format(str(test_args)))\n\t\t\ttest_out = piaplib.keyring.__main__.main(test_args)\n\t\t\tprint(str(\"... checking\"))\n\t\t\tself.assertIsNotNone(test_out)\n\t\t\tself.assertIsNotNone(str(test_out))\n\t\t\tprint(str(\"... is not none: PASS\"))\n\t\t\tif (int(0) == int(test_out)):\n\t\t\t\ttheResult = True\n\t\t\telse:\n\t\t\t\tif sys.platform.startswith(\"darwin\"):\n\t\t\t\t\tprint(str(test_out))\n\t\t\t\t\ttheResult = False\n\t\t\t\telse:\n\t\t\t\t\traise unittest.SkipTest(\"BETA. Experemental feature not ready yet.\")\n\t\texcept Exception as err:\n\t\t\tprint(str(\"\"))\n\t\t\tprint(str(type(err)))\n\t\t\tprint(str(err))\n\t\t\tprint(str((err.args)))\n\t\t\tprint(str(\"\"))\n\t\t\terr = None\n\t\t\tdel err\n\t\t\tif sys.platform.startswith(\"darwin\"):\n\t\t\t\ttheResult = False\n\t\t\telse:\n\t\t\t\traise unittest.SkipTest(\"BETA. Experemental feature not ready yet.\")\n\t\tassert theResult"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if coord is inside the vacuum tube. Pseudo-overrides BaseClassFieldHelper.
|
def is_Coord_Inside_Vacuum(self, x: float, y: float, z: float) -> bool:
return 0 <= x <= self.L and y ** 2 + z ** 2 < self.ap ** 2
|
[
"def is_coord_in_vacuum(x: float, y: float, z: float, params) -> bool:\n K, L, ap, field_fact = params\n return -eps <= x <= L * eps_fact and np.sqrt(y ** 2 + z ** 2) < ap",
"def check_safety_zone(self):\n if self.safety_zone is None:\n return 0\n\n if self.position_xy.within(self.safety_zone_inner):\n return 0\n if self.position_xy.within(self.safety_zone):\n return 1\n return 2",
"def is_in_unit_circle(self) -> bool:\n return (self.x**2 + self.y**2) <= 1",
"def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)",
"def in_screen(self, coord):\n\t\treturn coord.x >= 0 and coord.x < self.width and coord.y >= 0 and coord.y < self.height",
"def _is_inside(self, obj_name):\n self.sim.forward()\n self.sim.step()\n min_pos, max_pos = self._get_bounding_box(obj_name)\n b = self._config.cursor_boundary\n if (min_pos < np.array([-b, -b, -0.05])).any() or (\n max_pos > np.array([b, b, b])\n ).any():\n return False\n return True",
"def __is_in(self, figure):\n try:\n figure.transform_to(self.ground.frame)\n figure.to_cartesian_coords()\n self.ground.to_cartesian_coords()\n x = figure.x - self.ground.x\n y = figure.y - self.ground.y\n z = figure.z - self.ground.z\n return ((x / self.ground.space.a) ** 2 + (y / self.ground.space.b) ** 2 +\n (z / self.ground.space.c) ** 2) <= 1\n except AttributeError:\n raise LocationError(\"The operation 'is in' needs a figure with coordinates \"\n \"and a ground with a spanned space.\")",
"def is_in_box(self, lat, lng):\n is_between_horizontal = self.right >= lat >= self.left\n is_between_vertical = self.top >= lng >= self.bottom\n coord_is_in_box = is_between_horizontal and is_between_vertical\n print('IsInBox ({},{})? {}'.format(lat, lng, coord_is_in_box))\n return coord_is_in_box",
"def hasUVs(*args, **kwargs):\n \n pass",
"def in_box(self, position, object_position):\n ub = position + self.total_size\n lb = position - self.total_size\n\n # fudge factor for the z-check, since after insertion the object falls to table\n lb[2] -= 0.01\n\n return np.all(object_position > lb) and np.all(object_position < ub)",
"def is_inside(self, x, y):\n return x >= self.x and x < self.x + self.BUILDING_SIZE and y > self.y and y < self.y + self.BUILDING_SIZE",
"def subvoxel(self):\n return (not self.valid()) or self.volume() < 1",
"def player_is_inside_boundary(self, player_object):\n player_is_inside_boundary = False\n if self.shape == \"sphere\":\n \"\"\" we determine the location by the locations radius and the distance of the player from it's center,\n spheres make this especially easy, so I picked them first ^^\n \"\"\"\n distance_to_location_center = float(math.sqrt(\n (float(self.pos_x) - float(player_object.pos_x)) ** 2 + (\n float(self.pos_y) - float(player_object.pos_y)) ** 2 + (\n float(self.pos_z) - float(player_object.pos_z)) ** 2))\n player_is_inside_boundary = distance_to_location_center <= float(self.radius)\n if self.shape == \"cube\":\n \"\"\" we determine the area of the location by the locations center and it's radius (half a sides-length)\n \"\"\"\n if (float(self.pos_x) - float(self.radius)) <= float(player_object.pos_x) <= (float(self.pos_x) + float(self.radius)) and (float(self.pos_y) - float(self.radius)) <= float(player_object.pos_y) <= (float(self.pos_y) + float(self.radius)) and (float(self.pos_z) - float(self.radius)) <= float(player_object.pos_z) <= (float(self.pos_z) + float(self.radius)):\n player_is_inside_boundary = True\n if self.shape == \"room\":\n \"\"\" we determine the area of the location by the locations center, it's width, height and length. height will be calculated from ground level (-1) upwards \n \"\"\"\n if (float(self.pos_x) - float(self.width) / 2) <= float(player_object.pos_x) <= (float(self.pos_x) + float(self.width) / 2) and float(self.pos_y) <= float(player_object.pos_y) + 1 <= (float(self.pos_y) + float(self.height)) and (float(self.pos_z) - float(self.length) / 2) <= float(player_object.pos_z) <= (float(self.pos_z) + float(self.length) / 2):\n player_is_inside_boundary = True\n\n return player_is_inside_boundary",
"def contains(self, coord):\n try:\n pixel = self.getWcs().skyToPixel(coord)\n except (lsst.pex.exceptions.DomainError, lsst.pex.exceptions.RuntimeError):\n # Point must be way off the tract\n return False\n if not np.isfinite(pixel.getX()) or not np.isfinite(pixel.getY()):\n # Point is definitely off the tract\n return False\n return self.getBBox().contains(geom.Point2I(pixel))",
"def in_view(self):\n \n bbox = self.bbox()\n area = self.parent.canvas.get_visible_area()\n\n y1, y2 = bbox[1], bbox[3]\n v1, v2 = area[1], area[3]\n\n return (y1 > v1 and y2 < v2)",
"def is_corner_3(self):\n if self.shot_value != 3:\n return False\n if not hasattr(self, \"locY\") or self.locY is None:\n return False\n if self.locY <= 87:\n return True\n return False",
"def is_inside(self, point):\n x = point[0]\n y = point[1]\n if self.regtype == \"box\":\n #print(\"WARNING: rotation box currently not supported!\",\n # file=sys.stderr)\n xmin = self.xc - self.width/2.0\n xmax = self.xc + self.width/2.0\n ymin = self.yc - self.height/2.0\n ymax = self.yc + self.height/2.0\n if all([x >= xmin, x <= xmax, y >= ymin, y <= ymax]):\n return True\n else:\n return False\n else:\n raise ValueError(\"region type '%s' currently not implemented\" %\\\n self.regtype)",
"def collide_point(self, point):\n # This could probably be optimized as well\n return point[0] > self.left and point[0] < self.right and \\\n point[1] > self.top and point[1] < self.bottom",
"def in_bounds(self, coord):\n coord_x = coord[0]\n coord_y = coord[1]\n return (0 <= coord_x < self.dim\n and 0 <= coord_y < self.dim)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for interpolation of the magnetic field of the plane at the center of the lens. See self.magnetic_potential.
|
def _magnetic_potential_Func_Inner(self, x: float, y: float, z: float) -> float:
V = interp2D(y, z, self.yArrIn, self.z_arrIn, self.VArrIn)
return V
|
[
"def _magnetic_potential(self, x: float, y: float, z: float) -> float:\n if not self.is_Coord_Inside_Vacuum(x, y, z):\n return np.nan\n y = abs(y)\n z = abs(z)\n if -self.extra_field_length <= x <= self.L_cap:\n V0 = self._magnetic_potential_Func_Fringe(x, y, z)\n elif self.L_cap < x <= self.L - self.L_cap:\n V0 = self._magnetic_potential_Func_Inner(x, y, z)\n elif 0 <= x <= self.L + self.extra_field_length:\n x = self.L - x\n V0 = self._magnetic_potential_Func_Fringe(x, y, z)\n else:\n raise Exception(\"Particle outside field region\")\n V0 *= self.field_fact\n return V0",
"def magnetic_field(self, xy, field=\"secondary\"):\n sig = self.sigma_hat # (n_freq, )\n f = self.frequency\n w = 2*np.pi*f\n k = np.sqrt(-1j*w*mu_0*sig)[:, None] # This will get it to broadcast over locations\n dxy = xy[:, :2] - self.location[:2]\n r = np.linalg.norm(dxy, axis=-1)\n x = dxy[:, 0]\n y = dxy[:, 1]\n\n em_x = em_y = em_z = 0\n src_x, src_y, src_z = self.orientation\n # Z component of source\n alpha = 1j*k*r/2.\n IK1 = iv(1, alpha)*kv(1, alpha)\n IK2 = iv(2, alpha)*kv(2, alpha)\n if src_z != 0.0:\n em_z += src_z*2.0/(k**2*r**5)*(9-(9+9*1j*k*r-4*k**2*r**2-1j*k**3*r**3)*np.exp(-1j*k*r))\n Hr = (k**2/r)*(IK1 - IK2)\n angle = np.arctan2(y, x)\n em_x += src_z*np.cos(angle)*Hr\n em_y += src_z*np.sin(angle)*Hr\n\n if src_x != 0.0 or src_y != 0.0:\n # X component of source\n phi = 2/(k**2*r**4)*(3 + k**2*r**2 - (3 + 3j*k*r - k**2*r**2)*np.exp(-1j*k*r))\n dphi_dr = 2/(k**2*r**5)*(-2*k**2*r**2 - 12 + (-1j*k**3*r**3 - 5*k**2*r**2 + 12j*k*r + 12)*np.exp(-1j*k*r))\n if src_x != 0.0:\n em_x += src_x*(-1.0/r**3)*(y**2*phi + x**2*r*dphi_dr)\n em_y += src_x*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_z -= src_x*(k**2*x/r**2)*(IK1 - IK2)\n\n # Y component of source\n if src_y != 0.0:\n em_x += src_y*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_y += src_y*(-1.0/r**3)*(x**2*phi + y**2*r*dphi_dr)\n em_z -= src_y*(k**2*y/r**2)*(IK1 - IK2)\n\n if field == \"secondary\":\n # subtract out primary field from above\n mdotr = src_x*x + src_y*y# + m[2]*(z=0)\n\n em_x -= 3*x*mdotr/r**5 - src_x/r**3\n em_y -= 3*y*mdotr/r**5 - src_y/r**3\n em_z -= -src_z/r**3 # + 3*(z=0)*mdotr/r**5\n\n return self.moment/(4*np.pi)*np.stack((em_x, em_y, em_z), axis=-1)",
"def get_efield(self):\n n = self.n\n ng = self.ngs\n vecs = self.vecs\n vol_per_cube = (vecs[0]/ng[0]) * (vecs[1]/ng[1]) * (vecs[2]/ng[2])\n\n pgrid = self.pot_grid\n res = [vecs[0]/ng[0], vecs[1]/ng[1], vecs[2]/ng[2]]\n\n grad_x, grad_y, grad_z = np.gradient(pgrid[:,:,:], res[0], res[1], res[2])\n\n xy = np.multiply(grad_x, grad_y)\n grad_mag = np.multiply(xy, grad_z)\n # grad_mag = mf.grad_magnitude(grad_x, grad_y, grad_z)\n print(grad_mag.shape)\n self.efield = grad_mag",
"def get_spherical(self):\n\n # Create an scalar Function Space to compute the cylindrical radius (x^2 + y^2)\n # and the angles phi and theta\n S1 = df.FunctionSpace(self.functionspace.mesh(), 'CG', 1)\n\n # Create a dolfin function from the FS\n m_r = df.Function(S1)\n # Compute the radius using the assemble method with dolfin dP\n # (like a dirac delta to get values on every node of the mesh)\n # This returns a dolfin vector\n cyl_vector = df.assemble(df.dot(df.sqrt(self.f[0] * self.f[0] + self.f[1] * self.f[1]),\n df.TestFunction(S1)) * df.dP,\n \n )\n # Set the vector values to the dolfin function\n m_r.vector().set_local(cyl_vector.get_local())\n\n # Now we compute the theta and phi angles to describe the magnetisation\n # and save them to the coresponding variables\n self.theta = df.Function(S1)\n self.phi = df.Function(S1)\n\n # We will use the same vector variable than the one used to\n # compute m_r, in order to save memory\n\n # Theta = arctan(m_r / m_z)\n cyl_vector = df.assemble(df.dot(df.atan_2(m_r, self.f[2]),\n df.TestFunction(S1)) * df.dP,\n tensor=cyl_vector\n )\n\n # Instead of:\n # self.theta.vector().set_local(cyl_vector.get_local())\n # We will use:\n self.theta.vector().axpy(1, cyl_vector)\n # which adds: 1 * cyl_vector\n # to self.theta.vector() and is much faster\n # (we assume self.theta.vector() is empty, i.e. only made of zeros)\n # See: Fenics Book, page 44\n \n # Phi = arctan(m_y / m_x)\n cyl_vector = df.assemble(df.dot(df.atan_2(self.f[1], self.f[0]),\n df.TestFunction(S1)) * df.dP,\n tensor=cyl_vector\n )\n\n # We will save this line just in case:\n # self.phi.vector().set_local(cyl_vector.get_local())\n self.phi.vector().axpy(1, cyl_vector)\n\n return self.theta, self.phi",
"def magnetic_field(r, n, r0, R):\n ### Translate the coordinates in the coil's frame\n n, l, m = base_vectors(n)\n\n # transformation matrix coil frame to lab frame\n trans = np.vstack((l, m, n))\n # transformation matrix to lab frame to coil frame\n inv_trans = np.linalg.inv(trans)\n\n # point location from center of coil\n r = r - r0\n # transform vector to coil frame\n r = np.dot(r, inv_trans)\n\n #### calculate field\n\n # express the coordinates in polar form\n x = r[:, 0]\n y = r[:, 1]\n z = r[:, 2]\n rho = np.sqrt(x**2 + y**2)\n theta = np.arctan(x/y)\n theta[y==0] = 0\n\n E = special.ellipe((4 * R * rho)/( (R + rho)**2 + z**2))\n K = special.ellipk((4 * R * rho)/( (R + rho)**2 + z**2))\n Bz = 1/np.sqrt((R + rho)**2 + z**2) * (\n K\n + E * (R**2 - rho**2 - z**2)/((R - rho)**2 + z**2)\n )\n Brho = z/(rho*np.sqrt((R + rho)**2 + z**2)) * (\n -K\n + E * (R**2 + rho**2 + z**2)/((R - rho)**2 + z**2)\n )\n # On the axis of the coil we get a divided by zero here. This returns a\n # NaN, where the field is actually zero :\n Brho[np.isnan(Brho)] = 0\n Brho[np.isinf(Brho)] = 0\n Bz[np.isnan(Bz)] = 0\n Bz[np.isinf(Bz)] = 0\n\n B = np.c_[np.cos(theta)*Brho, np.sin(theta)*Brho, Bz ]\n\n # Rotate the field back in the lab's frame\n B = np.dot(B, trans)\n return B",
"def earthmagnetic(self, *args, **kwargs):\n return _measures.measures_earthmagnetic(self, *args, **kwargs)",
"def calculate_near_field_forces_and_moments(self):\n\n # Initialize a variable to hold the global panel position as the panel's are\n # iterate through.\n global_panel_position = 0\n\n # Initialize three lists of variables, which will hold the effective strength\n # of the line vortices comprising\n # each panel's ring vortex.\n effective_right_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_front_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_left_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n\n # Iterate through the current_airplane's wings.\n for wing in self.airplane.wings:\n\n # Convert this wing's 2D array of panels into a 1D array.\n panels = np.ravel(wing.panels)\n\n # Iterate through this wing's 1D array panels.\n for panel in panels:\n\n # Check if this panel is on its wing's right edge.\n if panel.is_right_edge:\n\n # Change the effective right vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_right_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n\n else:\n\n # Get the panel directly to the right of this panel.\n panel_to_right = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position + 1,\n ]\n\n # Change the effective right vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the right of it.\n effective_right_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_right.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's leading edge.\n if panel.is_leading_edge:\n\n # Change the effective front vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_front_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly in front of this panel.\n panel_to_front = wing.panels[\n panel.local_chordwise_position - 1,\n panel.local_spanwise_position,\n ]\n\n # Change the effective front vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel in front of it.\n effective_front_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_front.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's left edge.\n if panel.is_left_edge:\n\n # Change the effective left vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_left_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly to the left of this panel.\n panel_to_left = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position - 1,\n ]\n\n # Change the effective left vortex line strength from zero to the\n # difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the left of it.\n effective_left_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_left.ring_vortex.strength\n )\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Calculate the solution velocities at the centers of the panel's front leg,\n # left leg, and right leg.\n velocities_at_ring_vortex_front_leg_centers = 
self.calculate_solution_velocity(\n points=self.panel_front_vortex_centers\n )\n velocities_at_ring_vortex_left_leg_centers = self.calculate_solution_velocity(\n points=self.panel_left_vortex_centers\n )\n velocities_at_ring_vortex_right_leg_centers = self.calculate_solution_velocity(\n points=self.panel_right_vortex_centers\n )\n\n # Using the effective line vortex strengths, and the Kutta-Joukowski theorem\n # to find the near field force in\n # geometry axes on the front leg, left leg, and right leg.\n near_field_forces_on_ring_vortex_right_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_right_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_right_leg_centers,\n self.panel_right_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_front_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_front_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_front_leg_centers,\n self.panel_front_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_left_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_left_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_left_leg_centers,\n self.panel_left_vortex_vectors,\n axis=-1,\n )\n )\n\n # Sum the forces on the legs to calculate the total near field force,\n # in geometry axes, on each panel.\n near_field_forces_geometry_axes = (\n near_field_forces_on_ring_vortex_front_legs_geometry_axes\n + near_field_forces_on_ring_vortex_left_legs_geometry_axes\n + near_field_forces_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Find the near field moment in geometry axes on the front leg, left leg,\n # and right leg.\n near_field_moments_on_ring_vortex_front_legs_geometry_axes = np.cross(\n self.panel_front_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_front_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_left_legs_geometry_axes = np.cross(\n self.panel_left_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_left_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_right_legs_geometry_axes = np.cross(\n self.panel_right_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_right_legs_geometry_axes,\n axis=-1,\n )\n\n # Sum the moments on the legs to calculate the total near field moment,\n # in geometry axes, on each panel.\n near_field_moments_geometry_axes = (\n near_field_moments_on_ring_vortex_front_legs_geometry_axes\n + near_field_moments_on_ring_vortex_left_legs_geometry_axes\n + near_field_moments_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Initialize a variable to hold the global panel position.\n global_panel_position = 0\n\n # Iterate through this solver's panels.\n for panel in self.panels:\n # Update the force and moment on this panel.\n panel.near_field_force_geometry_axes = near_field_forces_geometry_axes[\n global_panel_position, :\n ]\n panel.near_field_moment_geometry_axes = near_field_moments_geometry_axes[\n global_panel_position, :\n ]\n\n # Update the pressure on this panel.\n panel.update_pressure()\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Sum up the near field forces and moments on every panel to find the total\n # force and moment on the geometry.\n total_near_field_force_geometry_axes = np.sum(\n near_field_forces_geometry_axes, axis=0\n )\n total_near_field_moment_geometry_axes = np.sum(\n 
near_field_moments_geometry_axes, axis=0\n )\n\n # Find the total near field force in wind axes from the rotation matrix and\n # the total near field force in\n # geometry axes.\n self.airplane.total_near_field_force_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_force_geometry_axes\n )\n\n # Find the total near field moment in wind axes from the rotation matrix and\n # the total near field moment in\n # geometry axes.\n self.airplane.total_near_field_moment_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_moment_geometry_axes\n )\n\n # Calculate the current_airplane's induced drag coefficient\n induced_drag_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's side force coefficient.\n side_force_coefficient = (\n self.airplane.total_near_field_force_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's lift coefficient.\n lift_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's rolling moment coefficient.\n rolling_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n # Calculate the current_airplane's pitching moment coefficient.\n pitching_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.c_ref\n )\n\n # Calculate the current_airplane's yawing moment coefficient.\n yawing_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n self.airplane.total_near_field_force_coefficients_wind_axes = np.array(\n [induced_drag_coefficient, side_force_coefficient, lift_coefficient]\n )\n self.airplane.total_near_field_moment_coefficients_wind_axes = np.array(\n [\n rolling_moment_coefficient,\n pitching_moment_coefficient,\n yawing_moment_coefficient,\n ]\n )",
"def regrid_interpolate(self):\n temperature = self._regrid_interpolate_bilinear_array(\"air_temperature\")\n precipitation = self._regrid_interpolate_bilinear_array(\"precipitation\")\n forcing_file = self._finalize_forcing_for_PDD(temperature, precipitation)",
"def getMagnification(self, pos, units=galsim.arcsec, periodic=False, interpolant=None):\n\n if not hasattr(self, 'im_kappa'):\n raise RuntimeError(\"PowerSpectrum.buildGrid must be called before getMagnification\")\n\n # Convert to numpy arrays for internal usage:\n pos_x, pos_y = galsim.utilities._convertPositions(pos, units, 'getMagnification')\n\n # Set the interpolant:\n if interpolant is not None:\n xinterp = galsim.utilities.convert_interpolant_to_2d(interpolant)\n else:\n xinterp = galsim.utilities.convert_interpolant_to_2d(self.interpolant)\n kinterp = galsim.InterpolantXY(galsim.Quintic())\n\n # Calculate the magnification based on the convergence and shear\n _, _, mu = galsim.lensing_ps.theoryToObserved(self.im_g1.array, self.im_g2.array,\n self.im_kappa.array)\n # Interpolate mu-1, so the zero values off the edge are appropriate.\n im_mu = galsim.ImageD(mu-1)\n\n # Make an SBInterpolatedImage, which will do the heavy lifting for the interpolation.\n # However, if we are doing wrapped interpolation then we will want to manually stick the\n # wrapped grid bits around the edges, because otherwise the interpolant will treat\n # everything off the edges as zero.\n if periodic:\n # Make an expanded bounds. We expand by 7 (default) to be safe, though most\n # interpolants don't need that much.\n im_mu_new = self._wrap_image(im_mu)\n\n # Then make the SBInterpolated image.\n sbii_mu = galsim._galsim.SBInterpolatedImage(im_mu_new.image, xInterp=xinterp,\n kInterp=kinterp)\n else:\n sbii_mu = galsim._galsim.SBInterpolatedImage(im_mu.image, xInterp=xinterp,\n kInterp=kinterp)\n\n # Calculate some numbers that are useful to calculate before the loop over positions, but\n # only if we are doing a periodic treatment of the box.\n if periodic:\n dx = self.bounds.xmax-self.bounds.xmin\n dy = self.bounds.ymax-self.bounds.ymin\n\n # interpolate if necessary\n mu = []\n for iter_pos in [ galsim.PositionD(pos_x[i],pos_y[i]) for i in range(len(pos_x)) ]:\n # Check that the position is in the bounds of the interpolated image\n if not self.bounds.includes(iter_pos):\n if not periodic:\n import warnings\n warnings.warn(\n \"Warning: position (%f,%f) not within the bounds \"%(iter_pos.x,iter_pos.y) +\n \"of the gridded convergence values: \" + str(self.bounds) +\n \". Returning a magnification of 1 for this point.\")\n mu.append(1.)\n else:\n # Treat this as a periodic box.\n wrap_pos = galsim.PositionD(\n (iter_pos.x-self.bounds.xmin) % dx + self.bounds.xmin,\n (iter_pos.y-self.bounds.ymin) % dy + self.bounds.ymin\n )\n mu.append(sbii_mu.xValue((wrap_pos-self.center)/self.grid_spacing)+1.)\n\n else:\n mu.append(sbii_mu.xValue((iter_pos-self.center)/self.grid_spacing)+1.)\n\n if isinstance(pos, galsim.PositionD):\n return mu[0]\n elif isinstance(pos[0], np.ndarray):\n return np.array(mu)\n elif len(pos_x) == 1 and not isinstance(pos[0],list):\n return mu[0]\n else:\n return mu",
"def magnetic(self):\n return self.__magnetic",
"def get_trilinear_field():\n xl, xh, nx = -1.0, 1.0, 41\n yl, yh, ny = -1.5, 1.5, 41\n zl, zh, nz = -2.0, 2.0, 41\n x = np.linspace(xl, xh, nx)\n y = np.linspace(yl, yh, ny)\n z = np.linspace(zl, zh, nz)\n crds = viscid.wrap_crds(\"nonuniform_cartesian\",\n [('x', x), ('y', y), ('z', z)])\n b = viscid.empty(crds, name=\"f\", nr_comps=3, center=\"Cell\",\n layout=\"interlaced\")\n X, Y, Z = b.get_crds(shaped=True)\n\n x01, y01, z01 = 0.5, 0.5, 0.5\n x02, y02, z02 = 0.5, 0.5, 0.5\n x03, y03, z03 = 0.5, 0.5, 0.5\n\n b['x'][:] = (0.0 + 1.0 * (X - x01) + 1.0 * (Y - y01) + 1.0 * (Z - z01) +\n 1.0 * (X - x01) * (Y - y01) + 1.0 * (Y - y01) * (Z - z01) +\n 1.0 * (X - x01) * (Y - y01) * (Z - z01))\n b['y'][:] = (0.0 + 1.0 * (X - x02) - 1.0 * (Y - y02) + 1.0 * (Z - z02) +\n 1.0 * (X - x02) * (Y - y02) + 1.0 * (Y - y02) * (Z - z02) -\n 1.0 * (X - x02) * (Y - y02) * (Z - z02))\n b['z'][:] = (0.0 + 1.0 * (X - x03) + 1.0 * (Y - y03) - 1.0 * (Z - z03) +\n 1.0 * (X - x03) * (Y - y03) + 1.0 * (Y - y03) * (Z - z03) +\n 1.0 * (X - x03) * (Y - y03) * (Z - z03))\n return b",
"def interpolate_all_on_grid_3D(self):\n\n r3D = self.grid.r3D\n z3D = self.grid.z3D\n phi3D = self.grid.phi3D\n\n if(self.equilibrium_mesh == '3D'):\n #interpolation on 3D mesh: (currently not used in FWR3D)\n #psi on grid\n self.psi_on_grid = self.psi_interp(z3D,r3D)\n\n #B Field on grid, right now, BPhi,BZ, and BR are directly used.\n self.BX_on_grid = -self.BPhi_interp(z3D,r3D)*np.sin(phi3D)+self.BR_interp(z3D,r3D)*np.cos(phi3D)\n self.BY_on_grid = self.BZ_interp(z3D,r3D)\n self.BZ_on_grid = -self.BR_interp(z3D,r3D)*np.sin(phi3D)-self.BPhi_interp(z3D,r3D)*np.cos(phi3D)\n self.B_on_grid = np.sqrt(self.BX_on_grid**2 + self.BY_on_grid**2 + self.BZ_on_grid**2)\n\n\n #Te and Ti on grid\n self.te_on_grid = self.te0_sp(self.psi_on_grid)\n self.ti_on_grid = self.ti0_sp(self.psi_on_grid)\n\n #ne0 on grid\n self.ne0_on_grid = self.ne0_sp(self.psi_on_grid)\n self.ni0_on_grid = self.ni0_sp(self.psi_on_grid)\n elif(self.equilibrium_mesh == '2D'):\n #interpolation on 2D mesh: (used in FWR3D, the FWR3D code will then rotate the whole equilibrium to get the values on 3D mesh.)\n R1D = self.grid.X1D\n Z1D = self.grid.Y1D\n R2D = np.zeros((self.grid.NY,self.grid.NX)) + R1D[np.newaxis,:]\n Z2D = np.zeros_like(R2D) + Z1D[:,np.newaxis]\n\n #psi on 2D grid\n self.psi_on_grid = self.psi_interp(Z2D,R2D)\n out_mask = np.copy(self.psi_on_grid.mask)\n\n Zout = Z2D[out_mask]\n Rout = R2D[out_mask]\n\n #boundary points are obtained by applying ConvexHull on equilibrium grid points\n hull = ConvexHull(self.points)\n p_boundary = self.points[hull.vertices]\n Z_boundary = p_boundary[:,0]\n R_boundary = p_boundary[:,1]\n\n #Now let's calculate *psi* on outside points, first, get the nearest boundary point for each outside point\n nearest_indices = []\n for i in range(len(Zout)):\n Z = Zout[i]\n R = Rout[i]\n nearest_indices.append (np.argmin((Z-Z_boundary)**2 + (R-R_boundary)**2) )\n\n # Then, calculate *psi* based on the gradient at these nearest points\n Zn = Z_boundary[nearest_indices]\n Rn = R_boundary[nearest_indices]\n #The value *psi* and its gradiant at this nearest point can by easily obtained\n psi_n = self.psi_interp(Zn,Rn)\n gradpsi_Z,gradpsi_R = self.psi_interp.gradient(Zn,Rn)\n\n psi_out = psi_n + (Zout-Zn)*gradpsi_Z + (Rout-Rn)*gradpsi_R\n\n # Finally, assign these outside values to the original array\n self.psi_on_grid[out_mask] = psi_out\n\n #B on grid\n self.BR_on_grid = self.BR_interp(Z2D,R2D)\n BR_n = self.BR_interp(Zn,Rn)\n gradBR_Z, gradBR_R = self.BR_interp.gradient(Zn,Rn)\n BR_out = BR_n + (Zout-Zn)*gradBR_Z + (Rout-Rn)*gradBR_R\n self.BR_on_grid[out_mask] = BR_out\n\n self.BZ_on_grid = self.BZ_interp(Z2D,R2D)\n BZ_n = self.BZ_interp(Zn,Rn)\n gradBZ_Z, gradBZ_R = self.BZ_interp.gradient(Zn,Rn)\n BZ_out = BZ_n + (Zout-Zn)*gradBZ_Z + (Rout-Rn)*gradBZ_R\n self.BZ_on_grid[out_mask] = BZ_out\n\n self.BPhi_on_grid = self.BPhi_interp(Z2D,R2D)\n BPhi_n = self.BPhi_interp(Zn,Rn)\n gradBPhi_Z, gradBPhi_R = self.BPhi_interp.gradient(Zn,Rn)\n BPhi_out = BPhi_n + (Zout-Zn)*gradBPhi_Z + (Rout-Rn)*gradBPhi_R\n self.BPhi_on_grid[out_mask] = BPhi_out\n\n self.B_on_grid = np.sqrt(self.BR_on_grid**2 + self.BZ_on_grid**2 + self.BPhi_on_grid**2)\n\n\n\n #Te0, Ti0, ne0 and ni0 on grid\n self.te0_on_grid = self.te0_sp(self.psi_on_grid)\n self.ti0_on_grid = self.ti0_sp(self.psi_on_grid)\n self.ne0_on_grid = self.ne0_sp(self.psi_on_grid)\n self.ni0_on_grid = self.ni0_sp(self.psi_on_grid)\n\n\n #ne fluctuations on 3D grid\n\n if(not self.Equilibrium_Only):\n self.dne_ad_on_grid = 
np.zeros((self.n_cross_section,len(self.time_steps),r3D.shape[0],r3D.shape[1],r3D.shape[2]))\n if self.HaveElectron:\n self.nane_on_grid = np.zeros(self.dne_ad_on_grid.shape)\n if self.load_ions:\n self.dni_on_grid = np.zeros(self.dni_ad_on_grid.shape)\n\n interp_positions = find_interp_positions_v2_upgrade(self)\n\n for k in range(self.n_cross_section):\n print 'center plane {0}.'.format(self.center_planes[k])\n for i in range(len(self.time_steps)):\n print 'time step {0}'.format(self.time_steps[i])\n #for each time step, first create the 2 arrays of quantities for interpolation\n prev = np.zeros( (self.grid.NZ,self.grid.NY,self.grid.NX) )\n next = np.zeros(prev.shape)\n\n #create index dictionary, for each key as plane number and value the corresponding indices where the plane is used as previous or next plane.\n prev_idx = {}\n next_idx = {}\n for j in range(len(self.planes)):\n prev_idx[j] = np.where(self.prevplane == self.planes[j] )\n next_idx[j] = np.where(self.nextplane == self.planes[j] )\n\n #now interpolate adiabatic ne on each toroidal plane for the points using it as previous or next plane.\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dne_ad[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dne_ad[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n # on_grid adiabatic ne is then calculated by linearly interpolating values between these two planes\n\n self.dne_ad_on_grid[k,i,...] = prev * interp_positions[1,2,...] + next * interp_positions[0,2,...]\n\n\n if self.HaveElectron:\n #non-adiabatic ne data as well:\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.nane[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.nane[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n self.nane_on_grid[k,i,...] = prev * interp_positions[1,2,...] + next * interp_positions[0,2,...]\n\n \"\"\" NOW WE WORK WITH IONS \"\"\"\n\n if self.load_ions:\n #for each time step, first create the 2 arrays of quantities for interpolation\n prev = np.zeros( (self.grid.NZ,self.grid.NY,self.grid.NX) )\n next = np.zeros(prev.shape)\n\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dni[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dni[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n self.dni_on_grid[k,i,...] = prev * interp_positions[1,2,...] + next * interp_positions[0,2,...]",
"def get_qm_interp_full(magnitude, centers, q_m_dist):\n # Fold in the Q_0 and the F(r) values\n return np.interp(magnitude, centers, q_m_dist)",
"def get_min_mag_center(self):\r\n\t\treturn self.min_mag",
"def update_mag(self, msg):\n\t\tself.sen.imu.mag = enu_to_ned(np.array([[msg.magnetic_field.x], [msg.magnetic_field.y], [msg.magnetic_field.z]]))",
"def interpolate(m: np.ndarray, n: np.ndarray, sgrid: np.ndarray, points_on_sphere: np.ndarray, radius: np.ndarray):\n #print(\"Interpolate\")\n\n\n # =========================\n center_grid = np.zeros((m.shape[0],3))\n east_grid = np.zeros((m.shape[0],3))\n south_grid = np.zeros((m.shape[0],3))\n southeast_grid = np.zeros((m.shape[0],3))\n\n center_dist = np.zeros(m.shape[0])\n east_dist = np.zeros(m.shape[0])\n south_dist = np.zeros(m.shape[0])\n southeast_dist = np.zeros(m.shape[0])\n\n center_weight = np.zeros(m.shape[0])\n east_weight = np.zeros(m.shape[0])\n south_weight = np.zeros(m.shape[0])\n southeast_weight = np.zeros(m.shape[0])\n\n # use a mask to select the point on the boundary============================\n mask_north = m == 0\n mask_south = m == sgrid.shape[0] - 1\n mask_boundary = mask_north + mask_south\n m_boundary = m[mask_boundary]\n n_boundary = n[mask_boundary] % sgrid.shape[1]\n n_boundary_plus_one = (n_boundary + 1) % sgrid.shape[1]\n n_boundary_opposite = (n_boundary + (sgrid.shape[1] / 2)) % sgrid.shape[1]\n n_boundary_opposite=n_boundary_opposite.astype(int)\n n_boundary_plus_one_opposite = (n_boundary_plus_one + (sgrid.shape[1] / 2))%sgrid.shape[1]\n n_boundary_plus_one_opposite=n_boundary_plus_one_opposite.astype(int)\n center_grid[mask_boundary] = sgrid[m_boundary, n_boundary]\n east_grid[mask_boundary] = sgrid[m_boundary, n_boundary_plus_one]\n south_grid[mask_boundary] = sgrid[m_boundary, n_boundary_opposite]\n southeast_grid[mask_boundary] = sgrid[m_boundary, n_boundary_plus_one_opposite]\n\n # calculate distance and relevant weight\n center_dist[mask_boundary] = np.sqrt(np.sum((center_grid[mask_boundary] - points_on_sphere[mask_boundary]) ** 2))\n east_dist[mask_boundary] = np.sqrt(np.sum((east_grid[mask_boundary] - points_on_sphere[mask_boundary]) ** 2))\n south_dist[mask_boundary] = np.sqrt(np.sum((south_grid[mask_boundary] - points_on_sphere[mask_boundary]) ** 2))\n southeast_dist[mask_boundary] = np.sqrt(\n np.sum((southeast_grid[mask_boundary] - points_on_sphere[mask_boundary]) ** 2))\n sum = center_dist[mask_boundary] + east_dist[mask_boundary] + south_dist[mask_boundary] + southeast_dist[\n mask_boundary]\n center_weight[mask_boundary] = center_dist[mask_boundary] / sum\n east_weight[mask_boundary] = east_dist[mask_boundary] / sum\n south_weight[mask_boundary] = south_dist[mask_boundary] / sum\n southeast_weight[mask_boundary] = southeast_dist[mask_boundary] / sum\n\n # save the signal of distance\n radius_boundary = radius[mask_boundary]\n dist_im = np.zeros(sgrid.shape[0:2]) # signal of distance from points to sphere\n weight_im = np.zeros(sgrid.shape[\n 0:2]) # Since each grid point on the sphere could be affected by several different signals, we need to normalize the values.\n dist_im[m_boundary, n_boundary] += radius_boundary[:, 0] * center_weight[mask_boundary]\n dist_im[m_boundary, n_boundary_plus_one] += radius_boundary[:, 0] * east_weight[mask_boundary]\n dist_im[m_boundary, n_boundary_opposite] += radius_boundary[:, 0] * south_weight[mask_boundary]\n dist_im[m_boundary, n_boundary_plus_one_opposite] += radius_boundary[:, 0] * southeast_weight[mask_boundary]\n weight_im[m_boundary, n_boundary] += center_weight[mask_boundary]\n weight_im[m_boundary, n_boundary_plus_one] += east_weight[mask_boundary]\n weight_im[m_boundary, n_boundary_opposite] += south_weight[mask_boundary]\n weight_im[m_boundary, n_boundary_plus_one_opposite] += southeast_weight[mask_boundary]\n\n # use a mask to select the rest points===============================\n 
mask_rest = ~mask_boundary\n m_rest = m[mask_rest]\n n_rest = n[mask_rest] % sgrid.shape[1]\n n_rest_plus_one = (n_rest + 1) % sgrid.shape[1]\n center_grid[mask_rest] = sgrid[m_rest, n_rest]\n east_grid[mask_rest] = sgrid[m_rest, n_rest_plus_one]\n south_grid[mask_rest] = sgrid[m_rest + 1, n_rest]\n southeast_grid[mask_rest] = sgrid[m_rest + 1, n_rest_plus_one]\n\n # calculate distance and relevant weight\n center_dist[mask_rest] = np.sqrt(np.sum((center_grid[mask_rest] - points_on_sphere[mask_rest]) ** 2))\n east_dist[mask_rest] = np.sqrt(np.sum((east_grid[mask_rest] - points_on_sphere[mask_rest]) ** 2))\n south_dist[mask_rest] = np.sqrt(np.sum((south_grid[mask_rest] - points_on_sphere[mask_rest]) ** 2))\n southeast_dist[mask_rest] = np.sqrt(np.sum((southeast_grid[mask_rest] - points_on_sphere[mask_rest]) ** 2))\n sum = center_dist[mask_rest] + east_dist[mask_rest] + south_dist[mask_rest] + southeast_dist[mask_rest]\n center_weight[mask_rest] = center_dist[mask_rest] / sum\n east_weight[mask_rest] = east_dist[mask_rest] / sum\n south_weight[mask_rest] = south_dist[mask_rest] / sum\n southeast_weight[mask_rest] = southeast_dist[mask_rest] / sum\n\n # save the signal of distance\n radius_rest = radius[mask_rest]\n dist_im = np.zeros(sgrid.shape[0:2]) # signal of distance from points to sphere\n weight_im = np.zeros(sgrid.shape[\n 0:2]) # Since each grid point on the sphere could be affected by several different signals, we need to normalize the values.\n dist_im[m_rest, n_rest] += radius_rest[:, 0] * center_weight[mask_rest]\n dist_im[m_rest, n_rest_plus_one] += radius_rest[:, 0] * east_weight[mask_rest]\n dist_im[m_rest + 1, n_rest] += radius_rest[:, 0] * south_weight[mask_rest]\n dist_im[m_rest + 1, n_rest_plus_one] += radius_rest[:, 0] * southeast_weight[mask_rest]\n weight_im[m_rest, n_rest] += center_weight[mask_rest]\n weight_im[m_rest, n_rest_plus_one] += east_weight[mask_rest]\n weight_im[m_rest + 1, n_rest] += south_weight[mask_rest]\n weight_im[m_rest + 1, n_rest_plus_one] += southeast_weight[mask_rest]\n\n mask_weight = weight_im != 0\n dist_im[mask_weight] /= weight_im[mask_weight]\n dist_im = 1 - dist_im\n return dist_im, center_grid, east_grid, south_grid, southeast_grid",
"def calculate_near_field_forces_and_moments(self):\n # Calculate the velocities induced at every panel's bound vortex center.\n induced_velocities = aerodynamics.collapsed_velocities_from_horseshoe_vortices(\n points=self.panel_bound_vortex_centers,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=self.vortex_strengths,\n )\n\n # Add the freestream velocity to the induced velocities to calculate the\n # total velocity at every panel's bound vortex center.\n total_velocities = induced_velocities + self.freestream_velocity\n\n # Calculate the near field force, in geometry axes, on each panel's bound\n # vortex.\n near_field_forces_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(self.vortex_strengths, axis=1)\n * np.cross(total_velocities, self.panel_bound_vortex_vectors, axis=-1)\n )\n\n # Calculate the near field moments, in geometry axes, on each panel's bound\n # vortex.\n near_field_moments_geometry_axes = np.cross(\n self.panel_bound_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_geometry_axes,\n axis=-1,\n )\n\n functions.process_steady_solver_forces(\n steady_solver=self,\n near_field_forces_geometry_axes=near_field_forces_geometry_axes,\n near_field_moments_geometry_axes=near_field_moments_geometry_axes,\n )",
"def accel(self):\n return self.force()/self.mass",
"def magnetization(lattice):\n\n mag = np.sum(lattice)\n\n return mag"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Force on Li7 in simulation units at x, y, z. Pseudo-overrides BaseClassFieldHelper. Symmetry is used to simplify the computation of the force: either end of the lens is identical, so coordinates falling within a certain range are mapped to an interpolation of the force field at the lens's end. If the lens is long enough, the inner region is modeled as a single plane as well. (nan, nan, nan) is returned if the coordinate is outside the vacuum tube.
|
def _force(self, x: float, y: float, z: float) -> TupleOf3Floats:
if not self.is_Coord_Inside_Vacuum(x, y, z):
return np.nan, np.nan, np.nan
FySymmetryFact = 1.0 if y >= 0.0 else -1.0 # take advantage of symmetry
FzSymmetryFact = 1.0 if z >= 0.0 else -1.0
y = abs(y) # confine to upper right quadrant
z = abs(z)
if -self.extra_field_length <= x <= self.L_cap: # at beginning of lens
Fx, Fy, Fz = self._force_Func_Outer(x, y, z)
elif self.L_cap < x <= self.L - self.L_cap: # if long enough, model interior as uniform in x
Fx, Fy, Fz = self._force_Func_Inner(y, z)
elif self.L - self.L_cap <= x <= self.L + self.extra_field_length: # at end of lens
x = self.L - x
Fx, Fy, Fz = self._force_Func_Outer(x, y, z)
Fx = -Fx
else:
raise Exception("Particle outside field region") # this may be triggered when itentionally misligned
Fx *= self.field_fact
Fy *= FySymmetryFact * self.field_fact
Fz *= FzSymmetryFact * self.field_fact
# Fx, Fy, Fz = self.baseClass.rotate_Force_For_Misalignment(Fx, Fy, Fz)
return Fx, Fy, Fz
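
As an aside, the quadrant-symmetry trick described in the query above can be illustrated with a minimal standalone sketch; the helper name and the force_func callable below are hypothetical and not part of the class:

def apply_quadrant_symmetry(force_func, x: float, y: float, z: float):
    # Evaluate an interpolated force only in the y >= 0, z >= 0 quadrant, then
    # restore the signs of the transverse components for the original octant.
    fy_sign = 1.0 if y >= 0.0 else -1.0
    fz_sign = 1.0 if z >= 0.0 else -1.0
    Fx, Fy, Fz = force_func(x, abs(y), abs(z))
    return Fx, fy_sign * Fy, fz_sign * Fz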
|
[
"def _xforce_xyz(self,x,y,z):\n return 1/2*self._b*self._c*f.cy_forceInt(x,y,z,self._a2,self._b2,self._c2,0,self.n)",
"def get_force(self):\n # @todo: decide whether or not we want to have gimbaling provide x force and lift for 4 of the engines, or to do x force (drag) for all engines in force_hover_engines.py\n return self.data(0.0, 0.0, 0.0)",
"def _magnetic_potential(self, x: float, y: float, z: float) -> float:\n if not self.is_Coord_Inside_Vacuum(x, y, z):\n return np.nan\n y = abs(y)\n z = abs(z)\n if -self.extra_field_length <= x <= self.L_cap:\n V0 = self._magnetic_potential_Func_Fringe(x, y, z)\n elif self.L_cap < x <= self.L - self.L_cap:\n V0 = self._magnetic_potential_Func_Inner(x, y, z)\n elif 0 <= x <= self.L + self.extra_field_length:\n x = self.L - x\n V0 = self._magnetic_potential_Func_Fringe(x, y, z)\n else:\n raise Exception(\"Particle outside field region\")\n V0 *= self.field_fact\n return V0",
"def force_calc(self):\n\n ##check the neighbor if it is Ant or fence\n\n # Calculate the force in x and y direction\n\n Fx = 0\n\n if (type(self.model.grid[self.pos[0]-1][self.pos[1]]) is Ant or\n type(self.model.grid[self.pos[0]-1][self.pos[1]]) is Fence):\n Fx += 1\n if (type(self.model.grid[self.pos[0]+1][self.pos[1]]) is Ant or\n type(self.model.grid[self.pos[0]+1][self.pos[1]]) is Fence):\n Fx -= 1\n\n Fy = 0\n if (type(self.model.grid[self.pos[0]][self.pos[1]-1]) is Ant or\n type(self.model.grid[self.pos[0]][self.pos[1]-1]) is Fence):\n Fy += 1\n if (type(self.model.grid[self.pos[0]][self.pos[1]+1]) is Ant or\n type(self.model.grid[self.pos[0]][self.pos[1]+1]) is Fence):\n Fy -= 1\n\n # Magnitude of the force\n F = np.sqrt(Fx**2+Fy**2)\n\n return Fx,Fy,F",
"def _zforce_xyz(self,x,y,z):\n return 1/2*self._b*self._c*f.cy_forceInt(x,y,z,self._a2,self._b2,self._c2,2,self.n)",
"def _yforce_xyz(self,x,y,z):\n return 1/2*self._b*self._c*f.cy_forceInt(x,y,z,self._a2,self._b2,self._c2,1,self.n)",
"def calculate_near_field_forces_and_moments(self):\n\n # Initialize a variable to hold the global panel position as the panel's are\n # iterate through.\n global_panel_position = 0\n\n # Initialize three lists of variables, which will hold the effective strength\n # of the line vortices comprising\n # each panel's ring vortex.\n effective_right_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_front_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_left_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n\n # Iterate through the current_airplane's wings.\n for wing in self.airplane.wings:\n\n # Convert this wing's 2D array of panels into a 1D array.\n panels = np.ravel(wing.panels)\n\n # Iterate through this wing's 1D array panels.\n for panel in panels:\n\n # Check if this panel is on its wing's right edge.\n if panel.is_right_edge:\n\n # Change the effective right vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_right_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n\n else:\n\n # Get the panel directly to the right of this panel.\n panel_to_right = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position + 1,\n ]\n\n # Change the effective right vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the right of it.\n effective_right_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_right.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's leading edge.\n if panel.is_leading_edge:\n\n # Change the effective front vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_front_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly in front of this panel.\n panel_to_front = wing.panels[\n panel.local_chordwise_position - 1,\n panel.local_spanwise_position,\n ]\n\n # Change the effective front vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel in front of it.\n effective_front_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_front.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's left edge.\n if panel.is_left_edge:\n\n # Change the effective left vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_left_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly to the left of this panel.\n panel_to_left = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position - 1,\n ]\n\n # Change the effective left vortex line strength from zero to the\n # difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the left of it.\n effective_left_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_left.ring_vortex.strength\n )\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Calculate the solution velocities at the centers of the panel's front leg,\n # left leg, and right leg.\n velocities_at_ring_vortex_front_leg_centers = 
self.calculate_solution_velocity(\n points=self.panel_front_vortex_centers\n )\n velocities_at_ring_vortex_left_leg_centers = self.calculate_solution_velocity(\n points=self.panel_left_vortex_centers\n )\n velocities_at_ring_vortex_right_leg_centers = self.calculate_solution_velocity(\n points=self.panel_right_vortex_centers\n )\n\n # Using the effective line vortex strengths, and the Kutta-Joukowski theorem\n # to find the near field force in\n # geometry axes on the front leg, left leg, and right leg.\n near_field_forces_on_ring_vortex_right_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_right_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_right_leg_centers,\n self.panel_right_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_front_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_front_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_front_leg_centers,\n self.panel_front_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_left_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_left_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_left_leg_centers,\n self.panel_left_vortex_vectors,\n axis=-1,\n )\n )\n\n # Sum the forces on the legs to calculate the total near field force,\n # in geometry axes, on each panel.\n near_field_forces_geometry_axes = (\n near_field_forces_on_ring_vortex_front_legs_geometry_axes\n + near_field_forces_on_ring_vortex_left_legs_geometry_axes\n + near_field_forces_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Find the near field moment in geometry axes on the front leg, left leg,\n # and right leg.\n near_field_moments_on_ring_vortex_front_legs_geometry_axes = np.cross(\n self.panel_front_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_front_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_left_legs_geometry_axes = np.cross(\n self.panel_left_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_left_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_right_legs_geometry_axes = np.cross(\n self.panel_right_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_right_legs_geometry_axes,\n axis=-1,\n )\n\n # Sum the moments on the legs to calculate the total near field moment,\n # in geometry axes, on each panel.\n near_field_moments_geometry_axes = (\n near_field_moments_on_ring_vortex_front_legs_geometry_axes\n + near_field_moments_on_ring_vortex_left_legs_geometry_axes\n + near_field_moments_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Initialize a variable to hold the global panel position.\n global_panel_position = 0\n\n # Iterate through this solver's panels.\n for panel in self.panels:\n # Update the force and moment on this panel.\n panel.near_field_force_geometry_axes = near_field_forces_geometry_axes[\n global_panel_position, :\n ]\n panel.near_field_moment_geometry_axes = near_field_moments_geometry_axes[\n global_panel_position, :\n ]\n\n # Update the pressure on this panel.\n panel.update_pressure()\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Sum up the near field forces and moments on every panel to find the total\n # force and moment on the geometry.\n total_near_field_force_geometry_axes = np.sum(\n near_field_forces_geometry_axes, axis=0\n )\n total_near_field_moment_geometry_axes = np.sum(\n 
near_field_moments_geometry_axes, axis=0\n )\n\n # Find the total near field force in wind axes from the rotation matrix and\n # the total near field force in\n # geometry axes.\n self.airplane.total_near_field_force_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_force_geometry_axes\n )\n\n # Find the total near field moment in wind axes from the rotation matrix and\n # the total near field moment in\n # geometry axes.\n self.airplane.total_near_field_moment_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_moment_geometry_axes\n )\n\n # Calculate the current_airplane's induced drag coefficient\n induced_drag_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's side force coefficient.\n side_force_coefficient = (\n self.airplane.total_near_field_force_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's lift coefficient.\n lift_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's rolling moment coefficient.\n rolling_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n # Calculate the current_airplane's pitching moment coefficient.\n pitching_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.c_ref\n )\n\n # Calculate the current_airplane's yawing moment coefficient.\n yawing_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n self.airplane.total_near_field_force_coefficients_wind_axes = np.array(\n [induced_drag_coefficient, side_force_coefficient, lift_coefficient]\n )\n self.airplane.total_near_field_moment_coefficients_wind_axes = np.array(\n [\n rolling_moment_coefficient,\n pitching_moment_coefficient,\n yawing_moment_coefficient,\n ]\n )",
"def find_lw_force(lw_centre_x, lw_centre_y, lw_kappa_x, lw_kappa_y, X , Y, min_grid, max_grid, grid_space, periodic):\n\t#Calculate x-force\n\tF_wall_x = np.where(X < lw_centre_x, 2 * lw_kappa_x * (X - lw_centre_x), 0)\n\tif periodic == 1:\n\t\tgrid_length = max_grid[0] - min_grid[0]\n\t\tgrid_centre = min_grid[0] + grid_length/2\n\t\tif lw_centre_x < grid_centre:\n\t\t\tindex_period = index(lw_centre_x + grid_length/2, min_grid[0], grid_space)\n\t\t\tF_wall_x[:, index_period:] = 2 * lw_kappa_x * (X[:, index_period:] - lw_centre_x - grid_length) \n\t\telif lw_centre_x > grid_centre:\n\t\t\tindex_period = index(lw_centre_x - grid_length/2, min_grid[0], grid_space)\n\t\t\tF_wall_x[:, :index_period] = 0\n\n\t#Calculate y-force\n\tF_wall_y = np.where(Y < lw_centre_y, 2 * lw_kappa_y * (Y - lw_centre_y), 0)\n\tif periodic == 1:\n\t\tgrid_length = max_grid[1] - min_grid[1]\n\t\tgrid_centre = min_grid[1] + grid_length/2\n\t\tif lw_centre_y < grid_centre:\n\t\t\tindex_period = index(lw_centre_y + grid_length/2, min_grid[1], grid_space)\n\t\t\tF_wall_y[index_period:, :] = 2 * lw_kappa_y * (Y[index_period:, :] - lw_centre_y - grid_length)\n\t\telif lw_centre_y > grid_centre:\n\t\t\tindex_period = index(lw_centre_y - grid_length/2, min_grid[1], grid_space)\n\t\t\tF_wall_y[:index_period, :] = 0\n\treturn [F_wall_x, F_wall_y]",
"def radial_force(self):\n return np.sum([t.radial_force_of_filament() for t in self.thick], 0)",
"def calculate_near_field_forces_and_moments(self):\n # Calculate the velocities induced at every panel's bound vortex center.\n induced_velocities = aerodynamics.collapsed_velocities_from_horseshoe_vortices(\n points=self.panel_bound_vortex_centers,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=self.vortex_strengths,\n )\n\n # Add the freestream velocity to the induced velocities to calculate the\n # total velocity at every panel's bound vortex center.\n total_velocities = induced_velocities + self.freestream_velocity\n\n # Calculate the near field force, in geometry axes, on each panel's bound\n # vortex.\n near_field_forces_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(self.vortex_strengths, axis=1)\n * np.cross(total_velocities, self.panel_bound_vortex_vectors, axis=-1)\n )\n\n # Calculate the near field moments, in geometry axes, on each panel's bound\n # vortex.\n near_field_moments_geometry_axes = np.cross(\n self.panel_bound_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_geometry_axes,\n axis=-1,\n )\n\n functions.process_steady_solver_forces(\n steady_solver=self,\n near_field_forces_geometry_axes=near_field_forces_geometry_axes,\n near_field_moments_geometry_axes=near_field_moments_geometry_axes,\n )",
"def get_spherical(self):\n\n # Create an scalar Function Space to compute the cylindrical radius (x^2 + y^2)\n # and the angles phi and theta\n S1 = df.FunctionSpace(self.functionspace.mesh(), 'CG', 1)\n\n # Create a dolfin function from the FS\n m_r = df.Function(S1)\n # Compute the radius using the assemble method with dolfin dP\n # (like a dirac delta to get values on every node of the mesh)\n # This returns a dolfin vector\n cyl_vector = df.assemble(df.dot(df.sqrt(self.f[0] * self.f[0] + self.f[1] * self.f[1]),\n df.TestFunction(S1)) * df.dP,\n \n )\n # Set the vector values to the dolfin function\n m_r.vector().set_local(cyl_vector.get_local())\n\n # Now we compute the theta and phi angles to describe the magnetisation\n # and save them to the coresponding variables\n self.theta = df.Function(S1)\n self.phi = df.Function(S1)\n\n # We will use the same vector variable than the one used to\n # compute m_r, in order to save memory\n\n # Theta = arctan(m_r / m_z)\n cyl_vector = df.assemble(df.dot(df.atan_2(m_r, self.f[2]),\n df.TestFunction(S1)) * df.dP,\n tensor=cyl_vector\n )\n\n # Instead of:\n # self.theta.vector().set_local(cyl_vector.get_local())\n # We will use:\n self.theta.vector().axpy(1, cyl_vector)\n # which adds: 1 * cyl_vector\n # to self.theta.vector() and is much faster\n # (we assume self.theta.vector() is empty, i.e. only made of zeros)\n # See: Fenics Book, page 44\n \n # Phi = arctan(m_y / m_x)\n cyl_vector = df.assemble(df.dot(df.atan_2(self.f[1], self.f[0]),\n df.TestFunction(S1)) * df.dP,\n tensor=cyl_vector\n )\n\n # We will save this line just in case:\n # self.phi.vector().set_local(cyl_vector.get_local())\n self.phi.vector().axpy(1, cyl_vector)\n\n return self.theta, self.phi",
"def find_uw_force(uw_centre_x, uw_centre_y, uw_kappa_x, uw_kappa_y, X , Y, min_grid, max_grid, grid_space, periodic):\n\n\t#Calculate x-force\n\tF_wall_x = np.where(X > uw_centre_x, 2 * uw_kappa_x * (X - uw_centre_x), 0)\n\tif periodic == 1:\n\t\tgrid_length = max_grid[0] - min_grid[0]\n\t\tgrid_centre = min_grid[0] + grid_length/2\n\t\tif uw_centre_x < grid_centre:\n\t\t\tindex_period = index(uw_centre_x + grid_length/2, min_grid[0], grid_space)\n\t\t\tF_wall_x[:, index_period:] = 0 \n\t\telif uw_centre_x > grid_centre:\n\t\t\tindex_period = index(uw_centre_x - grid_length/2, min_grid[0], grid_space)\n\t\t\tF_wall_x[:, :index_period] = 2 * uw_kappa_x * (X[:, :index_period] - uw_centre_x + grid_length) \n\t#Calculate y-force\n\tF_wall_y = np.where(Y > uw_centre_y, 2 * uw_kappa_y * (Y - uw_centre_y), 0)\n\tif periodic == 1:\n\t\tif uw_centre_y < grid_centre:\n\t\t\tindex_period = index(uw_centre_y + grid_length/2, min_grid[1], grid_space)\n\t\t\tF_wall_y[index_period:, :] = 0\n\t\telif uw_centre_y > grid_centre:\n\t\t\tindex_period = index(uw_centre_y - grid_length/2, min_grid[1], grid_space)\n\t\t\tF_wall_y[:index_period, :] = 2 * uw_kappa_y * (Y[:index_period, :] - uw_centre_y + grid_length)\n\treturn [F_wall_x, F_wall_y]",
"def calc_force(self):\n\n total_force = 0.\n\n # Loop through elements on the squirmer surface and compute the\n # hydrodynamic stresses on each one\n for elem_S in self.mesh.elems_in_region(\"sphere\"):\n # get the \"bulk\" element adjacent to the surface element.\n _S, elem_V = elem_S.adj_map['*']\n # get the element mapping\n x_cyl = elem_V.get_mapping()\n jac = x_cyl.jacobian()\n detJ, invJ = det_inv_2x2(jac)\n\n # coordinates in cylindrical and polar form\n x_cyl_S = elem_S.get_mapping()\n # let *_S denote quantities defined at the element surface only\n # theta = np.arctan2(x_cyl_S[0], x_cyl_S[1]) # polar angle\n sin_th = x_cyl_S[0] # here, r = 1\n sin2_th = sin_th**2\n cos_th = x_cyl_S[1]\n\n # surface slip velocity\n slip_profile = self.phys_params[\"slip_profile\"]\n vslip = slip_profile(sin_th, cos_th)\n\n # solution for vorticity field\n vort_gl = self.soln_vec[1::2]\n vort = elem_V.get_coeffs(vort_gl)\n\n invJ_S = invJ.get_boundary(_S)\n # compute d{vorticity}/d(xi, eta, ...)\n dw_du_S = vort.jacobian().get_boundary(_S)\n # d(rho, z)/d(xi, eta, ...)\n drhoz_dr_S = x_cyl.get_boundary(_S)\n # d{vorticity}/dr at squirmer surface\n dw_dr_S = np.einsum('im,ijm,jm->m',\n dw_du_S, invJ_S, drhoz_dr_S)\n\n # compute stresses\n vort_S = vort.get_boundary(_S)\n n_rey = self.phys_params[\"N_Re\"]\n bernouli_stress = np.pi * n_rey * vslip**2 * sin_th * cos_th\n w_asym_stress = np.pi * (dw_dr_S + vort_S) * sin2_th\n pressure_stress = bernouli_stress + w_asym_stress\n viscous_stress = -2*np.pi * vort_S * sin2_th\n total_stress = pressure_stress + viscous_stress\n\n # differential arc length\n t_vec = x_cyl_S.jacobian() # tangent vector\n d_arc = np.sqrt(t_vec[0]**2 + t_vec[1]**2)\n # compute integrands\n total_force += bs.CoeffArray.integrate(total_stress * d_arc)\n\n return total_force",
"def _magnetic_potential_Func_Inner(self, x: float, y: float, z: float) -> float:\n V = interp2D(y, z, self.yArrIn, self.z_arrIn, self.VArrIn)\n return V",
"def radialforce(self):\n warnings.warn(\"Check radial force direction in titin\")\n return self.force() * np.sin(self.angle())",
"def get_force(self):\n # @todo: make this work. Probably need to go through self.sim to get pod velocity, etc. \n\n \"\"\"\n # Numerical simulation run at 6 different velocities -- see Keith's graph \n # A34 data -- drag is for both brakes, lift is for one brake. Force_y has to do with the difference in force due to magnetic interactions and can be disregarded\n v = self.sim.pod.velocity\n air_gap = .024 # Should be self.sim.pod.brakes.gap, or brakes[i].gap if we're using an array of brakes, which we probably will\n \n # Fdrag(v) = gap_coefficient * (-e^(-.3*v)+1)*(1.5*e^(-.02*v)+1)\n # gap_coefficient = 5632e^-202gap\n \n # @todo: Either the drag force or the lift force is for a single brake, the other is for both. Which is which? \n gap_coefficient = 5632 * np.exp(-202 * air_gap)\n f_drag = gap_coefficient * (-np.exp(-.3*v) + 1) * (1.5 * np.exp(-.02*v)+1)\n #print \"Brake drag at air gap {}: {}\".format(air_gap, -f_drag)\n \"\"\"\n\n #f_drag = self.sim.brake_1.drag_force * 2 # *2 for both brakes. Just testing right now\n \n f_drag = self.sim.pod.brakes.get_drag() \n \n return self.data(f_drag, 0, 0)",
"def _fiber_length_explicit_musculotendon_dynamics(self):\n self._l_M_tilde = dynamicsymbols(f'l_M_tilde_{self.name}')\n self._l_MT = self.pathway.length\n self._v_MT = self.pathway.extension_velocity\n self._l_M = self._l_M_tilde*self._l_M_opt\n self._l_T = self._l_MT - sqrt(self._l_M**2 - (self._l_M_opt*sin(self._alpha_opt))**2)\n self._l_T_tilde = self._l_T/self._l_T_slack\n self._cos_alpha = (self._l_MT - self._l_T)/self._l_M\n self._fl_T = TendonForceLengthDeGroote2016.with_defaults(self._l_T_tilde)\n self._fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._F_T_tilde = self._fl_T\n self._F_T = self._F_T_tilde*self._F_M_max\n self._F_M = self._F_T/self._cos_alpha\n self._F_M_tilde = self._F_M/self._F_M_max\n self._fv_M = (self._F_M_tilde - self._fl_M_pas)/(self.a*self._fl_M_act)\n self._v_M_tilde = FiberForceVelocityDeGroote2016.with_defaults(self._fv_M)\n self._dl_M_tilde_dt = (self._v_M_max/self._l_M_opt)*self._v_M_tilde\n\n self._state_vars = Matrix([self._l_M_tilde])\n self._input_vars = zeros(0, 1)\n self._state_eqns = Matrix([self._dl_M_tilde_dt])",
"def sky_coord(self):\n ctype1 = self.meta.get('CTYPE1')\n ctype2 = self.meta.get('CTYPE2')\n if self.meta.get('OBJECT_TO_OBJCTRADEC'):\n # First choice is to get precise direction to object we are\n # observing\n ra = self.meta.get('OBJCTRA')\n dec = self.meta.get('OBJCTDEC')\n unit = (u.hourangle, u.deg)\n elif ctype1 and ctype2 and 'RA' in ctype1 and 'DEC' in ctype2:\n # Official plate solutions next preference, if available,\n # though there will be some offset from actual object\n # unless CRVAL* are preset to object (e.g. with a good\n # obj_center)\n ra = self.meta.get('CRVAL1')\n dec = self.meta.get('CRVAL2')\n unit = (self.meta.get('CUNIT1') or u.deg,\n self.meta.get('CUNIT2') or u.deg)\n else:\n # Our standard has been to use RA and DEC as telescope\n # pointing position (e.g. center of FOV) and OBJECT* as\n # the object being pointed to before RAOFF & DECOFF (if\n # any) are applied. This works OK for small offsets, but\n # starts to be troublesome for large ones. Lets assume\n # that small offsets are recorded of sources that can have\n # astrometry done on them, which sets OBJECT_TO_OBJCTRADEC\n # and try to stay in the FOV with this code.\n ra = self.meta.get('RA')\n dec = self.meta.get('DEC')\n # These values are string sexagesimal with RA in hours\n unit = (u.hourangle, u.deg)\n # Fancy conversion to ICRS is likely not done anywhere in IoIO\n # system, so default to FK5 is safe\n radesys = (self.meta.get('RADESYS')\n or self.meta.get('RADECSYS')\n or 'FK5')\n return SkyCoord(ra, dec, unit=unit, frame=radesys.lower())",
"def _evaluate_xyz(self,x,y,z=0.):\n return -1/4/(self.n+1)*self._b*self._c*_potInt(x,y,z,self._a2,self._b2,self._c2,self.n)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Magnetic potential energy of Li7 in simulation units at x, y, z. Pseudo-overrides BaseClassFieldHelper. Symmetry is used to simplify the computation of the potential: either end of the lens is identical, so coordinates falling within a certain range are mapped to an interpolation of the potential at the lens's end. If the lens is long enough, the inner region is modeled as a single plane as well. nan is returned if the coordinate is outside the vacuum tube.
|
def _magnetic_potential(self, x: float, y: float, z: float) -> float:
if not self.is_Coord_Inside_Vacuum(x, y, z):
return np.nan
y = abs(y)
z = abs(z)
    if -self.extra_field_length <= x <= self.L_cap:  # at beginning of lens
        V0 = self._magnetic_potential_Func_Fringe(x, y, z)
    elif self.L_cap < x <= self.L - self.L_cap:  # if long enough, model interior as uniform in x
        V0 = self._magnetic_potential_Func_Inner(x, y, z)
    elif 0 <= x <= self.L + self.extra_field_length:  # at end of lens
        x = self.L - x
        V0 = self._magnetic_potential_Func_Fringe(x, y, z)
else:
raise Exception("Particle outside field region")
V0 *= self.field_fact
return V0
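
For context, the end-to-end reflection described in the query above amounts to folding coordinates near the far end of the lens back onto the near-end fringe region. A minimal standalone sketch of that mapping, with hypothetical argument names, is:

def fold_onto_near_end(x: float, L: float, L_cap: float) -> float:
    # Map an axial coordinate near the far end of the lens onto the equivalent
    # near-end coordinate, so one fringe-field interpolation serves both ends.
    if x > L - L_cap:
        return L - x  # reflect the far end onto the near-end fringe region
    return x  # near-end fringe and interior coordinates are left unchanged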
|
[
"def earthmagnetic(self, *args, **kwargs):\n return _measures.measures_earthmagnetic(self, *args, **kwargs)",
"def potential_energy(self):\n m_s = self.arr_.m_s\n x_s = self.arr_.x_s\n y_s = self.arr_.y_s\n z_s = self.arr_.z_s\n\n m_dm = self.arr_.m_dm\n x_dm = self.arr_.x_dm\n y_dm = self.arr_.y_dm\n z_dm = self.arr_.z_dm\n\n m_g = self.arr_.m_g\n x_g = self.arr_.x_g\n y_g = self.arr_.y_g\n z_g = self.arr_.z_g\n\n pot_s = self.arr_.pot_s\n pot_dm = self.arr_.pot_dm\n pot_g = self.arr_.pot_g\n\n pot_s = self.arr_.pot_s\n pot_dm = self.arr_.pot_dm\n pot_g = self.arr_.pot_g\n\n eps_s = self.arr_.eps_s\n eps_dm = self.arr_.eps_dm\n eps_g = self.arr_.eps_g\n\n potential = np.concatenate([pot_s, pot_dm, pot_g])\n\n if np.all(potential == 0.0):\n x = np.hstack((x_s, x_dm, x_g))\n y = np.hstack((y_s, y_dm, y_g))\n z = np.hstack((z_s, z_dm, z_g))\n m = np.hstack((m_s, m_dm, m_g))\n eps = np.max([eps_s, eps_dm, eps_g])\n\n pot = utils.potential(\n da.asarray(x, chunks=100),\n da.asarray(y, chunks=100),\n da.asarray(z, chunks=100),\n da.asarray(m, chunks=100),\n da.asarray(eps),\n )\n\n num_s = len(m_s)\n num = len(m_s) + len(m_dm)\n\n pot_s = pot[:num_s]\n pot_dm = pot[num_s:num]\n pot_g = pot[num:]\n\n new = attr.asdict(self, recurse=False)\n del new[\"arr_\"]\n new.update(\n pot_s=-pot_s * (u.km / u.s) ** 2,\n pot_dm=-pot_dm * (u.km / u.s) ** 2,\n pot_g=-pot_g * (u.km / u.s) ** 2,\n )\n\n return Galaxy(**new)\n\n else:\n raise ValueError(\"Potentials are already calculated\")",
"def _magnetic_potential_Func_Inner(self, x: float, y: float, z: float) -> float:\n V = interp2D(y, z, self.yArrIn, self.z_arrIn, self.VArrIn)\n return V",
"def get_efield(self):\n n = self.n\n ng = self.ngs\n vecs = self.vecs\n vol_per_cube = (vecs[0]/ng[0]) * (vecs[1]/ng[1]) * (vecs[2]/ng[2])\n\n pgrid = self.pot_grid\n res = [vecs[0]/ng[0], vecs[1]/ng[1], vecs[2]/ng[2]]\n\n grad_x, grad_y, grad_z = np.gradient(pgrid[:,:,:], res[0], res[1], res[2])\n\n xy = np.multiply(grad_x, grad_y)\n grad_mag = np.multiply(xy, grad_z)\n # grad_mag = mf.grad_magnitude(grad_x, grad_y, grad_z)\n print(grad_mag.shape)\n self.efield = grad_mag",
"def calc_electronic_energy(self, verbose=False):\n\n if self.Gelec is None:\n if self.energy_source == 'datafile':\n with open(self.path, 'r') as file:\n lines = file.readlines()\n self.Gelec = float(lines[0].split('eV')[0])\n else:\n if isinstance(self.read_from_alternate, dict):\n if 'get_electronic_energy' in self.read_from_alternate.keys():\n self.Gelec = self.read_from_alternate['get_electronic_energy']()\n if self.Gelec is None:\n if self.atoms is None:\n self.get_atoms()\n self.Gelec = self.atoms.get_potential_energy(force_consistent=True)",
"def e_magtot(self):\n if self.ref_flux is not None and self.ref_flux > 0 and self.ref_fluxerr > 0:\n flux = self.fluxes\n err = self.fluxerr\n bad_idx = np.isnan(flux) | np.isnan(err) | (flux <= 0) | (err <= 0)\n magerr = np.sqrt(err**2 + self.ref_fluxerr**2)\n magerr /= self.ref_flux + flux\n magerr *= 2.5 / np.log(10)\n magerr[bad_idx] = np.nan\n\n return magerr\n else:\n return None",
"def calculate_near_field_forces_and_moments(self):\n # Calculate the velocities induced at every panel's bound vortex center.\n induced_velocities = aerodynamics.collapsed_velocities_from_horseshoe_vortices(\n points=self.panel_bound_vortex_centers,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=self.vortex_strengths,\n )\n\n # Add the freestream velocity to the induced velocities to calculate the\n # total velocity at every panel's bound vortex center.\n total_velocities = induced_velocities + self.freestream_velocity\n\n # Calculate the near field force, in geometry axes, on each panel's bound\n # vortex.\n near_field_forces_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(self.vortex_strengths, axis=1)\n * np.cross(total_velocities, self.panel_bound_vortex_vectors, axis=-1)\n )\n\n # Calculate the near field moments, in geometry axes, on each panel's bound\n # vortex.\n near_field_moments_geometry_axes = np.cross(\n self.panel_bound_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_geometry_axes,\n axis=-1,\n )\n\n functions.process_steady_solver_forces(\n steady_solver=self,\n near_field_forces_geometry_axes=near_field_forces_geometry_axes,\n near_field_moments_geometry_axes=near_field_moments_geometry_axes,\n )",
"def _compute_kinetic_energy_cell(self):\n return self.b_masses_cell * self.b_velocities_cell ** 2",
"def calc_electronic_energy(self, verbose=False):\n\n assert(self.scaling_reactions is not None)\n assert(self.scaling_coeffs is not None)\n\n self.Gelec = self.scaling_coeffs['intercept']\n\n for r in self.scaling_reactions.values():\n dEIS = r['reaction'].get_reaction_energy(T=273,\n p=1.0e5,\n verbose=verbose,\n etype='electronic') / (eVtokJ * 1.0e3)\n if self.dereference:\n ref_EIS = sum([reac.Gelec for reac in r['reaction'].reactants])\n else:\n ref_EIS = 0.0\n if 'multiplicity' not in r.keys():\n r['multiplicity'] = 1.0\n self.Gelec += r['multiplicity'] * (self.scaling_coeffs['gradient'] * dEIS + ref_EIS)\n\n if verbose:\n print((self.name + ' elec: %1.2f eV') % self.Gelec)",
"def get_velocity_exo(velocity_Earth, dist_Earth_sun, dist_exo_star):\n return velocity_Earth * sqrt(dist_Earth_sun / dist_exo_star) # km/s",
"def electric_field(q, r, x, y):\n\n return q * (x - r[0]) / np.hypot(x - r[0], y - r[1]) ** 3, q * (y - r[1]) / np.hypot(x - r[0], y - r[1]) ** 3",
"def magnetic_field(self, xy, field=\"secondary\"):\n sig = self.sigma_hat # (n_freq, )\n f = self.frequency\n w = 2*np.pi*f\n k = np.sqrt(-1j*w*mu_0*sig)[:, None] # This will get it to broadcast over locations\n dxy = xy[:, :2] - self.location[:2]\n r = np.linalg.norm(dxy, axis=-1)\n x = dxy[:, 0]\n y = dxy[:, 1]\n\n em_x = em_y = em_z = 0\n src_x, src_y, src_z = self.orientation\n # Z component of source\n alpha = 1j*k*r/2.\n IK1 = iv(1, alpha)*kv(1, alpha)\n IK2 = iv(2, alpha)*kv(2, alpha)\n if src_z != 0.0:\n em_z += src_z*2.0/(k**2*r**5)*(9-(9+9*1j*k*r-4*k**2*r**2-1j*k**3*r**3)*np.exp(-1j*k*r))\n Hr = (k**2/r)*(IK1 - IK2)\n angle = np.arctan2(y, x)\n em_x += src_z*np.cos(angle)*Hr\n em_y += src_z*np.sin(angle)*Hr\n\n if src_x != 0.0 or src_y != 0.0:\n # X component of source\n phi = 2/(k**2*r**4)*(3 + k**2*r**2 - (3 + 3j*k*r - k**2*r**2)*np.exp(-1j*k*r))\n dphi_dr = 2/(k**2*r**5)*(-2*k**2*r**2 - 12 + (-1j*k**3*r**3 - 5*k**2*r**2 + 12j*k*r + 12)*np.exp(-1j*k*r))\n if src_x != 0.0:\n em_x += src_x*(-1.0/r**3)*(y**2*phi + x**2*r*dphi_dr)\n em_y += src_x*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_z -= src_x*(k**2*x/r**2)*(IK1 - IK2)\n\n # Y component of source\n if src_y != 0.0:\n em_x += src_y*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_y += src_y*(-1.0/r**3)*(x**2*phi + y**2*r*dphi_dr)\n em_z -= src_y*(k**2*y/r**2)*(IK1 - IK2)\n\n if field == \"secondary\":\n # subtract out primary field from above\n mdotr = src_x*x + src_y*y# + m[2]*(z=0)\n\n em_x -= 3*x*mdotr/r**5 - src_x/r**3\n em_y -= 3*y*mdotr/r**5 - src_y/r**3\n em_z -= -src_z/r**3 # + 3*(z=0)*mdotr/r**5\n\n return self.moment/(4*np.pi)*np.stack((em_x, em_y, em_z), axis=-1)",
"def calculate_near_field_forces_and_moments(self):\n\n # Initialize a variable to hold the global panel position as the panel's are\n # iterate through.\n global_panel_position = 0\n\n # Initialize three lists of variables, which will hold the effective strength\n # of the line vortices comprising\n # each panel's ring vortex.\n effective_right_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_front_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n effective_left_vortex_line_strengths = np.zeros(self.airplane.num_panels)\n\n # Iterate through the current_airplane's wings.\n for wing in self.airplane.wings:\n\n # Convert this wing's 2D array of panels into a 1D array.\n panels = np.ravel(wing.panels)\n\n # Iterate through this wing's 1D array panels.\n for panel in panels:\n\n # Check if this panel is on its wing's right edge.\n if panel.is_right_edge:\n\n # Change the effective right vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_right_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n\n else:\n\n # Get the panel directly to the right of this panel.\n panel_to_right = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position + 1,\n ]\n\n # Change the effective right vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the right of it.\n effective_right_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_right.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's leading edge.\n if panel.is_leading_edge:\n\n # Change the effective front vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_front_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly in front of this panel.\n panel_to_front = wing.panels[\n panel.local_chordwise_position - 1,\n panel.local_spanwise_position,\n ]\n\n # Change the effective front vortex line strength from zero to\n # the difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel in front of it.\n effective_front_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_front.ring_vortex.strength\n )\n\n # Check if this panel is on its wing's left edge.\n if panel.is_left_edge:\n\n # Change the effective left vortex line strength from zero to\n # this panel's ring vortex's strength.\n effective_left_vortex_line_strengths[\n global_panel_position\n ] = self.vortex_strengths[global_panel_position]\n else:\n\n # Get the panel directly to the left of this panel.\n panel_to_left = wing.panels[\n panel.local_chordwise_position,\n panel.local_spanwise_position - 1,\n ]\n\n # Change the effective left vortex line strength from zero to the\n # difference between this panel's\n # ring vortex's strength, and the ring vortex strength of the\n # panel to the left of it.\n effective_left_vortex_line_strengths[global_panel_position] = (\n self.vortex_strengths[global_panel_position]\n - panel_to_left.ring_vortex.strength\n )\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Calculate the solution velocities at the centers of the panel's front leg,\n # left leg, and right leg.\n velocities_at_ring_vortex_front_leg_centers = 
self.calculate_solution_velocity(\n points=self.panel_front_vortex_centers\n )\n velocities_at_ring_vortex_left_leg_centers = self.calculate_solution_velocity(\n points=self.panel_left_vortex_centers\n )\n velocities_at_ring_vortex_right_leg_centers = self.calculate_solution_velocity(\n points=self.panel_right_vortex_centers\n )\n\n # Using the effective line vortex strengths, and the Kutta-Joukowski theorem\n # to find the near field force in\n # geometry axes on the front leg, left leg, and right leg.\n near_field_forces_on_ring_vortex_right_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_right_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_right_leg_centers,\n self.panel_right_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_front_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_front_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_front_leg_centers,\n self.panel_front_vortex_vectors,\n axis=-1,\n )\n )\n near_field_forces_on_ring_vortex_left_legs_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(effective_left_vortex_line_strengths, axis=1)\n * np.cross(\n velocities_at_ring_vortex_left_leg_centers,\n self.panel_left_vortex_vectors,\n axis=-1,\n )\n )\n\n # Sum the forces on the legs to calculate the total near field force,\n # in geometry axes, on each panel.\n near_field_forces_geometry_axes = (\n near_field_forces_on_ring_vortex_front_legs_geometry_axes\n + near_field_forces_on_ring_vortex_left_legs_geometry_axes\n + near_field_forces_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Find the near field moment in geometry axes on the front leg, left leg,\n # and right leg.\n near_field_moments_on_ring_vortex_front_legs_geometry_axes = np.cross(\n self.panel_front_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_front_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_left_legs_geometry_axes = np.cross(\n self.panel_left_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_left_legs_geometry_axes,\n axis=-1,\n )\n near_field_moments_on_ring_vortex_right_legs_geometry_axes = np.cross(\n self.panel_right_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_on_ring_vortex_right_legs_geometry_axes,\n axis=-1,\n )\n\n # Sum the moments on the legs to calculate the total near field moment,\n # in geometry axes, on each panel.\n near_field_moments_geometry_axes = (\n near_field_moments_on_ring_vortex_front_legs_geometry_axes\n + near_field_moments_on_ring_vortex_left_legs_geometry_axes\n + near_field_moments_on_ring_vortex_right_legs_geometry_axes\n )\n\n # Initialize a variable to hold the global panel position.\n global_panel_position = 0\n\n # Iterate through this solver's panels.\n for panel in self.panels:\n # Update the force and moment on this panel.\n panel.near_field_force_geometry_axes = near_field_forces_geometry_axes[\n global_panel_position, :\n ]\n panel.near_field_moment_geometry_axes = near_field_moments_geometry_axes[\n global_panel_position, :\n ]\n\n # Update the pressure on this panel.\n panel.update_pressure()\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Sum up the near field forces and moments on every panel to find the total\n # force and moment on the geometry.\n total_near_field_force_geometry_axes = np.sum(\n near_field_forces_geometry_axes, axis=0\n )\n total_near_field_moment_geometry_axes = np.sum(\n 
near_field_moments_geometry_axes, axis=0\n )\n\n # Find the total near field force in wind axes from the rotation matrix and\n # the total near field force in\n # geometry axes.\n self.airplane.total_near_field_force_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_force_geometry_axes\n )\n\n # Find the total near field moment in wind axes from the rotation matrix and\n # the total near field moment in\n # geometry axes.\n self.airplane.total_near_field_moment_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_moment_geometry_axes\n )\n\n # Calculate the current_airplane's induced drag coefficient\n induced_drag_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's side force coefficient.\n side_force_coefficient = (\n self.airplane.total_near_field_force_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's lift coefficient.\n lift_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's rolling moment coefficient.\n rolling_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n # Calculate the current_airplane's pitching moment coefficient.\n pitching_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.c_ref\n )\n\n # Calculate the current_airplane's yawing moment coefficient.\n yawing_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n self.airplane.total_near_field_force_coefficients_wind_axes = np.array(\n [induced_drag_coefficient, side_force_coefficient, lift_coefficient]\n )\n self.airplane.total_near_field_moment_coefficients_wind_axes = np.array(\n [\n rolling_moment_coefficient,\n pitching_moment_coefficient,\n yawing_moment_coefficient,\n ]\n )",
"def gravitational_field(self, xyz):\n xyz = check_xyz_dim(xyz)\n r_vec = xyz - self.location\n r = np.linalg.norm(r_vec, axis=-1)\n g_vec = np.zeros((*r.shape, 3))\n ind0 = r > self.radius\n g_vec[ind0] = super().gravitational_field(xyz[ind0])\n g_vec[~ind0] = -G * 4 / 3 * np.pi * self.rho * r_vec[~ind0]\n return g_vec",
"def calculate_energy(self, field=0., exch_energy = J_e, mag_moment = mu_e) :\n\n\t\t#find the spin interactions\n\t\tspin_interaction = 0 \n\n\t\tfor neighbour in self.nearest_neighbours :\n\n\t\t\tspin_interaction += -1. * exch_energy * neighbour.spin * self.spin * h_bar ** 2 \n\n\t\t#Find the field contribution\n\t\tfield_contribution = -1. * self.spin*h_bar * mag_moment * field \n\n\t\treturn spin_interaction + field_contribution",
"def velocity(self):\n if self.vmax > 0:\n mod = VelField(x_0=self.x_0,\n y_0=self.y_0,\n r_eff=self.r_eff,\n ellip=self.ellip,\n theta=self.theta,\n vmax=self.vmax,\n q=self.q)\n result = mod(self.x, self.y)\n else:\n result = np.ones(shape=self.x.shape)\n\n return result",
"def AtmosphericNeutrons(self, E):\n\n \"\"\" Solar activity calculated from the solar modulation\n as linear between minimum and maximum (page 10 Kole et al. 2015)\n \"\"\"\n solac = (self.solmod - 250.0)/859.0\n\n Pressure = 0. # in hPa\n\n EnergyMeV = 0.001*np.copy(np.asarray(E, dtype=float))\n Flux = np.copy(np.asarray(E, dtype=float))\n\n a = 0.0003 + (7.0-5.0*solac)*0.001*(1-np.tanh(np.deg2rad(180-4.0*self.geomlat)))\n b = 0.0140 + (1.4-0.9*solac)*0.1*(1-np.tanh(np.deg2rad(180-3.5*self.geomlat)))\n c = 180 - 42*(1-np.tanh(np.deg2rad(180-5.5*self.geomlat)))\n d = -0.008 + (6.0-1.0*solac)*0.001*(1-np.tanh(np.deg2rad(180-4.4*self.geomlat)))\n\n Slope1 = -0.29 * np.exp(-Pressure/7.5) + 0.735\n Norm1 = (a*Pressure + b)*np.exp(-Pressure/c) + d\n Mask1 = EnergyMeV < 0.9\n\n Slope2 = -0.247 * np.exp(-Pressure/36.5) + 1.4\n Norm2 = Norm1*pow(0.9, -Slope1+Slope2)\n Mask2 = np.logical_and(EnergyMeV >= 0.9, EnergyMeV < 15)\n\n Slope3 = -0.40 * np.exp(-Pressure/40.0) + 0.9\n Norm3 = Norm2*pow(15, -Slope2+Slope3)\n Mask3 = np.logical_and(EnergyMeV >= 15, EnergyMeV < 70)\n\n Slope4 = -0.46 * np.exp(-Pressure/100.0) + 2.53\n Norm4 = Norm3*pow(70, -Slope3+Slope4)\n Mask4 = EnergyMeV >= 70\n\n Flux[Mask1] = Norm1 * pow(EnergyMeV[Mask1], -Slope1)\n Flux[Mask2] = Norm2 * pow(EnergyMeV[Mask2], -Slope2)\n Flux[Mask3] = Norm3 * pow(EnergyMeV[Mask3], -Slope3)\n Flux[Mask4] = Norm4 * pow(EnergyMeV[Mask4], -Slope4)\n\n try:\n self.LowENeutrons\n except AttributeError:\n self.LingenfelterNeutrons()\n\n data = self.LowENeutrons\n f = self.log_interp1d(data[\"Ener(MeV)\"].loc[data['Flux(n/cm2MeVs)'] > 0.],\n data[\"Flux(n/cm2MeVs)\"].loc[data['Flux(n/cm2MeVs)'] > 0.])\n\n LowEnergyNeutron = self.LingenfelterNeutrons\n\n Scaler = (Norm1 * pow(0.008, -Slope1))/f(0.008)\n\n Flux[EnergyMeV < 0.008] = f(EnergyMeV[EnergyMeV < 0.008]) * Scaler\n\n # View angle of the atmosphere = 4 PI - 2 PI (1-cos(HorizonAngle))\n AngleFactor = 2*np.pi * (np.cos(np.deg2rad(self.HorizonAngle)) + 1)\n\n return Flux / (AngleFactor * 1000.0) # Switch from n/MeV/cm2/s to n/keV/cm2/s/sr.",
"def f_molGas_dyn(self):\n# print self.M_gas, self.M_dyn\n return self.M_gas / self.M_dyn",
"def potentialmat(self):\n potential_mat = -0.5 * self.EJ1 * np.kron(self._exp_i_phi_operator() + self._exp_i_phi_operator().T,\n self._identity())\n potential_mat += -0.5 * self.EJ2 * np.kron(self._identity(),\n self._exp_i_phi_operator() + self._exp_i_phi_operator().T)\n potential_mat += -0.5 * self.EJ3 * (np.exp(1j * 2 * np.pi * self.flux)\n * np.kron(self._exp_i_phi_operator(), self._exp_i_phi_operator().T))\n potential_mat += -0.5 * self.EJ3 * (np.exp(-1j * 2 * np.pi * self.flux)\n * np.kron(self._exp_i_phi_operator().T, self._exp_i_phi_operator()))\n return potential_mat"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make brushes for spots with different alpha factors.
|
def makeBrushes(self):
self.brushes = []
deltaAlpha = self.maxAlpha - self.minAlpha
slope = deltaAlpha / (self.dataSize - 1)
for i in range(self.dataSize):
alpha = slope * i + self.minAlpha
self.brushes.append(mkBrush(*self.brushColor, int(alpha)))
#c = int(alpha)
#self.brushes.append(mkBrush(c, c, c, self.maxAlpha))
|
[
"def build_billboard(self, tex):\n img = Image.new(\"RGBA\", (24,24), self.bgcolor)\n\n front = tex.resize((14, 12), Image.ANTIALIAS)\n alpha_over(img, front, (5,9))\n return img",
"def fence(x, y, l, w, item):\r\n for a in range(x, l + x, 10):\r\n for b in range(y, w + y, 10):\r\n main_canvas.create_image(a, b, image=item, anchor=NW)",
"def applyBrush(self,pos,color,radius = 3,hardness=.75):\n brush = generateBrush(radius, hardness)\n for i in range(0, int(2*radius+1)):\n for j in range(0, int(2*radius+1)):\n x = pos[0] + (i - radius)\n y = pos[1] + (j - radius)\n if x >= 0 and x < self._width and y >= 0 and y < self._height:\n weight = brush[i,j]/255.\n weighted_color = (color[0], color[1], color[2], int(color[3]*weight))\n self._data[x,y] = blendRGBA(weighted_color, self._data[x,y])",
"def draw_tile_backgrounds(self, tiles):\n\n def process_tile(tile):\n h = tile.height\n h_index = (h - self.parent.min_height) / (self.parent.max_height - self.parent.min_height)\n\n rgb_rand_1 = random.randint(0, self.ocean_noise)\n\n height_rgb = [0, 0, 0]\n height_rgb[0] = self.height_rgb_low[0] + h_index * (self.height_rgb_high[0] - self.height_rgb_low[0])\n height_rgb[1] = self.height_rgb_low[1] + h_index * (self.height_rgb_high[1] - self.height_rgb_low[1])\n height_rgb[2] = self.height_rgb_low[2] + h_index * (self.height_rgb_high[2] - self.height_rgb_low[2])\n\n water_rgb = (rgb_rand_1, rgb_rand_1, 255)\n if self.screen_mode == \"dark\":\n water_rgb = (rgb_rand_1 // 2, rgb_rand_1 // 2, 150)\n if self.screen_mode == \"martin\":\n water_rgb = (195 + rgb_rand_1 * 0.5, 234 + rgb_rand_1 * 0.5, 251)\n\n fillColors = [\n height_rgb, # Ground\n height_rgb, # Rail\n self.road_tile_rgb, # Road\n height_rgb, # Town building\n height_rgb, # Trees\n self.station_rgb, # Stations\n water_rgb, # Water\n height_rgb, # Void\n self.industry_rgb, # Industries\n self.torb_rgb, # Tunnel/bridge\n height_rgb, # Objects\n ]\n fillColor = fillColors[tile.kind % len(fillColors)]\n if tile.kind == 1:\n rail = tile.occupant\n if rail.is_depot:\n fillColor = self.rail_depot_rgb\n\n if tile.kind == 5:\n station = tile.occupant\n if station.station_type == 0:\n fillColor = self.rail_station_rgb\n if station.station_type == 1:\n fillColor = self.airport_rgb\n if station.station_type == 2:\n fillColor = self.bus_station_rgb\n if station.station_type == 3:\n fillColor = self.truck_station_rgb\n if station.station_type == 4:\n fillColor = self.heliport_rgb\n if station.station_type == 5:\n fillColor = self.seaport_rgb\n\n self.draw_square(tile, fillColor)\n if tile.kind == 1:\n rail = tile.occupant\n if not rail.is_depot:\n self.draw_rail_background(tile)\n\n if self.parent.show_progress_bar:\n with alive_bar(len(tiles)) as abar:\n for tile in tiles:\n process_tile(tile)\n abar()\n else:\n for tile in tiles:\n process_tile(tile)",
"def make_bricks(small, big, goal):\n return (goal - big*5 - small <= 0) and (goal % 5 - small <= 0)",
"def _create_blended(self):\n hm_cpy = self.heat_map.copy()\n # Make filtered channels to be neutral after blend\n for c in range(self.hm_lvl):\n hm_cpy[:, :, c] = GREY_BLEND\n blended = cv2.addWeighted(self.im, 0.5, hm_cpy, 0.5, 0)\n self.output_filename = \"blended_{}.jpg\".format(self.top1_label)\n cv2.imwrite(os.path.join(\"media/\", self.output_filename), blended)",
"def testBlends(self):\n \"\"\"\n We create another object next to the one of interest,\n joined by a bridge so that they're part of the same\n footprint. The extra object should be masked.\n \"\"\"\n self.checkCandidateMasking([(self.x+2, self.y, 1.0)], [(self.x+1, self.y, 0.5)])",
"def newShapes(board):\r\n\r\n brd = board.brd\r\n\r\n #drop all shapes to lowest available slot first\r\n for x in range(1,9): \r\n for y in range(1,9):\r\n if int(brd[(x,y)].shape)==0:\r\n for n in range(y+1, 9):\r\n if not int(brd[(x,n)].shape)==0:\r\n brd[(x,n)].row = y\r\n brd[(x,y)].row = n\r\n brd[(x,y)], brd[(x,n)] = brd[(x,n)], brd[(x,y)]\r\n break\r\n \r\n #now fill shapes in from the top down\r\n for x in range(8,0,-1):\r\n for y in range(8,0,-1):\r\n if not int(brd[(x,y)].shape):\r\n C = brd[(x,y)]\r\n \r\n imgs = ['imgs/h1.png','imgs/S2.png','imgs/L3.png','imgs/G4.png',\r\n 'imgs/T5.png','imgs/W6.png','imgs/Z7.png']\r\n \r\n bimgs = ['imgs/h1d.png','imgs/S2d.png','imgs/L3d.png','imgs/G4d.png',\r\n 'imgs/T5d.png','imgs/W6d.png','imgs/Z7d.png']\r\n \r\n img = random.randint(1,7)\r\n\r\n X = .1*x\r\n Y = 1\r\n \r\n I = shapeTog(pos_hint={'x': X, 'y': Y},\r\n row = y,\r\n column = x,\r\n shape = img,\r\n background_normal= imgs[img-1],\r\n background_down= bimgs[img-1])\r\n\r\n board.add_widget(I)\r\n brd[(x,y)] = I",
"def spice_bloom(self, x=-1, y=-1):\n w, h = self.surface.get_size()\n if x < 0 or x >= w or y < 0 or y >= h:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n r, g, b, a = self.surface.get_at((x, y))\n while (r, g, b) != self.sand_colour and (r, g, b) != self.spice_colour:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n r, g, b, a = self.surface.get_at((x, y))\n if (r, g, b) == self.sand_colour:\n self.surface.set_at((x, y), self.spice_colour)\n else:\n if self.surface.get_at((x+1, y)) == self.sand_colour:\n self.surface.set_at((x+1, y), self.spice_colour)\n if self.surface.get_at((x+1, y+1)) == self.sand_colour:\n self.surface.set_at((x+1, y+1), self.spice_colour)\n if self.surface.get_at((x, y+1)) == self.sand_colour:\n self.surface.set_at((x, y+1), self.spice_colour)\n if self.surface.get_at((x-1, y+1)) == self.sand_colour:\n self.surface.set_at((x-1, y+1), self.spice_colour)\n if self.surface.get_at((x-1, y)) == self.sand_colour:\n self.surface.set_at((x-1, y), self.spice_colour)\n if self.surface.get_at((x-1, y-1)) == self.sand_colour:\n self.surface.set_at((x-1, y-1), self.spice_colour)\n if self.surface.get_at((x, y-1)) == self.sand_colour:\n self.surface.set_at((x, y-1), self.spice_colour)\n if self.surface.get_at((x+1, y-1)) == self.sand_colour:\n self.surface.set_at((x+1, y-1), self.spice_colour)",
"def set_bricks(self):\n for c in range(BRICKS_IN_ROW):\n for q in range(BRICK_ROWS):\n self._bricks.append(GRectangle(y=GAME_HEIGHT-\n (BRICK_Y_OFFSET+(BRICK_SEP_V+BRICK_HEIGHT)*(q+1)),\n x=BRICK_SEP_H/2.0+c*(float(BRICK_WIDTH)+float(BRICK_SEP_H)),\n linecolor=BRICK_COLORS[q%10], fillcolor=BRICK_COLORS[q%10],\n height=BRICK_HEIGHT, width=BRICK_WIDTH))\n self.view.add(GImage(size=(GAME_WIDTH,GAME_HEIGHT),x=0,y=0,\n source=\"futurama\" + str(random.randrange(10)) + \".png\"))\n for p in self._bricks:\n self.view.add(p)",
"def clump_walls():\n # For this, we ignore all of Valve's wall textures.\n # We then start making clumps.\n # These are 2x2x4 maximum rectangular areas (configurable), which all get\n # the same texture. We don't overwrite previously-set ones though.\n # After that, we fill in any unset textures with the white/black_gap ones.\n # This makes it look like those areas were patched up\n # The floor and ceiling are made normally.\n\n # Additionally, we are able to nodraw all attached faces.\n walls = {}\n\n # we keep a list for the others, so we can nodraw them if needed\n others = {}\n\n texture_lock = get_bool_opt('tile_texture_lock', True)\n\n for solid in VMF.iter_wbrushes(world=True, detail=True):\n # first build a dict of all textures and their locations...\n for face in solid:\n mat = face.mat.casefold()\n if mat in (\n 'glass/glasswindow007a_less_shiny',\n 'metal/metalgrate018',\n 'anim_wp/framework/squarebeams',\n 'tools/toolsnodraw',\n 'anim_wp/framework/backpanels_cheap'\n ):\n # These textures aren't wall textures, and usually never\n # use random textures. Don't add them here. They also aren't\n # on grid.\n alter_mat(face)\n continue\n\n if face.mat in GOO_TEX:\n # For goo textures, don't add them to the dicts\n # or floors will be nodrawed.\n alter_mat(face)\n break\n\n origin = face.get_origin().as_tuple()\n orient = get_face_orient(face)\n if orient is ORIENT.wall:\n # placeholder to indicate these can be replaced.\n if mat in WHITE_PAN:\n face.mat = \"WHITE\"\n elif mat in BLACK_PAN:\n face.mat = \"BLACK\"\n if origin in walls:\n # The only time two textures will be in the same\n # place is if they are covering each other -\n # nodraw them both and ignore them\n face.mat = \"tools/toolsnodraw\"\n walls[origin].mat = \"tools/toolsnodraw\"\n del walls[origin]\n else:\n walls[origin] = face\n else:\n if origin in others:\n # The only time two textures will be in the same\n # place is if they are covering each other - delete\n # them both.\n face.mat = \"tools/toolsnodraw\"\n others[origin].mat = \"tools/toolsnodraw\"\n del others[origin]\n else:\n others[origin] = face\n alter_mat(face, face_seed(face), texture_lock)\n\n todo_walls = len(walls) # number of walls un-edited\n clump_size = int(get_opt(\"clump_size\"))\n clump_wid = int(get_opt(\"clump_width\"))\n clump_numb = (todo_walls // clump_size) * int(get_opt(\"clump_number\"))\n wall_pos = sorted(list(walls.keys()))\n random.seed(MAP_SEED)\n for _ in range(clump_numb):\n pos = random.choice(wall_pos)\n wall_type = walls[pos].mat\n pos = Vec(pos) // 128 * 128\n ':type pos: Vec'\n state = random.getstate() # keep using the map_seed for the clumps\n if wall_type == \"WHITE\" or wall_type == \"BLACK\":\n random.seed(pos.as_tuple())\n pos_min = Vec()\n pos_max = Vec()\n # these are long strips extended in one direction\n direction = random.randint(0, 2)\n for i in range(3):\n if i == direction:\n dist = clump_size\n else:\n dist = clump_wid\n pos_min[i] = int(\n pos[i] - random.randint(0, dist) * 128)\n pos_max[i] = int(\n pos[i] + random.randint(0, dist) * 128)\n\n tex = get_tex(wall_type.lower() + '.wall')\n # Loop though all these grid points, and set to the given\n # texture if they have the same wall type\n for pos, side in walls.items():\n if pos_min <= Vec(pos) <= pos_max and side.mat == wall_type:\n side.mat = tex\n if not texture_lock:\n reset_tex_offset(side)\n # Return to the map_seed state.\n random.setstate(state)\n\n for pos, face in walls.items():\n random.seed(pos)\n # We missed these ones!\n if face.mat == 
\"WHITE\":\n # Allow using special textures for these, to fill in gaps.\n if not get_tex(\"special.white_gap\") == \"\":\n face.mat = get_tex(\"special.white_gap\")\n else:\n face.mat = get_tex(\"white.wall\")\n elif face.mat == \"BLACK\":\n if not get_tex(\"special.black_gap\") == \"\":\n face.mat = get_tex(\"special.black_gap\")\n else:\n face.mat = get_tex(\"black.wall\")\n else:\n alter_mat(face, seed=pos, texture_lock=texture_lock)",
"def draw_spot_light_gray(img, blobs, size=0.2):\n\n if len(img.shape) == 3:\n\n img = rgb2grey(img)\n\n alpha_img = zeros_like(img)\n temp_img = zeros_like(img)\n\n for blob in blobs:\n\n y, x, r = blob\n rr, cc = circle(y, x, size)\n\n temp_img[rr, cc] += 0.5 * r\n alpha_img[rr, cc] = 1.\n\n kernel = getGaussianKernel(2, 1) * getGaussianKernel(2, 1).T\n\n _, ax = plt.subplots(1,3)\n\n ax[0].imshow(temp_img.copy())\n\n temp_img = filter2D(temp_img, -1, kernel)\n alpha_img = filter2D(alpha_img, -1, kernel)\n ax[1].imshow(temp_img)\n\n ax[2].imshow(alpha_img)\n\n plt.show()\n\n new_img = img.copy()\n\n new_img[temp_img > 0] = 0.2 * (temp_img[temp_img > 0] - temp_img[temp_img > 0].min())/(temp_img[temp_img > 0].max() - temp_img[temp_img > 0].min()) + 0.8\n new_img[temp_img > 0] = (1 - alpha_img[temp_img > 0]) * img[temp_img > 0] + alpha_img[temp_img > 0] * new_img[temp_img > 0]\n\n return new_img",
"def _blend_borders(self, feature_name, matrix, blending_time=25):\n blending_time = 1/1000*blending_time\n phone_borders = [phone[2] for phone in self.label.cur_phones_additions()]\n\n last_time = phone_borders[-1]\n last_index = self._len_phones[feature_name][-1][1]\n step = last_time/last_index\n\n for i in range(len(phone_borders)):\n if i == 0 or i == len(phone_borders)-1:\n continue\n\n if phone_borders[i]-blending_time < phone_borders[i-1] or phone_borders[i]+blending_time > phone_borders[i+1]:\n continue\n\n start = phone_borders[i] - blending_time\n end = phone_borders[i] + blending_time\n\n blend_index_start = round(start/step)\n blend_index_end = round(end/step)-1\n\n blend_start_values = matrix[blend_index_start, :]\n blend_end_values= matrix[blend_index_end, :]\n blend_factors = np.linspace(1,0, blend_index_end-blend_index_start)\n\n for j in range(len(blend_factors)):\n blend_factor = blend_factors[j]\n matrix[blend_index_start+j, :] = blend_factor*blend_start_values[:] + (1-blend_factor)*blend_end_values[:]\n\n return matrix",
"def vector_brushes(self) -> List[VectorBrush]:\n return self.__vector_brushes",
"def draw_multicolor_square(t,sz):\r\n for i in [\"red\",\"purple\",\"hotpink\",\"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)",
"def build_block(self, top, side):\n img = Image.new(\"RGBA\", (24,24), self.bgcolor)\n\n original_texture = top.copy()\n top = self.transform_image_top(top)\n\n if not side:\n alpha_over(img, top, (0,0), top)\n return img\n\n side = self.transform_image_side(side)\n otherside = side.transpose(Image.FLIP_LEFT_RIGHT)\n\n # Darken the sides slightly. These methods also affect the alpha layer,\n # so save them first (we don't want to \"darken\" the alpha layer making\n # the block transparent)\n sidealpha = side.split()[3]\n side = ImageEnhance.Brightness(side).enhance(0.9)\n side.putalpha(sidealpha)\n othersidealpha = otherside.split()[3]\n otherside = ImageEnhance.Brightness(otherside).enhance(0.8)\n otherside.putalpha(othersidealpha)\n\n alpha_over(img, top, (0,0), top)\n alpha_over(img, side, (0,6), side)\n alpha_over(img, otherside, (12,6), otherside)\n\n # Manually touch up 6 pixels that leave a gap because of how the\n # shearing works out. This makes the blocks perfectly tessellate-able\n for x,y in [(13,23), (17,21), (21,19)]:\n # Copy a pixel to x,y from x-1,y\n img.putpixel((x,y), img.getpixel((x-1,y)))\n for x,y in [(3,4), (7,2), (11,0)]:\n # Copy a pixel to x,y from x+1,y\n img.putpixel((x,y), img.getpixel((x+1,y)))\n\n return img",
"def alpha_blend(bottom, top):\n assert bottom.shape == top.shape, \"Cannot blend two images of different shapes: %s != %s\" % (str(bottom.shape), str(top.shape))\n\n if bottom.dtype != top.dtype:\n bottom = bottom.astype(top.dtype)\n \"\"\"\n br, bg, bb, ba = split_channels(bottom)\n tr, tg, tb, ta = split_channels(top)\n\n oneminusa = 1.0 - ta\n \n out = np.empty_like(bottom)\n out[:, :, 0] = br * oneminusa + tr * ta\n out[:, :, 1] = bg * oneminusa + tg * ta\n out[:, :, 2] = bb * oneminusa + tb * ta\n out[:, :, 3] = np.maximum(ba, ta)\n #return out\n #return top\n out = top.copy()\n out[:, :, 3] = np.maximum(ba, ta)\n return out\n \"\"\"\n\n br, bg, bb, ba = split_channels(bottom)\n tr, tg, tb, ta = split_channels(top)\n\n out = np.empty_like(bottom)\n out[:, :, 0] = br + (tr-br) * ta\n out[:, :, 1] = bg + (tg-bg) * ta\n out[:, :, 2] = bb + (tb-bb) * ta\n out[:, :, 3] = np.maximum(ba, ta)\n return out",
"def create_brick(ai_settings, screen, bricks, brick_number, row_number):\n brick = Bricks(ai_settings, screen)\n bricks_width = brick.rect.width\n brick.x = bricks_width + 1.3*bricks_width * brick_number\n brick.rect.x = brick.x\n brick.rect.y = brick.rect.height + 1.7 * brick.rect.height * row_number\n bricks.add(brick)",
"def _draw_background(self):\r\n for i in range(self._size):\r\n for j in range(self._size):\r\n self._grid.draw_entity((i, j), BACK_GROUND)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the scatter and histogram plots.
|
def showPlot(self):
self.scatterPlotItem.setData(self.xData, self.yData, pen=self.pointPen, brush=self.brushes)
xy, xx = np.histogram(self.xData,
bins=np.linspace(np.min(self.xData), np.max(self.xData), self.numBins))
self.xHistogramItem.setData(xx, xy, stepMode=True, fillLevel=0, fillBrush=self.histogramFillBrush)
yy, yx = np.histogram(self.yData,
bins=np.linspace(np.min(self.yData), np.max(self.yData), self.numBins))
# Flip due to rotated plot
yy *= -1
self.yHistogramItem.setData(yx, yy, stepMode=True, fillLevel=0, fillBrush=self.histogramFillBrush)
|
[
"def show_scatterplot(self, *args, **kwargs):\n raise NotImplementedError()",
"def visualize(self):\n plt.show()",
"def show():\n plt.show()",
"def show(self, show =1):\n\t\tplt.scatter(*zip(*self.x), s=0.1)\n\t\tplt.axis('equal')\n\t\tplt.axis('off')\n\t\tmarker='.'\n\t\tif show== 1:\n\t\t\tplt.show()",
"def Display_List_Dist(self):\n # Get the neccessary distributions\n p,lh = self.get_p()\n low = lh[0]\n high = lh[1]\n N = len(p)\n clr = ['g','c','b','r'] \n fig , subplt = plt.subplots(nrows=N, figsize=(8, 9))\n x_grid = np.arange(low,high,self.get_precision())\n for i in range(N):\n subplt[i].plot(x_grid,p[i](x_grid),\\\n clr[i%4], linewidth=2.5,\\\n label = 'PDF {}'.format(i))\n subplt[i].legend()\n plt.show(block = False)",
"def display_2D_scatter_plot(dataset, title, xlabel, ylabel, labels = None):\n \n plt.figure()\n plt.scatter(dataset[:,0], dataset[:,1], c = labels)\n plt.suptitle(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()",
"def show(self):\n\n nrows = 2\n ncols = 2\n fig, axs = plt.subplots(nrows, ncols)\n fig.tight_layout()\n\n palette = sns.color_palette()\n houses_order = ['Ravenclaw', 'Hufflepuff', 'Slytherin', 'Gryffindor']\n colors = {house: palette[i] for i, house in enumerate(houses_order)}\n\n ax = [0, 0]\n\n sorted_data = self.data.sort_values(\n self.data.columns.values.tolist(),\n ascending=False\n )\n\n for i, row in self.thetas.iterrows():\n sig = self.sigmoid(sorted_data.dot(row))\n axs[ax[0]][ax[1]].plot(\n sorted_data.dot(row),\n sig,\n '.',\n alpha=0.5,\n label=self.houses[i]\n )\n axs[ax[0]][ax[1]].set_title(\n self.houses[i],\n c=colors[self.houses[i]]\n )\n ax = self.get_next_axe(axs, ax, nrows=nrows)\n plt.show()",
"def showPlot2():\n title(\"Clean time for 25x25 vs Number of Robots\")\n xlabel(\"Number of Robots\")\n ylabel(\"Average time\")\n means = []\n for i in range(1, 11):\n means.append(runSimulation(i, 1.0, 25, 25, 0.75, 30, Robot, False))\n num_robots = []\n for i in range(1,11):\n num_robots.append(i)\n plot(num_robots, means)",
"def plot_distributions(self, df):\r\n for col in df.columns.values:\r\n plt.figure(); plt.hist(df[col], bins=200)\r\n plt.title(\"Histogram showing distribution of \" + str(col))",
"def display_site_plot(fit):\n ax_2[0].cla()\n ax_2[1].cla()\n try:\n thellierData[site_wid.value].regplot(ax_2[0])\n ax_2[0].axhline(np.median(fit['int_site']),color='k')\n ax_2[1].axhline(np.median(fit['int_site']),color='k')\n ax_2[1].hist(fit['int_site'],color='skyblue',bins=100,density=True,orientation='horizontal')\n ax_2[0].set_ylim(min(np.percentile(fit['int_real'],2.5,axis=0))*0.9,max(np.percentile(fit['int_real'],97.5,axis=0))*1.1)\n ax_2[0].set_xlim(min(min(np.percentile(fit['k'],2.5,axis=0))*1.1,min(np.percentile(fit['k'],2.5,axis=0))*0.9),max(max(np.percentile(fit['k'],97.5,axis=0))*1.1,max(np.percentile(fit['k'],97.5,axis=0))*0.9))\n ax_2[1].set_ylabel('$B_{anc}$')\n ax_2[1].set_xlabel('Probability Density')\n try:\n display_specimen_ring()\n except:\n pass\n except:\n rhatlabel.description='R_hat:'\n nefflabel.description='n_eff:'\n banclabel.description='B_anc:'\n gradelabel.description='Category: '\n banclabel.button_style='info'\n nefflabel.button_style='info'\n rhatlabel.button_style='info'\n gradelabel.button_style='info'\n fig_2.tight_layout();",
"def show_heatmap(self):\n plt.show()",
"def __show_distribution(self, train_data, test_data, valid_data):\r\n num_plots = 2\r\n \r\n if valid_data:\r\n num_plots = 3\r\n plt.figure(figsize=(10, 3))\r\n plt.subplot(1, num_plots, 1)\r\n objects = train_data.keys()\r\n x_pos = np.arange(len(objects))\r\n num_examples = [len(train_data[obj]) for obj in objects]\r\n \r\n plt.bar(x_pos, num_examples, align='center')\r\n plt.xticks(x_pos, objects)\r\n plt.ylabel('Number of examples')\r\n plt.title('Training set distribution')\r\n \r\n plt.subplot(1, num_plots, 2)\r\n objects = test_data.keys()\r\n x_pos = np.arange(len(objects))\r\n num_examples = [len(test_data[obj]) for obj in objects]\r\n \r\n plt.bar(x_pos, num_examples, align='center')\r\n plt.xticks(x_pos, objects)\r\n plt.ylabel('Number of examples')\r\n plt.title('Test set distribution') \r\n \r\n if valid_data:\r\n plt.subplot(1, num_plots, 3)\r\n objects = valid_data.keys()\r\n x_pos = np.arange(len(objects))\r\n num_examples = [len(valid_data[obj]) for obj in objects]\r\n\r\n plt.bar(x_pos, num_examples, align='center')\r\n plt.xticks(x_pos, objects)\r\n plt.ylabel('Number of examples')\r\n plt.title('Validation set distribution')\r\n \r\n plt.tight_layout()\r\n plt.show()",
"def visualize(kmeans, x,y, title):\n plt.scatter(x, y, s=10, c=kmeans.labels_)\n plt.title(title)\n plt.show()",
"def plot_hist(self, **kwargs: Any) -> None:\n plt.hist(self.iterable, **kwargs)\n if kwargs.get('grid', False):\n plt.grid()\n plt.ylabel('$P(x)$')\n plt.xlabel('$x$')\n plt.show()",
"def create_scatter_plot(self):\n xy = self.get_x_and_y_as_dict()\n x = xy[\"x\"]\n y = xy[\"y\"]\n plt.scatter(x, y)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(\"Scatter plot of x and y values\")\n plt.savefig(f\"{self.save_directory}/task_2_scatter_plot.png\")",
"def show(self):\n self._create_figure(raise_exception=True)\n self._fig_width, self._fig_height = self._fig_dims()\n plt.show()",
"def display_specimen_plots():\n ax[0].cla()\n ax[1].cla()\n thellierData[site_wid.value][specimen_wid.value].change_temps(lower_temp_wid.value+273,upper_temp_wid.value+273)\n thellierData[site_wid.value][specimen_wid.value].plot_circ(ax[0])\n thellierData[site_wid.value][specimen_wid.value].plot_arai(ax[0])\n thellierData[site_wid.value][specimen_wid.value].plot_zijd(ax[1])\n madbox.description='MAD: %1.2f'%thellierData[site_wid.value][specimen_wid.value].mad\n dangbox.description='DANG: %1.2f'%thellierData[site_wid.value][specimen_wid.value].dang\n dratbox.description='DRAT: %1.2f'%thellierData[site_wid.value][specimen_wid.value].drat\n rhat=thellierData[site_wid.value][specimen_wid.value].rhat\n rhatbox.description='R_hat: %1.2f'%thellierData[site_wid.value][specimen_wid.value].rhat\n if (rhat==None)|(0.9<rhat<1.1):\n rhatbox.button_style='info'\n else:\n rhatbox.button_style='danger'\n fig.tight_layout()",
"def dispersion_diagram(data, p1, p2, qual, lat, long, show_axes):\n u = data[qual].unique()\n fig, ax = plt.subplots(figsize=(lat, long))\n for i in range(len(u)):\n x = data.loc[data[qual] == u[i]][p1]\n y = data.loc[data[qual] == u[i]][p2]\n ax.scatter(x, y)\n # ax.set_xlabel(p1)\n # ax.set_ylabel(p2)\n ax.axis(show_axes)\n # ax.legend(u)\n return fig",
"def show_scatter_plot(inputs, function, x_label, y_label):\n inps = list(inputs)\n \n plot.scatter(inps, [function(x) for x in inps])\n plot.xlabel(x_label)\n plot.ylabel(y_label)\n \n plot.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
evaluate: Compute the mean Average Precision metrics on a subset with a given model
|
def evaluate(model, subset, batch_size=default_batch_size, data_dir=default_data_dir, verbose=0):
#disable_tqdm = (verbose == 0)
# Create the generator on the given subset
data_generator = PascalVOCDataGenerator(subset, data_dir)
steps_per_epoch = int(len(data_generator.id_to_label) / batch_size) + 1
# Get the generator
generator = data_generator.flow(batch_size=batch_size)
y_all = []
y_pred_all = []
for i in range(steps_per_epoch):
# Get the next batch
X, y = next(generator)
y_pred = model.predict(X)
# We concatenate all the y and the prediction
for y_sample, y_pred_sample in zip(y, y_pred):
y_all.append(y_sample)
y_pred_all.append(y_pred_sample)
y_all = np.array(y_all)
y_pred_all = np.array(y_pred_all)
# Now we can compute the AP for each class
AP = np.zeros(data_generator.nb_classes)
for cl in range(data_generator.nb_classes):
AP[cl] = average_precision_score(y_all[:, cl], y_pred_all[:, cl])
return AP
|
[
"def average_model(self, key, model):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for param, other_param in zip(\n self.models[key].parameters(), model.parameters()):\n param.data += other_param.data.cuda(param.data.get_device())\n param.data /= 2",
"def evaluate_model(model, inputs_test, labels_test, category_names):\n y_hat = model.predict(inputs_test)\n\n score_df = pd.DataFrame({\"category\": category_names, \"precision\": np.nan, \"recall\": np.nan, \"F1 score\": np.nan})\n\n for ii, col_name in enumerate(category_names):\n pre, rec, score, support = precision_recall_fscore_support(labels_test.iloc[:, ii], y_hat[:, ii], average=\"weighted\")\n score_df.loc[score_df[\"category\"] == col_name, \"precision\"] = pre\n score_df.loc[score_df[\"category\"] == col_name, \"recall\"] = rec\n score_df.loc[score_df[\"category\"] == col_name, \"F1 score\"] = score\n\n print(score_df)\n print(score_df.mean())",
"def compute(self):\n average_precision = {}\n label_records = self.label_records\n\n for label in self.gt_bboxes_count:\n # if there are no predicted boxes with this label\n if label not in label_records:\n average_precision[label] = 0\n continue\n pred_infos = label_records[label].pred_infos\n gt_bboxes_count = self.gt_bboxes_count[label]\n\n pred_infos = sorted(pred_infos, reverse=True)\n true_pos = np.array(list(zip(*pred_infos))[1]).astype(int)\n false_pos = 1 - true_pos\n\n acc_tp = np.cumsum(true_pos)\n acc_fp = np.cumsum(false_pos)\n\n recall = acc_tp / gt_bboxes_count\n precision = np.divide(acc_tp, (acc_fp + acc_tp))\n ap = self.ap_method(recall, precision)\n # add class result in the dictionary to be returned\n average_precision[label] = ap\n\n return average_precision",
"def calcAvgPrec(self):\n avg = 0.0\n counter = 0\n self.recallCompInter = []\n self.precComplete = []\n for i in range (0, len(self.retrieved)):\n if self.retrieved[i] in self.relevant:\n counter += 1 \n avg += ((float(counter)/(i+1)))\n \n self.recallCompInter.append(float(counter)/(self.numberRelevant))\n self.precComplete.append(float(counter)/(i+1)) \n\n avg = avg/counter\n\n print(\"##############################################\") \n print(\"AvgPrecision:\")\n print(avg)\n print(\"##############################################\")",
"def average_precision(predictions):\n precisions = []\n correct_predictions = 0\n for i in range(len(predictions)):\n if predictions[i]:\n correct_predictions += 1\n precisions.append(correct_predictions / (i + 1))\n if precisions:\n #return sum(precisions) / len(precisions)\n return mean(precisions)\n return 0",
"def evaluate_models_2():\n df = prepare_individual_datasets()\n get_averaged_models()\n scores = []\n mean_plot = []\n print(\"Starting evaluation...\")\n for model in glob.glob('*_averaged.csv'):\n averaged_model = pd.read_csv(model)\n featuress = averaged_model['feature']\n\n # weights\n intercept = averaged_model['weight'].values[0]\n weights = averaged_model['weight'][1:]\n features_used = featuress.values[1:]\n # reindex to perform series multiplication\n weights.index = features_used\n\n temp_scores = []\n for station in df:\n X = station.loc[:, station.columns != 'bikes']\n Y = station['bikes']\n X = X.filter(items=features_used)\n predictions = X.apply(lambda row: intercept + row.dot(weights), axis=1).astype('int64')\n temp_scores.append(mean_absolute_error(predictions, Y))\n name = model.split('_averaged')[0]\n scores.append((name, temp_scores))\n mean_score = mean(temp_scores)\n print(f'Accuracy of model {name} is {mean_score}\\n')\n mean_plot.append(mean_score)\n plot_scores_2(scores, mean(mean_plot))\n print(mean(mean_plot))",
"def single_run(model):\n global X_train, X_test, y_train, y_test\n\n model.fit(X_train, y_train)\n Y_hat = model.predict(X_test)\n MAE = np.mean(abs(Y_hat - y_test))\n print('MAE for given model : %.3f' % MAE)",
"def average_precision_score(y, y_pred):\n pass",
"def mean_average_precision(predictions_list):\n return mean(map(average_precision, predictions_list))",
"def calc_accuracy(model_dict, test_dict):\n\n \"\"\" Calculate the result \"\"\"\n\n all_prob = []\n result_dict = {}\n test_label = []\n predict_label = []\n\n for t_name, t in test_dict.items():\n result = []\n index = []\n hype_dict = {}\n sum = len(t)\n counter = 0\n letter = t_name\n for p in t:\n test_label.append(t_name)\n high_score = -100000\n for m_name, m in model_dict.items():\n score = m.score([p])\n if score > high_score:\n high_score = score\n hypo = m_name\n result.append(hypo)\n predict_label.append(hypo)\n if hypo == letter:\n counter += 1\n all_letters = list(set(result))\n for l in all_letters:\n hype_dict[l] = result.count(l)\n\n sorted_hype_dict = sorted(hype_dict.iteritems(), key=operator.itemgetter(1))\n sorted_hype_dict.reverse()\n\n if sum != 0:\n prob = float(counter)/sum\n print str(letter) + \"(\"+ str(counter) + \"/\" + str(sum) + \")\" + \" ==> Accuracy: \" + str(prob),\n print sorted_hype_dict\n all_prob.append(prob)\n result_dict[letter] = np.array([counter, sum])\n\n \"\"\" Print the average accuracy\"\"\"\n\n all_prob = np.array(all_prob)\n print \"Average accuracy is: \" + str(all_prob.mean())\n print \"=================================\"\n\n return all_prob, result_dict, test_label, predict_label",
"def ensemble_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n train_eq_ensemble_pred = equally_ensemble_results(train_probas)\n val_eq_ensemble_pred = equally_ensemble_results(val_probas)\n test_eq_ensemble_pred = equally_ensemble_results(test_probas)\n\n print(\"Equally weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_eq_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_eq_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_eq_ensemble_pred))\n\n np.save(os.path.join('model', 'train_eq_ensemble_pred'), train_eq_ensemble_pred)\n np.save(os.path.join('model', 'val_eq_ensemble_pred'), val_eq_ensemble_pred)\n np.save(os.path.join('model', 'test_eq_ensemble_pred'), test_eq_ensemble_pred)\n\n confidence_train = calculate_confidence_val(train_probas, y_train)\n confidence_val = calculate_confidence_val(val_probas, y_val)\n confidence_test = calculate_confidence_val(test_probas, y_test)\n\n train_w_ensemble_pred = weighted_ensemble_results(train_probas, confidence_train)\n val_w_ensemble_pred = weighted_ensemble_results(val_probas, confidence_val)\n test_w_ensemble_pred = weighted_ensemble_results(test_probas, confidence_test)\n\n print(\"Weighted ensemble:\")\n print(\"--------------------------\")\n print(\"Train accuracy: \", accuracy_score(y_train, train_w_ensemble_pred))\n print(\"Validation accuracy: \", accuracy_score(y_val, val_w_ensemble_pred))\n print(\"Test accuracy: \", accuracy_score(y_test, test_w_ensemble_pred))\n\n np.save(os.path.join('model', 'train_w_ensemble_pred.npy'), train_w_ensemble_pred)\n np.save(os.path.join('model', 'val_w_ensemble_pred.npy'), val_w_ensemble_pred)\n np.save(os.path.join('model', 'test_w_ensemble_pred.npy'), test_w_ensemble_pred)",
"def _get_models_avg_min(x_train, y_train, metric, model):\n\n errors_avg = 0\n best_min = 999999999\n for i, value in enumerate(EVAL_WEIGHTS):\n model.set_weight(value)\n eval_result = model.evaluate_model(x_train)\n error = metric(y_train, eval_result)\n if error < best_min:\n best_min = error\n errors_avg += error\n\n return model.model_id, errors_avg / len(EVAL_WEIGHTS), best_min",
"def compute_gp_prediction_mean_and_uncertainty(bufferx, model, param_space, var=False):\n normalized_bufferx = preprocess_data_buffer(bufferx, param_space)\n normalized_bufferx = np.array(normalized_bufferx)\n means = {}\n vars = {}\n uncertainty = {}\n for parameter in model:\n means[parameter], vars[parameter] = model[parameter].predict(normalized_bufferx)\n means[parameter] = means[parameter].flatten()\n vars[parameter] = vars[parameter].flatten()\n\n # Precision can sometimes lead GPy to predict extremely low deviation, which leads to numerical issues\n # We add a floor to std to avoid these numerical issues. The majority of std values observed are naturally above this floor.\n vars[parameter][vars[parameter] < 10**-11] = 10**-11\n if var:\n uncertainty[parameter] = vars[parameter]\n else:\n uncertainty[parameter] = np.sqrt(vars[parameter])\n\n return means, uncertainty",
"def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = []\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':",
"def mean_average_precision(val_frame, touched_dict, k=5):\n average_precisions = val_frame.groupby(['user_id']).apply(lambda x: average_precision(x, touched_dict[x.name], k=k))\n return np.mean(average_precisions['average_precision'])",
"def compute_rf_prediction_mean_and_uncertainty(bufferx, model, param_space, var=False):\n prediction_means = {}\n prediction_uncertainty = {}\n normalized_bufferx = preprocess_data_buffer(bufferx, param_space)\n for objective in model:\n leaf_per_sample = model[objective].get_leaves_per_sample(\n normalized_bufferx, param_space\n )\n prediction_means[objective] = model[objective].compute_rf_prediction(\n leaf_per_sample\n )\n prediction_variances = model[objective].compute_rf_prediction_variance(\n leaf_per_sample, prediction_means[objective]\n )\n if var:\n prediction_uncertainty[objective] = prediction_variances\n else:\n prediction_uncertainty[objective] = np.sqrt(prediction_variances)\n\n return prediction_means, prediction_uncertainty",
"def getpreds(models, x_test,y_test,w=None):\n probabs=[]\n i=1\n for clf in models:\n pred=clf.predict(x_test)\n p=np.argmax(pred,axis=1)\n i+=1\n probabs.append(pred) \n probabs=np.array(probabs)\n labels = np.average(probabs, axis=0, weights=w)\n labels=np.argmax(labels,axis=1)\n accuracy_test = accuracy_score(y_test,labels)*100\n return accuracy_test,labels",
"def evaluate_model(X, y, Model, pca_n_components=None,\n n_splits=1, params=None):\n mean_mrr = 0.\n mean_top30 = 0.\n mean_mrank = 0.\n print(f'Test of {str(Model().__class__.__name__)} on {n_splits} train-test split(s)')\n for n in range(n_splits):\n\n # Evaluate as the average accuracy on one train/split random sample:\n X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)\n\n classifier = Model(**params)\n classifier.fit(X_train,y_train)\n\n y_predicted = classifier.predict(X_test)\n\n mean_top30 += classifier.top30_score(y_predicted, y_test)\n mean_mrr += classifier.mrr_score(y_predicted, y_test)\n mean_mrank += classifier.mean_rank(y_predicted, y_test)\n\n mean_top30 /= n_splits\n mean_mrr /= n_splits\n mean_mrank /= n_splits\n\n print('Params:',classifier.get_params())\n print(f'Top30 score: {mean_top30}')\n print(f'MRR score: {mean_mrr}')\n print(f'Mean rank: {mean_mrank}')\n\n # print(\"Example of predict proba:\")\n # print(f\"occurrence:\\n{X_test[12]}\")\n # y_pred, y_probas = classifier.predict(X_test[12].reshape(1,-1), return_proba=True)\n # print(f'predicted labels:\\n{y_pred}')\n # print(f'predicted probas:\\n{y_probas}')",
"def avg_predictions(df_grp_2, cal_df, fisc_calender, pred_start, fpg, res_all_sp):\r\n pivot_var = \"MM\"\r\n agg_col = \"LINE_ORDERS\"\r\n var_list = ['prev_5wk_avg_4wk_hld']\r\n level_list = list(df_grp_2[pivot_var].unique())\r\n \r\n tt = df_grp_2.copy()\r\n level = \"wk\"\r\n # Rearrange date by fiscal weeks\r\n tt = cu.merge_cal_tt(tt, cal_df.drop('FISC_EOW_DT', axis = 1), level)\r\n tt[agg_col+\"_ACT\"] = tt[agg_col]\r\n tt[agg_col].replace(np.nan, 0, inplace=True)\r\n \r\n model_data_pivot = pd.pivot_table(tt,\r\n index=['FISC_YR_NBR', 'FISC_MTH_NBR','FISC_WK_OF_MTH_ID', 'FISC_WK_OF_YR_NBR', 'FISC_QTR_NBR'],\r\n columns=pivot_var,\r\n values=agg_col).reset_index()\r\n model_data_pivot = cu.merge_cal_tt(model_data_pivot, cal_df, level)\r\n\r\n model_data_pivot = cu.fillna_values(model_data_pivot, pred_start)\r\n for i in level_list:\r\n if str(i) not in model_data_pivot.columns:\r\n model_data_pivot[str(i)] = 0\r\n tt = model_data_pivot.copy()\r\n train = tt[(tt.FISC_WK_OF_MTH_ID < pred_start) ]\r\n test = tt[(tt.FISC_WK_OF_MTH_ID >= pred_start) ]\r\n for i in level_list:\r\n if str(i) not in train.columns:\r\n train[str(i)] = 0\r\n if str(i) not in test.columns:\r\n test[str(i)] = 0\r\n \r\n model_data_orders_df = prev_avg_sparsity_scoring(level_list, train, test, \"_salesMAPP_prev_5wk_avg_4wk_hld\")\r\n model_data_orders_melt_df = cu.lag_variable_melt(agg_col, model_data_orders_df, var_list)\r\n model_data_orders_melt_df.columns = model_data_orders_melt_df.columns.str.replace(\"variable\", pivot_var)\r\n model_data_orders_melt_df[\"RSS\"] = fpg\r\n model_data_orders_melt_df[\"RSS_MM\"] = model_data_orders_melt_df[\"RSS\"]+\"_\"+model_data_orders_melt_df[pivot_var]\r\n model_data_orders_melt_df = pd.merge(model_data_orders_melt_df, \r\n df_grp_2[[\"FISC_WK_OF_MTH_ID\", pivot_var, agg_col, \"SPARSITY\"]], how =\"left\")\r\n \r\n model_data_orders_melt_df.columns = model_data_orders_melt_df.columns.str.replace(\"LINE_ORDERS_VALUE\", \"Prediction_Trf\")\r\n model_data_orders_melt_df = model_data_orders_melt_df[['FISC_YR_NBR', 'FISC_MTH_NBR', 'FISC_WK_OF_MTH_ID',\r\n pivot_var, 'Prediction_Trf', 'RSS', 'RSS_MM']]\r\n res_all_sp.append(model_data_orders_melt_df)\r\n \r\n return res_all_sp",
"def run_avg_results():\n\n # List of logs to be measured (tested)\n items = [\"logs_2017-06-23_14-16-00\",\n \"logs_2017-06-23_14-16-59\",\n \"logs_2017-06-23_14-17-58\",\n \"logs_2017-06-23_14-18-48\",\n \"logs_2017-06-23_14-19-39\"]\n\n results = []\n game = \"2048\"\n evals = 1000\n for item in items:\n prefix = \"C:/Users/Jan/Documents/GitHub/general-ai/Experiments/best_models_repeats/2048/MLP+ES/\"\n postfix = \"/best/best_0.json\"\n file_name = prefix + item + postfix\n logdir = prefix + item\n\n # SELECT PROPER MODEL\n model = MLP.load_from_file(file_name, game)\n # model = EchoState.load_from_file(file_name, game)\n\n # RUN MODEL\n # 2048\n result = run_2048_extended(model, evals)\n\n # MARIO\n # result = eval_mario_winrate(model=model, evals=evals, level=\"spikes\", vis_on=False)\n\n # ALHAMBRA\n # First element is result of our model (rest are original models from previous work)\n # result = eval_alhambra_avg_score(model, evals)[0]\n\n # TORCS\n # For reinforcement learning, please run model separately (tensorflow needs to be restarted)\n results.append(result)\n\n results = np.array(results)\n file_name = \"{}_stats_{}.txt\".format(game, utils.miscellaneous.get_pretty_time())\n with open(file_name, \"w\") as f:\n f.write(\"--GAME {} STATISTICS-- {} trainings of the same model\".format(game.upper(), len(items)))\n f.write(os.linesep)\n f.write(\"Model: {}\".format(model.get_name()))\n f.write(os.linesep)\n f.write(\"Total games: {} (for each model)\".format(evals))\n f.write(os.linesep)\n f.write(\"MAX TEST: {}\".format(np.max(results)))\n f.write(os.linesep)\n f.write(\"AVG TEST: {}\".format(np.mean(results)))\n f.write(os.linesep)\n f.write(\"MIN TEST: {}\".format(np.min(results)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that get_outbox returns an Outbox. This test brought to you by the department of redundancy department.
|
def test_single_scope(self):
with get_outbox() as outbox:
self.assertIsInstance(outbox, Outbox)
|
[
"def check_outbox(self):\n response = urlopen(self.outbox_url)\n if response.getcode() == 200:\n content = json.loads(response.read())\n # for each message in our outbox\n for message in content['outbox']:\n # add it to our outgoing queue\n self.add_outgoing_message(message['backend'],\n None,\n message['contact'],\n message['text'],\n id=message['id'])\n \n else:\n raise Exception(\"Unable to send message, got status: %s\" % response.getcode())",
"def _assert_cli_out(self, condition, func, msg, cmd, cliout):\n if not condition:\n err_msg = (_('%(func)s: %(msg)s\\nCLI command: %(cmd)s\\n'\n 'CLI out: %(out)s') % {'func': func,\n 'msg': msg,\n 'cmd': cmd,\n 'out': cliout})\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)",
"def test_was_closed(self, mock_close):\n with get_outbox():\n pass\n\n self.assertTrue(mock_close.called)",
"def test_penaltyshootouts_get(self):\n pass",
"def test_thank_you_email(self):\n self.assertEqual(len(mail.outbox), 1)",
"def assert_no_email(self):\n # The mailbox file has not been created\n self.assertFalse(mail.outbox)",
"def test_queues_check_out_queue_item_v1(self):\n pass",
"def test_usf_nat_fail(self):\n\n self.cgn.get_xml_output = MagicMock()\n xml_tree = self.xml.xml_string_to_dict(xml_str=self.response[\"NAT_POOL_False\"])\n xml_tree = xml_tree['rpc-reply']\n xpath = 'source-nat-pool-detail-information/source-nat-pool-info-entry'\n for path in xpath.split('/'):\n xml_tree = xml_tree[path]\n self.cgn.get_xml_output.return_value = xml_tree\n option = {'interface-name':'vms-2/3/0', 'pool-name':'src_pool1','service-set-name':'snat_ss1','total-pool-address':'254','address-range-high':'60.1.1.254','address-range-low':'60.1.1.1'}\n self.assertEqual(self.cgn.verify_nat_pool(name=None, nat_type = \"source\", expected_output = option),True)",
"def test_was_flushed(self, mock_flush):\n with get_outbox():\n pass\n\n self.assertTrue(mock_flush.called)",
"def test_outbox_view_thread_list_for_sender(self):\n self.client.login(username=self.harry.username, password='password')\n response = self.client.get(reverse('tm:messages_outbox'))\n self.assertEqual(response.status_code, 200, \"Sender has logged in successfully\")\n thread_list = response.context['thread_list']\n self.assertTrue(len(thread_list)==1, \"There is a message in sender's outbox\")",
"def test_nhif_outpatient_endpoint_gets_nhif_outpatient(self):\n response = self.client.get(\"search/nhif-outpatient?q=BRISTOL\")\n self.assertIn(b\"OK\", response.data)",
"def wait_outbox_empty(\n self, sleep: float = DEFAULT_SLEEP, timeout: float = DEFAULT_TIMEOUT\n ) -> \"AeaTool\":\n start_time = time.time()\n while not self.aea.outbox.empty():\n time.sleep(sleep)\n if time.time() - start_time > timeout:\n raise Exception(\"timeout\")\n return self",
"def test_act_iv(self):\n # setup\n self.strategy.is_contract_deployed = True\n self.strategy._is_tokens_created = False\n self.strategy._contract_address = self.contract_address\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.registration_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(1)\n\n assert self.strategy.is_behaviour_active is False\n\n # _request_token_create_transaction\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=ContractApiMessage,\n performative=ContractApiMessage.Performative.GET_RAW_TRANSACTION,\n to=LEDGER_API_ADDRESS,\n sender=str(self.skill.skill_context.skill_id),\n ledger_id=self.strategy.ledger_id,\n contract_id=self.strategy.contract_id,\n contract_address=self.strategy.contract_address,\n callable=\"get_create_batch_transaction\",\n kwargs=ContractApiMessage.Kwargs(\n {\n \"deployer_address\": self.skill.skill_context.agent_address,\n \"token_ids\": self.strategy.token_ids,\n \"gas\": self.strategy.gas,\n }\n ),\n )\n assert has_attributes, error_str\n\n contract_api_dialogue = cast(\n ContractApiDialogue, self.contract_api_dialogues.get_dialogue(message)\n )\n assert contract_api_dialogue.terms == self.strategy.get_create_token_terms()\n\n mock_logger.assert_any_call(\n logging.INFO, \"requesting create batch transaction...\"\n )",
"def badTestBoxManagement(self):\n rcExit = 0;\n\n #\n # We skip this entirely if we're running in the past and not in harmless debug mode.\n #\n if self.oConfig.cStartHoursAgo != 0 \\\n and (not self.oConfig.fDebug or self.oConfig.fRealRun):\n return rcExit;\n tsNow = self.tsNow if self.oConfig.fDebug else None;\n cHoursBack = self.oConfig.cHoursBack if self.oConfig.fDebug else 2;\n oTestBoxLogic = TestBoxLogic(self.oDb);\n\n #\n # Generate a list of failures reasons we consider bad-testbox behavior.\n #\n aidFailureReasons = [\n self.getFailureReason(self.ktReason_Host_DriverNotUnloading).idFailureReason,\n self.getFailureReason(self.ktReason_Host_DriverNotCompilable).idFailureReason,\n self.getFailureReason(self.ktReason_Host_InstallationFailed).idFailureReason,\n ];\n\n #\n # Get list of bad test boxes for given period and check them out individually.\n #\n aidBadTestBoxes = self.oTestSetLogic.fetchBadTestBoxIds(cHoursBack = cHoursBack, tsNow = tsNow,\n aidFailureReasons = aidFailureReasons);\n for idTestBox in aidBadTestBoxes:\n # Skip if the testbox is already disabled or has a pending reboot command.\n try:\n oTestBox = TestBoxData().initFromDbWithId(self.oDb, idTestBox);\n except Exception as oXcpt:\n rcExit = self.eprint('Failed to get data for test box #%u in badTestBoxManagement: %s' % (idTestBox, oXcpt,));\n continue;\n if not oTestBox.fEnabled:\n self.dprint(u'badTestBoxManagement: Skipping test box #%u (%s) as it has been disabled already.'\n % ( idTestBox, oTestBox.sName, ));\n continue;\n if oTestBox.enmPendingCmd != TestBoxData.ksTestBoxCmd_None:\n self.dprint(u'badTestBoxManagement: Skipping test box #%u (%s) as it has a command pending: %s'\n % ( idTestBox, oTestBox.sName, oTestBox.enmPendingCmd));\n continue;\n\n # Get the most recent testsets for this box (descending on tsDone) and see how bad it is.\n aoSets = self.oTestSetLogic.fetchSetsForTestBox(idTestBox, cHoursBack = cHoursBack, tsNow = tsNow);\n cOkay = 0;\n cBad = 0;\n iFirstOkay = len(aoSets);\n for iSet, oSet in enumerate(aoSets):\n if oSet.enmStatus == TestSetData.ksTestStatus_BadTestBox:\n cBad += 1;\n else:\n # Check for bad failure reasons.\n oFailure = None;\n if oSet.enmStatus in TestSetData.kasBadTestStatuses:\n oFailure = self.oTestResultFailureLogic.getById(oSet.idTestResult);\n if oFailure is not None and oFailure.idFailureReason in aidFailureReasons:\n cBad += 1;\n else:\n # This is an okay test result then.\n ## @todo maybe check the elapsed time here, it could still be a bad run?\n cOkay += 1;\n if iFirstOkay > iSet:\n iFirstOkay = iSet;\n if iSet > 10:\n break;\n\n # We react if there are two or more bad-testbox statuses at the head of the\n # history and at least three in the last 10 results.\n if iFirstOkay >= 2 and cBad > 2:\n # Frank: For now don't reboot boxes automatically\n if True or oTestBoxLogic.hasTestBoxRecentlyBeenRebooted(idTestBox, cHoursBack = cHoursBack, tsNow = tsNow):\n self.vprint(u'Disabling testbox #%u (%s) - iFirstOkay=%u cBad=%u cOkay=%u'\n % ( idTestBox, oTestBox.sName, iFirstOkay, cBad, cOkay));\n if self.oConfig.fRealRun is True:\n try:\n oTestBoxLogic.disableTestBox(idTestBox, self.uidSelf, fCommit = True,\n sComment = 'Automatically disabled (iFirstOkay=%u cBad=%u cOkay=%u)'\n % (iFirstOkay, cBad, cOkay),);\n except Exception as oXcpt:\n rcExit = self.eprint(u'Error disabling testbox #%u (%u): %s\\n' % (idTestBox, oTestBox.sName, oXcpt,));\n else:\n self.vprint(u'Rebooting testbox #%u (%s) - iFirstOkay=%u cBad=%u cOkay=%u'\n % ( idTestBox, oTestBox.sName, iFirstOkay, 
cBad, cOkay));\n if self.oConfig.fRealRun is True:\n try:\n oTestBoxLogic.rebootTestBox(idTestBox, self.uidSelf, fCommit = True,\n sComment = 'Automatically rebooted (iFirstOkay=%u cBad=%u cOkay=%u)'\n % (iFirstOkay, cBad, cOkay),);\n except Exception as oXcpt:\n rcExit = self.eprint(u'Error rebooting testbox #%u (%u): %s\\n' % (idTestBox, oTestBox.sName, oXcpt,));\n else:\n self.dprint(u'badTestBoxManagement: #%u (%s) looks ok: iFirstOkay=%u cBad=%u cOkay=%u'\n % ( idTestBox, oTestBox.sName, iFirstOkay, cBad, cOkay));\n return rcExit;",
"def testProcessEmailToOrganisation(self):\n\n s3db = current.s3db\n resource = s3db.resource(\"org_organisation\", uid=[\"MsgTestOrg\"])\n rows = resource.select([\"pe_id\"], as_rows=True)\n\n self.sent = []\n\n outbox = s3db.msg_outbox\n for row in rows:\n outbox_id = outbox.insert(pe_id = row.pe_id,\n message_id = self.message_id)\n\n msg = current.msg\n msg.send_email = self.send_email\n msg.process_outbox()\n self.assertEqual(len(self.sent), 2)\n self.assertIn(\"test1@example.com\", self.sent)\n self.assertIn(\"test2@example.com\", self.sent)",
"def testOutsourcedAccess(self):\r\n\r\n self.project.outsource = self.project_outsource\r\n self.project.save()\r\n\r\n for user_role in USER_ROLES:\r\n check_page_status(self, user_role, convert_url_roles(URL_ROLES_OUTSOURCE))\r\n\r\n # Check if a writer and a team member of the outsource project can\r\n # open up Lotte\r\n expected_code = 200\r\n url = '/projects/p/project1/resource/resource1/l/pt_BR/'\r\n for user_role in self.EXTRA_USER_ROLES:\r\n response = self.client[user_role].get(url)\r\n assert_status_code(self, response, expected_code, url,\r\n user_role)",
"def test_verify_usf_nat_pool_(self):\n\n self.cgn.get_xml_output = MagicMock()\n xml_tree = self.xml.xml_string_to_dict(xml_str=self.response[\"NAT_POOL\"])\n xml_tree = xml_tree['rpc-reply']\n xpath = 'source-nat-pool-detail-information/source-nat-pool-info-entry'\n for path in xpath.split('/'):\n xml_tree = xml_tree[path]\n self.cgn.get_xml_output.return_value = xml_tree\n option = {'interface-name':'vms-2/3/0', 'pool-name':'src_pool1','service-set-name':'snat_ss1','total-pool-address':'254','address-range-high':'60.1.1.254','address-range-low':'60.1.1.1'}\n self.assertEqual(self.cgn.verify_nat_pool(name=None, nat_type = \"source\", expected_output = option),True)",
"def test_ok_returned_ticket(self):\n process_result = process_response(self.resp_ok)\n self.assertEqual(process_result[\"detail\"], self.sample_ok)",
"def lookup_out(self):\n\n res = self.get_from_api('outcodes/' + self.outcode)\n if res.ok:\n outward_info = res.json()['result']\n return outward_info\n else:\n res.raise_for_status()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that the close() method is called when the scope exits.
|
def test_was_closed(self, mock_close):
with get_outbox():
pass
self.assertTrue(mock_close.called)
|
[
"def assert_close(self) -> None:\n assert self.is_closed",
"def test_close_event(self):\n pass",
"def __exit__(self, *args: Any) -> None:\n self.close()",
"def test_operation_on_closed(self):\n self.fh.close()\n assert self.fh[META_ATTR]\n\n # cannot access closed handles\n with pytest.raises(RuntimeError):\n self.fh.file_handle # noqa: B018",
"def test_close(self):\n self.test_fan.close()\n\n self.assertTrue(getattr(self.test_fan.power_pin, 'closed'))\n self.assertTrue(getattr(self.test_fan.tach_pin, 'closed'))",
"def test_term_close(self):\n sockets = [ self.context.socket(zmqpy.REP) for i in range(65) ]\n # close half of the sockets\n [ s.close() for s in sockets[::2] ]\n self.context.term()\n for s in sockets:\n self.assertTrue(s.closed)",
"def test_close(self):\n with self.assertRaises(InterfaceError):\n db = database()\n db.close()\n db.query('SELECT * FROM test_data')",
"def end_test(self):",
"def __exit__(self, _, __, ___):\n for f, h in self.handles.items():\n try:\n h.close()\n except Exception as e:\n logger.warning(\"Exception on closing %s: %s\", f, str(e))\n return False",
"def close(self):\n self.env = None",
"def __exit__(self, *args):\r\n\t\tself.io_buffer.close()",
"def testOpenClose(self):\n test_file_path = self._GetTestFilePath(['winevt-rc.db'])\n self._SkipIfPathNotExists(test_file_path)\n\n database_file = database.SQLite3DatabaseFile()\n database_file.Open(test_file_path, read_only=True)\n\n with self.assertRaises(IOError):\n database_file.Open(test_file_path, read_only=True)\n\n database_file.Close()\n\n # Test close after close.\n with self.assertRaises(IOError):\n database_file.Close()",
"def closeCase(self):",
"def test_close(self):\n wrp = self.dlg.find()\n\n # mock a failure in get_elem_interface() method only for 'Window' param\n orig_get_elem_interface = uia_defs.get_elem_interface\n with mock.patch.object(uia_defs, 'get_elem_interface') as mock_get_iface:\n def side_effect(elm_info, ptrn_name):\n if ptrn_name == \"Window\":\n raise uia_defs.NoPatternInterfaceError()\n else:\n return orig_get_elem_interface(elm_info, ptrn_name)\n mock_get_iface.side_effect=side_effect\n # also mock a failure in type_keys() method\n with mock.patch.object(UIAWrapper, 'type_keys') as mock_type_keys:\n exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ())\n mock_type_keys.side_effect = exception_err\n self.assertRaises(WindowNotFoundError, self.dlg.close)\n\n self.dlg.close()\n self.assertEqual(self.dlg.exists(), False)",
"def test_unstartedClose(self):\n reactor = EventReactor(False)\n pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)\n # There should be a startup trigger waiting.\n self.assertEqual(reactor.triggers, [('after', 'startup', pool._start)])\n pool.close()\n # But not anymore.\n self.assertFalse(reactor.triggers)",
"def test_teardown(self):\n assert self.cosm_trade_handler.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def test_on_close_event(self):\n self.question = \"Please close this window by\\nclicking the close button.\"\n self._test_main()",
"def test_box_closing(self):\n return self.state == 'Closing' or self.ready_for_testing",
"def testClose(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid)\n check(res, msg=\"Locking file %s\" % t.code)\n res = c.lock_test(fh)\n check(res, NFS4ERR_DENIED, \"Testing file %s is locked\" % t.code)\n res = c.close_file(t.code, fh, stateid)\n checklist(res, [NFS4_OK, NFS4ERR_LOCKS_HELD],\n \"Trying to close locked file\")\n if res.status == NFS4ERR_LOCKS_HELD:\n t.fail_support(\"Can not close locked files\")\n # Now make sure lock was released\n res = c.lock_test(fh)\n check(res, msg=\"Testing that close released locks on file %s\" % t.code)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that the flush() method is called when the scope exits.
|
def test_was_flushed(self, mock_flush):
with get_outbox():
pass
self.assertTrue(mock_flush.called)
|
[
"def flush():\n actual_flush()",
"def test_flush_empties(queue):\n queue.flush()\n assert queue.empty()",
"def test_teardown(self):\n assert self.cosm_trade_handler.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def end_test(self):",
"def test_ending(shared_ressources):\n synchronization = shared_ressources['synchronization']\n data = shared_ressources['data']\n encoder = shared_ressources['encoder']\n songwriter = SongWriter(synchronization, data, encoder)\n synchronization['end'].set()\n songwriter.start()\n songwriter.join()\n assert not songwriter.is_alive()",
"def test_single_scope(self):\n with get_outbox() as outbox:\n self.assertIsInstance(outbox, Outbox)",
"def test_environment_end():\n assert not ray.is_initialized()",
"def flush(self):\n self.context.flush_commands()",
"def _atexit_run ( self ):\n if self.finalize_at_exit:\n self.finalize()",
"def test_flush_raises(self):\n with self.assertRaises(AttributeError):\n self.dset.flush()",
"def assert_close(self) -> None:\n assert self.is_closed",
"def test_was_closed(self, mock_close):\n with get_outbox():\n pass\n\n self.assertTrue(mock_close.called)",
"def after_flush(self, session, flush_context):\n if not self.options['versioning']:\n return\n uow = self.unit_of_work(session)\n uow.process_after_flush(session)",
"def _exit(self):\n self.logger.info(\"Traptor is set to exit its main event loop.\")\n\n self.exit = True\n self.exit_event.set()\n\n # If we're currently waiting on rule assignment, wake up\n self.rule_wait_event.set()",
"def __exit__(self, exc_type, exc_value, exc_tb):\n if exc_type is None:\n self._db.Write(self.batch, self.write_sync)",
"def __exit__(self, *args):\r\n\t\tself.io_buffer.close()",
"def test_watchers_are_finished(self):\n from cassandra.io.libevreactor import _global_loop\n with patch.object(_global_loop, \"_thread\"),\\\n patch.object(_global_loop, \"notify\"):\n\n self.make_connection()\n\n # We have to make a copy because the connections shouldn't\n # be alive when we verify them\n live_connections = set(_global_loop._live_conns)\n\n # This simulates the process ending without cluster.shutdown()\n # being called, then with atexit _cleanup for libevreactor would\n # be called\n libev__cleanup(_global_loop)\n for conn in live_connections:\n self.assertTrue(conn._write_watcher.stop.mock_calls)\n self.assertTrue(conn._read_watcher.stop.mock_calls)\n\n _global_loop._shutdown = False",
"def flush(self):\n self.seek(0)",
"def test_eofReceived(self):\n class FakeStdio:\n writeConnLost = False\n\n def loseWriteConnection(self):\n self.writeConnLost = True\n\n stdio = FakeStdio()\n channel = StdioInteractingSession()\n channel.stdio = stdio\n channel.eofReceived()\n self.assertTrue(stdio.writeConnLost)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show an error to the screen if the vertex does not exist.
|
def vert_not_exists_error(self, v: int) -> Optional[NoReturn]:
try:
assert v in self.vertices.keys()
except AssertionError:
raise Exception(f"Vertex {v} does not exist")
else:
return None
|
[
"def test_undirected_graph_vertex_already_exists(self):\n g = UndirectedGraph()\n g.add_vertex(v_val='v0')\n\n with self.assertRaises(ValueError):\n g.add_vertex(v_val='v0')",
"def test_directed_graph_vertex_already_exists(self):\n g = DirectedGraph()\n g.add_vertex(v_val='v0')\n\n with self.assertRaises(ValueError):\n g.add_vertex('v0')",
"def _validateVertex(self, v):\n V = len(self._rank)\n if v < 0 or v >= V:\n raise Exception(\"vertex {} is not between 0 and {}\".format(v, (V-1))\n\n# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.\n# Copyright 2002-2016, DV Klopfenstein, Python port",
"def test_vertex_only(self):\n\n v = g.random((1000, 3))\n v[g.np.floor(g.random(90) * len(v)).astype(int)] = v[0]\n\n mesh = g.trimesh.Trimesh(v)\n\n assert len(mesh.vertices) < 950\n assert len(mesh.vertices) > 900",
"def test_has_vert_filled_wrong(graph_one):\n assert graph_one.has_vert(\"X\") is False",
"def _validate_prim_vertex_index(prim, index):\n # If the index is less than 0, throw an exception since it's not valid.\n if index < 0:\n raise IndexError(\"Index must be 0 or greater.\")\n\n # If the index is too high it is also invalid.\n if index >= prim.numVertices():\n raise IndexError(\"Invalid index: {}\".format(index))",
"def test_parse_stl_file_invalid_facet_vertices_count(self):\n try:\n solid = parser.parse_stl_file(self.invalid_vertices)\n self.fail('Failed to raise bad facet vertices count')\n except STLAnalysisException as e:\n self.assertEqual('Bad vertex line: \"vertex 1 0\"', str(e))",
"def add_vertex(self, vertex):\n if isinstance(vertex, Vertex) and vertex.name not in self.vertices:\n self.vertices[vertex.name] = vertex\n return True\n else:\n return False",
"def get_vertex(self, id_num):",
"def add_vertex(self, v):\n if v not in self.vertices.keys(): \n self.vertices[v] = [False,[],0]",
"def show_errormsg():\n print(\"\\nTarget file does not exist, please put correct path for the file\")\n print()",
"def get_vertex(self, v_id):\n pass",
"def has_vertex(self,v):\n return v in self.graph",
"def get_or_create_vertex(self, label=None, **kwargs):",
"def test_parse_stl_file_invalid_facet_vertices_types(self):\n try:\n solid = parser.parse_stl_file(self.invalid_vertices_types)\n self.fail('Failed to raise bad facet vertex type')\n except STLAnalysisException as e:\n self.assertEqual('Bad vertex value in line: \"vertex not 0 0\"', str(e))",
"def test_has_vert_filled(graph_one):\n\n assert graph_one.has_vert(\"C\") is True",
"def graph_has_vertex( g, i ):\n return i in g.id_to_vertex",
"def add_vertex(self, v):\n pass",
"def __lookup_vertex_name(self, vertex: int):\n if vertex < 0:\n return None\n\n return self.vertices[vertex]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert from ORF to genomic coordinates.
|
def toGenomic(self, doSwapStartEnd=True):
self.genomic = True
o = parseOrfHeader(self.accession)
self.sStart,self.sEnd = convertOrfToGenomic(
self.sStart, self.sEnd, o.strand, o.start)
self.addStrandAttribute(o.strand)
if doSwapStartEnd:
self.swapStartEnd()
|
[
"def convertOrfToGenomic(start, end, strand, orfStart):\n if strand=='+':\n gStart = orfStart + 3*(start-1)\n gEnd = orfStart + 3*(end-1) + 2\n else:\n gStart = orfStart - 3*(start-1)\n gEnd = orfStart - 3*(end-1) - 2\n return gStart, gEnd",
"def convOSM(wkt):\n obj = OGRGeometry(wkt)\n obj.srs = 'EPSG:4326'\n obj.transform_to(SpatialReference('EPSG:900913'))\n #obj.transform_to(SpatialReference('EPSG:4326'))\n return (obj.x, obj.y)",
"def shape2coords(self, f):\n\t\tn = np.sqrt(self.mu/(self.a**3))\n\t\tE = f2E(f, self.e)\n\t\tx = self.a*(np.cos(E) - self.e)\n\t\ty = self.a*np.sqrt(1 - (self.e**2))*np.sin(E)\n\t\tvx = - self.a*n*np.sin(E)/(1 - self.e*np.cos(E))\n\t\tvy = self.a*n*np.cos(E)*np.sqrt(1 - (self.e**2))/(1 - self.e*np.cos(E))\n\t\tR = np.array([x,y,0])\n\t\tV = np.array([vx,vy,0])\n\t\treturn R, V",
"def geo2cell(cellfile, posfile, outfile):",
"def translate_to_alg_coords(self, list_pos):\n\n # list_pos would be something like [1, 3]\n # first element = row, second element = column\n row = list_pos[0]\n col = list_pos[1]\n\n # Add one for row since list coordinates start at zero but alg.\n # notation starts at 1\n row_str = str(row + 1)\n\n # Add 97 to get the character code. 0 -> a, 1 -> b, etc.\n col_str = str(chr(col + 97))\n\n return col_str + row_str",
"def _to_genes(self, x, scope):\n\n x = scope.index(x)\n x = self._converter.convert(x, self._digits)\n\n return x",
"def find_orfs(seq, min_protein_length, strand=1, trans_table=1,\n ignore_ambiguous_orfs=True):\n answer = []\n seq_len = len(seq)\n\n # Get sequence associated with the specified location and strand\n if strand == 1:\n dna_seq = seq\n else:\n dna_seq = seq.reverse_complement()\n\n for frame in range(3):\n trans = str(dna_seq[frame:].translate(trans_table))\n trans_len = len(trans)\n aa_start = 0\n aa_end = 0\n\n # Iterate through ORFS in reading frame\n while aa_start < trans_len:\n # Set end counter to position of next stop codon\n aa_start = trans.find(\"M\", aa_start)\n aa_end = trans.find(\"*\", aa_start)\n\n # If no start or stop codons found, stop here\n if aa_start == -1 or aa_end == -1:\n break\n\n if (aa_end < aa_start):\n raise Exception('wtf')\n\n # Compute coordinates of ORF\n if strand == 1:\n start = frame + aa_start * 3\n end = min(seq_len, frame + aa_end * 3 + 3)\n else:\n start = seq_len - frame - aa_end * 3 - 3\n end = seq_len - frame - aa_start * 3\n\n # Add to output\n str_strand = \"+\" if strand == 1 else '-'\n\n # Check to make sure ORF doesn't contain a bunch of N's\n if ignore_ambiguous_orfs:\n num_unknown = trans[aa_start:aa_end].count('X')\n if (num_unknown / (aa_end - aa_start)) > 0.25:\n aa_start = aa_end + 1\n continue\n\n # increment start counter\n aa_start = aa_end + 1\n\n # Add ORF coordinates and continue\n answer.append((start, end, str_strand))\n\n # Sort results\n answer.sort()\n\n return answer",
"def geom2geog(lat, long):\n lat = np.deg2rad(lat)\n long = np.deg2rad(long)\n\n # Pole coordinates for 2015\n pole_lat = np.deg2rad(80.37)\n pole_long = np.deg2rad(-72.62)\n\n pole_lat_s = np.sin(pole_lat)\n pole_lat_c = np.cos(pole_lat)\n pole_long_s = np.sin(pole_long)\n pole_long_c = np.cos(pole_long)\n\n # Rotation matrix\n matrix = np.array([\n [pole_lat_s * pole_long_c, pole_lat_s * pole_long_s, -pole_lat_c],\n [-pole_long_s, pole_long_c, 0],\n [pole_lat_c * pole_long_c, pole_lat_c * pole_long_s, pole_lat_s]\n ])\n matrix = np.linalg.inv(matrix)\n\n x = earth_radii * np.cos(lat) * np.cos(long)\n y = earth_radii * np.cos(lat) * np.sin(long)\n z = earth_radii * np.sin(lat)\n vect_geom = np.array([x, y, z])\n vect_geog = np.dot(matrix, vect_geom)\n norm = np.linalg.norm(vect_geog)\n\n lat_geog = np.arcsin(vect_geog[2] / norm)\n long_geog = np.arctan2(vect_geog[1], vect_geog[0])\n\n lat_geog = np.rad2deg(lat_geog)\n long_geog = np.rad2deg(long_geog)\n return lat_geog, long_geog",
"def get_aoi_geometry_as_geojson(self):\n aoi_geojson = db.engine.execute(self.geometry.ST_AsGeoJSON()).scalar()\n return geojson.loads(aoi_geojson)",
"def _togis(self, *args, **kwargs):\n return togis(self, *args, **kwargs)",
"def get_genomic_coord(chr, bin_idx, cfg):\r\n sizes = np.load(cfg.hic_path + cfg.sizes_file, allow_pickle=True).item()\r\n chr = ['chr' + str(x - 1) for x in chr]\r\n chr_start = [sizes[key] for key in chr]\r\n\r\n return (bin_idx - chr_start) * cfg.resolution",
"def chromosomeCoordinateToCodon(self, p):\n m = self.chromosomeCoordinateToMRna(p)\n return self.mRnaCoordinateToCodon(m)",
"def coordinates2Region():\n\tpass",
"def convert_to_local(self, geom):\n x_off = self.origin[0] - self.position[0]\n y_off = self.origin[1] - self.position[1]\n ang_off = self.origin_angle - self.rotation\n out = rotate(geom, ang_off, origin=self.position, use_radians=True)\n out = translate(out, x_off, y_off)\n return out",
"def get_gdf_coords(gdf):\n return np.array([[p.geometry.x, p.geometry.y] for p in gdf.itertuples()])",
"def split2gene(self, f_genome=None):\n # Fetch TGS and ANN reads\n self.fetch_reads()\n\n # Clustering reads\n clusters = self.find_clusters()\n\n # Cluster2gene\n gene_list = list()\n for cluster in clusters:\n gene = self.cluster2gene(cluster, f_genome)\n gene_list.append(gene)\n gene_list.sort(key=lambda x: x.ival.start)\n return gene_list",
"def annotate_region_gdna_genic_point(args, q, reg):\n r = Record()\n r.reg = reg\n r.chrm = q.tok\n r.set_promoter()\n\n c, p = reg.t.gpos2codon(q.pos)\n r.append_info(\"is_gene_body\")\n r.tname = reg.t.format()\n r.gene = reg.t.gene_name if reg.t.gene_name else '.'\n r.strand = reg.t.strand\n\n if p.tpos == 0 and reg.t.transcript_type == 'protein_coding':\n if c.seq in standard_codon_table:\n r.taa_ref = aaf(standard_codon_table[c.seq], args)\n r.taa_pos = c.index\n if args.aacontext>0 and r.taa_ref:\n aa1 = aaf(reg.t.taa_range2aa_seq(\n c.index-args.aacontext if c.index>=args.aacontext else 0, c.index-1), args)\n aa2 = aaf(reg.t.taa_range2aa_seq(c.index+1, c.index+args.aacontext), args)\n r.append_info('aacontext=%s[%s]%s' % (aa1, r.taa_ref, aa2))\n\n r.gnuc_pos = q.pos\n r.pos = q.pos\n r.gnuc_ref = faidx.refgenome.fetch_sequence(q.tok, q.pos, q.pos)\n \n # optional output\n if args.gseq:\n r.gnuc_beg = r.gnuc_pos\n r.gnuc_end = r.gnuc_pos\n\n r.tnuc_pos = p\n r.tnuc_ref = r.gnuc_ref if c.strand == '+' else complement(r.gnuc_ref)\n r.append_info('codon_pos=%s' % ('-'.join(map(str, c.locs)),))\n\n return r",
"def codonCoordinateToChromosome(self, p):\n m = self.codonCoordinateToMRna(p)\n return self.mRnaCoordinateToChromosome(m)",
"def _convert_geodataframe(self):\r\n\r\n value = self._frame\r\n\r\n c1_field, c2_field, c3_field, geometry_field = Series(), Series(), Series(), Series()\r\n try:\r\n c1_field = self._frame['coord_field1']\r\n c2_field = self._frame['coord_field2']\r\n c3_field = self._frame['coord_field3']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n print(self._frame.columns)\r\n print(self._frame)\r\n geometry_field = self._frame['geometry']\r\n except KeyError:\r\n pass\r\n\r\n crs = self.__dict__.get('crs', None)\r\n to_crs = self.__dict__.get('to_crs', None)\r\n\r\n if isinstance(value, GeoDataFrame):\r\n if not geometry_field.empty:\r\n if not c1_field.empty or not c2_field.empty or not c3_field.empty:\r\n warnings.warn('Coordinate fields should not be passed with a geometry field. This process will '\r\n 'continue assuming the geometry field takes precedence.')\r\n value = geodataframe_from_geometry(value, crs=crs)\r\n\r\n # is this part even necessary?\r\n elif (not c1_field.empty and not c2_field.empty) or (\r\n not c1_field.empty and not c2_field.empty and not c3_field.empty):\r\n if geometry_field is not None:\r\n raise GeoDataSetInfoError('Geometry field should not be passed along with longitude and '\r\n 'latitude fields.')\r\n value = geodataframe_from_coordinates(value, z=(not c3_field.empty), crs=crs)\r\n\r\n elif isinstance(value, DataFrame):\r\n try:\r\n value = geodataframe_from_coordinates(value, z=(not c3_field.empty), crs=crs)\r\n except KeyError:\r\n value = geodataframe_from_geometry(value, crs=crs)\r\n\r\n else:\r\n raise GeoDataSetFrameError(\"Your frame must be a valid GeoDataFrame!\")\r\n\r\n if value.empty:\r\n raise GeoDataSetInfoError(\"The frame can not be empty!\")\r\n\r\n if not value.crs:\r\n warnings.warn(\"A crs has not been set. This can be dangerous when performing spatial operations...\")\r\n elif to_crs:\r\n value.to_crs(crs=to_crs, inplace=True)\r\n\r\n self._finalize_frame(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Jump to regex match in file.
|
def jumpToMatch(iFile, regex):
for line in iFile:
if regex.match(line):
return True
return False
|
[
"def _match(self, regex):\n cregex = re.compile(regex)\n for line in self.content.splitlines():\n match = cregex.match(line)\n if match:\n return match\n raise Exception('No \"{0}\" line in {1}.cpp'.format(\n regex_to_error_msg(regex),\n self.name\n ))",
"def jump_to(f,tag):\n while True:\n line = f.readline()\n if tag in line:\n break",
"def correct_regexp(self):\n result = 0\n procmailregexp = re.compile(self.regexp)\n filename = self.check_for_files_in_maildir()\n if filename:\n file = open(filename,'r')\n for line in file:\n match = procmailregexp.search(line)\n if match:\n result = 1\n break\n return result",
"def process_file(file_path):\n file_of_matches=open(file_path, \"r\")\n #loop over every line to get process individual matches\n for match in file_of_matches:\n process_match(match[:-1])#drop the \\n from end of line \n file_of_matches.close()",
"def grep(self, fileregex, lineregex):\n import glob, re, os\n for filename in glob.glob(fileregex):\n if os.path.isfile(filename):\n f = open(filename, 'r')\n for line in f.xreadlines():\n if re.match(lineregex, line):\n print \"%s: %s\" % (filename, line)",
"def getNextPattern(filePointer, lineNumber, pattern):\n filePointer.seek(lineNumber)\n current_tell = filePointer.tell()\n for line_number, line in enumerate(iter(filePointer.readline, '')):\n line = line.rstrip()\n # find first header\n # print(line)\n if re.search(pattern, line):\n break\n current_tell = filePointer.tell()\n return current_tell",
"def search(regex, fullpath):\n\n p = re.compile(regex)\n for line in open(fullpath):\n if p.search(line):\n return True\n\n return False",
"def extractUptoMatch(iFile, regex):\n block = []\n for line in iFile:\n if regex.match(line):\n break\n else:\n block.append(line.rstrip())\n return block",
"def search(self, line):\n pass",
"def match_regex_in_markdown(markdown_file: pathlib.Path, exp_to_match: str) -> bool:\n with open(markdown_file, \"r\") as f:\n for line in f:\n match = re.search(exp_to_match, line)\n if match is not None:\n return True\n return False",
"def isearch_forward_regexp(self, event: Event) -> None: # pragma: no cover (cmd)\n self.start_incremental(event, 'isearch-forward-regexp',\n forward=True, ignoreCase=False, regexp=True)",
"def get_next_match():\n pass",
"def searchRegex(self,expression):\n #threadPool.acquire()\n \n #this is just used for debugging\n command = buildCommand(self.filepath)\n \n #get a reader from the \n reader = buildReader(self.filepath)\n \n #provide some helpful information \n #about what process is running.\n if debug:\n print \"Running: \" + command\n #reader = os.popen(command, \"r\")\n while 1:\n text = reader.readline()\n #print text\n if not text: break\n if expression.search(text):\n self.status = True\n break\n #close our resources.\n reader.close()\n #threadPool.release()",
"def return_match(self, line, regexp):\n parser = re.compile(regexp)\n match = parser.search(line)\n return match",
"def file_path_extract(self, logger, f, reg):\n try:\n reg = re.compile(reg)\n found = re.findall(reg, f)\n return None if len(found) == 0 else found[-1]\n except re.error:\n logger.warning(\"Invalid regex {0}, skipping\".format(reg))\n return None",
"def get_regex_match_in_file(file, regex):\n\n try:\n file_content = open(file).read();\n except IOError as e:\n raise Exception('Failed reading file [' + file + ']');\n\n match = re.findall(r'' + regex, file_content);\n\n # If something matched, return the first group of the first match.\n # Otherwise, return an empty string.\n if (len(match) == 0):\n return '';\n else:\n return match[0][0];",
"def wait_for_match(self, file, regex, timeout=60, poll_interval=1.0):\n compiled_regex = re.compile(regex)\n\n def check_content():\n try:\n file_content = self._exec.send_line(\n \"cat '{file}'\".format(file=file), expected_exit_code=0)\n except Exception:\n logger.debug(\n 'Error occurred when checking content of file {file}'.format(file=file),\n exc_info=True)\n return False\n\n return compiled_regex.search(file_content)\n\n return wait_for(check_content, timeout=timeout, poll_interval=poll_interval)",
"def regex_search(folder_path, regex):\n user_regex = re.compile(regex)\n\n if not os.path.isdir(folder_path):\n print('Please enter a valid folder_path')\n return\n\n for file_name in os.listdir(folder_path):\n\n if file_name.endswith('.txt'):\n with open(file_name) as file:\n for line in file:\n if user_regex.search(line): print(line, end='')",
"def fsearch(self, pattern, msg=None, killOn=None, textFlag=False):\n current = 0\n result = \"\"\n if textFlag:\n # fast, text-only mode\n for line in self.f:\n if self.eChecking:\n self.checkErrors(line)\n if pattern in line:\n result = line\n break\n elif killOn and killOn in line:\n result = \"\"\n break\n else:\n result = \"\"\n else:\n # slower regular expression mode\n cpat = re.compile(pattern)\n if killOn:\n kpat = re.compile(killOn)\n for line in self.f:\n if self.eChecking:\n self.checkErrors(line)\n if killOn:\n kill = re.search(kpat, line)\n if kill:\n # the kill phrase was found first, so die.\n result = \"\"\n break\n current = re.search(cpat, line)\n if current:\n if msg:\n print(msg)\n result = line\n break\n if not current:\n result = \"\"\n\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract up to regex match from file.
|
def extractUptoMatch(iFile, regex):
block = []
for line in iFile:
if regex.match(line):
break
else:
block.append(line.rstrip())
return block
|
[
"def process_file(file_path):\n file_of_matches=open(file_path, \"r\")\n #loop over every line to get process individual matches\n for match in file_of_matches:\n process_match(match[:-1])#drop the \\n from end of line \n file_of_matches.close()",
"def get_regex_match_in_file(file, regex):\n\n try:\n file_content = open(file).read();\n except IOError as e:\n raise Exception('Failed reading file [' + file + ']');\n\n match = re.findall(r'' + regex, file_content);\n\n # If something matched, return the first group of the first match.\n # Otherwise, return an empty string.\n if (len(match) == 0):\n return '';\n else:\n return match[0][0];",
"def file_path_extract(self, logger, f, reg):\n try:\n reg = re.compile(reg)\n found = re.findall(reg, f)\n return None if len(found) == 0 else found[-1]\n except re.error:\n logger.warning(\"Invalid regex {0}, skipping\".format(reg))\n return None",
"def _match(self, regex):\n cregex = re.compile(regex)\n for line in self.content.splitlines():\n match = cregex.match(line)\n if match:\n return match\n raise Exception('No \"{0}\" line in {1}.cpp'.format(\n regex_to_error_msg(regex),\n self.name\n ))",
"def extract_pattern_from_file(file,pos_dict,ex_dict,tag,output):\n if file.endswith(\"_pos.txt\"):\n with open(POS + file, 'r', encoding='utf-8') as f:\n for line in f:\n for entry in pos_dict:\n # For each entry find all occurrences in this sentence\n matches = re.finditer(entry, line, re.I)\n for match in matches:\n exclude = 0\n for exItem in ex_dict:\n exMatches = re.finditer(exItem.rstrip('\\n'), line, re.I)\n for exMatch in exMatches:\n # Check if positive match is within range of exclusion match, as the exclusion\n # may contain additional context\n if exMatch and (exMatch.start() <= match.start(1) <= exMatch.end()):\n exclude = 1\n # Save result to list of results with appropriate tag\n if match and exclude is 0:\n # Print match with context\n pre, post, m = \"\", \"\", \"\"\n for w in line[0:match.start()].split():\n pre = pre + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n for w in line[match.end():].split():\n if w.startswith(\"_\"):\n continue\n post = post + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n for w in match.group(0).split():\n m = m + w.rsplit(\"_\")[0].split(\":\")[1] + \" \"\n print(file + \" | sent. \" + match.group(1) + \": \\t\" + pre + \"\\t\"\n + m + \" \\t\" + post + \"\\t\" + tag + \"\\n\")\n output.write(file + \" | sent. \" + match.group(1) + \": \\t\" + pre + \"\\t\"\n + m + \" \\t\" + post + \"\\t\" + tag + \"\\n\")",
"def find_additional_content(self, alfile):\n with open(alfile, 'rb') as f:\n data = f.read()\n for ftype, tinfo in iter(self.PAT_FILEMARKERS.items()):\n\n # Build up the regex\n if tinfo[1] is not None:\n embed_regex = re.compile(tinfo[0] + '.+' + tinfo[1], re.DOTALL)\n else:\n embed_regex = re.compile(tinfo[0] + '.+', re.DOTALL)\n\n # Find the pattern that should match the image.\n img_match = re.match(embed_regex, str(data))\n if img_match:\n img_data = img_match.group()\n # Go to extraction module if there is one\n if tinfo[2] is not None:\n img_data = getattr(self, tinfo[2])(img_data)\n\n # Otherwise extract data as-is (regex is considered good enough)\n leftovers = data.replace(img_data, b\"\")\n\n # Remove trailing NULL bytes\n leftovers = re.sub(b'[\\x00]*$', b'', leftovers)\n\n if len(leftovers) > 15:\n return leftovers\n return",
"def search_re_lines(self, regexp):\n rec = re.compile(regexp, re.IGNORECASE)\n for l in self.lines:\n rem = rec.match(l)\n if rem:\n return rem.group(1)\n else:\n return ''",
"def _get_line(self, regex):\n return self._match(regex).group(1)",
"def parse_filename(pdf_file):\n logger.info(\"Input file: \" + pdf_file)\n for (pattern_name, file_pattern, fields) in FILE_SEARCH_PATTERN:\n search_result = file_pattern.search(pdf_file)\n if search_result:\n logger.debug('Recognised ' + pattern_name)\n logger.debug(\n \"cnum: \" + search_result.group(1) + \" fpage:\" + search_result.group(2))\n return search_result.groups()\n logger.warning('No known pattern for ' + pdf_file)",
"def jumpToMatch(iFile, regex):\n for line in iFile:\n if regex.match(line):\n return True\n return False",
"def split_match(self, match):\n\n if match:\n if match.group('file') != '-':\n match = None\n\n match, line, col, error, warning, message, _ = super().split_match(match)\n near = self.search_token(message)\n\n return match, line, col, error, warning, message, near",
"def grep(self, fileregex, lineregex):\n import glob, re, os\n for filename in glob.glob(fileregex):\n if os.path.isfile(filename):\n f = open(filename, 'r')\n for line in f.xreadlines():\n if re.match(lineregex, line):\n print \"%s: %s\" % (filename, line)",
"def load_expected_results(file, pattern):\n expected = {}\n compiled_pattern = re.compile(pattern)\n with open(file, encoding='utf-8') as f:\n test = None\n for line in f:\n line = line.rstrip()\n match = compiled_pattern.search(line)\n if match:\n test = match.groups()[0]\n expected[test] = ''\n else:\n expected[test] += line + '\\n'\n return expected",
"def return_match(self, line, regexp):\n parser = re.compile(regexp)\n match = parser.search(line)\n return match",
"def scan_project(self, project, regex):\n # cre = re.compile(regex)\n file = self.file_for_project(project)\n if not file:\n return\n line = 0\n result = []\n with open(file) as f:\n for task in f:\n line +=1\n match = re.match(regex, task)\n if match:\n result.append({'project':project, 'task':task.strip(), 'file':file, 'line':line, 'match':match})\n return result",
"def log_file_parse(rx, zippath, logfile):\n zf = ZipFile(zippath, \"r\")\n for line in log_line_split(zf.open(logfile)):\n m = rx.match(line)\n if not m:\n print zippath, logfile, \"not matched:\", line\n else:\n yield m.groups()",
"def matchAgainstFiles(regex, files):\n prevMatchDict = None\n compiled = re.compile(regex, re.VERBOSE)\n for f in files:\n\n match = compiled.match(f.fullPath)\n if not match:\n # Give up, it must match every file\n return None\n\n matchDict = match.groupdict()\n if prevMatchDict is not None and prevMatchDict != matchDict:\n # Give up, we got conflicting matches\n return None\n\n prevMatchDict = matchDict\n\n # If we got this far, the regex matched every file with\n # the same results. Now filter the matched portion out of\n # each file and store the matches we found.\n for f in files:\n f.path = compiled.sub('', f.fullPath)\n return prevMatchDict",
"def grep(regex, p_raw, m=None, s=False, lc=False):\n clean_src = clean_flist(p_raw,s=s)\n results = []\n if type(regex) == type(list()):\n regex_list = regex\n else:\n regex_list = [regex]\n \n match_cnt = 0\n for src in clean_src:\n try:\n fh = open(src)\n except Exception, e:\n if (s or STRICT):\n raise e\n else:\n perror(\"Cannot open file [%s].\" %src)\n err(\"Cannot open file [%s].\" %src)\n continue\n for line in fh:\n line_cnt = 0\n for re_tmp in regex_list:\n # TODO: regexes should be compiled once, not once per line per regex!\n if re.search(re_tmp, line) != None:\n results.append(line)\n line_cnt += 1\n continue\n\n if m and line_cnt > 0:\n match_cnt += 1\n if match_cnt >= m:\n break\n\n fh.close()\n\n if m and match_cnt >= m:\n break\n \n return results",
"def __toRegExp(self, fname, targetName) -> re:\n fname = os.path.join(self.packageDir(), fname)\n if not os.path.isfile(fname):\n EmergeDebug.die(\"%s not found at: %s\" % (targetName.capitalize(), os.path.abspath(fname)))\n regex = \"(\"\n for line in fileinput.input(fname):\n # Cleanup white spaces / line endings\n line = line.splitlines()\n line = line[0].rstrip()\n if line.startswith(\"#\") or len(line) == 0:\n continue\n try:\n tmp = \"^%s$\" % line\n regex += \"%s|\" % tmp\n re.compile(tmp, re.IGNORECASE) #for debug\n EmergeDebug.debug(\"%s added to %s as %s\" % (line, targetName, tmp), 2)\n except re.error:\n EmergeDebug.die(\"%s is not a valid regexp\" % tmp)\n return re.compile(\"%s)\" % regex[:-2], re.IGNORECASE)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse a 6 frame header (from translate or python).
|
def parseSixFrameHeader(header):
header = header.strip()
regex = re.compile(
'(?P<name>\w+)([\.|:](?P<start>\d+)[-|,](?P<end>\d+))?:(?P<frame>[0-5])')
rs = regex.search(header)
d = rs.groupdict()
d['frame'] = hmmer2frame[int(d['frame'])]
if d['frame']>0:
d['strand'] = '+'
else:
d['strand'] = '-'
try:
d['start'] = int(d['start'])
d['end'] = int(d['end'])
except:
pass
return ClassFromDict(d)
|
[
"def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']\n slot_ind = hdict['slot']\n eud2_ind = hdict['eud2']\n\n station = self.bit_manip(head_int[stat_ind[0]], stat_ind[1], stat_ind[2])\n link = self.bit_manip(head_int[link_ind[0]], link_ind[1], link_ind[2])\n slot = self.bit_manip(head_int[slot_ind[0]], slot_ind[1], slot_ind[2])\n frame = self.bit_manip(head_int[frame_ind[0]], frame_ind[1], frame_ind[2])\n time = self.bit_manip(head_int[t_ind[0]], t_ind[1], t_ind[2])\n count = self.bit_manip(head_int[eud2_ind[0]], eud2_ind[1], eud2_ind[2])\n\n return station, link, slot, frame, time, count",
"def _parse_header(head):\n # CALL1>CALL2,CALL3,CALL4,CALL5:\n # |from-|--to-|------path-------|\n #\n try:\n (fromcall, path) = head.split('>', 1)\n except:\n raise ParseError(\"invalid packet header\")\n\n # looking at aprs.fi, the rules for from/src callsign\n # are a lot looser, causing a lot of packets to fail\n # this check.\n #\n # if len(fromcall) == 0:\n # raise ParseError(\"no fromcallsign in header\")\n # _validate_callsign(fromcall, \"fromcallsign\")\n\n if (not 1 <= len(fromcall) <= 9 or\n not re.findall(r\"^[a-z0-9]{0,9}(\\-[a-z0-9]{1,8})?$\", fromcall, re.I)):\n\n raise ParseError(\"fromcallsign is invalid\")\n\n path = path.split(',')\n\n if len(path) < 1 or len(path[0]) == 0:\n raise ParseError(\"no tocallsign in header\")\n\n tocall = path[0]\n path = path[1:]\n\n _validate_callsign(tocall, \"tocallsign\")\n\n for digi in path:\n if not re.findall(r\"^[A-Z0-9\\-]{1,9}\\*?$\", digi, re.I):\n raise ParseError(\"invalid callsign in path\")\n\n parsed = {\n 'from': fromcall,\n 'to': tocall,\n 'path': path,\n }\n\n # viacall is the callsign that gated the packet to the net\n # it's located behind the q-contructed\n #\n # CALL1>CALL2,CALL3,qAR,CALL5:\n # .....................|-via-|\n #\n viacall = \"\"\n if len(path) >= 2 and re.match(r\"^q..$\", path[-2]):\n viacall = path[-1]\n\n parsed.update({'via': viacall})\n\n return parsed",
"def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. Also, I won't use\")\n log.info(\" any data from them.\")",
"def parse_header(header_bytes):\n size = header_bytes[:4]\n size = int.from_bytes(size, byteorder=\"big\")\n\n f_hash = header_bytes[4:20]\n\n f_name = header_bytes[20:275]\n try:\n f_name = f_name.decode()\n except:\n print(f_name)\n raise Exception(\n \"Content could not be retrieved from image. (File name could not be parsed.)\")\n\n return size, f_hash, f_name.strip()",
"def ParseHeader(self, data):\n header = struct.unpack('<BB',data);\n self.hdr_msgID = header[0]\n self.hdr_msgLen = header[1]\n return True",
"def extract_header(msg):\n\n raw_header = unpack_from(\">HHHHHH\", msg, 0)\n\n x_id = raw_header[0]\n flags = raw_header[1]\n\n qr = flags >> 15\n opcode = (flags & 0x7800) >> 11\n aa = (flags & 0x0400) >> 10\n tc = (flags & 0x0200) >> 9\n rd = (flags & 0x0100) >> 8\n ra = (flags & 0x0080) >> 7\n rcode = (flags & 0x000f)\n\n qdcount = raw_header[2]\n ancount = raw_header[3]\n nscount = raw_header[4]\n arcount = raw_header[5]\n\n return Header(x_id, qr, opcode, aa, tc, rd, ra, rcode, qdcount, ancount, nscount, arcount)",
"def _parse_header(header):\n names, lengths = [], []\n for line in header:\n if line.startswith(\"@SQ\"):\n for field in line.split(\"\\t\"):\n if field.startswith(\"SN:\"):\n names.append(field[3:])\n elif field.startswith(\"LN:\"):\n lengths.append(int(field[3:]))\n return names, lengths",
"def decode_header(head):\r\n freqs=[44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000]\r\n fr={}\r\n if head & (1 << 20):\r\n if head & (1 << 19):\r\n fr[\"lsf\"]=0\r\n else:\r\n fr[\"lsf\"]=1\r\n fr[\"mpeg25\"] = 0\r\n else:\r\n fr[\"lsf\"] = 1\r\n fr[\"mpeg25\"] = 1\r\n\r\n fr[\"lay\"] = 4 - ((head >> 17) & 3)\r\n if fr[\"mpeg25\"]:\r\n fr[\"sampling_frequency\"] = freqs[6 + ((head >> 10) & 0x3)]\r\n else:\r\n fr[\"sampling_frequency\"] = freqs[((head >> 10) & 0x3) + (fr[\"lsf\"] * 3)]\r\n\r\n fr[\"error_protection\"] = ((head >> 16) & 0x1) ^ 0x1\r\n fr[\"bitrate_index\"] = ((head >> 12) & 0xf)\r\n fr[\"bitrate\"]=table[fr[\"lsf\"]][fr[\"lay\"]-1][fr[\"bitrate_index\"]]\r\n fr[\"padding\"]=((head>>9) & 0x1)\r\n fr[\"channel_mode\"]=((head>>6) & 0x3)\r\n \r\n if fr[\"lay\"]==1:\r\n fr[\"framesize\"]=table[fr[\"lsf\"]][0][fr[\"bitrate_index\"]]*12000\r\n fr[\"framesize\"]=fr[\"framesize\"]/fr[\"sampling_frequency\"]\r\n fr[\"framesize\"]=((fr[\"framesize\"]+fr[\"padding\"])<<2)-4\r\n elif fr[\"lay\"]==2:\r\n fr[\"framesize\"]=table[fr[\"lsf\"]][1][fr[\"bitrate_index\"]]*144000\r\n fr[\"framesize\"]=fr[\"framesize\"]/fr[\"sampling_frequency\"]\r\n fr[\"framesize\"]=fr[\"framesize\"]+fr[\"padding\"]-1\r\n elif fr[\"lay\"]==3:\r\n fr[\"framesize\"]=table[fr[\"lsf\"]][2][fr[\"bitrate_index\"]]*144000\r\n fr[\"framesize\"]=fr[\"framesize\"]/fr[\"sampling_frequency\"]<<fr[\"lsf\"]\r\n fr[\"framesize\"]=fr[\"framesize\"]+fr[\"padding\"]-4\r\n pass\r\n else:\r\n return 0\r\n \r\n return fr",
"def _get_header(self):\n log.debug('---In dcd.py, get_header()')\n self._parse_header()\n self._parse_title()\n self._parse_atoms()\n \n log.debug(\"Parsing: %s\"% self._title) #print out some useful information\n for i in range(0,len(self._title),80):\n log.debug(self._title[i:i+80])\n\n if self._nframes*self._dcdfreq != self._ntsteps:\n log.warn(\"error-- the wierd ntsteps frame is not what I think it should be!\")",
"def parse_header(self):\n\n chunk_id, chunk_len = self.next_chunk()\n instream = self.instream\n\n # check if it is a proper midi file\n if chunk_id != b\"MThd\":\n raise ParseError(\"Invalid MIDI file header. Chunk identifier must be 'MThd'.\")\n\n # Header values are at fixed locations, so no reason to be clever\n self.format = read_bew(instream.read(2))\n self.num_tracks = read_bew(instream.read(2))\n\n if self.format == 0 and self.num_tracks > 1:\n msg = (\n \"Invalid number of tracks (%i). Type 0 midi files may only \"\n \"contain a single track.\" % self.num_tracks\n )\n\n if self.strict:\n raise ParseError(msg)\n else:\n log.warning(msg)\n\n tick_div = instream.read(2)\n fps, resolution = tointseq(tick_div)\n\n if fps & 0x80:\n metrical = False\n else:\n metrical = True\n division = read_bew(tick_div)\n\n # Theoretically a header larger than 6 bytes can exist\n # but no one has seen one in the wild.\n # We will correctly ignore unknown data if present, though.\n if chunk_len > 6:\n log.warning(\"Invalid header size (%i). Skipping trailing header \" \"bytes\", chunk_len)\n instream.seek(chunk_len - 6, 1)\n\n # call the header event handler on the stream\n if metrical:\n self.dispatch(\n \"header\", self.format, self.num_tracks, metrical=True, tick_division=division\n )\n else:\n self.dispatch(\n \"header\",\n self.format,\n self.num_tracks,\n metrical=False,\n fps=fps,\n frame_resolution=resolution,\n )",
"def _extractPayload(response):\n # extract bytecount and check it\n print \"response:{}\".format(repr(response))\n pos = 2\n bytecount = ord(response[pos])\n pos += 1\n\n if bytecount < 6:\n raise ValueError(bytecount)\n\n subframe = response[2:3+bytecount]\n\n # extract DA\n if ord(subframe[pos]) == DLE:\n pos += 1\n da = ord(subframe[pos])\n pos += 1\n\n # extract CW\n if ord(subframe[pos]) == DLE:\n pos += 1\n cw = ord(subframe[pos])\n pos += 1\n\n # extract SAX\n if ord(subframe[pos]) == DLE:\n pos += 1\n sax = ord(subframe[pos])\n pos += 1\n\n # extract SA\n if ord(subframe[pos]) == DLE:\n pos += 1\n sa = ord(subframe[pos])\n pos += 1\n\n # extract cmd\n cmd = ord(subframe[pos]) \n\n return subframe",
"def _parse_header(self):\n header = int_from_lbytes(self._reader.read(4))\n if header != self._HEADER:\n raise StashFileParseError(f'Invalid header id: 0x{header:08X}')\n self.version = int_from_lbytes(self._reader.read(2))",
"def parseFrames(self):\n\n start = self.buf.find(\"\\x00\")\n\n while start != -1:\n end = self.buf.find(\"\\xff\")\n if end == -1:\n # Incomplete frame, try again later.\n return\n else:\n frame, self.buf = self.buf[start + 1:end], self.buf[end + 1:]\n # Decode the frame, if we have a decoder.\n if self.codec:\n frame = decoders[self.codec](frame)\n # Pass the frame to the underlying protocol.\n ProtocolWrapper.dataReceived(self, frame)\n start = self.buf.find(\"\\x00\")",
"def parse_header(self, header):\n header_separator = self.header_separator.encode()\n length, separator, message_chunk = header.partition(header_separator)\n try:\n return int(length), message_chunk\n except ValueError:\n return None, None",
"def _parse_header(fh):\n headerConverters = {\n b'StartFontMetrics': float,\n b'FontName': _to_str,\n b'FullName': _to_str,\n b'FamilyName': _to_str,\n b'Weight': _to_str,\n b'ItalicAngle': float,\n b'IsFixedPitch': _to_bool,\n b'FontBBox': _to_list_of_ints,\n b'UnderlinePosition': _to_int,\n b'UnderlineThickness': _to_int,\n b'Version': _to_str,\n b'Notice': _to_str,\n b'EncodingScheme': _to_str,\n b'CapHeight': float, # Is the second version a mistake, or\n b'Capheight': float, # do some AFM files contain 'Capheight'? -JKS\n b'XHeight': float,\n b'Ascender': float,\n b'Descender': float,\n b'StdHW': float,\n b'StdVW': float,\n b'StartCharMetrics': _to_int,\n b'CharacterSet': _to_str,\n b'Characters': _to_int,\n }\n d = {}\n while 1:\n line = bytes(fh.readline(), 'ascii')\n if not line: break\n line = line.rstrip()\n if line.startswith(b'Comment'): continue\n lst = line.split(b' ', 1 )\n key = lst[0]\n if len( lst ) == 2:\n val = lst[1]\n else:\n val = b''\n #key, val = line.split(' ', 1)\n try: d[key] = headerConverters[key](val)\n except ValueError:\n continue\n except KeyError:\n continue\n if key==b'StartCharMetrics': return d\n raise RuntimeError('Bad parse')",
"def parse_header(self):\n\n # get the sequence value of the entry\n seq = unpack(\"<B\", self._entry[16:17])[0]\n\n # get the logfile sequence number (lsn) of the entry\n lsn = unpack(\"<2L\", self._entry[8:16])[0]\n\n # get used size of the entry\n self._used_size = unpack(\"<L\", self._entry[24:28])[0]\n\n # get allocated size of the entry\n allocated_size = unpack(\"<L\", self._entry[28:32])[0]\n\n # get offset to first attribute\n self._first_attr = unpack(\"<H\", self._entry[20:22])[0]\n\n # get next attribute id\n self._next_attrID = unpack(\"<H\", self._entry[40:42])[0]\n\n print (\"MFT Entry Header Values:\")\n print (\"Sequence: %d\" % seq)\n print (\"$LogFile Sequence Number: %d\" % lsn)\n if allocated_size > 0:\n print (\"Allocated File\")\n else:\n print (\"Unallocated File\")\n print (\"\")\n print (\"Used size: %d bytes\" % self._used_size)\n print (\"Allocated size: %d bytes\" % allocated_size)\n print (\"\")",
"def _parse_headers(self, instr):\n top, rest = hdr_end.split(instr, 1)\n self.input_header_length = len(top)\n header_lines = top.splitlines()\n\n # chop off the top line\n while True: # TODO: limit?\n try:\n top_line = header_lines.pop(0)\n if top_line.strip() != \"\":\n break\n except IndexError: # empty\n return rest\n \n try:\n hdr_tuples, conn_tokens, transfer_codes, content_length \\\n = self._parse_fields(header_lines, True)\n except TypeError: # returned None because there was an error\n if not self.inspecting:\n return \"\" # throw away the rest\n \n # ignore content-length if transfer-encoding is present\n if transfer_codes != [] and content_length != None:\n content_length = None\n\n try:\n allows_body = self.input_start(top_line, hdr_tuples,\n conn_tokens, transfer_codes, content_length)\n except ValueError: # parsing error of some kind; abort.\n if not self.inspecting:\n return \"\" # throw away the rest\n allows_body = True\n\n self._input_state = HEADERS_DONE\n if not allows_body:\n self._input_delimit = NOBODY\n elif len(transfer_codes) > 0:\n if transfer_codes[-1] == 'chunked':\n self._input_delimit = CHUNKED\n self._input_body_left = -1 # flag that we don't know\n else:\n self._input_delimit = CLOSE\n elif content_length != None:\n self._input_delimit = COUNTED\n self._input_body_left = content_length\n else:\n self._input_delimit = CLOSE\n return rest",
"def parse_lead_headers(self):\n # parse General Header blocks\n self.header_data[\"General Header\"] = OrderedDict([])\n for header_block in self.schema[\"General Header\"]:\n self.header_data[\"General Header\"][header_block] = self._read_header_block(\n self.schema[\"General Header\"][header_block]\n )\n self.cursor_position += self.schema[\"General Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse Channel Set Descriptor blocks\n self.header_data[\"Channel Set Descriptor\"] = OrderedDict([])\n for n in range(\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n ):\n self.header_data[\"Channel Set Descriptor\"][\n \"Channel Set Descriptor Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"Channel Set Descriptor\"][\"Main Block\"]\n )\n self.cursor_position += self.schema[\"Channel Set Descriptor\"][\"Main Block\"][\n \"block_length_in_bytes\"\n ]\n # parse the first three Extended Header blocks\n self.header_data[\"Extended Header\"] = OrderedDict([])\n for n in range(3):\n header_block = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][header_block] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n self.number_of_trace_blocks = (\n self.header_data[\"Extended Header\"][\"32-byte Extended Header Block #2\"][\n \"number_of_records_in_file\"\n ][\"value\"]\n * self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n )\n # parse the next n 32-byte Extended Header blocks as necessary\n for n in range(\n 3,\n self.header_data[\"General Header\"][\"General Header Block #2\"][\n \"extended_header_blocks\"\n ][\"value\"],\n ):\n header_block = \"32-byte Extended Header Auxiliary Block\"\n block_label = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][block_label] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse the general External Header Block\n self.header_data[\"External Header\"] = OrderedDict([])\n self.header_data[\"External Header\"][\n \"External Header Block #1\"\n ] = self._read_header_block(\n self.schema[\"External Header\"][\"External Header Block #1\"]\n )\n self.cursor_position += self.schema[\"External Header\"][\n \"External Header Block #1\"\n ][\"block_length_in_bytes\"]\n # parse the next n 32-byte External Header blocks\n if (\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"number_of_32_byte_external_header_blocks\"\n ][\"value\"]\n == \"ff\"\n ):\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #2\"][\"external_header_blocks\"][\"value\"]\n else:\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #1\"][\"number_of_32_byte_external_header_blocks\"][\n \"value\"\n ]\n for n in range(number_of_32_byte_external_header_blocks - 1):\n self.header_data[\"External Header\"][\n \"32-byte External Header Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"External Header\"][\n \"32-byte External Header Auxiliary Block\"\n ]\n )\n self.cursor_position += self.schema[\"External Header\"][\n \"32-byte External 
Header Auxiliary Block\"\n ][\"block_length_in_bytes\"]",
"def parse_hybi07_frames(buf):\r\n\r\n start = 0\r\n frames = []\r\n\r\n while True:\r\n # If there's not at least two bytes in the buffer, bail.\r\n if len(buf) - start < 2:\r\n break\r\n\r\n # Grab the header. This single byte holds some flags nobody cares\r\n # about, and an opcode which nobody cares about.\r\n header = ord(buf[start])\r\n if header & 0x70:\r\n # At least one of the reserved flags is set. Pork chop sandwiches!\r\n raise WSException(\"Reserved flag in HyBi-07 frame (%d)\" % header)\r\n frames.append((\"\", CLOSE))\r\n return frames, buf\r\n\r\n # Get the opcode, and translate it to a local enum which we actually\r\n # care about.\r\n opcode = header & 0xf\r\n try:\r\n opcode = opcode_types[opcode]\r\n except KeyError:\r\n raise WSException(\"Unknown opcode %d in HyBi-07 frame\" % opcode)\r\n\r\n # Get the payload length and determine whether we need to look for an\r\n # extra length.\r\n length = ord(buf[start + 1])\r\n masked = length & 0x80\r\n length &= 0x7f\r\n\r\n # The offset we're gonna be using to walk through the frame. We use\r\n # this because the offset is variable depending on the length and\r\n # mask.\r\n offset = 2\r\n\r\n # Extra length fields.\r\n if length == 0x7e:\r\n if len(buf) - start < 4:\r\n break\r\n\r\n length = buf[start + 2:start + 4]\r\n length = unpack(\">H\", length)[0]\r\n offset += 2\r\n elif length == 0x7f:\r\n if len(buf) - start < 10:\r\n break\r\n\r\n # Protocol bug: The top bit of this long long *must* be cleared;\r\n # that is, it is expected to be interpreted as signed. That's\r\n # fucking stupid, if you don't mind me saying so, and so we're\r\n # interpreting it as unsigned anyway. If you wanna send exabytes\r\n # of data down the wire, then go ahead!\r\n length = buf[start + 2:start + 10]\r\n length = unpack(\">Q\", length)[0]\r\n offset += 8\r\n\r\n if masked:\r\n if len(buf) - (start + offset) < 4:\r\n break\r\n\r\n key = buf[start + offset:start + offset + 4]\r\n offset += 4\r\n\r\n if len(buf) - (start + offset) < length:\r\n break\r\n\r\n data = buf[start + offset:start + offset + length]\r\n\r\n if masked:\r\n data = mask(data, key)\r\n\r\n if opcode == CLOSE:\r\n if len(data) >= 2:\r\n # Gotta unpack the opcode and return usable data here.\r\n data = unpack(\">H\", data[:2])[0], data[2:]\r\n else:\r\n # No reason given; use generic data.\r\n data = 1000, \"No reason given\"\r\n\r\n frames.append((opcode, data))\r\n start += offset + length\r\n\r\n return frames, buf[start:]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse an ORF header (from extractORFs.py).
|
def parseOrfHeader(header):
regex = re.compile(
'(?P<name>\w+)\.(?P<orfId>\d+)\.(?P<start>\d+)-(?P<end>\d+)(\SLength=(?P<length>\d+))?')
rs = regex.match(header.strip())
d = rs.groupdict()
try:
d['start'] = int(d['start'])
d['end'] = int(d['end'])
d['length'] = int(d['length'])
except:
pass
if d['start']>d['end']:
d['strand'] = '-'
else:
d['strand'] = '+'
return ClassFromDict(d)
|
[
"def _parse_elf_header(self):\r\n return struct_parse(self.structs.Elf_Ehdr, self.stream, stream_pos=0)",
"def _parse_header(self):\n\n if self.ei_magic != '\\x7fELF':\n return\n\n self.seek(16,0)\n reading = {'h': self.le_half, 'w': self.le_word,'a': self.le_addr,\n 'o': self.le_offset, 'x': self.le_xword}\n labels = ('type', 'machine', 'version', 'entry', 'phoff', \\\n 'shoff', 'flags', 'ehsize', 'phentsize', 'phnum',\\\n 'shentsize','shnum','shstrndx')\n htypes = ('h','h','w','a','o','o','w','h','h','h','h','h','h')\n\n # Retrieve ELF header\n self.elfhead = dict(zip(labels,[reading[t]() for t in htypes]))\n\n # Retrieve section header string table.\n # sh: name, type, flags, addr, offset, size, link, info, addralign, entsize\n self.seek((self.elfhead['shentsize'] * self.elfhead['shstrndx'])\\\n + self.elfhead['shoff'], 0)\n\n labels = ('name', 'type', 'flags', 'addr', 'offset', \\\n 'size', 'link', 'info', 'addralign', 'entsize')\n\n shtypes = ('w','w','x','a','o','x','w','w','x','x')\n\n sh_strtableh = dict(zip(labels,[reading[t]() for t in shtypes]))\n self.seek(sh_strtableh['offset'],0)\n self.sh_strtableh = sh_strtableh\n\n # Now the section header is known, can retrieve dynamic string table\n self.dynstrh = self._find_section('.dynstr')",
"def parse_header(header_bytes):\n size = header_bytes[:4]\n size = int.from_bytes(size, byteorder=\"big\")\n\n f_hash = header_bytes[4:20]\n\n f_name = header_bytes[20:275]\n try:\n f_name = f_name.decode()\n except:\n print(f_name)\n raise Exception(\n \"Content could not be retrieved from image. (File name could not be parsed.)\")\n\n return size, f_hash, f_name.strip()",
"def parse_header(hdr_file):\n with open(hdr_file, encoding=\"utf8\", errors='ignore') as f:\n text = f.read()\n\n try:\n lines = [e.split() for e in text.split(\"\\n\") if e != \"\"]\n headers = dict(lines)\n is_dem = True if DATUM in headers or Z_SCALE in headers \\\n or PROJECTION in headers else False\n if is_dem and DATUM not in headers:\n msg = 'No \"DATUM\" parameter in DEM header/resource file'\n raise RoipacException(msg)\n except ValueError:\n msg = \"Unable to parse content of %s. Is it a ROIPAC header file?\"\n raise RoipacException(msg % hdr_file)\n\n for k in headers.keys():\n if k in INT_HEADERS:\n headers[k] = int(headers[k])\n elif k in STR_HEADERS:\n headers[k] = str(headers[k])\n elif k in FLOAT_HEADERS:\n headers[k] = float(headers[k])\n elif k in DATE_HEADERS:\n headers[k] = parse_date(headers[k])\n else: # pragma: no cover\n pass # ignore other headers\n\n # grab a subset for GeoTIFF conversion\n subset = {ifc.PYRATE_NCOLS: headers[WIDTH],\n ifc.PYRATE_NROWS: headers[FILE_LENGTH],\n ifc.PYRATE_LAT: headers[Y_FIRST],\n ifc.PYRATE_LONG: headers[X_FIRST],\n ifc.PYRATE_X_STEP: headers[X_STEP],\n ifc.PYRATE_Y_STEP: headers[Y_STEP]}\n\n if is_dem:\n subset[ifc.PYRATE_DATUM] = headers[DATUM]\n else:\n subset[ifc.PYRATE_WAVELENGTH_METRES] = headers[WAVELENGTH]\n\n # grab first/second dates from header, or the filename\n has_dates = True if DATE in headers and DATE12 in headers else False\n dates = headers[DATE12] if has_dates else _parse_dates_from(hdr_file)\n subset[ifc.FIRST_DATE], subset[ifc.SECOND_DATE] = dates\n\n # replace time span as ROIPAC is ~4 hours different to (second minus first)\n timespan = (subset[ifc.SECOND_DATE] - subset[ifc.FIRST_DATE]).days / ifc.DAYS_PER_YEAR\n subset[ifc.PYRATE_TIME_SPAN] = timespan\n\n # Add data units of interferogram\n subset[ifc.DATA_UNITS] = RADIANS\n\n # Add InSAR processor flag\n subset[ifc.PYRATE_INSAR_PROCESSOR] = ROIPAC\n\n # add custom X|Y_LAST for convenience\n subset[X_LAST] = headers[X_FIRST] + (headers[X_STEP] * (headers[WIDTH]))\n subset[Y_LAST] = headers[Y_FIRST] + (headers[Y_STEP] * (headers[FILE_LENGTH]))\n\n return subset",
"def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. Also, I won't use\")\n log.info(\" any data from them.\")",
"def _fmap_decode_header(blob, offset):\n header = {}\n for (name, value) in zip(FMAP_HEADER_NAMES,\n struct.unpack_from(FMAP_HEADER_FORMAT,\n blob,\n offset)):\n header[name] = value\n\n if header['signature'] != FMAP_SIGNATURE:\n raise struct.error('Invalid signature')\n if (header['ver_major'] != FMAP_VER_MAJOR or\n header['ver_minor'] < FMAP_VER_MINOR_MIN or\n header['ver_minor'] > FMAP_VER_MINOR_MAX):\n raise struct.error('Incompatible version')\n\n # convert null-terminated names\n header['name'] = header['name'].strip(chr(0))\n return (header, struct.calcsize(FMAP_HEADER_FORMAT))",
"def _parse_header(fh):\n headerConverters = {\n b'StartFontMetrics': float,\n b'FontName': _to_str,\n b'FullName': _to_str,\n b'FamilyName': _to_str,\n b'Weight': _to_str,\n b'ItalicAngle': float,\n b'IsFixedPitch': _to_bool,\n b'FontBBox': _to_list_of_ints,\n b'UnderlinePosition': _to_int,\n b'UnderlineThickness': _to_int,\n b'Version': _to_str,\n b'Notice': _to_str,\n b'EncodingScheme': _to_str,\n b'CapHeight': float, # Is the second version a mistake, or\n b'Capheight': float, # do some AFM files contain 'Capheight'? -JKS\n b'XHeight': float,\n b'Ascender': float,\n b'Descender': float,\n b'StdHW': float,\n b'StdVW': float,\n b'StartCharMetrics': _to_int,\n b'CharacterSet': _to_str,\n b'Characters': _to_int,\n }\n d = {}\n while 1:\n line = bytes(fh.readline(), 'ascii')\n if not line: break\n line = line.rstrip()\n if line.startswith(b'Comment'): continue\n lst = line.split(b' ', 1 )\n key = lst[0]\n if len( lst ) == 2:\n val = lst[1]\n else:\n val = b''\n #key, val = line.split(' ', 1)\n try: d[key] = headerConverters[key](val)\n except ValueError:\n continue\n except KeyError:\n continue\n if key==b'StartCharMetrics': return d\n raise RuntimeError('Bad parse')",
"def _parse_header(self):\n header = int_from_lbytes(self._reader.read(4))\n if header != self._HEADER:\n raise StashFileParseError(f'Invalid header id: 0x{header:08X}')\n self.version = int_from_lbytes(self._reader.read(2))",
"def _parse_header(head):\n # CALL1>CALL2,CALL3,CALL4,CALL5:\n # |from-|--to-|------path-------|\n #\n try:\n (fromcall, path) = head.split('>', 1)\n except:\n raise ParseError(\"invalid packet header\")\n\n # looking at aprs.fi, the rules for from/src callsign\n # are a lot looser, causing a lot of packets to fail\n # this check.\n #\n # if len(fromcall) == 0:\n # raise ParseError(\"no fromcallsign in header\")\n # _validate_callsign(fromcall, \"fromcallsign\")\n\n if (not 1 <= len(fromcall) <= 9 or\n not re.findall(r\"^[a-z0-9]{0,9}(\\-[a-z0-9]{1,8})?$\", fromcall, re.I)):\n\n raise ParseError(\"fromcallsign is invalid\")\n\n path = path.split(',')\n\n if len(path) < 1 or len(path[0]) == 0:\n raise ParseError(\"no tocallsign in header\")\n\n tocall = path[0]\n path = path[1:]\n\n _validate_callsign(tocall, \"tocallsign\")\n\n for digi in path:\n if not re.findall(r\"^[A-Z0-9\\-]{1,9}\\*?$\", digi, re.I):\n raise ParseError(\"invalid callsign in path\")\n\n parsed = {\n 'from': fromcall,\n 'to': tocall,\n 'path': path,\n }\n\n # viacall is the callsign that gated the packet to the net\n # it's located behind the q-contructed\n #\n # CALL1>CALL2,CALL3,qAR,CALL5:\n # .....................|-via-|\n #\n viacall = \"\"\n if len(path) >= 2 and re.match(r\"^q..$\", path[-2]):\n viacall = path[-1]\n\n parsed.update({'via': viacall})\n\n return parsed",
"def _parse_header(header):\n names, lengths = [], []\n for line in header:\n if line.startswith(\"@SQ\"):\n for field in line.split(\"\\t\"):\n if field.startswith(\"SN:\"):\n names.append(field[3:])\n elif field.startswith(\"LN:\"):\n lengths.append(int(field[3:]))\n return names, lengths",
"def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']\n slot_ind = hdict['slot']\n eud2_ind = hdict['eud2']\n\n station = self.bit_manip(head_int[stat_ind[0]], stat_ind[1], stat_ind[2])\n link = self.bit_manip(head_int[link_ind[0]], link_ind[1], link_ind[2])\n slot = self.bit_manip(head_int[slot_ind[0]], slot_ind[1], slot_ind[2])\n frame = self.bit_manip(head_int[frame_ind[0]], frame_ind[1], frame_ind[2])\n time = self.bit_manip(head_int[t_ind[0]], t_ind[1], t_ind[2])\n count = self.bit_manip(head_int[eud2_ind[0]], eud2_ind[1], eud2_ind[2])\n\n return station, link, slot, frame, time, count",
"def parse_header(self, line):\n bml.logger.debug(\"BssFile.parse_header(line=%s)\" % (line))\n # GJP 2021-04-16 Allow empty system names\n m = re.match(r\"(?P<file_type>.)00\\{(?P<system_name>[^\\}]*)\\}=NYYYYYY(?P<summary>.*$)\", line)\n assert m, \"line (%s) does not match header record\" % (line)\n self.file_type = m.group('file_type')\n self.system_name = m.group('system_name')\n self.summary = m.group('summary').rstrip()\n bml.logger.debug(\"file_type: %s; system_name: %s; summary: %s\" % (self.file_type, self.system_name, self.summary))\n self.state_nr = self.state_nr + 1 # only one header\n return True",
"def ParseElfHeader(path):\n try:\n return elf.ParseElfHeader(path)\n except elf.Error as e:\n raise Error(str(e))",
"def _parse_headers(self, instr):\n top, rest = hdr_end.split(instr, 1)\n self.input_header_length = len(top)\n header_lines = top.splitlines()\n\n # chop off the top line\n while True: # TODO: limit?\n try:\n top_line = header_lines.pop(0)\n if top_line.strip() != \"\":\n break\n except IndexError: # empty\n return rest\n \n try:\n hdr_tuples, conn_tokens, transfer_codes, content_length \\\n = self._parse_fields(header_lines, True)\n except TypeError: # returned None because there was an error\n if not self.inspecting:\n return \"\" # throw away the rest\n \n # ignore content-length if transfer-encoding is present\n if transfer_codes != [] and content_length != None:\n content_length = None\n\n try:\n allows_body = self.input_start(top_line, hdr_tuples,\n conn_tokens, transfer_codes, content_length)\n except ValueError: # parsing error of some kind; abort.\n if not self.inspecting:\n return \"\" # throw away the rest\n allows_body = True\n\n self._input_state = HEADERS_DONE\n if not allows_body:\n self._input_delimit = NOBODY\n elif len(transfer_codes) > 0:\n if transfer_codes[-1] == 'chunked':\n self._input_delimit = CHUNKED\n self._input_body_left = -1 # flag that we don't know\n else:\n self._input_delimit = CLOSE\n elif content_length != None:\n self._input_delimit = COUNTED\n self._input_body_left = content_length\n else:\n self._input_delimit = CLOSE\n return rest",
"def convert_header(header):\n return pf.Header().fromstring(header, sep='\\n') if header else None",
"def parse_headerfile(self):\r\n # Read and store the data contained in the input File\r\n myEDFFile = open(self.InputFile,'rb')\r\n EDFValues = myEDFFile.read()\r\n myEDFFile.close()\r\n\r\n # Extract the Header File that contains info about the record\r\n self.HeaderSize = int(EDFValues[184:192])\r\n self.HeaderFile = EDFValues[:self.HeaderSize]\r\n # Extract the actual data in the input file\r\n self.rawDataRecord = EDFValues[self.HeaderSize:]\r\n self.tempDataRecord = list(self.rawDataRecord)\r\n\r\n # Extract information from the EDF Header File\r\n self.total_duration = int(self.HeaderFile[236:244]) # total duration of the signal (in seconds)\r\n if self.duration == None:\r\n self.duration = self.total_duration\r\n self.signalnum = int(self.HeaderFile[252:256]) # number of signals in edf file\r",
"def test_read_header():\n header = get_header(AIA_193_JP2)[0]\n assert isinstance(header, FileHeader)",
"def parse_header(self):\n\n # get the sequence value of the entry\n seq = unpack(\"<B\", self._entry[16:17])[0]\n\n # get the logfile sequence number (lsn) of the entry\n lsn = unpack(\"<2L\", self._entry[8:16])[0]\n\n # get used size of the entry\n self._used_size = unpack(\"<L\", self._entry[24:28])[0]\n\n # get allocated size of the entry\n allocated_size = unpack(\"<L\", self._entry[28:32])[0]\n\n # get offset to first attribute\n self._first_attr = unpack(\"<H\", self._entry[20:22])[0]\n\n # get next attribute id\n self._next_attrID = unpack(\"<H\", self._entry[40:42])[0]\n\n print (\"MFT Entry Header Values:\")\n print (\"Sequence: %d\" % seq)\n print (\"$LogFile Sequence Number: %d\" % lsn)\n if allocated_size > 0:\n print (\"Allocated File\")\n else:\n print (\"Unallocated File\")\n print (\"\")\n print (\"Used size: %d bytes\" % self._used_size)\n print (\"Allocated size: %d bytes\" % allocated_size)\n print (\"\")",
"def header_read(buf, begin=0):\n buf.seek(begin) # starting at the given offset\n stringvar = str(buf.read(56)) # reading header\n listvar = stringvar.split() # spliting header\n listvar.pop(0) # first element of header is \"FCS\" and it's useless\n while len(listvar) > 4: # listvar needs only 4 elements, and elements are removed from\n listvar.pop() # the tail until list is 4 elements long\n # offsets are converted into string\n listvar = [int(x) for x in listvar]\n next_offset = listvar[-1]+1 # next offset is calculated\n text_begin = listvar[0]\n # the difference of BEGIN and END gives size-1\n text_size = listvar[1]-listvar[0]\n data_begin = listvar[2]\n # the difference of BEGIN and END gives size-1\n data_size = listvar[3]-listvar[2]\n listvar = [text_begin, text_size, data_begin, data_size]\n return(next_offset, listvar)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert six-frame translation coordinates to genomic coordinates.
|
def convertSixFrameToGenomic(start, end, frame, L):
if frame>=0:
gStart = 3*(start-1)+(frame-1)+1
gEnd = 3*(end-1)+(frame-1)+3
else:
gStart = L-(3*(start-1)+abs(frame)-1)
gEnd = L-(3*(end-1)+abs(frame)+1)
return gStart,gEnd
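# Usage sketch (illustrative values added for clarity; not part of the original
# entry): map a hit spanning residues 1-10 in reading frame +2 of a 300 bp
# sequence back to 1-based genomic coordinates on the forward strand.
example_start, example_end = convertSixFrameToGenomic(1, 10, 2, 300)  # -> (2, 31)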
|
[
"def convertBlockSixFrameToGenomic(block, start, end):\n #prog = re.compile('\\.|-|\\:')\n #tokens = prog.split(block)\n \n #prog = re.compile(\"(?P<chrom>[\\w]+)[.:](?P<bstart>[0-9]+)-(?P<bend>[0-9]+):(?P<frame>[0-9]+)\")\n #rs = prog.search(block)\n #if rs:\n # g = rs.groupdict()\n # chrom,blockStart,blockEnd,hmmerFrame = g[\"chrom\"],g[\"bstart\"],g[\"bend\"],g[\"frame\"]\n # blockStart = int(blockStart)\n # blockEnd = int(blockEnd)\n # hmmerFrame = int(hmmerFrame)\n # L = blockEnd-blockStart+1\n \n tokens = block.split(\":\")\n if len(tokens)==2:\n hmmerFrame = tokens[1]\n tokens = tokens[0].split(\".\")\n chrom = tokens[0]\n blockStart,blockEnd = tokens[1].split(\"-\")\n elif len(tokens)==3:\n chrom = tokens[0]\n blockStart,blockEnd = tokens[1].split(\"-\")\n hmmerFrame = tokens[2]\n else:\n print(tokens, file=sys.stderr)\n raise Exception(\"Don't know what to do\")\n \n blockStart = int(blockStart)\n blockEnd = int(blockEnd)\n L = blockEnd-blockStart+1\n hmmerFrame = int(hmmerFrame)\n \n frame = hmmer2frame[hmmerFrame]\n if frame>0:\n strand = '+'\n else:\n strand = '-'\n gStart,gEnd = convertSixFrameToGenomic(start, end, frame, L)\n return chrom,blockStart,blockEnd,gStart,gEnd,strand",
"def six_frame_translations(seq, genetic_code=...): # -> str:\n ...",
"def capture_flanking_coordinates(gene_list_dict, chr_contigs):\n\n # If gene is on - strand, START from higher number and move down to end.\n # Therefore, UPSTREAM is +500 from highest end to start.\n # If gene is on + strand, start from LOWEST number and move up to end.\n # Therefore, UPSTREAM is -500 from lowest end to start.\n\n # For testing purposes, will print out 500bp upstream + whole CDS in order to\n # compare to traditional sequence retrieval techniques. Can chop off later\n # if only interested in upstream regions.\n\n # Because the chromosomal sequences are large, we will iterate through each one\n # in turn, and then search for the particular genes in gene_list_dict that need\n # to reference the given contig.\n\n\n output_handle = open(outfilename, \"w\")\n\n START_BUFFER = 500 # Number of bp to capture before transcription start\n END_BUFFER = 0 # Number of bp to capture after transcription end\n\n for seq_record in SeqIO.parse(chr_contigs, \"fasta\"):\n fasta_chr = seq_record.description\n fasta_to_gff_dict, gff_to_fasta_dict = build_fasta_to_gff_chr_names()\n gff_chr = fasta_to_gff_chr_names(fasta_to_gff_dict, seq_record.description)\n for gene in gene_list_dict:\n if gene_list_dict[gene][\"Chromosome\"] == gff_chr:\n # Create new SeqRecord\n out_handle = gene\n if gene_list_dict[gene][\"Strand\"] == \"+\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - START_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + END_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n elif gene_list_dict[gene][\"Strand\"] == \"-\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - END_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + START_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n out_sequence = out_sequence.reverse_complement()\n if only_upstream == \"True\":\n out_sequence = out_sequence[:500]\n out_record = SeqRecord(out_sequence, id= out_handle)\n SeqIO.write(out_record, output_handle, \"fasta\")\n output_handle.close()",
"def table_to_genome(table):\r\n if type(table) == str:\r\n table = pd.read_csv(table, sep = \"\\t\", index_col=False)\r\n c1 = table[\"A1\"]\r\n c2 = table[\"A2\"]\r\n G = [c1[i] + c2[i] for i in range(len(c1))]\r\n return G",
"def convertOrfToGenomic(start, end, strand, orfStart):\n if strand=='+':\n gStart = orfStart + 3*(start-1)\n gEnd = orfStart + 3*(end-1) + 2\n else:\n gStart = orfStart - 3*(start-1)\n gEnd = orfStart - 3*(end-1) - 2\n return gStart, gEnd",
"def trim_g4_chr_with_seq(base_dir):\n #base_dir='/Users/Yun/Documents/bacteria_G4/D_thermus/'\n G4_dir = base_dir + \"all_G4/\"\n if not os.path.isdir(base_dir + 'all_G4_with_seq'):\n os.mkdir(base_dir + 'all_G4_with_seq/')\n for i in os.listdir(G4_dir):\n if i.startswith('.'):\n continue ## ignore the hidden files from apple\n with open(G4_dir+i, 'r') as fp:\n lines = fp.readlines()\n newlines = []\n for line in lines:\n line = line.split('\\t')\n seq_name = line[0].split(' ')[0]\n newlines.append((seq_name, line[1], line[2], line[6].split()[0], \\\n line[4], line[5]))\n ## save as bed6 format later\n if len(newlines) > 0:\n with open(base_dir+'all_G4_with_seq/' + i, 'w') as f0:\n ## substitude GCF with GCA to match GFF files\n f0.write('\\n'.join('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\\\n x[0], x[1], x[2], x[3], x[4], x[5]) for x in newlines))\n else:\n continue",
"def format_genome(self, sformat='fasta'):\n complete_genomes = \"\"\n if not sformat == 'fasta':\n raise NotImplementedError('Other format are not implemented')\n\n for g in ['G-atp6']:#self.sequences['genes_list']:\n seq = self.sequences['sequences'].get(g, '')\n cur_header = '>{gname} {specname}'.format(\n gname=g, specname=self.sequences['species_name'])\n pos = self.sequences['gpos'].get(g)\n if pos:\n cur_header += \", {size} ({start}:{end})\".format(\n size=len(seq), start=pos[0], end=pos[1])\n complete_genomes += cur_header + \"\\n\" + seq + \"\\n\"\n\n return complete_genomes",
"def toGenomic(self, doSwapStartEnd=True):\n self.genomic = True\n o = parseOrfHeader(self.accession)\n self.sStart,self.sEnd = convertOrfToGenomic(\n self.sStart, self.sEnd, o.strand, o.start)\n self.addStrandAttribute(o.strand)\n if doSwapStartEnd:\n self.swapStartEnd()",
"def _convertGene(self, gene):\r\n durations = np.require(gene[:12].copy(),\r\n requirements=['c', 'a', 'o', 'w'])\r\n\r\n self.ontimes = np.zeros((12,))\r\n self.offtimes = np.zeros((12,))\r\n # set the first coil on time to the stored value from config.\r\n self.ontimes[0] = self.t0 + durations[0] - 30\r\n self.offtimes[0] = self.t0 + durations[0]\r\n # The next coil ontime is 6 us before the previous coil is turned off.\r\n for i in range(1, len(durations)):\r\n self.ontimes[i] = self.offtimes[i-1] - 6\r\n self.offtimes[i] = self.ontimes[i] + durations[i]",
"def _get_genomic_bounds(self):\n\t\treturn self.GVCFLine.get_int_position(), self.GVCFLine.get_int_position() + len(self.GVCFLine.ref_seq)",
"def _get_frame_for_genomic_position(self,genome_position):\n\t\tprotein_name = self._get_protein_name_for_genomic_position(genome_position)\n\t\tif (protein_name is None) or (protein_name not in self.reference_protein_locations):\n\t\t\treturn None\n\t\tframe = self._transform_genomic_position_to_protein(genome_position) % 3\n\t\tif frame == 0:\n\t\t\tframe = 3\n\t\treturn frame",
"def convert(fasta, nloci=None, ns=None, hdf5=None, quiet=False):\n\n if nloci and ns:\n raise Exception(\"Only one mode is allowed not both. 1) arbitrarily split the sequence in N loci or 2) Ns as locus separator\")\n elif not nloci and not ns:\n raise Exception(\"Define the method to delimitate loci from sequences with nloci OR ns\")\n \n #define default hdf5 path\n if not hdf5:\n path = os.path.dirname(fasta)\n base = os.path.basename(fasta)\n name = os.path.splitext(base)[0]\n hdf5 = os.path.join(path, name + \".hdf5\")\n \n with open(fasta) as f:\n \n phynames = []\n phy = []\n\n for idx, line in enumerate(f):\n #if line is a header extract the first element before space as name\n if line[0] == \">\":\n phynames.append(line.split()[0][1:])\n\n # else extract the sequence info\n else:\n #Mode arbitrary n loci\n if nloci:\n # if is the first sequence create phymap and scaffold dataset\n if idx == 1:\n \n # create empty arrays\n phymap = []\n scaffold_names = []\n scaffold_lengths = []\n\n # get length\n length = len(line.strip())\n\n ## if nloci is provided \n if nloci > length:\n raise Exception(f\"Impossible to get the number of loci requested ({nloci}), the number is larger than bases in the alignment ({length})\")\n\n length_loci = length / nloci\n adjusted_length_loci = math.floor(length_loci)\n\n # split each locus one by one\n for idx_locus in range(nloci):\n start = idx_locus*adjusted_length_loci\n end = start + adjusted_length_loci\n\n\n # if length is not divisible, include the remainder bases in the last locus\n if idx_locus == range(nloci)[-1] and end < length:\n end += length%nloci\n\n\n # fill phymap, scaffold_lengths, and scaffold_names \n phymap.append([idx_locus + 1, start, end, 0, end])\n scaffold_names.append(f\"loc-{idx_locus + 1}\")\n scaffold_lengths.append(end-start)\n\n # prepare phy, for now add sequence by sequence to the file. \n phy.append([0 if base in [\"N\",\"-\",\"?\"] else ord(base) for base in line.strip().upper()])\n # certainly this will fill the memory try somethng like:\n # def append(self, values):\n # with h5py.File(self.datapath, mode='a') as h5f:\n # dset = h5f[self.dataset]\n # dset.resize((self.i + 1, ) + shape)\n # dset[self.i] = [values]\n # self.i += 1\n # h5f.flush()\n\n #Mode loci separated by NNNNN chains\n if ns:\n # if is the first sequence create phymap and scaffold dataset\n if idx == 1:\n\n # create empty arrays\n phymap = []\n scaffold_names = []\n scaffold_lengths = []\n\n #get location of loci \n for idx_locus, locus in enumerate(re.finditer(\"[^=]+\", line.strip().upper().replace(\"N\"*ns,\"=\"))):\n start = locus.start() - idx_locus\n end = locus.end() - idx_locus\n\n \n phymap.append([idx_locus + 1, start, end, 0, end])\n scaffold_names.append(f\"loc-{idx_locus + 1}\")\n scaffold_lengths.append(end-start)\n\n phy.append([0 if base in [\"N\",\"-\",\"?\"] else ord(base) for base in line.strip().upper().replace(\"N\"*ns,\"\")])\n\n\n\n with h5py.File(hdf5, 'w') as h:\n h[\"phy\"] = np.asarray(phy, dtype=\"u1\")\n h[\"phymap\"] = np.asarray(phymap)\n h[\"scaffold_names\"] = np.asarray(scaffold_names, dtype=\"S\")\n h[\"scaffold_lengths\"] = np.asarray(scaffold_lengths)\n h[\"phymap\"].attrs[\"reference\"] = \"imported-from-fasta\"\n h[\"phymap\"].attrs[\"phynames\"] = np.asarray(phynames, dtype=\"S\")\n h[\"phymap\"].attrs[\"columns\"] = [b\"chroms\", b\"phy0\", b\"phy1\", b\"pos0\", b\"pos1\",]\n \n if not quiet: \n print(f\"HDF5 file saved at: {hdf5}\")",
"def get_start_position_from_gff(file_name, base_dir):\n ucsc_tss=[]\n with open(base_dir+file_name, 'r') as f0:\n lines=f0.readlines()\n for line in lines:\n line=line.split('\\t')\n if len(line[0])>5: ## ignore sequences not in chromosome\n continue\n if line[0].startswith('#'):\n continue\n elif line[6]=='+':\n ucsc_tss.append((line[0], line[3], line[3], line[5], line[8].split(';')[0], line[6]))\n elif line[6]=='-':\n ucsc_tss.append((line[0], line[4], line[4], line[5], line[8].split(';')[0], line[6]))\n with open(base_dir+file_name+'.bed', 'w') as f0:\n f0.write('\\n'.join('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\\\n x[0], x[1], x[2], x[3], x[4], x[5]) for x in ucsc_tss))",
"def _to_genes(self, x, scope):\n\n x = scope.index(x)\n x = self._converter.convert(x, self._digits)\n\n return x",
"def Get_Gaia(tpf, magnitude_limit = 18, Offset = 10):\n\tkeys = ['objID','RAJ2000','DEJ2000','e_RAJ2000','e_DEJ2000','gmag','e_gmag','gKmag','e_gKmag','rmag',\n\t\t\t'e_rmag','rKmag','e_rKmag','imag','e_imag','iKmag','e_iKmag','zmag','e_zmag','zKmag','e_zKmag',\n\t\t\t'ymag','e_ymag','yKmag','e_yKmag','tmag','gaiaid','gaiamag','gaiadist','gaiadist_u','gaiadist_l',\n\t\t\t'row','col']\n\n\tresult = Get_Catalogue(tpf, Catalog = 'gaia')\n\tresult = result[result.Gmag < magnitude_limit]\n\tif len(result) == 0:\n\t\traise no_targets_found_message\n\tradecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T\n\tcoords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is origin supposed to be zero or one?\n\tGmag = result['Gmag'].values\n\t#Jmag = result['Jmag']\n\tind = (((coords[:,0] >= -10) & (coords[:,1] >= -10)) & \n\t\t ((coords[:,0] < (tpf.shape[1] + 10)) & (coords[:,1] < (tpf.shape[2] + 10))))\n\tcoords = coords[ind]\n\tGmag = Gmag[ind]\n\tTmag = Gmag - 0.5\n\t#Jmag = Jmag[ind]\n\treturn coords, Tmag",
"def create_shot_coordinates(df_events):\r\n goal_center_idx = (\r\n df_events[\"position_goal_low_center\"]\r\n | df_events[\"position_goal_mid_center\"]\r\n | df_events[\"position_goal_high_center\"]\r\n )\r\n df_events.loc[goal_center_idx, \"end_x\"] = 100.0\r\n df_events.loc[goal_center_idx, \"end_y\"] = 50.0\r\n\r\n goal_right_idx = (\r\n df_events[\"position_goal_low_right\"]\r\n | df_events[\"position_goal_mid_right\"]\r\n | df_events[\"position_goal_high_right\"]\r\n )\r\n df_events.loc[goal_right_idx, \"end_x\"] = 100.0\r\n df_events.loc[goal_right_idx, \"end_y\"] = 55.0\r\n\r\n goal_left_idx = (\r\n df_events[\"position_goal_mid_left\"]\r\n | df_events[\"position_goal_low_left\"]\r\n | df_events[\"position_goal_high_left\"]\r\n )\r\n df_events.loc[goal_left_idx, \"end_x\"] = 100.0\r\n df_events.loc[goal_left_idx, \"end_y\"] = 45.0\r\n\r\n out_center_idx = (\r\n df_events[\"position_out_high_center\"] | df_events[\"position_post_high_center\"]\r\n )\r\n df_events.loc[out_center_idx, \"end_x\"] = 100.0\r\n df_events.loc[out_center_idx, \"end_y\"] = 50.0\r\n\r\n out_right_idx = (\r\n df_events[\"position_out_low_right\"]\r\n | df_events[\"position_out_mid_right\"]\r\n | df_events[\"position_out_high_right\"]\r\n )\r\n df_events.loc[out_right_idx, \"end_x\"] = 100.0\r\n df_events.loc[out_right_idx, \"end_y\"] = 60.0\r\n\r\n out_left_idx = (\r\n df_events[\"position_out_mid_left\"]\r\n | df_events[\"position_out_low_left\"]\r\n | df_events[\"position_out_high_left\"]\r\n )\r\n df_events.loc[out_left_idx, \"end_x\"] = 100.0\r\n df_events.loc[out_left_idx, \"end_y\"] = 40.0\r\n\r\n post_left_idx = (\r\n df_events[\"position_post_mid_left\"]\r\n | df_events[\"position_post_low_left\"]\r\n | df_events[\"position_post_high_left\"]\r\n )\r\n df_events.loc[post_left_idx, \"end_x\"] = 100.0\r\n df_events.loc[post_left_idx, \"end_y\"] = 55.38\r\n\r\n post_right_idx = (\r\n df_events[\"position_post_low_right\"]\r\n | df_events[\"position_post_mid_right\"]\r\n | df_events[\"position_post_high_right\"]\r\n )\r\n df_events.loc[post_right_idx, \"end_x\"] = 100.0\r\n df_events.loc[post_right_idx, \"end_y\"] = 44.62\r\n\r\n blocked_idx = df_events[\"blocked\"]\r\n df_events.loc[blocked_idx, \"end_x\"] = df_events.loc[blocked_idx, \"start_x\"]\r\n df_events.loc[blocked_idx, \"end_y\"] = df_events.loc[blocked_idx, \"start_y\"]\r\n\r\n return df_events",
"def generate_sequence(self):\n self.sequence = 'ATG'\n for i in range(self.seq_len-6): # Length minus 6 bc we are adding start and stop codons\n rand_num = random.random() # returns a random number from 0.0 to 1.0\n for nt, probability in sorted(self.nt_composition.items()):\n if rand_num < probability:\n self.sequence += nt\n break\n self.sequence += str(random.choice([\"TGA\",\"TAA\",\"TAG\"]))\n return self.sequence",
"def build_fasta_to_gff_chr_names():\n\n # First element of tuple is FASTA format, second element is GFF format\n chr_rubric = [(\"chr_1\", \"Chr1\"),\n (\"chr_2\", \"Chr2\"),\n (\"chr_3\", \"Chr3\"),\n (\"chr_4\", \"Chr4\"),\n (\"chr_5\", \"Chr5\"),\n (\"chr_6\", \"Chr6\"),\n (\"chr_7\", \"Chr7\"),\n (\"chr_8\", \"Chr8\"),\n (\"chr_9\", \"Chr9\"),\n (\"chr_10\", \"Chr10\"),\n (\"chr_11a\", \"Chr11a\"),\n (\"chr_11b\", \"Chr11b\"),\n (\"chr_12\", \"Chr12\"),\n (\"chr_13\", \"Chr13\"),\n (\"chr_14\", \"Chr14\"),\n (\"chr_15\", \"Chr15\"),\n (\"chr_16a\", \"Chr16a\"),\n (\"chr_16b\", \"Chr16b\"),\n (\"chr_17\", \"Chr17\"),\n (\"chr_18\", \"Chr18\"),\n (\"chr_19a_19\", \"Chr19a_19\"),\n (\"chr_19b_31\", \"Chr19b_31\"),\n (\"chr_19c_29\", \"Chr19c_29\"),\n (\"chr_20\", \"Chr20\"),\n (\"chr_22\", \"Chr22\"),\n (\"chr_23\", \"Chr23\"),\n (\"chr_24\", \"Chr24\")]\n\n # Build dictionaries from the rubric for going either direction\n fasta_to_gff_dict = {}\n for (fasta, gff) in chr_rubric:\n fasta_to_gff_dict[fasta] = [gff]\n\n gff_to_fasta_dict = {}\n for (fasta, gff) in chr_rubric:\n gff_to_fasta_dict[gff] = [fasta]\n\n return fasta_to_gff_dict, gff_to_fasta_dict",
"def annotate_region_gdna_genic_point(args, q, reg):\n r = Record()\n r.reg = reg\n r.chrm = q.tok\n r.set_promoter()\n\n c, p = reg.t.gpos2codon(q.pos)\n r.append_info(\"is_gene_body\")\n r.tname = reg.t.format()\n r.gene = reg.t.gene_name if reg.t.gene_name else '.'\n r.strand = reg.t.strand\n\n if p.tpos == 0 and reg.t.transcript_type == 'protein_coding':\n if c.seq in standard_codon_table:\n r.taa_ref = aaf(standard_codon_table[c.seq], args)\n r.taa_pos = c.index\n if args.aacontext>0 and r.taa_ref:\n aa1 = aaf(reg.t.taa_range2aa_seq(\n c.index-args.aacontext if c.index>=args.aacontext else 0, c.index-1), args)\n aa2 = aaf(reg.t.taa_range2aa_seq(c.index+1, c.index+args.aacontext), args)\n r.append_info('aacontext=%s[%s]%s' % (aa1, r.taa_ref, aa2))\n\n r.gnuc_pos = q.pos\n r.pos = q.pos\n r.gnuc_ref = faidx.refgenome.fetch_sequence(q.tok, q.pos, q.pos)\n \n # optional output\n if args.gseq:\n r.gnuc_beg = r.gnuc_pos\n r.gnuc_end = r.gnuc_pos\n\n r.tnuc_pos = p\n r.tnuc_ref = r.gnuc_ref if c.strand == '+' else complement(r.gnuc_ref)\n r.append_info('codon_pos=%s' % ('-'.join(map(str, c.locs)),))\n\n return r",
"def _identify_all_possible_position(self):\n lign = 1\n index_number = 1\n while lign < 16:\n column = 1\n while column < 16:\n self.all_position.append(index_number)\n column += 1\n index_number += 1\n index_number += 85\n lign += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convenience function that takes block six-frame coords (block, start, end), extracts the block start/end and frame from the block name, and converts them to genomic coords, i.e. returns chrom, blockStart, blockEnd, gStart, gEnd, strand.
|
def convertBlockSixFrameToGenomic(block, start, end):
#prog = re.compile('\.|-|\:')
#tokens = prog.split(block)
#prog = re.compile("(?P<chrom>[\w]+)[.:](?P<bstart>[0-9]+)-(?P<bend>[0-9]+):(?P<frame>[0-9]+)")
#rs = prog.search(block)
#if rs:
# g = rs.groupdict()
# chrom,blockStart,blockEnd,hmmerFrame = g["chrom"],g["bstart"],g["bend"],g["frame"]
# blockStart = int(blockStart)
# blockEnd = int(blockEnd)
# hmmerFrame = int(hmmerFrame)
# L = blockEnd-blockStart+1
tokens = block.split(":")
if len(tokens)==2:
hmmerFrame = tokens[1]
tokens = tokens[0].split(".")
chrom = tokens[0]
blockStart,blockEnd = tokens[1].split("-")
elif len(tokens)==3:
chrom = tokens[0]
blockStart,blockEnd = tokens[1].split("-")
hmmerFrame = tokens[2]
else:
print(tokens, file=sys.stderr)
raise Exception("Don't know what to do")
blockStart = int(blockStart)
blockEnd = int(blockEnd)
L = blockEnd-blockStart+1
hmmerFrame = int(hmmerFrame)
frame = hmmer2frame[hmmerFrame]
if frame>0:
strand = '+'
else:
strand = '-'
gStart,gEnd = convertSixFrameToGenomic(start, end, frame, L)
return chrom,blockStart,blockEnd,gStart,gEnd,strand
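# Usage sketch (illustrative, not part of the original entry): block names are
# assumed to look like "chr1.1000-1999:1" (chrom.start-end:hmmerFrame); the
# module-level hmmer2frame mapping and convertSixFrameToGenomic are defined
# elsewhere in the source module, so the call is shown commented out.
# chrom, bStart, bEnd, gStart, gEnd, strand = \
#     convertBlockSixFrameToGenomic("chr1.1000-1999:1", 1, 10)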
|
[
"def _get_genomic_bounds(self):\n\t\treturn self.GVCFLine.get_int_position(), self.GVCFLine.get_int_position() + len(self.GVCFLine.ref_seq)",
"def _GetFrame(self):\n for node in self.svg.iter(): \n if node.get(inkex.addNS(\"Type\",\"TimeAnalysis\")) == \"Frame\":\n frame = node\n Coordinates = self._GetPoints(frame)[0]\n# print(\"Coordinates: \", Coordinates)\n Xvalues = []\n Yvalues = []\n for element in Coordinates:\n Xvalues.append(element[X])\n Yvalues.append(element[Y])\n xrange = (min(Xvalues),max(Xvalues))\n yrange = (min(Yvalues),max(Yvalues))\n return xrange,yrange",
"def get_start_position_from_gff(file_name, base_dir):\n ucsc_tss=[]\n with open(base_dir+file_name, 'r') as f0:\n lines=f0.readlines()\n for line in lines:\n line=line.split('\\t')\n if len(line[0])>5: ## ignore sequences not in chromosome\n continue\n if line[0].startswith('#'):\n continue\n elif line[6]=='+':\n ucsc_tss.append((line[0], line[3], line[3], line[5], line[8].split(';')[0], line[6]))\n elif line[6]=='-':\n ucsc_tss.append((line[0], line[4], line[4], line[5], line[8].split(';')[0], line[6]))\n with open(base_dir+file_name+'.bed', 'w') as f0:\n f0.write('\\n'.join('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\\\n x[0], x[1], x[2], x[3], x[4], x[5]) for x in ucsc_tss))",
"def extract_bed_coordinates_block_format(input_bed, output_exons_bed, output_introns_bed):\n\n # set up dictionary to hold coordinates\n exon_list = collections.defaultdict(lambda: collections.defaultdict())\n intron_list = collections.defaultdict(lambda: collections.defaultdict())\n # read in data\n data = gen.read_many_fields(input_bed, \"\\t\")\n\n with open(output_exons_bed, \"w\") as output_exons:\n with open(output_introns_bed, \"w\") as output_introns:\n for line in data:\n start = int(line[1])\n id = line[3]\n strand = line[5]\n block_sizes = [int(i) for i in line[10].split(\",\") if len(i)]\n start_indices = [int(i) for i in line[11].split(\",\") if len(i)]\n # if on the reverse strand, need to reverse order\n if strand == \"-\":\n block_sizes = block_sizes[::-1]\n start_indices = start_indices[::-1]\n # now get a list of exon ids to use for intron calculations\n exon_ids = list(range(len(start_indices)))\n\n for i in range(len(start_indices)):\n # now get the start and end of the exon coordinates\n start_index = start + start_indices[i]\n end_index = start_index + block_sizes[i]\n # get the exon id\n exon_id = i+1\n # now write to the exons file\n output_exons.write(\"{0}\\t{1}\\t{2}\\t{3}.{4}\\t.\\t{5}\\n\".format(line[0], start_index, end_index, id, exon_id, strand))\n\n if i+1 in exon_ids:\n intron_id = \"{0}-{1}\".format(i+1, i+2)\n if strand == \"-\":\n intron_start = start + start_indices[i+1] + block_sizes[i+1]\n intron_end = start_index\n else:\n intron_start = end_index\n intron_end = start + start_indices[i+1]\n output_introns.write(\"{0}\\t{1}\\t{2}\\t{3}.{4}\\t.\\t{5}\\n\".format(line[0], intron_start, intron_end, id, intron_id, strand))",
"def get_sequence(chrom, start, end, range):\n # print(start)\n # print(end)\n # start = int(start) - range \n # end = int(end) + range\n # print(start)\n # print(end)\n\n # command to get the region from the two bit file from fasta\n cmd = [\"/ye/zaitlenlabstore/christacaggiano/twoBit/twoBitToFa\", \"/ye/zaitlenlabstore/christacaggiano/twoBit/hg38.2bit\",\n \"stdout\", \"-seq=\" + chrom, \"-start=\" + str(start), \"-end=\" + str(end)]\n\n # call command and get output\n result = subprocess.check_output(cmd)\n\n return result.decode().upper()",
"def transform_region_to_coordinates(x_coord,\n y_coord,\n prefix_len,\n image_bit_level=10):\n\n shift = image_bit_level - prefix_len\n x_bot = x_coord << shift\n x_top = ((x_coord + 1) << shift) - 1\n y_bot = y_coord << shift\n y_top = ((y_coord + 1) << shift) - 1\n return (x_bot, x_top, y_bot, y_top)",
"def tuple(self):\n return self.start.coordinates[0], self.start.coordinates[1], self.end.coordinates[0], self.end.coordinates[1]",
"def capture_flanking_coordinates(gene_list_dict, chr_contigs):\n\n # If gene is on - strand, START from higher number and move down to end.\n # Therefore, UPSTREAM is +500 from highest end to start.\n # If gene is on + strand, start from LOWEST number and move up to end.\n # Therefore, UPSTREAM is -500 from lowest end to start.\n\n # For testing purposes, will print out 500bp upstream + whole CDS in order to\n # compare to traditional sequence retrieval techniques. Can chop off later\n # if only interested in upstream regions.\n\n # Because the chromosomal sequences are large, we will iterate through each one\n # in turn, and then search for the particular genes in gene_list_dict that need\n # to reference the given contig.\n\n\n output_handle = open(outfilename, \"w\")\n\n START_BUFFER = 500 # Number of bp to capture before transcription start\n END_BUFFER = 0 # Number of bp to capture after transcription end\n\n for seq_record in SeqIO.parse(chr_contigs, \"fasta\"):\n fasta_chr = seq_record.description\n fasta_to_gff_dict, gff_to_fasta_dict = build_fasta_to_gff_chr_names()\n gff_chr = fasta_to_gff_chr_names(fasta_to_gff_dict, seq_record.description)\n for gene in gene_list_dict:\n if gene_list_dict[gene][\"Chromosome\"] == gff_chr:\n # Create new SeqRecord\n out_handle = gene\n if gene_list_dict[gene][\"Strand\"] == \"+\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - START_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + END_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n elif gene_list_dict[gene][\"Strand\"] == \"-\":\n start_cut = int(gene_list_dict[gene][\"Start\"]) - 1 - END_BUFFER\n end_cut = int(gene_list_dict[gene][\"End\"]) + START_BUFFER\n out_sequence = seq_record.seq[start_cut:end_cut]\n out_sequence = out_sequence.reverse_complement()\n if only_upstream == \"True\":\n out_sequence = out_sequence[:500]\n out_record = SeqRecord(out_sequence, id= out_handle)\n SeqIO.write(out_record, output_handle, \"fasta\")\n output_handle.close()",
"def blocks2cigar(cls, blocks):\n cigar = list()\n for indx, (block_start, block_end) in enumerate(blocks):\n cigar.append((0, block_end - block_start))\n if indx < (len(blocks) - 1):\n cigar.append((3, blocks[indx+1][0] - block_end))\n cigar = tuple(cigar)\n return cigar",
"def convertOrfToGenomic(start, end, strand, orfStart):\n if strand=='+':\n gStart = orfStart + 3*(start-1)\n gEnd = orfStart + 3*(end-1) + 2\n else:\n gStart = orfStart - 3*(start-1)\n gEnd = orfStart - 3*(end-1) - 2\n return gStart, gEnd",
"def getFrameRange():\n start = cmds.playbackOptions(query=True, minTime=True)\n end = cmds.playbackOptions(query=True, maxTime=True)\n \n return int(start), int(end)",
"def cliprange(self):\n return (self._startframe if self._startframe is not None else 0, self._endframe)",
"def get_frame_id(data, arg, single_row=False, multiple_row=False, row_section=False):\r\n frame_id = []\r\n\r\n print(\"NOTE: Each element is a frame ID for single azimuth block\")\r\n if single_row is True:\r\n for col in range(9, len(data.columns) - 1, 788):\r\n frame_id_segment = list(data.values[arg, col:col + 2])\r\n frame_id_segment.reverse()\r\n frame_id_segment = [int(elem) for index, elem in enumerate(frame_id_segment)]\r\n frame = int(\"\".join(map(str, frame_id_segment)))\r\n frame_id.append(frame)\r\n elif multiple_row is True:\r\n for index, elem in enumerate(arg):\r\n frame_id.append(\"new row\")\r\n for col in range(9, len(data.columns) - 1, 788):\r\n frame_id_segment = list(data.values[elem, col:col + 2])\r\n frame_id_segment.reverse()\r\n frame_id_segment = [int(elem) for index, elem in enumerate(frame_id_segment)]\r\n frame = int(\"\".join(map(str, frame_id_segment)))\r\n frame_id.append(frame)\r\n elif row_section is True:\r\n for row in range(arg[0], arg[1] + 1):\r\n frame_id.append(\"new row\")\r\n for col in range(9, len(data.columns) - 1, 788):\r\n frame_id_segment = list(data.values[row, col:col + 2])\r\n frame_id_segment.reverse()\r\n frame_id_segment = [int(elem) for index, elem in enumerate(frame_id_segment)]\r\n frame = int(\"\".join(map(str, frame_id_segment)))\r\n frame_id.append(frame)\r\n\r\n return frame_id",
"def coordinates2Region():\n\tpass",
"def _interpret_frame(self, frame):\n\n i0 = 0\n i1 = self.id_len\n cell_id = frame[i0:i1]\n cell_id = b.b_to_int(cell_id)\n\n i0 = i1\n i1 += self.coord_len\n x = frame[i0:i1]\n x = b.b_to_int(x)\n\n # i0 = i1\n # i1 += self.coord_len\n # y = frame[i0:i1]\n # y = b.b_to_int(y)\n\n i0 = i1\n i1 += 8 # dend len\n dend_len = frame[i0:i1]\n dend_len = b.b_to_int(dend_len) + 1 # it cannot be 0\n\n i0 = i1\n i1 += 8\n dend_pas = frame[i0:i1]\n dend_pas = b.b_to_int(dend_pas) - 127\n\n # Scan for (index, weight, delay) triples\n iwd_len = self.id_len + 1 + self.w_len + self.d_len # 1 is w_type\n iwd_triples = []\n i = i1\n while i < len(frame)-(iwd_len-1):\n iwd_frame = frame[i: i+iwd_len]\n iwd_triples.append(self._decode_iwd_triple(iwd_frame))\n\n i+=iwd_len\n\n cell_dict = {\"x\": x, \"dend_len\": dend_len, \"dend_pas\": dend_pas,\n \"connections\": iwd_triples}\n # Overwrite existing settings or create new\n self.Cells[cell_id] = cell_dict",
"def six_frame_translations(seq, genetic_code=...): # -> str:\n ...",
"def test_convertWithFragmentModeMiddleWindow(self):\n\n expCurrentSeq = array.array('i',[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 2, 2, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 1, 1, 0, 1, 1, 2, 3, 3, 2, 2, 2, 5, 3, 4, 1, 5, 4, 4, 3, 6, 6, 3, 1, 4, 5, 6, 3, 5, 3, 6, 7, 8, 5, 2, 2, 3, 3, 6, 8, 8, 5, 3, 2, 4, 4, 4, 3, 5, 5, 6, 8, 10])\n seq = 'lm_SuperContig_13_v2'\n currentSeq = array.array('i')\n for i in range(0,1634580):\n currentSeq.append(0)\n bc = BamConverter(self.BamFile, mode=\"fragment-middle\", window=1)\n testCurrentSeq, dFragmentSize, lBedTracks = bc.convertWithFragmentMode(seq, currentSeq)\n self.assertEquals(expCurrentSeq,testCurrentSeq[0:200])",
"def _get_position_frames(self):\n \n f = open( self.filename)\n f.seek(self._first_line) # start reading after header\n lines_per_frame = self.n_chunks + 1\n offsets = []\n counter = 0\n \n line = True\n while line:\n if not counter % lines_per_frame:\n offsets.append(int(f.tell()))\n line = f.readline()\n counter += 1\n array_offsets = offsets[:-1] # last is EOF\n self.offsets = np.array(array_offsets, dtype = int)\n \n f.close()",
"def getSequence(self, loc=None, **kargs):\n\n # This is old and ugly code.\n # But it works and has been pretty extensively tested and is 'fast enough'.\n # So don't go messing with it unless you have a very good reason.\n\n valid_args = [\"coords\", \"strand\", \"mask\"]\n for key in kargs:\n assert key in valid_args, \"getSequence() - Argument '%s' is not recognised\" % key\n\n assert loc or \"coords\" in kargs, \"No valid coords or loc specified\"\n assert self.bHasBoundSequence, \"No Available genome FASTA files\"\n\n if \"coords\" in kargs:\n loc = kargs[\"coords\"]\n\n try:\n loc = location(loc=loc)\n except Exception:\n pass\n\n assert isinstance(loc, location), \"'loc' must be a proper genome location\"\n\n left = loc[\"left\"]\n right = loc[\"right\"]\n chrom = loc[\"chr\"]\n\n if chrom not in self.seq:\n config.log.warning(\"'%s' not found\" % chrom)\n return None\n\n seekloc = (left + (left // self.seq_data[chrom][\"linelength\"]))-1 # the division by 50 is due to the presence of newlines every 50 characters.\n #print chrom, self.seq[chrom], seekloc, self.seq_data[chrom][\"offset\"], loc\n self.seq[chrom].seek(seekloc+self.seq_data[chrom][\"offset\"]) # move to the start location.\n\n delta = (right - left)+1\n\n # count the number of line endings.\n # get a niave reading.\n bonus = 0\n ret = \"\"\n while len(ret) < delta:\n self.seq[chrom].seek(seekloc+self.seq_data[chrom][\"offset\"])\n ret = self.seq[chrom].read(delta + (delta // self.seq_data[chrom][\"linelength\"]) + bonus).replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n bonus += 1\n if bonus > delta: # breaks in case you send a loc that is beyond the end of the file.\n break\n\n if \"strand\" in kargs and kargs[\"strand\"] in negative_strand_labels:\n ret = utils.rc(ret)\n\n if \"mask\" in kargs and kargs[\"mask\"]:\n ret = utils.repeat_mask(ret)\n\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert domain coordinates within an ORF to genomic coordinates.
|
def convertOrfToGenomic(start, end, strand, orfStart):
if strand=='+':
gStart = orfStart + 3*(start-1)
gEnd = orfStart + 3*(end-1) + 2
else:
gStart = orfStart - 3*(start-1)
gEnd = orfStart - 3*(end-1) - 2
return gStart, gEnd
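# Usage sketch (illustrative values added for clarity; not part of the original
# entry): a domain covering residues 1-10 of an ORF whose first codon starts at
# genomic position 5000 on the '+' strand maps to the 30 bp interval 5000-5029.
domain_gstart, domain_gend = convertOrfToGenomic(1, 10, '+', 5000)  # -> (5000, 5029)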
|
[
"def toGenomic(self, doSwapStartEnd=True):\n self.genomic = True\n o = parseOrfHeader(self.accession)\n self.sStart,self.sEnd = convertOrfToGenomic(\n self.sStart, self.sEnd, o.strand, o.start)\n self.addStrandAttribute(o.strand)\n if doSwapStartEnd:\n self.swapStartEnd()",
"def _to_genes(self, x, scope):\n\n x = scope.index(x)\n x = self._converter.convert(x, self._digits)\n\n return x",
"def coordinates2Region():\n\tpass",
"def annotate_region_gdna_genic_point(args, q, reg):\n r = Record()\n r.reg = reg\n r.chrm = q.tok\n r.set_promoter()\n\n c, p = reg.t.gpos2codon(q.pos)\n r.append_info(\"is_gene_body\")\n r.tname = reg.t.format()\n r.gene = reg.t.gene_name if reg.t.gene_name else '.'\n r.strand = reg.t.strand\n\n if p.tpos == 0 and reg.t.transcript_type == 'protein_coding':\n if c.seq in standard_codon_table:\n r.taa_ref = aaf(standard_codon_table[c.seq], args)\n r.taa_pos = c.index\n if args.aacontext>0 and r.taa_ref:\n aa1 = aaf(reg.t.taa_range2aa_seq(\n c.index-args.aacontext if c.index>=args.aacontext else 0, c.index-1), args)\n aa2 = aaf(reg.t.taa_range2aa_seq(c.index+1, c.index+args.aacontext), args)\n r.append_info('aacontext=%s[%s]%s' % (aa1, r.taa_ref, aa2))\n\n r.gnuc_pos = q.pos\n r.pos = q.pos\n r.gnuc_ref = faidx.refgenome.fetch_sequence(q.tok, q.pos, q.pos)\n \n # optional output\n if args.gseq:\n r.gnuc_beg = r.gnuc_pos\n r.gnuc_end = r.gnuc_pos\n\n r.tnuc_pos = p\n r.tnuc_ref = r.gnuc_ref if c.strand == '+' else complement(r.gnuc_ref)\n r.append_info('codon_pos=%s' % ('-'.join(map(str, c.locs)),))\n\n return r",
"def _togis(self, *args, **kwargs):\n return togis(self, *args, **kwargs)",
"def codonCoordinateToChromosome(self, p):\n m = self.codonCoordinateToMRna(p)\n return self.mRnaCoordinateToChromosome(m)",
"def dna_to_rna(self):\n self.seq = self.seq.replace(\"T\",\"U\")",
"def chromosomeCoordinateToCodon(self, p):\n m = self.chromosomeCoordinateToMRna(p)\n return self.mRnaCoordinateToCodon(m)",
"def geom2geog(lat, long):\n lat = np.deg2rad(lat)\n long = np.deg2rad(long)\n\n # Pole coordinates for 2015\n pole_lat = np.deg2rad(80.37)\n pole_long = np.deg2rad(-72.62)\n\n pole_lat_s = np.sin(pole_lat)\n pole_lat_c = np.cos(pole_lat)\n pole_long_s = np.sin(pole_long)\n pole_long_c = np.cos(pole_long)\n\n # Rotation matrix\n matrix = np.array([\n [pole_lat_s * pole_long_c, pole_lat_s * pole_long_s, -pole_lat_c],\n [-pole_long_s, pole_long_c, 0],\n [pole_lat_c * pole_long_c, pole_lat_c * pole_long_s, pole_lat_s]\n ])\n matrix = np.linalg.inv(matrix)\n\n x = earth_radii * np.cos(lat) * np.cos(long)\n y = earth_radii * np.cos(lat) * np.sin(long)\n z = earth_radii * np.sin(lat)\n vect_geom = np.array([x, y, z])\n vect_geog = np.dot(matrix, vect_geom)\n norm = np.linalg.norm(vect_geog)\n\n lat_geog = np.arcsin(vect_geog[2] / norm)\n long_geog = np.arctan2(vect_geog[1], vect_geog[0])\n\n lat_geog = np.rad2deg(lat_geog)\n long_geog = np.rad2deg(long_geog)\n return lat_geog, long_geog",
"def table_to_genome(table):\r\n if type(table) == str:\r\n table = pd.read_csv(table, sep = \"\\t\", index_col=False)\r\n c1 = table[\"A1\"]\r\n c2 = table[\"A2\"]\r\n G = [c1[i] + c2[i] for i in range(len(c1))]\r\n return G",
"def convert_gridext_to_gis(inputfile, outputfile, crs=None, river='na', reach=0):\n\n errmsg = 'file {} not found'\n if not Path(inputfile).exists:\n raise ValueError(errmsg.format(inputfile))\n\n gdf = (\n pandas.read_csv(inputfile, sep='\\s+', engine='python', header=None,\n dtype={'ii': int, 'jj': int, 'x': float, 'y': float},\n names=['ii', 'jj', 'x', 'y'])\n .assign(id=lambda df: df.index)\n .assign(ii_jj=lambda df:\n df['ii'].astype(str).str.pad(3, fillchar='0') + '_' +\n df['jj'].astype(str).str.pad(3, fillchar='0'))\n .assign(elev=0.0, river=river, reach=reach)\n .assign(geometry=lambda df: df.apply(lambda r: Point((r['x'], r['y'])), axis=1))\n .drop(['x', 'y'], axis='columns')\n .pipe(geopandas.GeoDataFrame, geometry='geometry', crs=crs)\n )\n\n gdf.to_file(outputfile)\n\n return gdf",
"def geo2cell(cellfile, posfile, outfile):",
"def mRnaCoordinateToChromosome(self, p):\n assert(len(self.exons))\n if p is None: return None\n if p < 0: return None\n limit = sum([(e.stop - e.start) for e in self.exons])\n if p >= limit: return None\n p = self.mRnaCoordinateToExon(p)\n if p >= limit: return None\n return self.exonCoordinateToChromosome(p)",
"def xyz2enu(p_xyz, p_base_xyz, lat, lon):\n dx_xyz = array(p_xyz) - array(p_base_xyz)\n R = enuRxyz(lat, lon)\n dx_enu = dot(R, dx_xyz)\n return dx_enu",
"def test_xy_to_osgb_precision(self):\n\n osgb_gridref = xy_to_osgb(393618.933445, 564351.935939, 100)\n expected = \"NY 936 643\"\n self.assertEqual(osgb_gridref, expected)",
"def annotate_region_gdna_intergenic_point(args, q, reg):\n\n r = Record()\n r.reg = reg\n r.chrm = q.tok\n r.set_promoter()\n\n r.gnuc_pos = q.pos if hasattr(q,'pos') else q.beg\n r.pos = r.gnuc_pos\n \n # optional output\n if args.gseq:\n r.gnuc_beg = r.gnuc_pos\n r.gnuc_end = r.gnuc_pos\n r.gnuc_ref = faidx.refgenome.fetch_sequence(r.chrm, r.gnuc_pos, r.gnuc_pos)\n\n # # # annotate extra noncoding features\n # if 'GENCODE' in args.ffhs:\n # iis = set()\n # for entry in args.ffhs['GENCODE'].fetch(tok, beg, end+1):\n # fields = entry.strip().split('\\t')\n # info = dict(re.findall(r'\\s*([^\"]*) \"([^\"]*)\";', fields[8]))\n # if 'gene_type' in info:\n # ii = info['gene_type']\n # if 'gene_name' in info: ii += '(%s)' % info['gene_name']\n # iis.add(ii)\n # r.info = 'gene_type=%s;' % ','.join(list(iis))\n return r",
"def transform(cls):\n with xr.open_dataset(cls.sample_path) as data:\n transform = data.crs.GeoTransform\n return transform",
"def map_population_to_genome(self, genome):\n pass",
"def convert_coord(plot, axis, to_system, coord):\n to_system = int(to_system)\n from_system = 2 - to_system\n from_name = axis.upper() + (\"2\" if from_system == 2 else \"\")\n to_name = axis.upper() + (\"2\" if to_system == 2 else \"\")\n from_min = get_var(plot, \"GPVAL_%s_MIN\" % from_name, float)\n from_max = get_var(plot, \"GPVAL_%s_MAX\" % from_name, float)\n to_min = get_var(plot, \"GPVAL_%s_MIN\" % to_name, float)\n to_max = get_var(plot, \"GPVAL_%s_MAX\" % to_name, float)\n if None not in (from_min, from_max, to_min, to_max):\n return to_min + (to_max - to_min) * \\\n (coord - from_min) / (from_max - from_min)\n else:\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load HMMER domain results.
|
def loadDomains(iFileHandle):
domains = []
for d in HmmerFile(iFileHandle):
domains.append(d)
return domains
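# Usage sketch (illustrative, not part of the original entry): HmmerFile is the
# iterable parser class assumed by the source module, and the file name below is
# a placeholder, so the call is shown commented out.
# domains = loadDomains("search.domtblout")
# print(len(domains), "domains loaded")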
|
[
"def _load_humaneval(self, eval_cache_path: str) -> Dict:\n if \"cnndm\" in self.task:\n dataset = \"cnndm\"\n elif \"xsum\" in self.task:\n dataset = \"xsum\"\n else:\n raise ValueError\n\n all_humaneval_scores = dict()\n for shots in [0, 5]:\n score_analyzer = SummarizationHumanEvalAnalyzer(dataset, eval_cache_path, shots=shots)\n for (model_name, input_id, output_text), score in score_analyzer.faithfulness_full.items():\n if isinstance(output_text, float):\n output_text = \"\"\n all_humaneval_scores[(\"faithfulness\", model_name, input_id, output_text)] = score\n for (model_name, input_id, output_text), score in score_analyzer.relevance_full.items():\n if isinstance(output_text, float):\n output_text = \"\"\n all_humaneval_scores[(\"relevance\", model_name, input_id, output_text)] = score\n for (model_name, input_id, output_text), score in score_analyzer.coherence_full.items():\n if isinstance(output_text, float):\n output_text = \"\"\n all_humaneval_scores[(\"coherence\", model_name, input_id, output_text)] = score\n return all_humaneval_scores",
"def _load_data(self):\n for ordering, result in self._results.items():\n self._xticks.append(ordering.get_ordering_type())\n self._bdd_nodes.append(result.bdd_nodes)\n self._min_bdd_nodes.append(result.min_bdd_nodes)\n self._ord_times.append(result.ordering_time)\n self._con_times.append(result.construction_time)\n self._min_times.append(result.minimising_time)",
"def load(self):\n\t\tfor group_name in domain_groups_keys:\n\t\t\t# print \"\\n*** %s ***\" % group_name\n\t\t\tgroup = NSES_stds_pool[group_name]\n\t\n\t\t\tbands = group.keys()\n\t\t\tbands.sort(band_cmp)\n\t\t\tfor band in bands:\n\t\t\t\tsample = self._get_sample (group[band])\t\n\t\t\t\t# instantiate the SampleSet, which in turn instantiates SuggestionSets\n\t\t\t\tself.append (SampleSet ( group_name, band, sample))",
"def load_mails_dataset(dir):\n X = []\n y = []\n hams = os.listdir(os.path.join(dir, 'ham'))\n spams = os.listdir(os.path.join(dir, 'spam'))\n\n for f in hams:\n X.append(get_file_content(os.path.join(dir, 'ham', f)))\n y.append(0)\n for f in spams:\n X.append(get_file_content(os.path.join(dir, 'spam', f)))\n y.append(1)\n return X, y",
"def _readResults(self, M):\n self.detailedResults[\"Heating Load\"] = np.array(\n [\n sum(M.connectVars[c, t].value for c in M.bConnectedHeat)\n for t in M.fullTimeIndex\n ]\n )\n self.detailedResults[\"Cooling Load\"] = np.array(\n [\n sum(M.connectVars[c, t].value for c in M.bConnectedCool)\n for t in M.fullTimeIndex\n ]\n )\n self.detailedResults[\"T_air\"] = np.array(\n [M.bT_air[t].value for t in M.fullTimeIndex]\n )\n self.detailedResults[\"T_s\"] = np.array(\n [M.bT_s[t].value for t in M.fullTimeIndex]\n )\n self.detailedResults[\"T_m\"] = np.array(\n [M.bT_m[t].value for t in M.fullTimeIndex]\n )\n self.detailedResults[\"T_e\"] = np.array(\n [M.profiles[\"T_e\"][t] for t in M.fullTimeIndex]\n )\n self.detailedResults[\"Electricity Load\"] = self.cfg[\"elecLoad\"].values\n\n for dec in M.exVarIx:\n if M.exVars[dec].stale:\n if M.exVars[dec].lb == M.exVars[dec].ub:\n M.exVars[dec].value = M.exVars[dec].lb\n else:\n warnings.warn(\n \"Stale value in result of \"\n + str(dec)\n + \" detected. Result is set to the lb \"\n \"of the variable\",\n UserWarning,\n )\n M.exVars[dec].value = M.exVars[dec].lb\n self.detailedRefurbish[dec] = {}\n self.detailedRefurbish[dec][\"Capacity\"] = M.exVars[dec].value\n self.detailedRefurbish[dec][\"FixCost\"] = (\n M.exVarCost[dec] * M.exVars[dec].value\n )\n self.detailedRefurbish[dec][\"CAPEX\"] = (\n M.exVarCAPEX[dec] * M.exVars[dec].value\n )\n self.static_results[\"Capacity\"] = M.bQ_des.value\n self.static_results[\"FixCost\"] = 0\n self.static_results[\"CAPEX\"] = 0\n self.static_results[\"OPEX fix\"] = 0.0\n self.static_results[\"VarCost\"] = 0.0\n self.static_results[\"OPEX var\"] = 0.0\n self.static_results[\"OPEX\"] = 0.0\n self.result_load = self.detailedResults[\"Heating Load\"]\n\n return",
"def load(self):\n results_fn = os.path.join(self.full_path, self.output_filename)\n self.results = rlpy.Tools.results.load_single(results_fn)\n return self.results",
"def load(self, path):\n self._results = pd.read_pickle(os.path.join(path, 'results.pickle'))\n self._event_buffer = pd.read_pickle(os.path.join(path, 'events.pickle'))\n print('Load results and events from \\'{}\\''.format(path))",
"def compute_load(self):\n #pdb.set_trace()\n load = [ 0 for _ in range(self.signal_space.length) ]\n meanings = self.meaning_space.meanings()\n for position in range(self.signal_space.length):\n comparisons = 0\n for meaning in meanings:\n utterances = self.speak(meaning, pick=False)\n for utterance in utterances:\n neighbors = self.signal_space.compute_neighbors(utterance,position)\n for neighbor in neighbors:\n understandings = self.hear(neighbor, pick=False)\n for understanding in understandings:\n mdist = self.meaning_space.hamming(meaning,understanding)\n load[position] += (mdist / self.meaning_space.length)\n comparisons += 1\n load[position] /= comparisons\n #pdb.set_trace()\n return load",
"def load_all_results():\n directory = os.getcwd() + \"\\\\result\"\n\n return __load_results(directory)",
"def get_scores(loader, device, model,\n min_max_norm=False):\n\n model.eval()\n site_scores = []\n with torch.no_grad():\n for data in loader:\n data = data.to(device)\n #l_x = len(data.x)\n #print(\"len.x:\", l_x)\n #print(\"data.x:\", data.x)\n #print(\"data.y:\", data.y)\n output = model(data.x, data.edge_index, data.batch)\n output = torch.exp(output)\n output = output.cpu().detach().numpy()[:, 1]\n if min_max_norm:\n for o in output:\n o_norm = min_max_normalize_probs(o, 1, 0, borders=[-1, 1])\n site_scores.append(o_norm)\n else:\n site_scores.extend(output)\n return site_scores",
"def load_model_results():\n # pylint: disable=undefined-variable)\n model_results_filepath = os.path.join(MODELS_DIRECTORY, 'model_results.csv')\n df_results = pd.read_csv(model_results_filepath)\n\n return df_results",
"def load(self, year, month):\n\n temp = filter_samples(0,month,year,0)\n if temp: # either 1 record or no record , check models.py\n self.db_cal = temp[0]\n\n if self.area == 0:\n self.db_events = filter_samples(0,month,year,self.area)\n else:\n self.db_events = filter_samples(0,month,year,self.area)\n self.curr = eventCalBase.monthCalendar(self, year, month)\n#put events to map a month\n for db_e in self.db_events:\n e = eventCalBase.event(db_e.id, db_e.taken_by,db_e.date_taken, db_e.sampling_point)\n self.curr.addEvent(e, db_e.date_taken.day)\n else:\n self.curr = eventCalBase.monthCalendar(None,\n year, month)",
"def load_data_set(self) -> None:\n return",
"def load(self):\n\t\tif self.full_name.endswith('.fits'):\n\t\t\thdu_in = pyfits.open(self.Spath+self.full_name)\n\t\t\ttb = hdu_in[1].data\n\t\t\thdu_in.close()\n\t\t\tlam = tb.field('loglam')\n\t\t\tbase = np.full(len(lam),10)\n\t\t\tlam = np.power(base,lam)\n\t\t\tflux = tb.field(0)*1e-17\n\t\t\tflux_err = tb.field('ivar')\n\t\t\tid_z = np.where(flux_err==0)\n\t\t\tlam = np.delete(lam,id_z); flux = np.delete(flux,id_z); flux_err = np.delete(flux_err,id_z)\n\t\t\tflux_err = (1/np.sqrt(flux_err))*1e-17\n\t\t\tself.data = np.array([lam,flux,flux_err]).T\n\t\t\tself.obs_date = str(self.full_name[10:15])\n\t\telse:\n\t\t\tif os.path.isfile(self.Spath+'sdss_'+self.full_name):\n\t\t\t\tself.mod = import_module('sdss_'+self.obj)\n\t\t\t\tself.data = np.loadtxt(self.Spath+'sdss_'+self.full_name)\n\t\t\t\tself.survey = 'sdss'\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tself.data = np.loadtxt(self.Spath+self.full_name)\n\t\t\t\t\tself.mod = import_module(self.obj)\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint 'Error: could not load data for {}'.format(self.full_name)\n\t\t\tself.z = self.mod.z_est; self.A_v = self.mod.A_v; self.lower = self.mod.lower; self.upper = self.mod.upper; self.fe_yn = self.mod.fev",
"def test_mh():\n\tmodel = pf.GASLLT(data=data, family=pf.GASNormal())\n\tx = model.fit('M-H',nsims=300)\n\tassert(len(model.latent_variables.z_list) == 3)\n\tlvs = np.array([i.value for i in model.latent_variables.z_list])\n\tassert(len(lvs[np.isnan(lvs)]) == 0)",
"def load_results_file(self):\n for exp in self._exps:\n name = \"\"\n if self._load_best_models:\n name_weights = \"model_weights_\" + str(self._best_exps[str(exp)]) + \".h5\"\n else:\n name_weights = self._name_weights\n if self._phase == \"train\":\n name = os.path.join(self._exps_path, \"exp_\" + str(exp),\n \"predict_truth_\" + set + \"_\" + name_weights + \"_1_\" + \".txt\")\n else:\n name = os.path.join(self._exps_path, \"exp_\" + str(exp),\n \"predict_truth_\" + set + \"_\" + name_weights + \"_0_\" + \".txt\")\n df = pd.read_csv(name, sep=\" \", engine=\"python\", encoding=\"ISO-8859-1\", names=['pred', 'real'])\n self._array_raw_exps.append(df)",
"def load_dataset(m):\n Tx = 0\n human_vocab = set()\n machine_vocab = set()\n dataset = []\n fake.add_provider(MyProvider)\n\n for i in tqdm(range(m)):\n facture,facture_date = fake.facture()\n if facture is not None:\n if Tx < len(set(facture)):\n Tx = len(set(facture))\n dataset.append((facture, facture_date))\n human_vocab.update(tuple(facture.lower()))\n machine_vocab.update(tuple(facture_date.lower()))\n\n human = dict(zip(sorted(human_vocab) + ['<unk>', '<pad>'],\n list(range(len(human_vocab) + 2))))\n inv_machine = dict(enumerate(sorted(machine_vocab)))\n machine = {v: k for k, v in inv_machine.items()}\n\n return dataset, human, machine, inv_machine,Tx",
"def load_hmm(self, initials, transitions, emissions):\n # load the initial probabilities\n for initial in initials:\n initial_list = initial.split()\n initial_state = initial_list[0]\n initial_prob = float(initial_list[1])\n\n # if the probability is not between 0 and 1 log an error and continue\n if initial_prob > 1 or initial_prob < 0:\n sys.stderr.write(\"warning: the prob is not in [0,1] range: \" + initial)\n self.initials[self.get_state_id(initial_state)] = initial_prob\n\n # load the transition probabilities\n for transition in transitions:\n transition_list = transition.split()\n from_state = transition_list[0]\n to_state = transition_list[1]\n transition_prob = float(transition_list[2])\n\n # if the probability is not between 0 and 1 log an error and continue\n if transition_prob > 1 or transition_prob < 0:\n sys.stderr.write(\"warning: the prob is not in [0,1] range: \" + transition)\n self.transitions[self.get_state_id(from_state)][self.get_state_id(to_state)] = transition_prob\n\n # load the emission probabilities\n for emission in emissions:\n emission_list = emission.split()\n emission_state = emission_list[0]\n emission = emission_list[1]\n emission_prob = float(emission_list[2])\n\n # if the probability is not between 0 and 1 log and error\n if emission_prob > 1 or emission_prob < 0:\n sys.stderr.write(\"warning: the prob is not in [0,1] range: \" + emission)\n self.emissions[self.get_state_id(emission_state)][self.get_symbol_id(emission, True)] = emission_prob",
"def load(self):\n if not self._loaded:\n if self._response is None:\n self._next_page()\n data = self.data_from_response(self._response)\n self._apply(data)\n self._loaded = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Opens an HPI session. `host` specifies the hostname or IP address to connect to. `port` is the port number the HPI daemon listens on.
|
def open_hpi_connection(self, host, port=4743, alias=None):
port = int(port)
self._info('Opening connection to %s:%d' % (host, port))
os.environ["OPENHPI_DAEMON_HOST"] = str(host)
os.environ["OPENHPI_DAEMON_PORT"] = str(port)
session = Session()
session.open()
session.attach_event_listener()
self._active_session = session
return self._cache.register(session, alias)
|
[
"def connect(cls, host, port):\n return cls(socket.create_connection((host, port)))",
"def open_connection(self, host, alias=None, port=23, timeout=None,\n newline=None, prompt=None, prompt_is_regexp=False):\n if timeout is None or timeout == '':\n timeout = self._timeout\n if newline is None:\n newline = self._newline\n if prompt is None:\n prompt, prompt_is_regexp = self._prompt\n print '*INFO* Opening connection to %s:%s with prompt: %s' \\\n % (host, port, self._prompt)\n self._conn = self._get_connection(host, port, timeout, newline,\n prompt, prompt_is_regexp)\n return self._cache.register(self._conn, alias)",
"def start_cli_session(handler= None,\r\n netmiko_platform= None,\r\n ip= None, \r\n cred= None, \r\n port= None):\r\n proc= 'cli.start_cli_session'\r\n \r\n print('Connecting to %s device %s' % (netmiko_platform, ip))\r\n \r\n assert isinstance(ip, str), proc+ ': Ip [{}] is not a string.'.format(type(ip)) \r\n \r\n result= {\r\n 'TCP_22': port_is_open(22, ip),\r\n 'TCP_23': port_is_open(23, ip),\r\n 'connection': None, \r\n 'cred': None,\r\n }\r\n \r\n _credList= []\r\n if cred is not None: \r\n _credList.append(cred)\r\n else:\r\n # Get credentials if none were acquired yet\r\n if len(gvars.CRED_LIST) == 0: gvars.CRED_LIST= getCreds()\r\n _credList= gvars.CRED_LIST\r\n \r\n # Error checking \r\n assert len(_credList) > 0, 'No credentials available'\r\n if port: assert port is 22 or port is 23, 'Invalid port number [{}]. Should be 22 or 23.'.format(str(port))\r\n if cred: assert isinstance(cred, dict), 'Cred is type [{}]. Should be dict.'.format(type(cred))\r\n \r\n # Check to see if SSH (port 22) is open\r\n if not result['TCP_22']:\r\n print('Port 22 is closed on %s' % ip, ip)\r\n elif port is None or port is 22: \r\n # Try logging in with each credential we have\r\n for cred in _credList:\r\n try:\r\n # Establish a connection to the device\r\n result['connection'] = handler(\r\n device_type=netmiko_platform,\r\n ip= ip,\r\n username= cred['user'],\r\n password= cred['password'],\r\n secret= cred['password'],\r\n )\r\n \r\n result['cred']= cred\r\n# print('Successful ssh auth to %s using %s, %s' % (ip, cred['user'], cred['password'][:2]))\r\n \r\n return result\r\n \r\n except NetMikoAuthenticationException:\r\n print ('SSH auth error to %s using %s, %s' % (ip, cred['user'], cred['password'][:2]))\r\n continue\r\n except NetMikoTimeoutException:\r\n print('SSH to %s timed out.' % ip)\r\n # If the device is unavailable, don't try any other credentials\r\n break\r\n \r\n # Check to see if port 23 (telnet) is open\r\n if not result['TCP_23']:\r\n print('Port 23 is closed on %s' % ip, ip)\r\n elif port is None or port is 23:\r\n for cred in _credList:\r\n try:\r\n # Establish a connection to the device\r\n result['connection'] = handler(\r\n device_type=netmiko_platform + '_telnet',\r\n ip= ip,\r\n username= cred['user'],\r\n password= cred['password'],\r\n secret= cred['password'],\r\n )\r\n \r\n result['cred']= cred\r\n# print('Successful telnet auth to %s using %s, %s' % (ip, cred['user'], cred['password'][:2]))\r\n \r\n return result\r\n \r\n except NetMikoAuthenticationException:\r\n print('Telnet auth error to %s using %s, %s' % \r\n (ip, cred['user'], cred['password'][:2]))\r\n continue\r\n except:\r\n print('Telnet to %s timed out.' % ip)\r\n # If the device is unavailable, don't try any other credentials\r\n break\r\n \r\n raise IOError('No CLI connection could be established')",
"def get_session(host, platform, username, password, secret):\n\n net_connect = ConnectHandler(device_type=platform,\n ip=host,\n global_delay_factor=0.2,\n username=username,\n password=password,\n secret=secret,\n timeout=20)\n if secret:\n net_connect.enable()\n\n return net_connect",
"def connect(port, host = '127.0.0.1'):\n rfooClient = open(os.path.join(_dir, 'rfooClient.py')).read()\n console = rconsole.ProxyConsole(port)\n \n # Taken from rconsole.py:interact()\n try:\n import readline\n readline.set_completer(console.complete)\n readline.parse_and_bind('tab: complete')\n except ImportError:\n pass\n \n console.conn = rfoo.InetConnection().connect(host = host, \n port = console.port)\n console.runsource('import sys, imp')\n console.runsource('clientModule = imp.new_module(\"rfooClient\")')\n console.runsource(\"exec {0} in clientModule.__dict__\".format(\n repr(rfooClient)))\n console.runsource('globals().update(clientModule.__dict__)')\n return rconsole.code.InteractiveConsole.interact(console, banner = None)",
"def open(self,host='',port=1314,nostart=False):\n\t\n from subprocess import STDOUT, Popen\n\t\n\t sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t try:\n\t \tsock.connect((host,port))\n\t except socket.error:\n\t \tif nostart:\n\t \t\traise socket.error\n\t \telse:\n self.festival_pid = Popen([\"festival\", \"--server\"]).pid \n\t\t \tatexit.register(self._kill_server)\n\t\t \tfor t in xrange(20):\n\t\t \t\ttry:\n\t\t \t\t\ttime.sleep(.25)\n\t\t \t\t\tsock.connect((host,port))\n\t\t \t\texcept socket.error:\n\t\t \t\t\tpass\n\t\t \t\telse:\n\t\t \t\t\tbreak\n\t\t \telse:\n\t\t \t\traise socket.error\n\t\t\n\t self.sock = sock\n return sock",
"def new_socket(self, host, port):\n\n raise NotImplementedError()",
"def connect(self, host):\n if not self.app.connect(host):\n command = 'Connect({0})'.format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host",
"def connectToHub(adr, port, pollMillis, user, password):\n\tinsteon.setPort(IOPort(HubStream(adr, port, pollMillis, user, password)))",
"def gh_bridge(\n host: str = \"127.0.0.1\",\n port: int = ghidra_bridge.ghidra_bridge.DEFAULT_SERVER_PORT,\n **kwargs,\n) -> ghidra_bridge.GhidraBridge:\n return ghidra_bridge.GhidraBridge(\n connect_to_host=host, connect_to_port=port, **kwargs\n )",
"def __init__(self, host, port=NIM_PORT):\r\n\t\t# Initialize the server's host and port\r\n\t\tself.host = host\r\n\t\tself.port = port\r\n\t\t# Initially not waiting for a response\r\n\t\tself.waiting = False\r\n\t\t# Connect to server\r\n\t\ttry:\r\n\t\t\tself.socket = socket.create_connection((self.host, self.port))\r\n\t\texcept socket.error as e:\r\n\t\t\traise NimException(e.strerror)",
"def login(username, password, host, port=8728):\n transport = create_transport(host, port)\n protocol = API(transport=transport, encoding='ASCII')\n routeros = RouterOS(protocol=protocol)\n\n try:\n sentence = routeros('/login')\n token = sentence[0]['ret']\n encoded = encode_password(token, password)\n routeros('/login', **{'name': username, 'response': encoded})\n except (ConnectionError, TrapError, FatalError):\n transport.close()\n raise\n\n return routeros",
"def create_session(self, host, name):\n \n session = GameSession(self, host)\n session.init(name) \n \n self.sessions[session.id] = session\n \n return session",
"def create_host(self, host, **kwargs):\n self.clientobj = Host(\n host = host,\n user = kwargs.pop(\"user\", \"\"),\n server = kwargs.pop(\"server\", self.server),\n nfsversion = kwargs.pop(\"nfsversion\", self.nfsversion),\n proto = kwargs.pop(\"proto\", self.proto),\n port = kwargs.pop(\"port\", self.port),\n sec = kwargs.pop(\"sec\", self.sec),\n export = kwargs.pop(\"export\", self.export),\n mtpoint = kwargs.pop(\"mtpoint\", self.mtpoint),\n datadir = kwargs.pop(\"datadir\", self.datadir),\n mtopts = kwargs.pop(\"mtopts\", self.mtopts),\n nomount = kwargs.pop(\"nomount\", self.nomount),\n sudo = kwargs.pop(\"sudo\", self.sudo),\n )\n\n self.clients.append(self.clientobj)\n return self.clientobj",
"def init(ip=\"localhost\", port=54321, start_h2o=True, enable_assertions=True,\n license=None, nthreads=-1, max_mem_size=None, min_mem_size=None, ice_root=None, \n strict_version_check=True, proxy=None, https=False, insecure=False, username=None, \n password=None, cluster_name=None, max_mem_size_GB=None, min_mem_size_GB=None, proxies=None, size=None):\n H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,\n nthreads=nthreads,max_mem_size=max_mem_size,min_mem_size=min_mem_size,ice_root=ice_root,\n strict_version_check=strict_version_check,proxy=proxy,https=https,insecure=insecure,username=username,\n password=password,cluster_name=cluster_name,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,proxies=proxies,size=size)\n return None",
"def setup_snmp_session(ip):\n session = Session(hostname=ip, community=SNMP_COMMUNITY_RW, version=2, timeout=2)\n return session",
"def open(self):\r\n self.telnet.open(self.host)\r\n self.telnet.read_until('Username: ')\r\n self.telnet.write(self.user+'\\n')\r\n self.telnet.read_until('Password: ')\r\n self.telnet.write(self.password+'\\n')",
"def __init__(self, host, port):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"%s\" % host, port))\n self.client_socket = client_socket",
"def open_connection(self):\r\n\r\n\r\n buf = ctypes.create_string_buffer(16) # at least 8 byte\r\n ret = self.check(self._dll.PH_OpenDevice(self._deviceID, ctypes.byref(buf)))\r\n self._serial = buf.value.decode() # .decode() converts byte to string\r\n if ret >= 0:\r\n self._connected_to_device = True\r\n self.logMsg('Connection to the Picoharp 300 established',\r\n msgType='status')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Switches between opened HPI sessions using an index or alias. The index is returned by the `Open HPI Connection` keyword, and an alias can be given to it. Returns the index of the previously active connection.
|
def switch_hpi_connection(self, index_or_alias):
old_index = self._cache.current_index
    self._active_session = self._cache.switch(index_or_alias)
return old_index
|
[
"def switch_ipmi_connection(self, index_or_alias):\n\n old_index = self._cache.current_index\n self._active_connection = self._cache.switch(index_or_alias)\n return old_index",
"def switch_couchbase_connection(self, index_or_alias: Union[int, str]) -> int:\n\n old_index = self._cache.current_index\n self._connection = self._cache.switch(index_or_alias)\n return old_index",
"def _index(self):\n index = self._whoosh_index\n if index is None:\n index = self.open(clear=False)\n return index",
"def switch_rest_connection(connection_alias):\n try:\n session = ConnectionKeywordsMixin.requests._cache.switch(connection_alias)\n except KeyError:\n raise NoRestConnectionFound(\"REST Connection \"\n \" to connection with alias %s does not exist\"\n %(connection_alias))\n return session",
"def open_hpi_connection(self, host, port=4743, alias=None):\n\n port = int(port)\n\n self._info('Opening connection to %s:%d' % (host, port))\n\n os.environ[\"OPENHPI_DAEMON_HOST\"] = str(host)\n os.environ[\"OPENHPI_DAEMON_PORT\"] = str(port)\n\n session = Session()\n session.open()\n session.attach_event_listener()\n\n self._active_session = session\n\n return self._cache.register(session, alias)",
"def currently_active_ipmi_connection(self):\n return self._cache.current_index",
"def close_session(self, alias):\n session = self._cache.switch(alias)\n try:\n session.close_session()\n except NcclientException as e:\n logger.error(str(e))\n raise str(e)",
"def code_2_idx(self, alias):\n\t\treturn self.alias_list.index(alias)",
"def active_index(self):\n return self._active_index",
"def GetSessionByIndex(self, request, context):\n self._validate_project_and_api_key(request, context)\n return get_handler.GetSessionByIndexHandler(\n request, context, self.data_store).get()",
"def update_alias(self, alias, current_index): # pragma: nocover ; mocked\n\n self.esclient.indices.put_alias(index=current_index, name=alias)\n for item in self.esclient.indices.get(index=f'{alias}-*'):\n if item != current_index:\n self.esclient.indices.delete(index=item)\n self.esclient.indices.refresh(index=current_index)",
"def get_index(name):\n return get_portal_catalog()._catalog.getIndex(name)",
"def assume_alias(index, alias):\n manager.index_put_alias(index, alias)",
"def stateindex(self, PS):\n try:\n return self.indexdict[PS][0]\n except:\n return None",
"def swap(self, index):\n letter = self.get_char_from_index(index)\n swap = self.find_swap(letter)\n swap_index = self.get_char_to_index(swap)\n return swap_index",
"def select(self, index):\r\n return self.execute_command(\"SELECT\", index)",
"def _change_tab_index(self):\r\n widget = QApplication.focusWidget()\r\n shortcut_index = getattr(widget, 'shortcut_index', None)\r\n if shortcut_index:\r\n obj = self.sender()\r\n shortcut_index(obj.index)",
"def index_key(self):\n return self.request.GET.get('index_key', default='current')",
"def GetIdxFromWindow(*args, **kwargs):\n return _aui.AuiTabContainer_GetIdxFromWindow(*args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Closes the current HPI session.
|
def close_hpi_connection(self, loglevel=None):
self._active_session.close()
|
[
"def closeSession(self):\n self.hide()",
"def end_session(self):\n self.sess.close()",
"def close(self ):\n self.session.close()\n self.logger.info(\"Matlab session closed\")",
"def _close_session(cls):\n cls.coord.request_stop()\n cls.coord.join(cls.thread)\n cls.sess.close()",
"def close_all_hpi_connections(self):\n self._active_session = self._cache.close_all()",
"async def close(self) -> None:\n await super().close()\n await self.eval_session.close()",
"async def close(self, ctx: commands.Context):\n await self.bot.logout()",
"def closeSession(self):\n\n self.__lock.acquire()\n try:\n\n try:\n self.stopKeepAlive()\n except Exception, e:\n oldIc.getLogger().warning(\n \"While cleaning up resources: \" + str(e))\n\n self.__sf = None\n\n oldOa = self.__oa\n self.__oa = None\n\n oldIc = self.__ic\n self.__ic = None\n\n # Only possible if improperly configured.\n if not oldIc:\n return\n\n if oldOa:\n try:\n oldOa.deactivate()\n except Exception, e:\n self.__logger.warning(\"While deactivating adapter: \" + str(e.message))\n\n self.__previous = Ice.InitializationData()\n self.__previous.properties = oldIc.getProperties().clone()\n\n try:\n try:\n self.getRouter(oldIc).destroySession()\n except Glacier2.SessionNotExistException:\n # ok. We don't want it to exist\n pass\n except Ice.ConnectionLostException:\n # ok. Exception will always be thrown\n pass\n except Ice.ConnectionRefusedException:\n # ok. Server probably went down\n pass\n except Ice.ConnectTimeoutException:\n # ok. Server probably went down\n pass\n # Possible other items to handle/ignore:\n # * Ice.DNSException\n finally:\n oldIc.destroy()\n del oldIc._impl # WORKAROUND ticket:2007\n\n finally:\n self.__lock.release()",
"def close(self):\n if self.state == \"open\":\n self.require_ioctx_open()\n run_in_thread(self.librados.rados_ioctx_destroy, (self.io,))\n self.state = \"closed\"",
"def close(self):\n\t\tprint(\"Closing SPI interface..\")\n\t\tif self._spi.open:\n\t\t\tself.all_off()\n\t\t\tself._spi.close()\n\t\t\tself.open = False",
"def close_connection(self):\n self.imapSession.close()\n self.imapSession.logout()",
"def close(self):\n self.__requests_session.close()",
"def close(self):\n self.wsi.close()",
"async def close(self) -> None:\n await super().close()\n\n if self.http_session:\n await self.http_session.close()\n\n if self._connector:\n await self._connector.close()\n\n if self._resolver:\n await self._resolver.close()",
"def close(self):\n self.inst.close()\n self.rm.close()",
"def disconnect(self):\n self.current_session.disconnect()",
"def test_end_session(self):\n iiq = insightiq_api.InsightiqApi(username='pat', password='a')\n iiq.end_session()\n\n self.fake_session.get.assert_called()\n self.fake_session.close.assert_called()",
"def close_connection(self):\n self.pi.close(self.device)",
"def close_session(self, alias):\n session = self._cache.switch(alias)\n try:\n session.close_session()\n except NcclientException as e:\n logger.error(str(e))\n raise str(e)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Closes all open HPI sessions and empties the connection cache. After this keyword, the indexes returned by the `Open HPI Connection` keyword start again from 1. This keyword should be used in a test or suite teardown to make sure all connections to devices are closed.
|
def close_all_hpi_connections(self):
self._active_session = self._cache.close_all()
|
[
"def close_hpi_connection(self, loglevel=None):\n self._active_session.close()",
"def _close(self):\n print(\"Closing connections and unlinking memory...\", file=sys.stderr)\n self.csocket.close()\n self.ccontext.term()\n if hasattr(self, 'asocket'):\n self.asocket.close()\n self.acontext.term()\n try:\n self._lock.close()\n self._lock.unlink()\n except posix_ipc.ExistentialError:\n pass\n for shmref in self._shmrefs.values():\n try:\n shmref.unlink()\n except posix_ipc.ExistentialError:\n pass",
"def _disconnect_internal_services(self):\n\n try:\n self.harvester_settings.close()\n self.record_sets.close()\n except plyvel.Error as e:\n raise IndexerError('Failed to close the connection to LevelDB: {}'.format(e))",
"def close_all_couchbase_connections(self) -> None:\n\n self._connection = self._cache.close_all()",
"def __del__(self):\n self._sshClient.close()\n self._sshChannel.close()",
"def close_all(self):\n for conns in self._cm.get_all().values():\n for conn in conns:\n self._cm.remove_connection(conn)",
"def closeSession(self):\n\n self.__lock.acquire()\n try:\n\n try:\n self.stopKeepAlive()\n except Exception, e:\n oldIc.getLogger().warning(\n \"While cleaning up resources: \" + str(e))\n\n self.__sf = None\n\n oldOa = self.__oa\n self.__oa = None\n\n oldIc = self.__ic\n self.__ic = None\n\n # Only possible if improperly configured.\n if not oldIc:\n return\n\n if oldOa:\n try:\n oldOa.deactivate()\n except Exception, e:\n self.__logger.warning(\"While deactivating adapter: \" + str(e.message))\n\n self.__previous = Ice.InitializationData()\n self.__previous.properties = oldIc.getProperties().clone()\n\n try:\n try:\n self.getRouter(oldIc).destroySession()\n except Glacier2.SessionNotExistException:\n # ok. We don't want it to exist\n pass\n except Ice.ConnectionLostException:\n # ok. Exception will always be thrown\n pass\n except Ice.ConnectionRefusedException:\n # ok. Server probably went down\n pass\n except Ice.ConnectTimeoutException:\n # ok. Server probably went down\n pass\n # Possible other items to handle/ignore:\n # * Ice.DNSException\n finally:\n oldIc.destroy()\n del oldIc._impl # WORKAROUND ticket:2007\n\n finally:\n self.__lock.release()",
"def close(self):\n for e in reversed(self.endpoints):\n e.close()\n\n self.endpoints = []\n\n for t in self.timeslots:\n self.bus.free(t)\n\n self.timeslots = []",
"def close_connection_pool():\n for conn_list in ConnectionPool.__pool:\n if conn_list[1] == 1:\n conn_list[2].close()\n conn_list[1] = 0",
"def __clean__(self):\n if self.os_session:\n keystone_utils.close_session(self.os_session)",
"def close_sessions(context):\n context.close_sessions()\n context.debug(\"CloseSessions\", \"All session are closed\")",
"def close(self):\n\n self.cache.close()",
"def close(self):\n self.env = None",
"def close_connection(self):\n self.pi.close(self.device)",
"def close(self):\n self.redis.connection.disconnect()\n if self.listening:\n self.lredis.connection.disconnect()",
"def close(self ):\n self.session.close()\n self.logger.info(\"Matlab session closed\")",
"def close_conn(self):\n self.small_bot.close()",
"def _teardownSockets(self):\n\n if self.s_inject != None:\n self.s_inject.close()\n self.s_inject = None\n if self.s_snoop != None:\n self.s_snoop.close()\n self.s_snoop = None\n\n if self.hciport is not None:\n hciport = self.hciport\n self.device().killforward_all()",
"def close_all(self):\n\n for nc in self.nc:\n nc.close()\n\n for ncin in self.ncin:\n ncin.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the entity path that all further keywords operate on.
|
def set_entity_path(self, ep):
try:
ep = EntityPath().from_string(ep)
except ValueError:
raise RuntimeError('Invalid entity path "%s"' % ep)
self._info('Setting entity path to %s' % (ep,))
self._cp['entity_path'] = ep
|
[
"def set_schema_paths(cls, schema_path, schema_entity_path):\n cls.__schema_path = schema_path\n cls.__schema_entity_path = schema_entity_path",
"def set_reference_path(self, pt):\n self.pt = pt",
"def path(self, path: str):\n self._occurrence_data['path'] = path",
"def hook_dataset_path(self, x):\n self.dataset_path = x",
"def setPartAsPath(self, *args):\n return _coin.SoInteractionKit_setPartAsPath(self, *args)",
"def __set_up_content_paths(self):\n\n client = self.p4.fetch_client()\n self.clientmap = Map(client[\"View\"])\n\n # local syntax client root, force trailing /\n self.contentlocalroot = client[\"Root\"]\n if not self.contentlocalroot.endswith(\"/\"):\n self.contentlocalroot += '/'\n\n # client sytax client root with wildcard\n self.contentclientroot = '//' + self.p4.client + '/...'",
"def set_path(self, path):\n self.url = parse.urlparse(self.url)._replace(path=path).geturl()",
"def setKeywords(self) -> None:\n # Add any new user keywords to leoKeywordsDict.\n d = self.keywordsDict\n keys = list(d.keys())\n for s in g.globalDirectiveList:\n key = '@' + s\n if key not in keys:\n d[key] = 'leokeyword'\n # Create a temporary chars list. It will be converted to a dict later.\n chars = [z for z in string.ascii_letters + string.digits]\n chars.append('_') # #2933.\n for key in list(d.keys()):\n for ch in key:\n if ch not in chars:\n chars.append(g.checkUnicode(ch))\n # jEdit2Py now does this check, so this isn't really needed.\n # But it is needed for forth.py.\n for ch in (' ', '\\t'):\n if ch in chars:\n # g.es_print('removing %s from word_chars' % (repr(ch)))\n chars.remove(ch)\n # Convert chars to a dict for faster access.\n self.word_chars: dict[str, str] = {}\n for z in chars:\n self.word_chars[z] = z",
"def set_pseudopotential_path(self, newpath):\n self.qe_input_data[\"pseudo_dir\"] = newpath",
"def setPath(self, path: 'SoPath') -> \"void\":\n return _coin.SoEventCallback_setPath(self, path)",
"def set_property(self, entity, **kwargs):",
"def setPath(self, path, update=True):\n self.path = path\n if update: self.updatePaths()\n elif self.__folderscreated: self.write()",
"def set_lookup(self, key, doc_type, id, path, index=None):\n self._key = key\n if isinstance(doc_type, pylastica.doc_type.DocType):\n doc_type = doc_type.name\n self._terms = {\n 'type': doc_type,\n 'id': id,\n 'path': path\n }\n if index is not None:\n if isinstance(index, pylastica.index.Index):\n index = index.name\n self._terms['index'] = index\n return self",
"def SetKeyword(key, value):",
"def propset(self, path, key, value):\n\n path = self.session._relative_path(path)\n\n kind, parent = self._check_path(path)\n\n if kind == svn_node_none:\n message = (\"Can't set property on '%s': \"\n \"No such file or directory\" % path)\n raise SubversionException(SVN_ERR_BAD_URL, message)\n\n node = parent.open(path, \"OPEN\", kind)\n node.propset(key, value)",
"def setTrainingPath(self, trainingPath) -> None:\n ...",
"def do_keyword(self):\n dirpath, keyword = self._parse_query(self.query)\n log.debug('dirpath=%r, keyword=%r', dirpath, keyword)\n\n # check for existing configurations for this dirpath and keyword\n profiles = []\n profile_exists = False\n keyword_warnings = []\n dirpath_warnings = []\n for profile in self.wf.settings.get('profiles', {}).values():\n profiles.append((profile['keyword'], profile['dirpath']))\n\n if (keyword, dirpath.abs_noslash) in profiles:\n profile_exists = True\n\n for k, p in profiles:\n if keyword == k:\n keyword_warnings.append(u\"'{}' searches {}\".format(\n k, Dirpath.dirpath(p).abbr_noslash))\n elif dirpath.abs_noslash == p:\n dirpath_warnings.append(u\"Folder already linked to '{}'\".format(k))\n\n if self.query.endswith(DELIMITER): # user has deleted trailing space\n # back up the file tree\n return run_trigger('choose-folder',\n arg=Dirpath.dirpath(os.path.dirname(dirpath)).abbr_slash)\n # return run_alfred(':fzychs {}'.format(\n # Dirpath.dirpath(os.path.dirname(dirpath)).abbr_slash))\n # return self.do_add()\n elif keyword == '': # no keyword as yet\n if not keyword:\n self.wf.add_item('Enter a keyword for the Folder',\n dirpath,\n valid=False,\n icon=ICON_NOTE)\n for warning in dirpath_warnings:\n self.wf.add_item(\n warning,\n 'But you can set multiple keywords per folders',\n valid=False,\n icon=ICON_INFO)\n self.wf.send_feedback()\n return 0\n else: # offer to set keyword\n if profile_exists:\n self.wf.add_item(\n 'This keyword > Fuzzy Folder already exists',\n u\"'{}' already linked to {}\".format(\n keyword,\n dirpath.abbr_noslash),\n valid=False,\n icon=ICON_WARNING)\n else:\n self.wf.add_item(u\"Set '{}' as keyword for {}\".format(\n keyword, dirpath.abbr_noslash),\n dirpath,\n arg='{} {} {}'.format(dirpath, DELIMITER, keyword),\n valid=True,\n icon='icon.png')\n for warning in dirpath_warnings:\n self.wf.add_item(\n warning,\n 'But you can set multiple keywords per folders',\n valid=False,\n icon=ICON_INFO)\n for warning in keyword_warnings:\n self.wf.add_item(\n warning,\n 'But you can use the same keyword for multiple folders',\n valid=False,\n icon=ICON_INFO)\n self.wf.send_feedback()",
"def update_artella_paths():\n\n return None",
"def set_special_path(self, path):\n if not self.special:\n raise Exception('NOT SPECIAL')\n self._special_path = path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the FUMI number for all further FUMI keywords.
|
def set_fumi_number(self, number):
self._cp['fumi_number'] = number
|
[
"def fmi_id(self, fmi_id: int):\n\n self._fmi_id = fmi_id",
"def set_feature_number(self):\r\n self.n_features = self.exprs.shape[1]",
"def fmi_text(self, fmi_text: str):\n\n self._fmi_text = fmi_text",
"def ftduino_id_set(self, identifier):\n self.comm('ftduino_id_set {0}'.format(identifier))",
"def numero_licenceffa(self, numero_licenceffa):\n\n self._numero_licenceffa = numero_licenceffa",
"def setKnotInU(*args, **kwargs):\n \n pass",
"def set_frequency(self, f, force=False):\n if not force and self.get_frequency() == int(f):\n return\n self._write('%s*F' % str(f).zfill(6))",
"def setNum(self, num: 'int const') -> \"void\":\n return _coin.SoMField_setNum(self, num)",
"def setKnotsInU(*args, **kwargs):\n \n pass",
"def set_mfi(self, mf_ispecies=None, mf_ilevel=None):\n\n if (mf_ispecies is not None):\n if (mf_ispecies != self.mf_ispecies):\n self.mf_ispecies = mf_ispecies\n elif not hasattr(self, 'mf_ispecies'):\n self.mf_ispecies = 1\n elif not hasattr(self, 'mf_ispecies'):\n self.mf_ispecies = 1\n\n if (mf_ilevel is not None):\n if (mf_ilevel != self.mf_ilevel):\n self.mf_ilevel = mf_ilevel\n elif not hasattr(self, 'mf_ilevel'):\n self.mf_ilevel = 1\n elif not hasattr(self, 'mf_ilevel'):\n self.mf_ilevel = 1",
"def set_family_nick_name(self, val):\n self.famnick = val",
"def setNumber(tag, option, value):\n ierr = c_int()\n lib.gmshModelMeshFieldSetNumber(\n c_int(tag),\n c_char_p(option.encode()),\n c_double(value),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshFieldSetNumber returned non-zero error code: \",\n ierr.value)",
"def fmi_id(self) -> int:\n return self._fmi_id",
"def set_n(self, value):\n self._n = value",
"def set_factuality(self, i, val):\n self.factuality[i] = val",
"def setNumbers(tag, option, value):\n api_value_, api_value_n_ = _ivectordouble(value)\n ierr = c_int()\n lib.gmshModelMeshFieldSetNumbers(\n c_int(tag),\n c_char_p(option.encode()),\n api_value_, api_value_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshFieldSetNumbers returned non-zero error code: \",\n ierr.value)",
"def set_FECID(self, value):\n super(GetLegislatorInputSet, self)._set_input('FECID', value)",
"def set_counts(self, counts):\r\n \t# check counts input\r\n\t\tif len(counts) != 4:\r\n\t\t\traise ValueError(\"Input counts must be of length 4. Received length {0} for motif (id='{1}', name='{2}'). Input counts are: {3}\".format(len(counts), self.id, self.name, counts))\r\n\t\tlengths = [len(base) for base in counts]\r\n\t\tif len(set(lengths)) != 1:\r\n\t\t\traise ValueError(\"All lists in counts must be of same length.\")\r\n\r\n\t\t# add counts and associated length/n to OneMotif object\r\n\t\tself.counts = counts\r\n\t\tself.length = lengths[0]\t#update motif length\r\n\t\tself.n = np.sum([row[0] for row in counts])\r\n\r\n\t\treturn self",
"def set_nfeatures(cls, n):\n if not isinstance(n, int):\n raise ValueError(\"Attribute 'nfeatures' must be of <type 'int'>, got %s\" % str(type(n)))\n cls.nfeatures = n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fails unless the specified FUMI RDR exists. `id` is the ID string of the resource descriptor record. If the RDR is found, it will be automatically selected.
|
def fumi_rdr_should_exist(self, id):
self._rdr_should_exist(FumiRdr, id)
|
[
"def dimi_rdr_should_exist(self, id):\n self._rdr_should_exist(DimiRdr, id)",
"def id_available(self, _id):\n raise NotImplementedError",
"def is_this_record_exist(table, id_):\n if id_[0] not in [record[0] for record in table]:\n\n ui.print_error_message(\"Record with this ID not found\")\n return False\n return True",
"def check_existing_device(id):\n mydb=connect_database()\n print(id)\n with mydb.cursor() as mycursor:\n sql= \"SELECT device_id FROM devices WHERE device_id = %s\"\n val = (id,)\n mycursor.execute(sql,val)\n myresult = mycursor.fetchall()\n if len(myresult) > 0:\n return True\n else:\n return False",
"def find_by_id(self, id):\n\n raise NotImplementedError",
"def validate_resource_id(cls, resource_id: str, name: str = \"\", message: str = \"\") -> None:\n exc_message = (\n f\"Invalid balance ID '{resource_id}', it should start with '{cls.RESOURCE_ID_PREFIX}' or be the \"\n f\"string 'primary'.\"\n )\n\n if resource_id == \"primary\":\n return\n else:\n super().validate_resource_id(resource_id, message=exc_message)",
"def validate_rfid(rfid):\n if rfid in get_all_rfids():\n msg = \"This rfid is already in use!\"\n logger.info(msg)\n raise ValidationError(msg)",
"def exists(self, file_id):\n raise NotImplementedError",
"def test_nonexistent_odid(self):\n self.assertIsNone(get_object_detection_by_id(odid=999))",
"def _raise_file_exists(self, file_id):\n\n raise FileExists(\"file with _id %r already exists\" % file_id)",
"def test_existing_fid(self):\n f = get_folder_by_id(fid=self.rid)\n self.assertEqual(f.id, self.rid)\n self.assertEqual(f.path, self.r_path)\n self.assertEqual(f.name, self.r_name)\n self.assertIsNone(f.parent)",
"def find(self, id):\r\n try:\r\n detailsDict = self.flavorDetails(id)\r\n except ClientErrors.CloudServersAPIFault, e:\r\n if e.code == 404: # not found\r\n return None # just return None\r\n else: # some other exception, just re-raise\r\n raise\r\n retFlavor = Flavor(\"\")\r\n retFlavor.initFromResultDict(detailsDict)\r\n retFlavor._manager = self\r\n return retFlavor",
"def find_field(self, field_id):\n while field_id in self.ptg2_em.screen:\n return True\n\n return False # field_id requested not found in current screen",
"def _checkIdUniqueness(self, id):\n if id == 'time':\n logger.warn(\"Specifying 'time' as a variable is dangerous! Are you \"\n \"sure you know what you're doing?\")\n elif id == 'default':\n logger.warn(\"'default' is a reserved keyword in C. This will cause \"\n \"problems using the C-based integrator.\")\n elif id[0].isdigit():\n raise ValueError(\"The id %s is invalid. ids must not start with a \"\n \"number.\" % id)\n if id in list(self.variables.keys())\\\n or id in list(self.reactions.keys())\\\n or id in list(self.functionDefinitions.keys())\\\n or id in list(self.events.keys())\\\n or id in list(self.constraints.keys())\\\n or id == self.id:\n raise ValueError('The id %s is already in use!' % id)",
"def exists(self, identifier):\n return False",
"def check_id(id):\n return dict(id=int(id)) in execute('select id from tasks')",
"def check_doc_id(self, id_doc):\n\n try:\n self.doc.get(id = id_doc)\n return True\n except ObjectDoesNotExist:\n return False",
"def check_matching_id(self, id):\n return next((maze for maze in self.mazes if maze .id == id), None)",
"def test_exists_by_id(self, _id):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the DIMI number for all further DIMI keywords.
|
def set_dimi_number(self, number):
self._cp['dimi_number'] = number
|
[
"def SetDimensions(self, i: 'unsigned int', dim: 'unsigned long long') -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetDimensions(self, i, dim)",
"def setidd(cls, iddinfo, iddindex, block, idd_version):\n cls.idd_info = iddinfo\n cls.block = block\n cls.idd_index = iddindex\n cls.idd_version = idd_version",
"def SetNumberOfDimensions(self, arg0: 'unsigned int') -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetNumberOfDimensions(self, arg0)",
"def set_feature_number(self):\r\n self.n_features = self.exprs.shape[1]",
"def set_discrete_num(self, dn):\n self._dn = dn",
"def SetDimensionInformation(dims):",
"def _re_number(self):\n new_dataset_indices = []\n for g, graph in enumerate(self.graphs):\n graph._force_index(g)\n for s, graph_set in enumerate(graph.sets):\n graph_set._force_index(s)\n new_dataset_indices.append((g,s))\n for i, dataset in enumerate(self.datasets):\n dataset._force_index(*new_dataset_indices[i])",
"def set_domain_iiaxis(self, iinum=None, iiaxis='x'):\n iix = 'ii' + iiaxis\n if hasattr(self, iix):\n # if iinum is None or self.iix == iinum, do nothing and return nothing.\n if (iinum is None):\n return None\n elif np.all(iinum == getattr(self, iix)):\n return None\n\n if iinum is None:\n iinum = slice(None)\n\n if not np.array_equal(iinum, slice(None)):\n # smash self.variables. Necessary, since we will change the domain size.\n self.variables = {}\n\n if isinstance(iinum, (int, np.integer)): # we convert to slice, to maintain dimensions of output.\n iinum = slice(iinum, iinum+1) # E.g. [0,1,2][slice(1,2)] --> [1]; [0,1,2][1] --> 1\n\n # set self.iix\n setattr(self, iix, iinum)\n if self.verbose:\n # convert iinum to string that wont be super long (in case iinum is a long list)\n try:\n assert len(iinum) > 20\n except (TypeError, AssertionError):\n iinumprint = iinum\n else:\n iinumprint = 'list with length={:4d}, min={:4d}, max={:4d}, x[1]={:2d}'\n iinumprint = iinumprint.format(len(iinum), min(iinum), max(iinum), iinum[1])\n # print info.\n print('(set_domain) {}: {}'.format(iix, iinumprint),\n whsp*4, end=\"\\r\", flush=True)\n\n # set self.xLength\n if isinstance(iinum, slice):\n nx = getattr(self, 'n'+iiaxis+'b')\n indSize = len(range(*iinum.indices(nx)))\n else:\n iinum = np.asarray(iinum)\n if iinum.dtype == 'bool':\n indSize = np.sum(iinum)\n else:\n indSize = np.size(iinum)\n setattr(self, iiaxis + 'Length', indSize)\n\n return True",
"def setDimension(self, n, val):\n self.dimensions[n] = val",
"def setFieldNumIndices(self, fldnumind: 'int const') -> \"void\":\n return _coin.SoNotRec_setFieldNumIndices(self, fldnumind)",
"def set3Int(*args, **kwargs):\n \n pass",
"def set_nr_attributes(self):\n self.attr_nr.setText('Number of attributes: %s'\n % self.tree_widget.currentItem().childCount())",
"def setKeywords(self) -> None:\n # Add any new user keywords to leoKeywordsDict.\n d = self.keywordsDict\n keys = list(d.keys())\n for s in g.globalDirectiveList:\n key = '@' + s\n if key not in keys:\n d[key] = 'leokeyword'\n # Create a temporary chars list. It will be converted to a dict later.\n chars = [z for z in string.ascii_letters + string.digits]\n chars.append('_') # #2933.\n for key in list(d.keys()):\n for ch in key:\n if ch not in chars:\n chars.append(g.checkUnicode(ch))\n # jEdit2Py now does this check, so this isn't really needed.\n # But it is needed for forth.py.\n for ch in (' ', '\\t'):\n if ch in chars:\n # g.es_print('removing %s from word_chars' % (repr(ch)))\n chars.remove(ch)\n # Convert chars to a dict for faster access.\n self.word_chars: dict[str, str] = {}\n for z in chars:\n self.word_chars[z] = z",
"def setNumIndicesPerLine(self, *args):\r\n return _osgDB.Output_setNumIndicesPerLine(self, *args)",
"def setspotnum(self,num):\n self.spotnum = num",
"def __init__(self):\n self.indices_idf = {}",
"def _set_plot_keywords(self):\r\n try:\r\n keyword_page =\\\r\n self._get_page_mechanize('http://uk.imdb.com/title/%s/keywords' %\\\r\n (self.imdb_id,))\r\n\r\n tags = keyword_page.find('div', {'id': 'keywords_content'}).findAll('td')\r\n if tags:\r\n for tag in tags:\r\n try:\r\n keyword = tag.find('a')\r\n if keyword:\r\n keyword = keyword.contents[0].lower().strip()\r\n if len(keyword) > 0:\r\n self.plot_keywords.append(keyword)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve plot keywords(%s)(%s)' %\r\n (self.imdb_id, e))",
"def set_elems_number(self, elems_number):\n assert len(elems_number) == self.natoms\n self.elems = [elements.number.keys()[i] for i in elems_number]\n return",
"def set_dimension(self, ind, dimension):\n if not isinstance(dimension, Dimension):\n raise TypeError('dimension needs to be a sidpy.Dimension object')\n self.__validate_dim(ind, dimension.name)\n # delattr(self, self._axes[ind].name)\n setattr(self, dimension.name, dimension)\n setattr(self, 'dim_{}'.format(ind), dimension)\n self._axes[ind] = dimension"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fails unless the specified DIMI RDR exists. A found RDR will be automatically selected. See also the `FUMI RDR Should Exist` keyword.
|
def dimi_rdr_should_exist(self, id):
self._rdr_should_exist(DimiRdr, id)
|
[
"def fumi_rdr_should_exist(self, id):\n self._rdr_should_exist(FumiRdr, id)",
"def ExisteRelacion(self,dr,usuario):\n bRetorno=False\n query=db.GqlQuery(\"select * from Relacion where usuario=:1 and doctor=:2\",usuario, dr)\n if query.count()>0:\n bRetorno=1\n else: #no existe, entonces valido que NO sea un dr\n instanciaDr=ValidoDoctor()\n if instanciaDr.ExisteDr(usuario.usuario):#es un Dr. no se puede grabar ese tipo de relaciones, es pecado\n bRetorno=True\n return bRetorno",
"def ddo_exists(self, did: str) -> bool:\n response = self.requests_session.get(f\"{self.base_url}/ddo/{did}\").content\n\n return \"asset DID is not in OceanDB\" not in str(response)",
"def test_redflag_not_found(self):\n response = self.app.get(\"/api/v2/redflags/10000\", headers=self.headers)\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['message'], \"Redflag does not exist\")",
"def _is_dmr_device(discovery_info: ssdp.SsdpServiceInfo) -> bool:\n # Abort if the device doesn't support all services required for a DmrDevice.\n discovery_service_list = discovery_info.upnp.get(ssdp.ATTR_UPNP_SERVICE_LIST)\n if not discovery_service_list:\n return False\n\n services = discovery_service_list.get(\"service\")\n if not services:\n discovery_service_ids: set[str] = set()\n elif isinstance(services, list):\n discovery_service_ids = {service.get(\"serviceId\") for service in services}\n else:\n # Only one service defined (etree_to_dict failed to make a list)\n discovery_service_ids = {services.get(\"serviceId\")}\n\n if not DmrDevice.SERVICE_IDS.issubset(discovery_service_ids):\n return False\n\n return True",
"def HasDRT(self):\n return self.__has('DRT')",
"def test_nonexistent_odid(self):\n self.assertIsNone(get_object_detection_by_id(odid=999))",
"def test_delete_nonexistent_intervention(self):\n self.app.post(\"/api/v2/interventions\", headers=self.headers,\n data=json.dumps(self.redflag_data))\n response = self.app.delete(\n \"/api/v2/interventions/10000\", headers=self.headers)\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['message'], 'Intervention does not exist')",
"def HasqDRR(self):\n return self.__has('qDRR')",
"def vdi_exists(self, name):\n return name in self.list_vdi()",
"def rtlsdr_test(device_idx='0', rtl_sdr_path=\"rtl_sdr\"):\n\n _rtl_cmd = \"timeout 5 %s -d %s -n 200000 - > /dev/null\" % (rtl_sdr_path, str(device_idx))\n\n\n # First, check if the RTLSDR with a provided serial number is present.\n if device_idx == '0':\n # Check for the presence of any RTLSDRs.\n _rtl_exists = find_rtlsdr()\n\n else:\n # Otherwise, look for a particular RTLSDR\n _rtl_exists = find_rtlsdr(device_idx)\n \n if not _rtl_exists:\n logging.error(\"RTLSDR - RTLSDR with serial #%s is not present!\" % str(device_idx))\n return False\n\n # So now we know the rtlsdr we are attempting to test does exist.\n # We make an attempt to read samples from it:\n\n _rtlsdr_retries = 2\n\n while _rtlsdr_retries > 0:\n try:\n FNULL = open(os.devnull, 'w') # Inhibit stderr output\n _ret_code = subprocess.check_call(_rtl_cmd, shell=True, stderr=FNULL)\n FNULL.close()\n except subprocess.CalledProcessError:\n # This exception means the subprocess has returned an error code of one.\n # This indicates either the RTLSDR doesn't exist, or \n pass\n else:\n # rtl-sdr returned OK. We can return True now.\n time.sleep(1)\n return True\n\n # If we get here, it means we failed to read any samples from the RTLSDR.\n # So, we attempt to reset it.\n if device_idx == '0':\n reset_all_rtlsdrs()\n else:\n reset_rtlsdr_by_serial(device_idx)\n\n # Decrement out retry count, then wait a bit before looping\n _rtlsdr_retries -= 1\n time.sleep(2)\n\n # If we run out of retries, clearly the RTLSDR isn't working.\n return False",
"def is_rtlsdr(vid,pid):\n for _dev in KNOWN_RTLSDR_DEVICES:\n _vid = _dev[0]\n _pid = _dev[1]\n if (vid == _vid) and (pid == _pid):\n return True\n\n return False",
"def find_rtlsdr(serial=None):\n\n # If not Linux, return immediately, and assume the RTLSDR exists..\n if platform.system() != 'Linux':\n return True\n\n lsusb_info = lsusb()\n bus_num = None\n device_num = None\n\n for device in lsusb_info:\n try:\n device_serial = device['Device Descriptor']['iSerial']['_desc']\n device_product = device['Device Descriptor']['iProduct']['_desc']\n device_pid = device['Device Descriptor']['idProduct']['_value']\n device_vid = device['Device Descriptor']['idVendor']['_value']\n except:\n # If we hit an exception, the device likely doesn't have one of the required fields.\n continue\n\n if is_rtlsdr(device_vid, device_pid):\n # We have found a RTLSDR! If we're not looking for a particular serial number, we can just quit now.\n if serial == None:\n return True\n else:\n if (device_serial == serial):\n bus_num = int(device['bus'])\n device_num = int(device['device'])\n\n if bus_num and device_num:\n # We have found an RTLSDR with this serial number!\n return True\n\n else:\n # Otherwise, nope.\n return False",
"def check_drug(dbid, drugs):\n\n list_dbid = list(drugs.dict_drug.keys())\n\n return dbid in list_dbid",
"def gadget_exists(self) -> bool:\n\n if not self.architecture:\n raise Exception('Unable to determine path without architecture')\n\n return os.path.exists(self.get_frida_library_path())",
"def validate_rfid(rfid):\n if rfid in get_all_rfids():\n msg = \"This rfid is already in use!\"\n logger.info(msg)\n raise ValidationError(msg)",
"def test_rirs_create(self):\n pass",
"def HasFDR(self):\n return self.__has('FDR')",
"def _check_random_dists(rd):\n for key in rd.keys():\n if is_singleton(rd[key]):\n if isinstance(rd[key], RandomDistribution):\n check_rng(rd[key].rng, f\"RandomDistribtion for {key}\")\n else:\n for _start, _stop, val in rd[key].iter_ranges():\n if isinstance(val, RandomDistribution):\n check_rng(val.rng, f\"RandomDistribution for {key}\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
All team members may view a Context. Admin members may change a Context. Admin members may delete a Context.
|
def grant_permissions(self):
assign_perm("context.view_context", self.team.group, self)
assign_perm("context.change_context", self.team.admingroup, self)
assign_perm("context.delete_context", self.team.admingroup, self)
|
[
"async def administrators(self, ctx, arg):\r\n # TODO should add initial check who can use this command\r\n # maybe only people with admin permissions\r\n pass",
"async def roles(self, ctx):\n pass",
"def test_otoroshi_controllers_adminapi_users_controller_delete_admin(self):\n pass",
"def test_delete_teams_id_memberships(self):\n pass",
"def test_projects_project_id_members_user_id_delete(self):\n pass",
"def test_otoroshi_controllers_adminapi_users_controller_web_authn_delete_admin(self):\n pass",
"def DeleteContext(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def test_portals_id_designs_nk_members_delete(self):\n pass",
"def test_portals_id_portal_members_delete(self):\n pass",
"def test_admin_granted(self, context):\n user, channel, _ = context\n user.update(access=Access.ADMIN)\n\n @gate(role=Role.MASTER)\n def fn(cid):\n return \"ok\"\n\n assert fn(cid=channel.id) == \"ok\"",
"def test_portals_id_designs_nk_design_members_delete(self):\n pass",
"def test_portals_id_members_delete(self):\n pass",
"def DeleteAllContexts(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def elevated(self):\n context = copy.copy(self)\n context.is_super = True\n\n if 'admin' not in context.roles:\n context.roles.append('admin')\n return context",
"def _admin_permission_for_context(self, context_id):\n return Permission(\n self.ADMIN_PERMISSION.action,\n self.ADMIN_PERMISSION.resource_type,\n None,\n context_id)",
"def upgrade():\n op.execute(\"\"\"\n UPDATE user_roles\n JOIN roles ON user_roles.role_id = roles.id\n SET user_roles.context_id = 0\n WHERE user_roles.context_id is NULL AND\n roles.name = 'Administrator'\n \"\"\")",
"def gcn_update_delete_logic(cls, user_or_token):\n\n if len({'Manage GCNs', 'System admin'} & set(user_or_token.permissions)) == 0:\n # nothing accessible\n return restricted.query_accessible_rows(cls, user_or_token)\n\n return DBSession().query(cls)",
"def has_delete_permission(self, request, obj=None):\n\t\t# TEMPORARY\n\t\treturn False if is_techadmin(request.user) else \\\n\t\t\tsuper(TechMHLUserAdmin, self).has_delete_permission(request, obj)",
"async def clearplayers(ctx):\n\n roles = [discord.utils.get(ctx.guild.roles, name='Players'),\n discord.utils.get(ctx.guild.roles, name='Paid')]\n\n if roles and ctx.message.author.guild_permissions.administrator is True:\n for role in roles:\n for x in role.members:\n await x.remove_roles(role)\n\n await ctx.channel.send(\"```Players + Paid roles have been emptied```\")\n\n elif ctx.message.author.guild_permissions.administrator is False:\n await ctx.channel.send(\"```You are not an administrator```\")\n\n else:\n await ctx.channel.send(\"```Players + Paid roles does not exist in this guild```\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save Context and grant permissions
|
def save(self, **kwargs):
super().save(**kwargs)
self.grant_permissions()
|
[
"def grant_permissions(self):\n assign_perm(\"context.view_context\", self.team.group, self)\n assign_perm(\"context.change_context\", self.team.admingroup, self)\n assign_perm(\"context.delete_context\", self.team.admingroup, self)",
"def save_context(self):\n if self.context is not None:\n self.context.save()",
"def grantPermissionOnContext(context, request):\n permission = request.matchdict.get('permission', None)\n if permission not in DEFAULT_CONTEXT_PERMISSIONS.keys():\n raise InvalidPermission(\"There's not any permission named '%s'\" % permission)\n\n subscription = context.subscription\n\n if subscription is None:\n raise ObjectNotFound('{} is not susbcribed to {}'.format(request.actor, context['hash']))\n\n if permission in subscription.get('_grants', []):\n # Already have the permission grant\n code = 200\n else:\n # Assign the permission\n code = 201\n subscription = request.actor.grantPermission(\n subscription,\n permission,\n permanent=request.params.get('permanent', DEFAULT_CONTEXT_PERMISSIONS_PERMANENCY))\n\n handler = JSONResourceEntity(request, subscription, status_code=code)\n return handler.buildResponse()",
"async def add_context(context: models.CreateContext, x_auth_key: str = Header(None)):\r\n\r\n logger.info(\"Auth Key : %s\", x_auth_key)\r\n logger.info(\"Context : %s\", context)\r\n\r\n if config.DEV_PASSWORD != x_auth_key:\r\n raise HTTPException(status_code=403, detail=\"Unauthorized\")\r\n\r\n context_key = context.context_key\r\n new_context = context.new_context\r\n\r\n contexts[context_key] = new_context\r\n\r\n return \"Context Changed Successfully\"",
"def get_permissions(self, context={}):\n context['has_permission'] = self.mongoadmin.has_permission(self.request)\n context['has_staff_permission'] = self.mongoadmin.has_staff_permission(self.request) \n return context",
"def softModCtx(*args, **kwargs):\n\n pass",
"def set_context(self, context):",
"def RoleGrantPermission(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def resetPermissionsOnContext(context, request):\n\n subscription = request.actor.reset_permissions(context.subscription, context)\n handler = JSONResourceEntity(request, subscription, status_code=200)\n return handler.buildResponse()",
"def save_request_token(self, token, request):\r\n log.debug('Save request token %r', token)\r\n self._grantsetter(token, request)",
"def prepare_for_use(self):\n self.active_permissions_vault = \\\n UserPermissionsVault.prepare_for_use(self.permissions_vault)",
"def load_personal_context(user, permissions):\n personal_context = _get_or_create_personal_context(user)\n\n permissions.setdefault('__GGRC_ADMIN__', {})\\\n .setdefault('__GGRC_ALL__', dict())\\\n .setdefault('contexts', list())\\\n .append(personal_context.id)",
"def mateCtx(*args, **kwargs):\n\n pass",
"def setPermission(self,user,permission):\n user.permissions = permission\n self.session.commit()",
"def softModContext(*args, **kwargs):\n\n pass",
"def set(contextIn):\n global context\n context = contextIn",
"def test_api_context_save(self) -> None:\n\n context_json = converter.class_to_json(self._API_CONTEXT)\n\n self._API_CONTEXT.save(self._TMP_FILE_PATH_FULL)\n\n with open(self._TMP_FILE_PATH_FULL, self._FILE_MODE_READ) as file_:\n context_retrieved = file_.read()\n\n os.remove(self._TMP_FILE_PATH_FULL)\n\n self.assertEqual(context_retrieved, context_json)",
"def commit_security(self, scope):\n self._security_policy_draft('commit', scope)",
"def test_grant_privileges(self):\n portal = self.portal\n logout()\n roles = getSecurityManager().getUser().getRolesInContext(portal)\n self.assertEqual(roles, ['Anonymous'])\n\n # We grant temporarily privileges of a real user to anonymous\n with GrantPrivilegesForToken(self.bar_token, portal):\n expected = set(['Member', 'Authenticated'])\n roles = getSecurityManager().getUser().getRolesInContext(portal)\n self.assertEqual(set(roles), expected)\n\n # We should have the former privileges since we left the context manager\n roles = getSecurityManager().getUser().getRolesInContext(portal)\n self.assertEqual(roles, ['Anonymous'])\n\n # But there should be no change in security with a junk token\n with GrantPrivilegesForToken('unknown-token', portal):\n roles = getSecurityManager().getUser().getRolesInContext(portal)\n self.assertEqual(roles, ['Anonymous'])\n return",
"def _updatecontext(self, aq_context):\n self._aq_context = aq_context"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetches the owner user id of the requested entity_type/entity_id
|
def get_owner_id(session, entity_type, entity_id):
if entity_type == "track":
owner_id_query = (
session.query(Track.owner_id)
.filter(
Track.track_id == entity_id,
Track.is_delete == False,
Track.is_current == True,
)
.all()
)
if not owner_id_query:
return None
owner_id = owner_id_query[0][0]
return owner_id
if entity_type == "album":
owner_id_query = (
session.query(Playlist.playlist_owner_id)
.filter(
Playlist.playlist_id == entity_id,
Playlist.is_delete == False,
Playlist.is_current == True,
Playlist.is_album == True,
)
.all()
)
if not owner_id_query:
return None
owner_id = owner_id_query[0][0]
return owner_id
if entity_type == "playlist":
owner_id_query = (
session.query(Playlist.playlist_owner_id)
.filter(
Playlist.playlist_id == entity_id,
Playlist.is_delete == False,
Playlist.is_current == True,
Playlist.is_album == False,
)
.all()
)
if not owner_id_query:
return None
owner_id = owner_id_query[0][0]
return owner_id
return None
|
[
"def RequireOwner(cls, photo_entity):\n if not photo_entity.from_datastore:\n raise endpoints.NotFoundException(Photo.NOT_FOUND_ERROR)\n\n current_picturesque_user = cls.RequirePicturesqueUser()\n\n if photo_entity.owner != current_picturesque_user.user_object:\n raise endpoints.ForbiddenException(Photo.FORBIDDEN_ERROR)\n\n return current_picturesque_user",
"def get_owner(self):\n if self.user is None:\n self.user = self.gh.get_user()\n if self.org is None:\n return self.user\n if self.owner is None:\n try:\n self.owner = [org for org in self.user.get_orgs() \\\n if org.login.lower() == self.org.lower()][0]\n except Exception as e:\n raise BaseException(\"Could not find organization '\" + str(self.org) + \\\n \"' because: \" + str(e))\n\n return self.owner",
"def get_owner_pi(context):\n assert interfaces.IOwned.providedBy(context), \\\n \"Not an Owned (parliamentary) Item: %s\" % (context)\n return dbutils.get_user(context.owner_id)",
"def get_owner(conn, owner_id):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM owners\n WHERE owner_id=?;\"\"\"\n c.execute(sql, (owner_id,))\n return c.fetchall()",
"def _get_entity_from_soco_uid(hass, uid):\n for entity in hass.data[DATA_SONOS].entities:\n if uid == entity.unique_id:\n return entity\n return None",
"def getProvenanceUser(self, obj):\n user = self.getCurrentUser()\n if obj and not user:\n user = obj.get('userId', None)\n if not user:\n user = obj.get('creatorId', None)\n if isinstance(user, tuple([ObjectId] + list(six.string_types))):\n user = User().load(user, force=True)\n return user",
"def get_owner_by_id(self, owner_id, **options):\n owners = self.get_owners(**options)\n for owner in owners:\n if int(owner[\"ownerId\"]) == int(owner_id):\n return owner\n return None",
"def user_entity( self ):\n return",
"def _get_attrib_owner_from_geometry_entity_type(entity_type):\n # If the class is a base class in the map then just return it.\n if entity_type in _GEOMETRY_ATTRIB_MAP:\n return _GEOMETRY_ATTRIB_MAP[entity_type]\n\n # If it is not in the map then it is most likely a subclass of hou.Prim,\n # such as hou.Polygon, hou.Face, hou.Volume, etc. We will check the class\n # against being a subclass of any of our valid types and if it is, return\n # the owner of that class.\n for key, value in _GEOMETRY_ATTRIB_MAP.iteritems():\n if issubclass(entity_type, key):\n return value\n\n # Something went wrong so raise an exception.\n raise TypeError(\"Invalid entity type: {}\".format(entity_type))",
"def getOwnerOrGroup(ownerkey):\n user_id, group_id = None, None\n if ownerkey is None:\n # ToDo: Is this insecure?\n user_id = \"all\"\n elif ownerkey.startswith(\"user_\"):\n user_id = ownerkey[5:]\n elif ownerkey.startswith(\"group_\"):\n group_id = ownerkey[6:]\n else:\n user_id = ownerkey\n return (user_id, group_id)",
"def get_single_user():",
"def get_entity_id(self):\n\n\t\treturn self.__entity_id",
"def get_owner_id(self, comment_soup):\n try:\n link = comment_soup.a['href']\n id_v1 = re.search('profile.php\\?id=[0-9]+&', link)\n if id_v1:\n return re.search('[0-9]+', id_v1.group(0)).group(0)\n else:\n return re.search('facebook.com/.*\\?', link).group(0).replace('facebook.com/', \"\").replace(\n '?comment_id', \"\")\n except Exception as e:\n print('crashed while searching comment owner id', e)\n return None",
"def get_wrapped_owner(owner_id, support_deleted=False):\n if not owner_id:\n return None\n\n if isinstance(owner_id, numbers.Number):\n return None\n\n def _get_class(doc_type):\n return {\n 'CommCareUser': CommCareUser,\n 'WebUser': WebUser,\n 'Group': Group,\n }.get(doc_type)\n\n def _get_deleted_class(doc_type):\n return {\n 'Group-Deleted': Group,\n }.get(doc_type)\n\n try:\n return SQLLocation.objects.get(location_id=owner_id)\n except SQLLocation.DoesNotExist:\n pass\n\n try:\n owner_doc = user_db().get(owner_id)\n except ResourceNotFound:\n pass\n else:\n cls = _get_class(owner_doc['doc_type'])\n if support_deleted and cls is None:\n cls = _get_deleted_class(owner_doc['doc_type'])\n return cls.wrap(owner_doc) if cls else None\n\n return None",
"def _get_user_id(self):\n return self._api_query_request('me')['id']",
"def get_owner(self):\n\n return self.owner",
"def get_owner_by_email(self, owner_email: str, **options):\n owners = self.get_owners(method=\"GET\", params={\"email\": owner_email}, **options)\n if owners:\n return owners[0]\n return None",
"def _get_odoo_project_owner(self):\n if not getattr(self, 'project_owner', None):\n odooclient = odoo_client.get_odoo_client()\n\n projects = odooclient.projects.list(\n [('tenant_id', '=', self.project_id)])\n if len(projects) == 0:\n raise OdooModelsIncorrect(\n 'Project \"%s\" is not set up in OpenERP.' % self.project_id)\n if len(projects) > 1:\n raise OdooModelsIncorrect(\n 'More than one project \"%s\" is set up in OpenERP.'\n % self.project_id)\n\n self.odoo_project = projects[0]\n self.add_note(\"Odoo Project ID: %s\" % self.odoo_project.id)\n\n project_rels = odooclient.project_relationships.list([\n ('cloud_tenant', '=', self.odoo_project.id),\n ('contact_type', '=', 'owner'),\n ])\n\n if len(project_rels) == 0:\n raise OdooModelsIncorrect(\n 'Project \"%s\" has no owner!' % self.project_id)\n elif len(project_rels) > 1:\n raise OdooModelsIncorrect(\n 'Project \"%s\" has more than one owner!' % self.project_id)\n\n self.project_owner = project_rels[0].partner_id\n\n self.add_note(\"Found owner: %s\" % self.project_owner.name)\n return self.project_owner",
"def owner_googleplus_user_id(self):\n raise endpoints.BadRequestException(\n 'ownerGoogleplusUserId value should never be accessed.')",
"def get_file_owner(data) -> str:\r\n return data.get(\"username\") if not data.get(\"owner\") else data[\"owner\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the notifications for remix tracks that are reposted/favorited by the parent remix author
|
def get_cosign_remix_notifications(session, max_block_number, remix_tracks):
if not remix_tracks:
return []
remix_notifications = []
remix_track_ids = [r["item_id"] for r in remix_tracks]
# Query for all the parent tracks of the remix tracks
tracks_subquery = (
session.query(Track)
.filter(
Track.is_unlisted == False,
Track.is_delete == False,
Track.is_current == True,
)
.subquery()
)
parent_tracks = (
session.query(
Remix.child_track_id, Remix.parent_track_id, tracks_subquery.c.owner_id
)
.join(tracks_subquery, Remix.parent_track_id == tracks_subquery.c.track_id)
.filter(Remix.child_track_id.in_(remix_track_ids))
.all()
)
    # Map each parent track owner's user id -> {child (remix) track id: parent track id}
parent_track_users_to_remixes = {}
for track_parent in parent_tracks:
[remix_track_id, remix_parent_id, remix_parent_user_id] = track_parent
        if remix_parent_user_id not in parent_track_users_to_remixes:
parent_track_users_to_remixes[remix_parent_user_id] = {
remix_track_id: remix_parent_id
}
else:
parent_track_users_to_remixes[remix_parent_user_id][
remix_track_id
] = remix_parent_id
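    # A cosign occurs when the user who reposted/favorited the remix is also the
    # owner of the remix's parent track.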
for remix_track in remix_tracks:
user_id = remix_track["user_id"]
track_id = remix_track["item_id"]
if (
user_id in parent_track_users_to_remixes
and track_id in parent_track_users_to_remixes[user_id]
):
remix_notifications.append(
{
const.notification_type: const.notification_type_remix_cosign,
const.notification_blocknumber: remix_track[
const.notification_blocknumber
],
const.notification_timestamp: remix_track[
const.notification_timestamp
],
const.notification_initiator: user_id,
const.notification_metadata: {
const.notification_entity_id: track_id,
const.notification_entity_type: "track",
const.notification_entity_owner_id: remix_track[
"item_owner_id"
],
},
}
)
return remix_notifications
|
[
"def reactions(self):\n return self.__reactions.list()",
"def getNotifications(nodeIdentifier, items):",
"def detailed_reactions(self):\n return list(self._detailed_reactions)",
"def feeds_fanout_replied(action):\n # Fan out notification to parent Comment followers\n for follower in models_actstream.followers(action.action_object.parent_comment):\n if action.actor == follower:\n # If the reply author is the same as the parent comment author\n log.debug('Skipping notification generation for comment owner')\n continue\n log.debug(\n 'Generating notification for user %i about reply %i'\n % (follower.id, action.action_object.id)\n )\n follower.feed_entries.create(action=action)\n # Email notification\n content_name = truncatechars(action.action_object.entity.title, 20)\n content_text = truncatechars(action.action_object.content, 30)\n reply_context = dillo.views.emails.CommentOrReplyContext(\n subject='Your comment has a new reply!',\n own_name=follower.profile.first_name_guess or follower.username,\n own_profile_absolute_url=follower.profile.absolute_url,\n action_author_name=action.actor.profile.first_name_guess or action.actor.username,\n action_author_absolute_url=action.actor.profile.absolute_url,\n content_name=content_name,\n content_absolute_url=action.action_object.absolute_url,\n content_text=content_text,\n ).as_dict\n send_notification_mail(\n f'New reply to \"{content_name}\"', follower, template='reply', context=reply_context,\n )",
"def notifications(self):\r\n from .._impl.notification import Notification\r\n result = []\r\n url = \"%s/community/users/%s/notifications\" % (self._portal.resturl, self._user_id)\r\n params = {\"f\" : \"json\"}\r\n ns = self._portal.con.get(url, params)\r\n if \"notifications\" in ns:\r\n for n in ns[\"notifications\"]:\r\n result.append(Notification(url=\"%s/%s\" % (url, n['id']),\r\n user=self,\r\n data=n,\r\n initialize=False)\r\n )\r\n del n\r\n return result\r\n return result",
"def retrieve_reminders_by_parent_event_id(\n parent_event_id: int, app: Flask\n ) -> List[ReminderModel]:\n # TODO: implement this method\n raise NotImplementedError",
"def collect_response(channel_id, timestamp):\n logger.info(\"Getting reactions\")\n response = client.reactions_get(\n channel=channel_id,\n timestamp=timestamp\n )\n logger.debug(f\"Reactions response{response}\")\n # TODO: need to handle when the message isnt there\n reactions = response['message'].get('reactions', [])\n participants = []\n for reaction in reactions:\n participants += reaction['users']\n participants_set = list(set(participants))\n logger.info(\"Finished getting participants\")\n logger.debug(f\"Participants are{participants_set}\")\n return participants_set",
"def serialized_notifications(self):\n unread_count = self.notifications.unread().count()\n count = settings.NOTIFICATIONS_MAX_COUNT\n notifications = []\n\n if unread_count > count:\n count = unread_count\n\n for notification in self.notifications.prefetch_related(\n \"actor\", \"target\", \"action_object\"\n )[:count]:\n actor = None\n is_comment = False\n\n if hasattr(notification.actor, \"slug\"):\n if \"new string\" in notification.verb:\n actor = {\n \"anchor\": notification.actor.name,\n \"url\": reverse(\n \"pontoon.translate.locale.agnostic\",\n kwargs={\n \"slug\": notification.actor.slug,\n \"part\": \"all-resources\",\n },\n )\n + \"?status=missing,pretranslated\",\n }\n else:\n actor = {\n \"anchor\": notification.actor.name,\n \"url\": reverse(\n \"pontoon.projects.project\",\n kwargs={\"slug\": notification.actor.slug},\n ),\n }\n elif hasattr(notification.actor, \"email\"):\n actor = {\n \"anchor\": notification.actor.name_or_email,\n \"url\": reverse(\n \"pontoon.contributors.contributor.username\",\n kwargs={\"username\": notification.actor.username},\n ),\n }\n\n target = None\n if notification.target:\n t = notification.target\n # New string or Manual notification\n if hasattr(t, \"slug\"):\n target = {\n \"anchor\": t.name,\n \"url\": reverse(\n \"pontoon.projects.project\",\n kwargs={\"slug\": t.slug},\n ),\n }\n\n # Comment notifications\n elif hasattr(t, \"resource\"):\n is_comment = True\n target = {\n \"anchor\": t.resource.project.name,\n \"url\": reverse(\n \"pontoon.translate\",\n kwargs={\n \"locale\": notification.action_object.code,\n \"project\": t.resource.project.slug,\n \"resource\": t.resource.path,\n },\n )\n + f\"?string={t.pk}\",\n }\n\n notifications.append(\n {\n \"id\": notification.id,\n \"level\": notification.level,\n \"unread\": notification.unread,\n \"description\": {\n \"content\": notification.description,\n \"is_comment\": is_comment,\n },\n \"verb\": notification.verb,\n \"date\": notification.timestamp.strftime(\"%b %d, %Y %H:%M\"),\n \"date_iso\": notification.timestamp.isoformat(),\n \"actor\": actor,\n \"target\": target,\n }\n )\n\n return {\n \"has_unread\": unread_count > 0,\n \"notifications\": notifications,\n \"unread_count\": str(self.unread_notifications_display),\n }",
"def get_watched(cls, user):\r\n return cls.objects.filter(id__in=user.observeditem_set.filter(\r\n content_type=ContentType.objects.get_for_model(cls)\r\n ).values_list('object_id', flat=True).query)",
"def get_recruits(self, user, side):\n # use sensors.get_recruit_commenters to get new top-level comments\n # lock stuff down while handling the DB\n self.lock.acquire()\n new_recruits = botIO.recruit_getter(self.cfg,\n self.db,\n self.antenna,\n side)\n self.lock.release()\n self.log.info(\"Retrieved {} new recruits\".format(\n len(new_recruits)))\n\n # handle the recruits\n for recruit in new_recruits:\n self.lock.acquire()\n memory.handle_player_memory(self.db,\n str(recruit.author).lower(),\n side=side,\n recruited=True)\n self.lock.release()\n self.log.info(\"Handled player {} of side {}\".format(\n str(recruit.author), side))\n print(\"Handled player {} of side {}\".format(\n str(recruit.author), side))\n\n # if the bot hasn't already replied, then do so\n if user not in [str(rep.author) for rep in recruit.replies\n if not isinstance(rep,\n praw.objects.MoreComments)]:\n botIO.reply_to_signup(recruit, side, self.cfg)\n self.log.debug(\"Replied to player {}\".format(\n str(recruit.author), side))\n return new_recruits",
"def notificationsWithUID(uid): # @NoSelf",
"def subscribers_for(item_uid):",
"def AllReactions(self):\n rxns = []\n hashes = set()\n for r in self.reactions.all():\n if r.GetHash() not in hashes:\n rxns.append(r)\n hashes.add(r.GetHash())\n return rxns",
"def get_fixed_reactions(self):\n return self.__fixed_reactions",
"def list_notifications(request):\n notifications = Notification.objects.filter(\n receiving_user=request.user)\n data = NotificationModelSerializer(notifications, many=True).data\n return Response(data, status=status.HTTP_200_OK)",
"def get_notifications(self, context):\n module_context.init()\n LOG.info(\"Received RPC GET NOTIFICATIONS \")\n events = self.sc.get_stashed_events()\n notifications = []\n for event in events:\n notification = event.data\n msg = (\"Notification Data: %r\" % notification)\n notifications.append(notification)\n LOG.info(msg)\n return notifications",
"def reliable_recurring_event_notifications(self):\n pass",
"def notify_unreplied():\n\tfor email_account in frappe.get_all(\n\t\t\"Email Account\", \"name\", filters={\"enable_incoming\": 1, \"notify_if_unreplied\": 1}\n\t):\n\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\n\t\tif email_account.use_imap:\n\t\t\tappend_to = [folder.get(\"append_to\") for folder in email_account.imap_folder]\n\t\telse:\n\t\t\tappend_to = email_account.append_to\n\n\t\tif append_to:\n\t\t\t# get open communications younger than x mins, for given doctype\n\t\t\tfor comm in frappe.get_all(\n\t\t\t\t\"Communication\",\n\t\t\t\t\"name\",\n\t\t\t\tfilters=[\n\t\t\t\t\t{\"sent_or_received\": \"Received\"},\n\t\t\t\t\t{\"reference_doctype\": (\"in\", append_to)},\n\t\t\t\t\t{\"unread_notification_sent\": 0},\n\t\t\t\t\t{\"email_account\": email_account.name},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"creation\": (\n\t\t\t\t\t\t\t\"<\",\n\t\t\t\t\t\t\tdatetime.now() - timedelta(seconds=(email_account.unreplied_for_mins or 30) * 60),\n\t\t\t\t\t\t)\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"creation\": (\n\t\t\t\t\t\t\t\">\",\n\t\t\t\t\t\t\tdatetime.now() - timedelta(seconds=(email_account.unreplied_for_mins or 30) * 60 * 3),\n\t\t\t\t\t\t)\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t):\n\t\t\t\tcomm = frappe.get_doc(\"Communication\", comm.name)\n\n\t\t\t\tif frappe.db.get_value(comm.reference_doctype, comm.reference_name, \"status\") == \"Open\":\n\t\t\t\t\t# if status is still open\n\t\t\t\t\tfrappe.sendmail(\n\t\t\t\t\t\trecipients=email_account.get_unreplied_notification_emails(),\n\t\t\t\t\t\tcontent=comm.content,\n\t\t\t\t\t\tsubject=comm.subject,\n\t\t\t\t\t\tdoctype=comm.reference_doctype,\n\t\t\t\t\t\tname=comm.reference_name,\n\t\t\t\t\t)\n\n\t\t\t\t# update flag\n\t\t\t\tcomm.db_set(\"unread_notification_sent\", 1)",
"def notifications():\n\n db = get_db_read_replica()\n min_block_number = request.args.get(\"min_block_number\", type=int)\n max_block_number = request.args.get(\"max_block_number\", type=int)\n\n track_ids_to_owner = []\n try:\n track_ids_str_list = request.args.getlist(\"track_id\")\n track_ids_to_owner = [int(y) for y in track_ids_str_list]\n except Exception as e:\n logger.error(f\"Failed to retrieve track list {e}\")\n\n # Max block number is not explicitly required (yet)\n if not min_block_number and min_block_number != 0:\n return api_helpers.error_response({\"msg\": \"Missing min block number\"}, 400)\n\n if not max_block_number:\n max_block_number = min_block_number + max_block_diff\n elif (max_block_number - min_block_number) > max_block_diff:\n max_block_number = min_block_number + max_block_diff\n\n with db.scoped_session() as session:\n current_block_query = session.query(Block).filter_by(is_current=True)\n current_block_query_results = current_block_query.all()\n current_block = current_block_query_results[0]\n current_max_block_num = current_block.number\n if current_max_block_num < max_block_number:\n max_block_number = current_max_block_num\n\n notification_metadata = {\n \"min_block_number\": min_block_number,\n \"max_block_number\": max_block_number,\n }\n\n # Retrieve milestones statistics\n milestone_info = {}\n\n # Cache owner info for network entities and pass in w/results\n owner_info = {const.tracks: {}, const.albums: {}, const.playlists: {}}\n\n # List of notifications generated from current protocol state\n notifications_unsorted = []\n with db.scoped_session() as session:\n #\n # Query relevant follow information\n #\n follow_query = session.query(Follow)\n\n # Impose min block number restriction\n follow_query = follow_query.filter(\n Follow.is_current == True,\n Follow.is_delete == False,\n Follow.blocknumber > min_block_number,\n Follow.blocknumber <= max_block_number,\n )\n\n follow_results = follow_query.all()\n # Used to retrieve follower counts for this window\n followed_users = []\n # Represents all follow notifications\n follow_notifications = []\n for entry in follow_results:\n follow_notif = {\n const.notification_type: const.notification_type_follow,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.follower_user_id,\n const.notification_metadata: {\n const.notification_follower_id: entry.follower_user_id,\n const.notification_followee_id: entry.followee_user_id,\n },\n }\n follow_notifications.append(follow_notif)\n # Add every user who gained a new follower\n followed_users.append(entry.followee_user_id)\n\n # Query count for any user w/new followers\n follower_counts = get_follower_count_dict(\n session, followed_users, max_block_number\n )\n milestone_info[\"follower_counts\"] = follower_counts\n\n notifications_unsorted.extend(follow_notifications)\n\n #\n # Query relevant favorite information\n #\n favorites_query = session.query(Save)\n favorites_query = favorites_query.filter(\n Save.is_current == True,\n Save.is_delete == False,\n Save.blocknumber > min_block_number,\n Save.blocknumber <= max_block_number,\n )\n favorite_results = favorites_query.all()\n\n # ID lists to query count aggregates\n favorited_track_ids = []\n favorited_album_ids = []\n favorited_playlist_ids = []\n\n # List of favorite notifications\n favorite_notifications = []\n favorite_remix_tracks = []\n\n for entry in favorite_results:\n favorite_notif = {\n const.notification_type: 
const.notification_type_favorite,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.user_id,\n }\n save_type = entry.save_type\n save_item_id = entry.save_item_id\n metadata = {\n const.notification_entity_type: save_type,\n const.notification_entity_id: save_item_id,\n }\n\n # NOTE if deleted, the favorite can still exist\n # TODO: Can we aggregate all owner queries and perform at once...?\n if save_type == SaveType.track:\n owner_id = get_owner_id(session, \"track\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_track_ids.append(save_item_id)\n owner_info[const.tracks][save_item_id] = owner_id\n\n favorite_remix_tracks.append(\n {\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n \"user_id\": entry.user_id,\n \"item_owner_id\": owner_id,\n \"item_id\": save_item_id,\n }\n )\n\n elif save_type == SaveType.album:\n owner_id = get_owner_id(session, \"album\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_album_ids.append(save_item_id)\n owner_info[const.albums][save_item_id] = owner_id\n\n elif save_type == SaveType.playlist:\n owner_id = get_owner_id(session, \"playlist\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_playlist_ids.append(save_item_id)\n owner_info[const.playlists][save_item_id] = owner_id\n\n favorite_notif[const.notification_metadata] = metadata\n favorite_notifications.append(favorite_notif)\n notifications_unsorted.extend(favorite_notifications)\n\n track_favorite_dict = {}\n album_favorite_dict = {}\n playlist_favorite_dict = {}\n\n if favorited_track_ids:\n track_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_track_ids,\n [SaveType.track],\n max_block_number,\n )\n track_favorite_dict = dict(track_favorite_counts)\n\n favorite_remix_notifications = get_cosign_remix_notifications(\n session, max_block_number, favorite_remix_tracks\n )\n notifications_unsorted.extend(favorite_remix_notifications)\n\n if favorited_album_ids:\n album_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_album_ids,\n [SaveType.album],\n max_block_number,\n )\n album_favorite_dict = dict(album_favorite_counts)\n\n if favorited_playlist_ids:\n playlist_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_playlist_ids,\n [SaveType.playlist],\n max_block_number,\n )\n playlist_favorite_dict = dict(playlist_favorite_counts)\n\n milestone_info[const.notification_favorite_counts] = {}\n milestone_info[const.notification_favorite_counts][\n const.tracks\n ] = track_favorite_dict\n milestone_info[const.notification_favorite_counts][\n const.albums\n ] = album_favorite_dict\n milestone_info[const.notification_favorite_counts][\n const.playlists\n ] = playlist_favorite_dict\n\n #\n # Query relevant repost information\n #\n repost_query = session.query(Repost)\n repost_query = repost_query.filter(\n Repost.is_current == True,\n Repost.is_delete == False,\n Repost.blocknumber > min_block_number,\n Repost.blocknumber <= max_block_number,\n )\n repost_results = repost_query.all()\n\n # ID lists to query counts\n reposted_track_ids = []\n reposted_album_ids = []\n reposted_playlist_ids = []\n\n # List of repost notifications\n repost_notifications = []\n\n # List of repost notifications\n 
repost_remix_notifications = []\n repost_remix_tracks = []\n\n for entry in repost_results:\n repost_notif = {\n const.notification_type: const.notification_type_repost,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.user_id,\n }\n repost_type = entry.repost_type\n repost_item_id = entry.repost_item_id\n metadata = {\n const.notification_entity_type: repost_type,\n const.notification_entity_id: repost_item_id,\n }\n if repost_type == RepostType.track:\n owner_id = get_owner_id(session, \"track\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_track_ids.append(repost_item_id)\n owner_info[const.tracks][repost_item_id] = owner_id\n repost_remix_tracks.append(\n {\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n \"user_id\": entry.user_id,\n \"item_owner_id\": owner_id,\n \"item_id\": repost_item_id,\n }\n )\n\n elif repost_type == RepostType.album:\n owner_id = get_owner_id(session, \"album\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_album_ids.append(repost_item_id)\n owner_info[const.albums][repost_item_id] = owner_id\n\n elif repost_type == RepostType.playlist:\n owner_id = get_owner_id(session, \"playlist\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_playlist_ids.append(repost_item_id)\n owner_info[const.playlists][repost_item_id] = owner_id\n\n repost_notif[const.notification_metadata] = metadata\n repost_notifications.append(repost_notif)\n\n # Append repost notifications\n notifications_unsorted.extend(repost_notifications)\n\n track_repost_count_dict = {}\n album_repost_count_dict = {}\n playlist_repost_count_dict = {}\n\n # Aggregate repost counts for relevant fields\n # Used to notify users of entity-specific milestones\n if reposted_track_ids:\n track_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_track_ids,\n [RepostType.track],\n max_block_number,\n )\n track_repost_count_dict = dict(track_repost_counts)\n\n repost_remix_notifications = get_cosign_remix_notifications(\n session, max_block_number, repost_remix_tracks\n )\n notifications_unsorted.extend(repost_remix_notifications)\n\n if reposted_album_ids:\n album_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_album_ids,\n [RepostType.album],\n max_block_number,\n )\n album_repost_count_dict = dict(album_repost_counts)\n\n if reposted_playlist_ids:\n playlist_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_playlist_ids,\n [RepostType.playlist],\n max_block_number,\n )\n playlist_repost_count_dict = dict(playlist_repost_counts)\n\n milestone_info[const.notification_repost_counts] = {}\n milestone_info[const.notification_repost_counts][\n const.tracks\n ] = track_repost_count_dict\n milestone_info[const.notification_repost_counts][\n const.albums\n ] = album_repost_count_dict\n milestone_info[const.notification_repost_counts][\n const.playlists\n ] = playlist_repost_count_dict\n\n # Query relevant created entity notification - tracks/albums/playlists\n created_notifications = []\n\n #\n # Query relevant created tracks for remix information\n #\n remix_created_notifications = []\n\n # Aggregate track notifs\n tracks_query = session.query(Track)\n # TODO: Is it valid to use Track.is_current here? 
Might not be the right info...\n tracks_query = tracks_query.filter(\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.stem_of == None,\n Track.blocknumber > min_block_number,\n Track.blocknumber <= max_block_number,\n )\n tracks_query = tracks_query.filter(Track.created_at == Track.updated_at)\n track_results = tracks_query.all()\n for entry in track_results:\n track_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n },\n }\n created_notifications.append(track_notif)\n\n if entry.remix_of:\n # Add notification to remix track owner\n parent_remix_tracks = [\n t[\"parent_track_id\"] for t in entry.remix_of[\"tracks\"]\n ]\n remix_track_parents = (\n session.query(Track.owner_id, Track.track_id)\n .filter(\n Track.track_id.in_(parent_remix_tracks),\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.is_current == True,\n )\n .all()\n )\n for remix_track_parent in remix_track_parents:\n [\n remix_track_parent_owner,\n remix_track_parent_id,\n ] = remix_track_parent\n remix_notif = {\n const.notification_type: const.notification_type_remix_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n const.notification_remix_parent_track_user_id: remix_track_parent_owner,\n const.notification_remix_parent_track_id: remix_track_parent_id,\n },\n }\n remix_created_notifications.append(remix_notif)\n\n # Handle track update notifications\n # TODO: Consider switching blocknumber for updated at?\n updated_tracks_query = session.query(Track)\n updated_tracks_query = updated_tracks_query.filter(\n Track.is_unlisted == False,\n Track.stem_of == None,\n Track.created_at != Track.updated_at,\n Track.blocknumber > min_block_number,\n Track.blocknumber <= max_block_number,\n )\n updated_tracks = updated_tracks_query.all()\n for entry in updated_tracks:\n prev_entry_query = (\n session.query(Track)\n .filter(\n Track.track_id == entry.track_id,\n Track.blocknumber < entry.blocknumber,\n )\n .order_by(desc(Track.blocknumber))\n )\n # Previous unlisted entry indicates transition to public, triggering a notification\n prev_entry = prev_entry_query.first()\n\n # Tracks that were unlisted and turned to public\n if prev_entry.is_unlisted == True:\n track_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n },\n }\n created_notifications.append(track_notif)\n\n # Tracks that were not remixes and turned into remixes\n if not prev_entry.remix_of and entry.remix_of:\n # Add 
notification to remix track owner\n parent_remix_tracks = [\n t[\"parent_track_id\"] for t in entry.remix_of[\"tracks\"]\n ]\n remix_track_parents = (\n session.query(Track.owner_id, Track.track_id)\n .filter(\n Track.track_id.in_(parent_remix_tracks),\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.is_current == True,\n )\n .all()\n )\n for remix_track_parent in remix_track_parents:\n [\n remix_track_parent_owner,\n remix_track_parent_id,\n ] = remix_track_parent\n remix_notif = {\n const.notification_type: const.notification_type_remix_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n const.notification_remix_parent_track_user_id: remix_track_parent_owner,\n const.notification_remix_parent_track_id: remix_track_parent_id,\n },\n }\n remix_created_notifications.append(remix_notif)\n\n notifications_unsorted.extend(remix_created_notifications)\n\n # Aggregate playlist/album notifs\n collection_query = session.query(Playlist)\n # TODO: Is it valid to use is_current here? Might not be the right info...\n collection_query = collection_query.filter(\n Playlist.is_delete == False,\n Playlist.is_private == False,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n collection_query = collection_query.filter(\n Playlist.created_at == Playlist.updated_at\n )\n collection_results = collection_query.all()\n\n for entry in collection_results:\n collection_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.playlist_owner_id,\n }\n metadata = {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_owner_id: entry.playlist_owner_id,\n const.notification_collection_content: entry.playlist_contents,\n }\n\n if entry.is_album:\n metadata[const.notification_entity_type] = \"album\"\n else:\n metadata[const.notification_entity_type] = \"playlist\"\n collection_notif[const.notification_metadata] = metadata\n created_notifications.append(collection_notif)\n\n # Playlists that were private and turned to public aka 'published'\n # TODO: Consider switching blocknumber for updated at?\n publish_playlists_query = session.query(Playlist)\n publish_playlists_query = publish_playlists_query.filter(\n Playlist.is_private == False,\n Playlist.created_at != Playlist.updated_at,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n publish_playlist_results = publish_playlists_query.all()\n for entry in publish_playlist_results:\n prev_entry_query = (\n session.query(Playlist)\n .filter(\n Playlist.playlist_id == entry.playlist_id,\n Playlist.blocknumber < entry.blocknumber,\n )\n .order_by(desc(Playlist.blocknumber))\n )\n # Previous private entry indicates transition to public, triggering a notification\n prev_entry = prev_entry_query.first()\n if prev_entry.is_private == True:\n publish_playlist_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: 
entry.playlist_owner_id,\n }\n metadata = {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_owner_id: entry.playlist_owner_id,\n const.notification_collection_content: entry.playlist_contents,\n const.notification_entity_type: \"playlist\",\n }\n publish_playlist_notif[const.notification_metadata] = metadata\n created_notifications.append(publish_playlist_notif)\n\n notifications_unsorted.extend(created_notifications)\n\n # Get additional owner info as requested for listen counts\n tracks_owner_query = session.query(Track).filter(\n Track.is_current == True, Track.track_id.in_(track_ids_to_owner)\n )\n track_owner_results = tracks_owner_query.all()\n for entry in track_owner_results:\n owner = entry.owner_id\n track_id = entry.track_id\n owner_info[const.tracks][track_id] = owner\n\n # Get playlist updates\n today = date.today()\n thirty_days_ago = today - timedelta(days=30)\n thirty_days_ago_time = datetime(\n thirty_days_ago.year, thirty_days_ago.month, thirty_days_ago.day, 0, 0, 0\n )\n playlist_update_query = session.query(Playlist)\n playlist_update_query = playlist_update_query.filter(\n Playlist.is_current == True,\n Playlist.is_delete == False,\n Playlist.last_added_to >= thirty_days_ago_time,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n\n playlist_update_results = playlist_update_query.all()\n\n # Represents all playlist update notifications\n playlist_update_notifications = []\n playlist_update_notifs_by_playlist_id = {}\n for entry in playlist_update_results:\n playlist_update_notifs_by_playlist_id[entry.playlist_id] = {\n const.notification_type: const.notification_type_playlist_update,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.playlist_owner_id,\n const.notification_metadata: {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_type: \"playlist\",\n const.notification_playlist_update_timestamp: entry.last_added_to,\n },\n }\n\n # get all favorited playlists\n # playlists may have been favorited outside the blocknumber bounds\n # e.g. before the min_block_number\n playlist_favorites_query = session.query(Save)\n playlist_favorites_query = playlist_favorites_query.filter(\n Save.is_current == True,\n Save.is_delete == False,\n Save.save_type == SaveType.playlist,\n )\n playlist_favorites_results = playlist_favorites_query.all()\n\n # dictionary of playlist id => users that favorited said playlist\n # e.g. { playlist1: [user1, user2, ...], ... 
}\n # we need this dictionary to know which users need to be notified of a playlist update\n users_that_favorited_playlists_dict = ft.reduce(\n lambda accumulator, current: accumulator.update(\n {\n current.save_item_id: accumulator[current.save_item_id]\n + [current.user_id]\n if current.save_item_id in accumulator\n else [current.user_id]\n }\n )\n or accumulator,\n playlist_favorites_results,\n {},\n )\n\n for playlist_id in users_that_favorited_playlists_dict:\n if playlist_id not in playlist_update_notifs_by_playlist_id:\n continue\n playlist_update_notif = playlist_update_notifs_by_playlist_id[playlist_id]\n playlist_update_notif[const.notification_metadata].update(\n {\n const.notification_playlist_update_users: users_that_favorited_playlists_dict[\n playlist_id\n ]\n }\n )\n playlist_update_notifications.append(playlist_update_notif)\n\n notifications_unsorted.extend(playlist_update_notifications)\n\n # Final sort - TODO: can we sort by timestamp?\n sorted_notifications = sorted(\n notifications_unsorted,\n key=lambda i: i[const.notification_blocknumber],\n reverse=False,\n )\n\n return api_helpers.success_response(\n {\n \"notifications\": sorted_notifications,\n \"info\": notification_metadata,\n \"milestones\": milestone_info,\n \"owners\": owner_info,\n }\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetches the notification events that occurred between the given block numbers
|
def notifications():
db = get_db_read_replica()
min_block_number = request.args.get("min_block_number", type=int)
max_block_number = request.args.get("max_block_number", type=int)
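    # Optional track_id params let the caller request owner info for specific
    # tracks; results are returned under "owners" (used for listen counts).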
track_ids_to_owner = []
try:
track_ids_str_list = request.args.getlist("track_id")
track_ids_to_owner = [int(y) for y in track_ids_str_list]
except Exception as e:
logger.error(f"Failed to retrieve track list {e}")
# Max block number is not explicitly required (yet)
if not min_block_number and min_block_number != 0:
return api_helpers.error_response({"msg": "Missing min block number"}, 400)
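    # Clamp the queried window so at most max_block_diff blocks are scanned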
if not max_block_number:
max_block_number = min_block_number + max_block_diff
elif (max_block_number - min_block_number) > max_block_diff:
max_block_number = min_block_number + max_block_diff
with db.scoped_session() as session:
current_block_query = session.query(Block).filter_by(is_current=True)
current_block_query_results = current_block_query.all()
current_block = current_block_query_results[0]
current_max_block_num = current_block.number
if current_max_block_num < max_block_number:
max_block_number = current_max_block_num
notification_metadata = {
"min_block_number": min_block_number,
"max_block_number": max_block_number,
}
# Retrieve milestones statistics
milestone_info = {}
# Cache owner info for network entities and pass in w/results
owner_info = {const.tracks: {}, const.albums: {}, const.playlists: {}}
# List of notifications generated from current protocol state
notifications_unsorted = []
with db.scoped_session() as session:
#
# Query relevant follow information
#
follow_query = session.query(Follow)
# Impose min block number restriction
follow_query = follow_query.filter(
Follow.is_current == True,
Follow.is_delete == False,
Follow.blocknumber > min_block_number,
Follow.blocknumber <= max_block_number,
)
follow_results = follow_query.all()
# Used to retrieve follower counts for this window
followed_users = []
# Represents all follow notifications
follow_notifications = []
for entry in follow_results:
follow_notif = {
const.notification_type: const.notification_type_follow,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.follower_user_id,
const.notification_metadata: {
const.notification_follower_id: entry.follower_user_id,
const.notification_followee_id: entry.followee_user_id,
},
}
follow_notifications.append(follow_notif)
# Add every user who gained a new follower
followed_users.append(entry.followee_user_id)
# Query count for any user w/new followers
follower_counts = get_follower_count_dict(
session, followed_users, max_block_number
)
milestone_info["follower_counts"] = follower_counts
notifications_unsorted.extend(follow_notifications)
#
# Query relevant favorite information
#
favorites_query = session.query(Save)
favorites_query = favorites_query.filter(
Save.is_current == True,
Save.is_delete == False,
Save.blocknumber > min_block_number,
Save.blocknumber <= max_block_number,
)
favorite_results = favorites_query.all()
# ID lists to query count aggregates
favorited_track_ids = []
favorited_album_ids = []
favorited_playlist_ids = []
# List of favorite notifications
favorite_notifications = []
favorite_remix_tracks = []
for entry in favorite_results:
favorite_notif = {
const.notification_type: const.notification_type_favorite,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.user_id,
}
save_type = entry.save_type
save_item_id = entry.save_item_id
metadata = {
const.notification_entity_type: save_type,
const.notification_entity_id: save_item_id,
}
# NOTE if deleted, the favorite can still exist
# TODO: Can we aggregate all owner queries and perform at once...?
if save_type == SaveType.track:
owner_id = get_owner_id(session, "track", save_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
favorited_track_ids.append(save_item_id)
owner_info[const.tracks][save_item_id] = owner_id
favorite_remix_tracks.append(
{
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
"user_id": entry.user_id,
"item_owner_id": owner_id,
"item_id": save_item_id,
}
)
elif save_type == SaveType.album:
owner_id = get_owner_id(session, "album", save_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
favorited_album_ids.append(save_item_id)
owner_info[const.albums][save_item_id] = owner_id
elif save_type == SaveType.playlist:
owner_id = get_owner_id(session, "playlist", save_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
favorited_playlist_ids.append(save_item_id)
owner_info[const.playlists][save_item_id] = owner_id
favorite_notif[const.notification_metadata] = metadata
favorite_notifications.append(favorite_notif)
notifications_unsorted.extend(favorite_notifications)
track_favorite_dict = {}
album_favorite_dict = {}
playlist_favorite_dict = {}
if favorited_track_ids:
track_favorite_counts = get_save_counts(
session,
False,
False,
favorited_track_ids,
[SaveType.track],
max_block_number,
)
track_favorite_dict = dict(track_favorite_counts)
favorite_remix_notifications = get_cosign_remix_notifications(
session, max_block_number, favorite_remix_tracks
)
notifications_unsorted.extend(favorite_remix_notifications)
if favorited_album_ids:
album_favorite_counts = get_save_counts(
session,
False,
False,
favorited_album_ids,
[SaveType.album],
max_block_number,
)
album_favorite_dict = dict(album_favorite_counts)
if favorited_playlist_ids:
playlist_favorite_counts = get_save_counts(
session,
False,
False,
favorited_playlist_ids,
[SaveType.playlist],
max_block_number,
)
playlist_favorite_dict = dict(playlist_favorite_counts)
milestone_info[const.notification_favorite_counts] = {}
milestone_info[const.notification_favorite_counts][
const.tracks
] = track_favorite_dict
milestone_info[const.notification_favorite_counts][
const.albums
] = album_favorite_dict
milestone_info[const.notification_favorite_counts][
const.playlists
] = playlist_favorite_dict
#
# Query relevant repost information
#
repost_query = session.query(Repost)
repost_query = repost_query.filter(
Repost.is_current == True,
Repost.is_delete == False,
Repost.blocknumber > min_block_number,
Repost.blocknumber <= max_block_number,
)
repost_results = repost_query.all()
# ID lists to query counts
reposted_track_ids = []
reposted_album_ids = []
reposted_playlist_ids = []
# List of repost notifications
repost_notifications = []
        # List of repost remix (cosign) notifications
repost_remix_notifications = []
repost_remix_tracks = []
for entry in repost_results:
repost_notif = {
const.notification_type: const.notification_type_repost,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.user_id,
}
repost_type = entry.repost_type
repost_item_id = entry.repost_item_id
metadata = {
const.notification_entity_type: repost_type,
const.notification_entity_id: repost_item_id,
}
if repost_type == RepostType.track:
owner_id = get_owner_id(session, "track", repost_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
reposted_track_ids.append(repost_item_id)
owner_info[const.tracks][repost_item_id] = owner_id
repost_remix_tracks.append(
{
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
"user_id": entry.user_id,
"item_owner_id": owner_id,
"item_id": repost_item_id,
}
)
elif repost_type == RepostType.album:
owner_id = get_owner_id(session, "album", repost_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
reposted_album_ids.append(repost_item_id)
owner_info[const.albums][repost_item_id] = owner_id
elif repost_type == RepostType.playlist:
owner_id = get_owner_id(session, "playlist", repost_item_id)
if not owner_id:
continue
metadata[const.notification_entity_owner_id] = owner_id
reposted_playlist_ids.append(repost_item_id)
owner_info[const.playlists][repost_item_id] = owner_id
repost_notif[const.notification_metadata] = metadata
repost_notifications.append(repost_notif)
# Append repost notifications
notifications_unsorted.extend(repost_notifications)
track_repost_count_dict = {}
album_repost_count_dict = {}
playlist_repost_count_dict = {}
# Aggregate repost counts for relevant fields
# Used to notify users of entity-specific milestones
if reposted_track_ids:
track_repost_counts = get_repost_counts(
session,
False,
False,
reposted_track_ids,
[RepostType.track],
max_block_number,
)
track_repost_count_dict = dict(track_repost_counts)
repost_remix_notifications = get_cosign_remix_notifications(
session, max_block_number, repost_remix_tracks
)
notifications_unsorted.extend(repost_remix_notifications)
if reposted_album_ids:
album_repost_counts = get_repost_counts(
session,
False,
False,
reposted_album_ids,
[RepostType.album],
max_block_number,
)
album_repost_count_dict = dict(album_repost_counts)
if reposted_playlist_ids:
playlist_repost_counts = get_repost_counts(
session,
False,
False,
reposted_playlist_ids,
[RepostType.playlist],
max_block_number,
)
playlist_repost_count_dict = dict(playlist_repost_counts)
milestone_info[const.notification_repost_counts] = {}
milestone_info[const.notification_repost_counts][
const.tracks
] = track_repost_count_dict
milestone_info[const.notification_repost_counts][
const.albums
] = album_repost_count_dict
milestone_info[const.notification_repost_counts][
const.playlists
] = playlist_repost_count_dict
# Query relevant created entity notification - tracks/albums/playlists
created_notifications = []
#
# Query relevant created tracks for remix information
#
remix_created_notifications = []
# Aggregate track notifs
tracks_query = session.query(Track)
# TODO: Is it valid to use Track.is_current here? Might not be the right info...
tracks_query = tracks_query.filter(
Track.is_unlisted == False,
Track.is_delete == False,
Track.stem_of == None,
Track.blocknumber > min_block_number,
Track.blocknumber <= max_block_number,
)
tracks_query = tracks_query.filter(Track.created_at == Track.updated_at)
track_results = tracks_query.all()
for entry in track_results:
track_notif = {
const.notification_type: const.notification_type_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.owner_id,
# TODO: is entity owner id necessary for tracks?
const.notification_metadata: {
const.notification_entity_type: "track",
const.notification_entity_id: entry.track_id,
const.notification_entity_owner_id: entry.owner_id,
},
}
created_notifications.append(track_notif)
if entry.remix_of:
# Add notification to remix track owner
parent_remix_tracks = [
t["parent_track_id"] for t in entry.remix_of["tracks"]
]
remix_track_parents = (
session.query(Track.owner_id, Track.track_id)
.filter(
Track.track_id.in_(parent_remix_tracks),
Track.is_unlisted == False,
Track.is_delete == False,
Track.is_current == True,
)
.all()
)
for remix_track_parent in remix_track_parents:
[
remix_track_parent_owner,
remix_track_parent_id,
] = remix_track_parent
remix_notif = {
const.notification_type: const.notification_type_remix_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.owner_id,
# TODO: is entity owner id necessary for tracks?
const.notification_metadata: {
const.notification_entity_type: "track",
const.notification_entity_id: entry.track_id,
const.notification_entity_owner_id: entry.owner_id,
const.notification_remix_parent_track_user_id: remix_track_parent_owner,
const.notification_remix_parent_track_id: remix_track_parent_id,
},
}
remix_created_notifications.append(remix_notif)
# Handle track update notifications
# TODO: Consider switching blocknumber for updated at?
updated_tracks_query = session.query(Track)
updated_tracks_query = updated_tracks_query.filter(
Track.is_unlisted == False,
Track.stem_of == None,
Track.created_at != Track.updated_at,
Track.blocknumber > min_block_number,
Track.blocknumber <= max_block_number,
)
updated_tracks = updated_tracks_query.all()
for entry in updated_tracks:
prev_entry_query = (
session.query(Track)
.filter(
Track.track_id == entry.track_id,
Track.blocknumber < entry.blocknumber,
)
.order_by(desc(Track.blocknumber))
)
# Previous unlisted entry indicates transition to public, triggering a notification
prev_entry = prev_entry_query.first()
# Tracks that were unlisted and turned to public
if prev_entry.is_unlisted == True:
track_notif = {
const.notification_type: const.notification_type_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.owner_id,
# TODO: is entity owner id necessary for tracks?
const.notification_metadata: {
const.notification_entity_type: "track",
const.notification_entity_id: entry.track_id,
const.notification_entity_owner_id: entry.owner_id,
},
}
created_notifications.append(track_notif)
# Tracks that were not remixes and turned into remixes
if not prev_entry.remix_of and entry.remix_of:
# Add notification to remix track owner
parent_remix_tracks = [
t["parent_track_id"] for t in entry.remix_of["tracks"]
]
remix_track_parents = (
session.query(Track.owner_id, Track.track_id)
.filter(
Track.track_id.in_(parent_remix_tracks),
Track.is_unlisted == False,
Track.is_delete == False,
Track.is_current == True,
)
.all()
)
for remix_track_parent in remix_track_parents:
[
remix_track_parent_owner,
remix_track_parent_id,
] = remix_track_parent
remix_notif = {
const.notification_type: const.notification_type_remix_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.owner_id,
# TODO: is entity owner id necessary for tracks?
const.notification_metadata: {
const.notification_entity_type: "track",
const.notification_entity_id: entry.track_id,
const.notification_entity_owner_id: entry.owner_id,
const.notification_remix_parent_track_user_id: remix_track_parent_owner,
const.notification_remix_parent_track_id: remix_track_parent_id,
},
}
remix_created_notifications.append(remix_notif)
notifications_unsorted.extend(remix_created_notifications)
# Aggregate playlist/album notifs
collection_query = session.query(Playlist)
# TODO: Is it valid to use is_current here? Might not be the right info...
collection_query = collection_query.filter(
Playlist.is_delete == False,
Playlist.is_private == False,
Playlist.blocknumber > min_block_number,
Playlist.blocknumber <= max_block_number,
)
collection_query = collection_query.filter(
Playlist.created_at == Playlist.updated_at
)
collection_results = collection_query.all()
for entry in collection_results:
collection_notif = {
const.notification_type: const.notification_type_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.playlist_owner_id,
}
metadata = {
const.notification_entity_id: entry.playlist_id,
const.notification_entity_owner_id: entry.playlist_owner_id,
const.notification_collection_content: entry.playlist_contents,
}
if entry.is_album:
metadata[const.notification_entity_type] = "album"
else:
metadata[const.notification_entity_type] = "playlist"
collection_notif[const.notification_metadata] = metadata
created_notifications.append(collection_notif)
# Playlists that were private and turned to public aka 'published'
# TODO: Consider switching blocknumber for updated at?
publish_playlists_query = session.query(Playlist)
publish_playlists_query = publish_playlists_query.filter(
Playlist.is_private == False,
Playlist.created_at != Playlist.updated_at,
Playlist.blocknumber > min_block_number,
Playlist.blocknumber <= max_block_number,
)
publish_playlist_results = publish_playlists_query.all()
for entry in publish_playlist_results:
prev_entry_query = (
session.query(Playlist)
.filter(
Playlist.playlist_id == entry.playlist_id,
Playlist.blocknumber < entry.blocknumber,
)
.order_by(desc(Playlist.blocknumber))
)
# Previous private entry indicates transition to public, triggering a notification
prev_entry = prev_entry_query.first()
if prev_entry.is_private == True:
publish_playlist_notif = {
const.notification_type: const.notification_type_create,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.playlist_owner_id,
}
metadata = {
const.notification_entity_id: entry.playlist_id,
const.notification_entity_owner_id: entry.playlist_owner_id,
const.notification_collection_content: entry.playlist_contents,
const.notification_entity_type: "playlist",
}
publish_playlist_notif[const.notification_metadata] = metadata
created_notifications.append(publish_playlist_notif)
notifications_unsorted.extend(created_notifications)
# Get additional owner info as requested for listen counts
tracks_owner_query = session.query(Track).filter(
Track.is_current == True, Track.track_id.in_(track_ids_to_owner)
)
track_owner_results = tracks_owner_query.all()
for entry in track_owner_results:
owner = entry.owner_id
track_id = entry.track_id
owner_info[const.tracks][track_id] = owner
# Get playlist updates
today = date.today()
thirty_days_ago = today - timedelta(days=30)
thirty_days_ago_time = datetime(
thirty_days_ago.year, thirty_days_ago.month, thirty_days_ago.day, 0, 0, 0
)
playlist_update_query = session.query(Playlist)
playlist_update_query = playlist_update_query.filter(
Playlist.is_current == True,
Playlist.is_delete == False,
Playlist.last_added_to >= thirty_days_ago_time,
Playlist.blocknumber > min_block_number,
Playlist.blocknumber <= max_block_number,
)
playlist_update_results = playlist_update_query.all()
# Represents all playlist update notifications
playlist_update_notifications = []
playlist_update_notifs_by_playlist_id = {}
for entry in playlist_update_results:
playlist_update_notifs_by_playlist_id[entry.playlist_id] = {
const.notification_type: const.notification_type_playlist_update,
const.notification_blocknumber: entry.blocknumber,
const.notification_timestamp: entry.created_at,
const.notification_initiator: entry.playlist_owner_id,
const.notification_metadata: {
const.notification_entity_id: entry.playlist_id,
const.notification_entity_type: "playlist",
const.notification_playlist_update_timestamp: entry.last_added_to,
},
}
# get all favorited playlists
# playlists may have been favorited outside the blocknumber bounds
# e.g. before the min_block_number
playlist_favorites_query = session.query(Save)
playlist_favorites_query = playlist_favorites_query.filter(
Save.is_current == True,
Save.is_delete == False,
Save.save_type == SaveType.playlist,
)
playlist_favorites_results = playlist_favorites_query.all()
# dictionary of playlist id => users that favorited said playlist
# e.g. { playlist1: [user1, user2, ...], ... }
# we need this dictionary to know which users need to be notified of a playlist update
users_that_favorited_playlists_dict = ft.reduce(
lambda accumulator, current: accumulator.update(
{
current.save_item_id: accumulator[current.save_item_id]
+ [current.user_id]
if current.save_item_id in accumulator
else [current.user_id]
}
)
or accumulator,
playlist_favorites_results,
{},
)
for playlist_id in users_that_favorited_playlists_dict:
if playlist_id not in playlist_update_notifs_by_playlist_id:
continue
playlist_update_notif = playlist_update_notifs_by_playlist_id[playlist_id]
playlist_update_notif[const.notification_metadata].update(
{
const.notification_playlist_update_users: users_that_favorited_playlists_dict[
playlist_id
]
}
)
playlist_update_notifications.append(playlist_update_notif)
notifications_unsorted.extend(playlist_update_notifications)
# Final sort - TODO: can we sort by timestamp?
sorted_notifications = sorted(
notifications_unsorted,
key=lambda i: i[const.notification_blocknumber],
reverse=False,
)
return api_helpers.success_response(
{
"notifications": sorted_notifications,
"info": notification_metadata,
"milestones": milestone_info,
"owners": owner_info,
}
)
|
[
"def get_notifications(self, context):\n module_context.init()\n LOG.info(\"Received RPC GET NOTIFICATIONS \")\n events = self.sc.get_stashed_events()\n notifications = []\n for event in events:\n notification = event.data\n msg = (\"Notification Data: %r\" % notification)\n notifications.append(notification)\n LOG.info(msg)\n return notifications",
"def get_internal_transaction_by_block_range(self, min_block, max_block):\n result_list = []\n for i in range(min_block, max_block):\n result_list.extend(self.get_internal_transaction_by_block(i))\n print(\"block \", i, \" finished.\\n\")\n time.sleep(60)\n return result_list",
"def getNotifications(nodeIdentifier, items):",
"def poll_bitcoin(self):\n # Removed expired subscriptions before generating callbacks\n self._remove_expired_subscriptions()\n\n logger.debug(\"BLOCK: {}\".format(self._monitor.current_block))\n #\n transactions = self._monitor.get_confirmed()\n\n callbacks = []\n for tran in transactions:\n callbacks.extend(self._transaction_to_callbacks(tran))\n return callbacks",
"async def async_iter_latest_block_ranges(\n w3: \"Web3\",\n from_block: BlockNumber,\n to_block: Optional[Union[BlockNumber, LatestBlockParam]] = None,\n) -> AsyncIterable[Tuple[Optional[BlockNumber], Optional[BlockNumber]]]:\n latest_block_iterator = async_iter_latest_block(w3, to_block)\n async for latest_block in latest_block_iterator:\n if latest_block is None:\n yield (None, None)\n elif from_block > latest_block:\n yield (None, None)\n else:\n yield (from_block, latest_block)\n from_block = BlockNumber(latest_block + 1)",
"def test_get_inbox_replier_events(self):\n pass",
"def get_pulled_events(self):\n # create cursor\n conn = mysql.connector.connect(**self.config)\n cursor = conn.cursor()\n\n # set filter date to previous date\n filter_date = datetime.datetime.today() - datetime.timedelta(days=1)\n\n # execute query and get ids of events already scraped with a start time after yesterday\n cursor.execute(\"select source_id from events where start_time >= %s\", (filter_date, ))\n self.pulled_events = set(event[0] for event in cursor)\n cursor.close()\n conn.close()",
"def find_busy():\n busy_list = [] #list of dicts\n credentials = client.OAuth2Credentials.from_json(flask.session['credentials'])\n service = get_gcal_service(credentials)\n for id in flask.session['selected_cal']:\n events = service.events().list(calendarId=id, pageToken=None).execute()\n for event in events['items']:\n if ('transparency' in event) and event['transparency']=='transparent':\n continue \n start_datetime = arrow.get(event['start']['dateTime'])\n end_datetime = arrow.get(event['end']['dateTime'])\n if overlap(start_datetime, end_datetime): \n event_dict = {\"desc\":event['summary'], \"begin\":start_datetime.isoformat(), \"end\":end_datetime.isoformat()}\n busy_list.append(event_dict)\n \n flask.session['busy_list'] = busy_list",
"def user_blocks(self, user_name):\n blocks = self.site.logevents(title=f'User:{user_name}', type=\"block\")\n events = []\n for block in blocks:\n action = block['action']\n timestamp = struct_to_datetime(block['timestamp'])\n id = block['logid']\n mw_expiry = block['params'].get('expiry')\n expiry = mw_expiry and isoparse(mw_expiry)\n if action == 'block':\n events.append(BlockEvent(user_name, timestamp, id, expiry))\n elif action == 'reblock':\n events.append(BlockEvent(user_name, timestamp, id, expiry, is_reblock=True))\n elif action == 'unblock':\n events.append(UnblockEvent(user_name, timestamp, id))\n else:\n logger.error('Ignoring block due to unknown block action in %s', block)\n return events",
"def get_blocked_numbers():\n print 'Getting blocked numbers'\n client = create_client()\n client.block_numbers([\"+61412345678\"])\n # will retrieve a maximum of 10 blocked numbers\n recipients = client.get_blocked_numbers(10)\n\n for recipient in recipients:\n print 'Blocked number:' + recipient.value",
"def getEvents(self):\n while True:\n response = requests.get(self.longPollBaseUrl, self.longPollPayload)\n jsonResponse = json.loads(response.text)\n logger.debug('Get response from longPoll - {0}'.format(jsonResponse))\n\n if 'ts' not in jsonResponse:\n self._setUpLongPoll()\n continue\n\n self._updateTs(jsonResponse['ts'])\n yield jsonResponse['updates']",
"async def notification_list(self, context):\n if self.db == None:\n await self.start() # Initiate DB, because it's not initialized yet\n\n notifications = self.get_notifications(context.message.author.id)\n if not notifications:\n return await self.bot.send_message(context.message.author, 'You have no notifications at this time.')\n else:\n notifications_list_str = ''\n for notification in notifications.values():\n time_until = notification['notification_time'] - int(datetime.now().timestamp()) # Time until notification\n notifications_list_str += '%s %s in %s\\n' % (notification['uid'], notification['notification_message'], self.get_time_string(time_until))\n return await self.bot.send_message(context.message.author, notifications_list_str) # Full list of notifications\n return",
"def getNotifications():\n # gets the data from the notifications db\n try:\n conn = sqlite3.connect('notifications.db')\n c = conn.cursor()\n\n # get all the data from the db except id (ie. timestamp, message, division)\n c.execute(\"SELECT division, timestamp, notification FROM notifications\")\n result = c.fetchall()\n logging.debug(\"The database returned {} rows\".format((len(result))))\n c.close()\n except sqlite3.OperationalError as e:\n errorMessage = json.dumps({\"error\": str(e)})\n return bottle.HTTPResponse(body=errorMessage, status=400, headers=getHeaders())\n except Exception as e:\n errorMessage = json.dumps({\"error\": str(e)})\n return bottle.HTTPResponse(body=errorMessage, status=400,\n headers=getHeaders())\n\n # format the data so the front end can consume it easily\n # we know the order of the data because it's the same order we passed into the select statement\n resultDict = [{'division': notification[0], 'timestamp': notification[1], 'notification': notification[2]} for\n notification in result]\n return bottle.HTTPResponse(body=json.dumps(resultDict), status=200, headers=getHeaders())",
"async def async_get_logs_multipart(\n w3: \"Web3\",\n start_block: BlockNumber,\n stop_block: BlockNumber,\n address: Union[Address, ChecksumAddress, List[Union[Address, ChecksumAddress]]],\n topics: List[Optional[Union[_Hash32, List[_Hash32]]]],\n max_blocks: int,\n) -> AsyncIterable[List[LogReceipt]]:\n _block_ranges = block_ranges(start_block, stop_block, max_blocks)\n for from_block, to_block in _block_ranges:\n params = {\n \"fromBlock\": from_block,\n \"toBlock\": to_block,\n \"address\": address,\n \"topics\": topics,\n }\n params_with_none_dropped = cast(\n FilterParams, drop_items_with_none_value(params)\n )\n next_logs = await w3.eth.get_logs(params_with_none_dropped) # type: ignore\n yield next_logs",
"def get_cosign_remix_notifications(session, max_block_number, remix_tracks):\n if not remix_tracks:\n return []\n\n remix_notifications = []\n remix_track_ids = [r[\"item_id\"] for r in remix_tracks]\n\n # Query for all the parent tracks of the remix tracks\n tracks_subquery = (\n session.query(Track)\n .filter(\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.is_current == True,\n )\n .subquery()\n )\n\n parent_tracks = (\n session.query(\n Remix.child_track_id, Remix.parent_track_id, tracks_subquery.c.owner_id\n )\n .join(tracks_subquery, Remix.parent_track_id == tracks_subquery.c.track_id)\n .filter(Remix.child_track_id.in_(remix_track_ids))\n .all()\n )\n # Mapping of parent track users to child track to parent track\n parent_track_users_to_remixes = {}\n for track_parent in parent_tracks:\n [remix_track_id, remix_parent_id, remix_parent_user_id] = track_parent\n if not remix_parent_user_id in parent_track_users_to_remixes:\n parent_track_users_to_remixes[remix_parent_user_id] = {\n remix_track_id: remix_parent_id\n }\n else:\n parent_track_users_to_remixes[remix_parent_user_id][\n remix_track_id\n ] = remix_parent_id\n\n for remix_track in remix_tracks:\n user_id = remix_track[\"user_id\"]\n track_id = remix_track[\"item_id\"]\n\n if (\n user_id in parent_track_users_to_remixes\n and track_id in parent_track_users_to_remixes[user_id]\n ):\n remix_notifications.append(\n {\n const.notification_type: const.notification_type_remix_cosign,\n const.notification_blocknumber: remix_track[\n const.notification_blocknumber\n ],\n const.notification_timestamp: remix_track[\n const.notification_timestamp\n ],\n const.notification_initiator: user_id,\n const.notification_metadata: {\n const.notification_entity_id: track_id,\n const.notification_entity_type: \"track\",\n const.notification_entity_owner_id: remix_track[\n \"item_owner_id\"\n ],\n },\n }\n )\n\n return remix_notifications",
"def _get_events_in_range(events: SortedList, earliest_timestamp: datetime, latest_timestamp: datetime) -> List[Event]:\n def _sorting_event_of(timestamp):\n return Event(timestamp, 0)\n\n if earliest_timestamp and latest_timestamp:\n return events[\n events.bisect_left(_sorting_event_of(earliest_timestamp)):\n events.bisect_right(_sorting_event_of(latest_timestamp))\n ]\n if earliest_timestamp:\n return events[events.bisect_left(_sorting_event_of(earliest_timestamp)):]\n\n return events[:events.bisect_right(_sorting_event_of(latest_timestamp))]",
"def get_notification_list(user):\n\tnow = datetime.datetime.now()\n\tdays = 3 \n\n\ttransactions = db.get_transaction_notifications(user, days)\n\tresolutions = db.get_recent_resolutions(user, days)\n\tnews_posts = models.NewsPost.objects.filter(\n\t\ttime_created__gte=now - datetime.timedelta(days),\n\t\tsite=config.SITE_ID\n\t)\n\n\tdef build_transaction_message(trans):\n\t\tif trans.status == 'pending':\n\t\t\taction = 'created a'\n\t\telif trans.status == 'rejected':\n\t\t\taction = 'rejected your'\n\t\telse:\n\t\t\taction = 'confirmed your'\n\t\treturn \"%s %s transaction for a %s of %s.\" % (\n\t\t\ttrans.creator_person,\n\t\t\taction,\n\t\t\ttrans.targets_transaction_type,\n\t\t\ttrans.value_repr\n\t\t), trans.time_created\n\n\tdef build_resolution_message(res):\n\t\treturn \"Your balance with %s was resolved for %s.\" % (\n\t\t\tres.other_person, res.relative_value_repr\n\t\t), res.resolution.time_confirmed\n\n\tdef build_news_post_message(post):\n\t\treturn \"%s posted '%s'.\" % (\n\t\t\tpost.author, post.title\n\t\t), post.time_created\n\n\treturn [i[0] for i in sorted(\n\t\titertools.chain(\n\t\t\t(build_transaction_message(t) for t in transactions),\n\t\t\t(build_resolution_message(r) for r in resolutions),\n\t\t\t(build_news_post_message(n) for n in news_posts)\n\t\t),\n\t\tkey=itemgetter(1),\n\t\treverse=True\n\t)]",
"def testCheckLastTwoEvents(self):\n event_tester = EventTester()\n event1 = Event()\n event2 = Event()\n event3 = Event()\n event_tester.notify(event1)\n event_tester.notify(event2)\n event_tester.notify(event3)\n self.assertEqual([event1, event2], event_tester.last_n_events(2))",
"def get_events(self, idx_start=None, idx_end=None):\r\n idx_start, idx_end = self.__handle_indices(idx_start, idx_end)\r\n events = self.data[..., idx_start:idx_end]\r\n return (events * MCS_TICK.magnitude, MCS_TICK.units)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
flatten pywt coefficients to a vector
|
def flatten_coeffs(coeffs):
x0 = []
for c in coeffs:
x0.append(np.array(c).ravel())
xvec = np.concatenate(x0)
return xvec
|
[
"def get_polyterms_w_xform(self):\n if self.polytermx_cache:\n return self.polytermx_cache\n greens = self.decompose_greens()\n self.polytermx_cache = []\n for (pp,hs,xi) in [self.poly_term_w_xi(t) for t in greens]:\n self.polytermx_cache += [(pp.full_simplify(), hs, xi)]\n\n return self.polytermx_cache",
"def coeffs(self):\n\t\treturn [self.a,self.b,self.c,self.d]",
"def reconstruct(u):\n nx, ny, nz = u.shape[:3]\n c = 2*N+1\n\n Wx = zeros([nx, ny, nz, N+1, n])\n tempW = extend(u, N, 0)\n for i, j, k in product(range(nx), range(ny), range(nz)):\n Wx[i, j, k] = coeffs(tempW[i:i+c, j, k])\n if ndim==1:\n return Wx\n\n Wxy = zeros([nx, ny, nz, N+1, N+1, n])\n tempWx = extend(Wx, N, 1)\n for i, j, k, a in product(range(nx), range(ny), range(nz), range(N+1)):\n Wxy[i, j, k, a] = coeffs(tempWx[i, j:j+c, k, a])\n if ndim==2:\n return Wxy\n\n Wxyz = zeros([nx, ny, nz, N+1, N+1, N+1, n])\n tempWxy = extend(Wxy, N, 2)\n for i, j, k, a, b in product(range(nx), range(ny), range(nz), range(N+1), range(N+1)):\n Wxyz[i, j, k, a, b] = coeffs(tempWxy[i, j, k:k+c, a, b])\n return Wxyz",
"def get_trajectory_from_weights(self, w):\n traj = np.dot(self.psi_matrix.T, np.squeeze(w))\n return traj",
"def reflection_about_x(self):\n\n p = Polynomial(self.coeff[:])\n for (index, value) in enumerate(p.coeff):\n p.coeff[index] = -1 * value\n\n return p",
"def model_weights_as_vector(model):\r\n weights_vector = []\r\n\r\n for layer in model.layers: # model.get_weights():\r\n if layer.trainable:\r\n layer_weights = layer.get_weights()\r\n for l_weights in layer_weights:\r\n vector = numpy.reshape(l_weights, newshape=(l_weights.size))\r\n weights_vector.extend(vector)\r\n\r\n return numpy.array(weights_vector)",
"def build_coeffs(xvec):\n nc = int(np.log2(len(xvec)))/2\n coeffs = [xvec[0].reshape(1, 1)]\n for i in range(nc):\n c1 = xvec[4**i:2*4**i].reshape(2**i, 2**i)\n c2 = xvec[2*4**i:3*4**i].reshape(2**i, 2**i)\n c3 = xvec[3*4**i:4*4**i].reshape(2**i, 2**i)\n coeffs.append((c1, c2, c3))\n return coeffs",
"def _construct_coefficients(self):\n coeffs = [0]*self.degree\n\n N = float(self.evalpts)\n\n lvals = np.arange(self.evalpts).astype('float')\n xpts = self._c2x(np.cos(np.pi*(lvals + 0.5)/N))\n fpts = np.rollaxis(self.func(xpts, *self.args), -1)\n\n for a in range(self.degree):\n inner = [\n fpts[b] * np.cos(np.pi*a*(lvals[b]+0.5)/N)\n for b in range(self.evalpts)\n ]\n coeffs[a] = 2.0/N * np.sum(inner, axis=0)\n\n coeffs[0] *= 0.5\n self._coeffs = np.array(coeffs)",
"def V(X,w,t):\r\n results = []\r\n amplitudes = []\r\n phases = []\r\n for x in X:\r\n results.append((x)*(e**(1j*w*t)))\r\n amplitudes.append(abs(x))\r\n phases.append(phase((x)*(e**(1j*w*t))))\r\n return [results,amplitudes,phases]",
"def list_to_poly(coeffs):\n return RING([F(e) for e in coeffs[::-1]])",
"def lift_coefficient(self) -> np.ndarray:\n\n # Force per unit of free-stream velocity\n unit_force = (\n -self.pressure_coefficients * self.method.panels.lengths\n ).T # Shape = (n_alphas, n_panels, 1)\n panel_normal_force = (\n unit_force[:, :, None] * self.method.panels.normals[None, :, :]\n ) # Result -> (n_alphas, n_panels, 2)\n\n # Shape of lift vector: (n_alphas, 2, 1)\n lift_vector = rotate_2d_90ccw(self.flow_direction)[:, :, None]\n\n return np.sum(panel_normal_force @ lift_vector, axis=1).view(\n np.ndarray\n )",
"def get_wz_dot(psi_acc,x_c,x_b,psi_rate,w_z,y_b,w_y,z_b,w_x,y_c,w_y_dot):\n\ta = psi_acc*(x_c.T)*x_b\n\tb = 2.0*psi_rate*w_z*(x_c.T)*y_b\n\tc = -2.0*psi_rate*w_y*(x_c.T)*z_b\n\td = -1.0*w_x*w_y*(y_c.T)*z_b\n\te = -1.0*w_x*w_z*(y_c.T)*z_b\n\tf = w_y_dot*(y_c.T)*z_b\n\tg = np.linalg.norm(np.cross(y_c,z_b,axis = 0))\n\tw_z_dot = (a+b+c+d+e+f)/g\n\t#print(\"w_z_dot type is: {}\".format(type(w_z_dot)))\n\treturn w_z_dot",
"def transmit(self, x):\n\n X = self.setup_basis_matrix(x)\n coeffs = self.coeffs.flatten().copy()\n return np.dot(X, coeffs)",
"def extract_wt(matrix):\n\n approx_trials = []\n\n for trial in matrix:\n approx = []\n\n for channel in trial:\n # For the signals coming from each channel, extract the corresponding wavelet decomposition\n\n ca, _ = pywt.dwt(channel, 'sym9')\n\n approx.append(ca)\n\n approx_trials.append(approx)\n\n return np.array(approx_trials)",
"def get_coeffs(weights):\n coeff_num = weights.__len__() - 1\n pub_key = weights.public_key\n\n bn = []\n exp = []\n for i in range(coeff_num):\n bn.append(weights.ciphertextBN(i))\n exp.append(weights.exponent(i))\n ct = ipclCipherText(pub_key.pubkey, bn)\n return IpclPaillierEncryptedNumber(pub_key, ct, exp, coeff_num)",
"def _model_vec(self, i):\n return np.outer(self.coeff[:, i], self.eigvec[i])",
"def _poly(coefficients, x):\n out = coefficients[0]\n for coefficient in coefficients[1:]:\n out = out * x + coefficient\n return out",
"def projection_as_vec_v3(v, w):\n proj_len = projection_v3(v, w)\n return scale_v3(v, proj_len)",
"def project(v: np.ndarray, w: np.ndarray) -> np.ndarray:\n return np.dot(v, w) * (w / np.linalg.norm(w))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
build pywt coefficients from a vector; this is the inverse of flatten_coeffs
|
def build_coeffs(xvec):
    nc = int(np.log2(len(xvec)) // 2)  # number of 2D levels; len(xvec) == 4**nc, and range() needs an int
coeffs = [xvec[0].reshape(1, 1)]
for i in range(nc):
c1 = xvec[4**i:2*4**i].reshape(2**i, 2**i)
c2 = xvec[2*4**i:3*4**i].reshape(2**i, 2**i)
c3 = xvec[3*4**i:4*4**i].reshape(2**i, 2**i)
coeffs.append((c1, c2, c3))
return coeffs
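
# Hedged round-trip sketch (not part of the original snippet): it assumes numpy,
# pywt, and the companion flatten_coeffs helper, plus wavedec2's nested layout
# [cA_n, (cH_n, cV_n, cD_n), ..., (cH_1, cV_1, cD_1)] on a square power-of-two
# image, so the flattened vector has 4**nc entries:
#
#   import numpy as np
#   import pywt
#   img = np.random.rand(8, 8)
#   coeffs = pywt.wavedec2(img, "haar", level=3)   # shapes (1,1), 3x(1,1), 3x(2,2), 3x(4,4)
#   xvec = flatten_coeffs(coeffs)                  # 64-element vector
#   rebuilt = build_coeffs(xvec)                   # nested structure restored
#   assert np.allclose(pywt.waverec2(rebuilt, "haar"), img)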
|
[
"def flatten_coeffs(coeffs):\n x0 = []\n for c in coeffs:\n x0.append(np.array(c).ravel())\n xvec = np.concatenate(x0)\n return xvec",
"def _construct_coefficients(self):\n coeffs = [0]*self.degree\n\n N = float(self.evalpts)\n\n lvals = np.arange(self.evalpts).astype('float')\n xpts = self._c2x(np.cos(np.pi*(lvals + 0.5)/N))\n fpts = np.rollaxis(self.func(xpts, *self.args), -1)\n\n for a in range(self.degree):\n inner = [\n fpts[b] * np.cos(np.pi*a*(lvals[b]+0.5)/N)\n for b in range(self.evalpts)\n ]\n coeffs[a] = 2.0/N * np.sum(inner, axis=0)\n\n coeffs[0] *= 0.5\n self._coeffs = np.array(coeffs)",
"def list_to_poly(coeffs):\n return RING([F(e) for e in coeffs[::-1]])",
"def reconstruct(u):\n nx, ny, nz = u.shape[:3]\n c = 2*N+1\n\n Wx = zeros([nx, ny, nz, N+1, n])\n tempW = extend(u, N, 0)\n for i, j, k in product(range(nx), range(ny), range(nz)):\n Wx[i, j, k] = coeffs(tempW[i:i+c, j, k])\n if ndim==1:\n return Wx\n\n Wxy = zeros([nx, ny, nz, N+1, N+1, n])\n tempWx = extend(Wx, N, 1)\n for i, j, k, a in product(range(nx), range(ny), range(nz), range(N+1)):\n Wxy[i, j, k, a] = coeffs(tempWx[i, j:j+c, k, a])\n if ndim==2:\n return Wxy\n\n Wxyz = zeros([nx, ny, nz, N+1, N+1, N+1, n])\n tempWxy = extend(Wxy, N, 2)\n for i, j, k, a, b in product(range(nx), range(ny), range(nz), range(N+1), range(N+1)):\n Wxyz[i, j, k, a, b] = coeffs(tempWxy[i, j, k:k+c, a, b])\n return Wxyz",
"def _compute_build_coeffs_proj(self):\n return self.correlation_array_eigvecs.dot(\n np.diag(self.correlation_array_eigvals ** -0.5).dot(\n self.R_low_order_eigvecs))",
"def _calculate_coeffs(self):\n for joint in self._joint_names:\n self._ovrl_disp[joint] = self._start_pos[joint] - self._end_pos[joint]\n self._coeffs[joint] = [(2*self._ovrl_disp[joint])/(self._motion_time ** 3), (3*-self._ovrl_disp[joint])/(self._motion_time ** 2)]",
"def coeffs(self):\n\t\treturn [self.a,self.b,self.c,self.d]",
"def _compute_build_coeffs_proj(self):\n return self.sum_correlation_array_eigvecs.dot(\n self.sum_correlation_array_eigvecs.conj().T.dot(\n self.proj_correlation_array_eigvecs.dot(\n np.diag(self.proj_correlation_array_eigvals ** -0.5).dot(\n self.R_low_order_eigvecs))))",
"def build_polynomial(coefficients):\n ret = 0\n prod = 1\n for d in range(0, len(coefficients)):\n ret += coefficients[d] * prod\n prod *= delta\n return ret",
"def _atten_coeffs(t, f):\n # Based on the code from Besson, et.al.\n # but simplified significantly since w1=0\n\n t_C = t - scipy.constants.zero_Celsius\n w0 = np.log(1e-4)\n # w1 = 0\n w2 = np.log(3.16)\n\n b0 = -6.7489 + t_C * (0.026709 - 8.84e-4 * t_C)\n b1 = -6.2212 - t_C * (0.070927 + 1.773e-3 * t_C)\n b2 = -4.0947 - t_C * (0.002213 + 3.32e-4 * t_C)\n\n if isinstance(t, np.ndarray) and isinstance(f, np.ndarray):\n # t and f are both arrays, so return 2-D array of coefficients\n # where each row is a single t and each column is a single f.\n a = np.broadcast_to(b1[:,np.newaxis], (len(t), len(f)))\n b = np.zeros((len(t),len(f)))\n # Use numpy slicing to calculate different values for b when\n # f<1e9 and f>=1e9. Transpose b0, b1, b2 into column vectors\n # so numpy multiplies properly\n b[:,f<1e9] += (b0[:,np.newaxis] - b1[:,np.newaxis]) / w0\n b[:,f>=1e9] += (b2[:,np.newaxis] - b1[:,np.newaxis]) / w2\n\n elif isinstance(f, np.ndarray):\n # t is a scalar, so return an array of coefficients based\n # on the frequencies\n a = np.full(len(f), b1)\n b = np.zeros(len(f))\n # Again use numpy slicing to differentiate f<1e9 and f>=1e9\n b[f<1e9] += (b0 - b1) / w0\n b[f>=1e9] += (b2 - b1) / w2\n\n # Past this point, f must be a scalar\n # Then an array or single coefficient is returned based on the type of t\n elif f < 1e9:\n a = b1\n b = (b0 - b1) / w0\n else:\n a = b1\n b = (b2 - b1) / w2\n\n return a, b",
"def detrivialize_stokes_basis(coeffs, psi):\n ji = coeffs[...,0]\n ai = coeffs[...,1]\n jq = coeffs[...,2]\n aq = coeffs[...,3]\n jv = coeffs[...,4]\n av = coeffs[...,5]\n rq = coeffs[...,6]\n rv = coeffs[...,7]\n\n twochi = 2 * (np.pi - psi) # note the sign convention!\n s = np.sin(twochi)\n c = np.cos(twochi)\n\n xformed = np.empty(coeffs.shape[:-1] + (11,))\n xformed[...,0] = ji # j_I\n xformed[...,1] = ai # alpha_I\n xformed[...,2] = c * jq # j_Q\n xformed[...,3] = c * aq # alpha_Q\n xformed[...,4] = s * jq # j_U\n xformed[...,5] = s * aq # alpha_U\n xformed[...,6] = jv # j_V\n xformed[...,7] = av # alpha_V\n xformed[...,8] = c * rq # rho_Q\n xformed[...,9] = s * rq # rho_U\n xformed[...,10] = rv # rho_V\n\n return xformed",
"def get_coeffs(weights):\n coeff_num = weights.__len__() - 1\n pub_key = weights.public_key\n\n bn = []\n exp = []\n for i in range(coeff_num):\n bn.append(weights.ciphertextBN(i))\n exp.append(weights.exponent(i))\n ct = ipclCipherText(pub_key.pubkey, bn)\n return IpclPaillierEncryptedNumber(pub_key, ct, exp, coeff_num)",
"def _compute_build_coeffs_exact(self):\n return self.correlation_array_eigvecs.dot(\n np.diag(self.correlation_array_eigvals ** -0.5).dot(\n self.R_low_order_eigvecs.dot(\n np.diag(self.eigvals ** -1.))))",
"def compute_coefficients_ref(ks):\n coeffs = [1]\n for k in ks:\n coeffs = zipWith(lambda x,y:x+y,coeffs+[0],[0]+[-k*c for c in coeffs])\n return coeffs",
"def wavelet_power(coeffs,logpower=False):\n if 'numpy' in str(type(coeffs)):\n power = (coeffs*coeffs.conj()).real\n print(\"Received numpy array for processing!\")\n else:\n epochs = coeffs[0].shape[0]\n power = list()\n channels = len(coeffs)\n for c in range(channels):\n dummy = np.zeros(coeffs[0].shape,dtype=np.float64)\n for i in range(epochs):\n dummy[i,:,:] = (coeffs[c][i,:,:]*coeffs[c][i,:,:].conj()).real\n power.append(dummy)\n\n if logpower:\n power = 10*np.log10(power)\n\n return power",
"def build_poly(x, degree):\n matrix = np.ones((x.shape[0], 1))\n for j in range(1, degree+1):\n extend = np.power(x, j)\n matrix = np.concatenate((matrix, extend), axis=1)\n\n return matrix",
"def build_poly(x, degree):\n # Build matrix of powers (columns of 1's, 2's, ..., (degree+1)'s)\n power_mx = np.tile(np.arange(degree + 1), (len(x), 1))\n # Build matrix whose columns are duplicated x's arrays\n augmented_x = np.tile(np.array(x).reshape(-1, 1), (1, degree + 1))\n # Raise features to powers of `power_mx`, element-wise\n return np.power(augmented_x, power_mx)",
"def _poly(coefficients, x):\n out = coefficients[0]\n for coefficient in coefficients[1:]:\n out = out * x + coefficient\n return out",
"def calc_basis_vecs(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Populates a given SdFec instance with parameters from an HWH file. Parameters include: basic IP config settings (the XSdFec_Config struct) and the LDPC parameter table (a dict of named XSdFecLdpcParameters).
|
def populate_params(obj, params):
obj._config = _ffi.new('XSdFec_Config*')
obj._code_params = type('', (), {})
_set_params(obj._config, params, _config)
_set_params(obj._code_params, params, _code_params)
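
# Context note (not part of the original snippet): `_ffi`, `_set_params`, and the
# `_config` / `_code_params` parameter maps are module-level helpers of the SD-FEC
# driver that are not shown here. As in the driver __init__ listed among the
# negatives, this is typically called as populate_params(self, description['parameters']),
# after which obj._config holds the XSdFec_Config struct and obj._code_params
# presumably holds the LDPC parameter table described in the query above.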
|
[
"def __init__(self, description : dict):\n super().__init__(description)\n if 'parameters' in description:\n populate_params(self, description['parameters'])\n else:\n warnings.warn(\"Please use an hwh file with the SD-FEC driver\"\n \" - the default configuration is being used\")\n self._config = _lib.XSdFecLookupConfig(0)\n # TODO consider how we should set default LDPC and Turbo code params\n self._instance = _ffi.new(\"XSdFec*\")\n self._config.BaseAddress = self.mmio.array.ctypes.data\n _lib.XSdFecCfgInitialize(self._instance, self._config)",
"def aggregate_params_and_data(yaml_fp):\r\n\r\n config = param_parser.load(yaml_fp, validate=False)\r\n\r\n # -------------Get data/params from get_data/params ----------------\r\n\r\n # handling of legacy param names, formatted as:\r\n # [old name which is still supported, new name]\r\n legacy_conversions = tuple([\r\n ['sd_date', 'c_reduction_date'],\r\n ['DATA_FOLDER', 'data_folder'],\r\n ['CITY', 'city'],\r\n ])\r\n for conversion in legacy_conversions:\r\n old_name = conversion[0]\r\n new_name = conversion[1]\r\n if new_name not in config:\r\n assert old_name in config, \"config YAML has no field \" + \\\r\n \"`{}` (formerly known as `{}`)\".format(new_name, old_name)\r\n config[new_name] = config[old_name]\r\n\r\n # get demographics, school calendar, and transmission data from Excel files\r\n AgeGroupDict, metro_pop, school_calendar, \\\r\n time_begin, FallStartDate, Phi, symp_h_ratio_overall, \\\r\n symp_h_ratio, hosp_f_ratio = SEIR_get_data(config=config)\r\n\r\n config.update({\r\n \"AgeGroupDict\": AgeGroupDict,\r\n 'metro_pop': metro_pop,\r\n 'school_calendar': school_calendar,\r\n 'time_begin': time_begin,\r\n 'FallStartDate': FallStartDate,\r\n 'phi': Phi,\r\n #initial_state': config['initial_state'],\r\n 'initial_i': config['I0'],\r\n 'symp_h_ratio_overall': symp_h_ratio_overall,\r\n 'symp_h_ratio': symp_h_ratio,\r\n 'hosp_f_ratio': hosp_f_ratio\r\n })\r\n\r\n # -------------Get initial state of model --------------------------\r\n ## -- get initial state of compartments\r\n # todo: SEIR model should take a new arg \"init_type\" that explicitly states whether to initialize every compartment or just infected\r\n # todo: currently the type of initialization is inferred from the instance type of \"initial_i\" -- that is sure to break at some point\r\n init_state = InitialModelState(config['total_time'], config['interval_per_day'], config['n_age'], config['n_risk'],\r\n config['I0'], metro_pop)\r\n compartments = init_state.initialize()\r\n # todo: more graceful and transparent override of user config specified start date\r\n # todo: perhaps in param_parser we can check that time_begin_sim is None if a I0 is a file path\r\n if init_state.start_day:\r\n print('Start date as specified in the config file is overridden by initialization from a deterministic solution.')\r\n print('The new start date is {}'.format(init_state.start_day))\r\n date_begin = init_state.start_day\r\n config['time_begin_sim'] = datetime.strftime(date_begin, '%Y%m%d') # return datetime to its expected string format\r\n # todo: we should re-save this config to reflect the updated start time\r\n\r\n # ------------- Update config with revised initial conditions -------\r\n config['initial_state'] = compartments\r\n config['t_offset'] = init_state.offset\r\n\r\n return config",
"def setup_hds(self):\n if self.hds_kperk is None or len(self.hds_kperk) == 0:\n return\n from .gw_utils import setup_hds_obs\n # if len(self.hds_kperk) == 2:\n # try:\n # if len(self.hds_kperk[0] == 2):\n # pass\n # except:\n # self.hds_kperk = [self.hds_kperk]\n oc = self.m.get_package(\"OC\")\n if oc is None:\n raise Exception(\"can't find OC package in model to setup hds grid obs\")\n if not oc.savehead:\n raise Exception(\"OC not saving hds, can't setup grid obs\")\n hds_unit = oc.iuhead\n hds_file = self.m.get_output(unit=hds_unit)\n assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\\\n \"couldn't find existing hds file {0} in org_model_ws\".format(hds_file)\n shutil.copy2(os.path.join(self.org_model_ws,hds_file),\n os.path.join(self.m.model_ws,hds_file))\n inact = None\n if self.m.lpf is not None:\n inact = self.m.lpf.hdry\n elif self.m.upw is not None:\n inact = self.m.upw.hdry\n if inact is None:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x\n else:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x\n print(self.hds_kperk)\n setup_hds_obs(os.path.join(self.m.model_ws,hds_file),\n kperk_pairs=self.hds_kperk,skip=skip)\n self.frun_post_lines.append(\"pyemu.gw_utils.apply_hds_obs('{0}')\".format(hds_file))\n self.tmp_files.append(hds_file)",
"def load():\n\n dc_params = wd_containers.DCParameterContainer()\n\n # general params\n dc_params[\"jdphs\"] = 2\n dc_params[\"ifcgs\"] = 0\n dc_params[\"mode\"] = 5\n dc_params[\"icor1\"] = 0\n dc_params[\"icor2\"] = 0\n\n # system params\n dc_params[\"hjd0\"] = 54954.534784\n dc_params[\"pzero\"] = 1.655473\n dc_params[\"dpdt\"] = 0\n dc_params[\"pshift\"] = 0.0015983790\n dc_params[\"delph\"] = 0\n dc_params[\"nga\"] = 1\n dc_params[\"e\"] = 0\n dc_params[\"a\"] = 7.51\n dc_params[\"f1\"] = 1\n dc_params[\"f2\"] = 1\n dc_params[\"vga\"] = -16.2446\n dc_params[\"xincl\"] = 70.966\n dc_params[\"tavh\"] = 7000\n dc_params[\"tavc\"] = 4293\n dc_params[\"phsv\"] = 4.9582\n dc_params[\"pcsv\"] = 2.2574\n dc_params[\"rm\"] = 0.21\n dc_params[\"perr\"] = 1.570796327\n dc_params[\"dperdt\"] = 0\n dc_params[\"the\"] = 0\n dc_params[\"vunit\"] = 1\n dc_params[\"abunin\"] = 0\n dc_params[\"dpclog\"] = 1.83714\n\n # surface params\n dc_params[\"ifat1\"] = 1\n dc_params[\"ifat2\"] = 1\n dc_params[\"gr1\"] = 0.320\n dc_params[\"gr2\"] = 0.270\n dc_params[\"ipb\"] = 0\n dc_params[\"mref\"] = 1\n dc_params[\"nref\"] = 1\n dc_params[\"n1\"] = 30\n dc_params[\"n2\"] = 30\n dc_params[\"alb1\"] = 0.5\n dc_params[\"alb2\"] = 0.699\n dc_params[\"ld1\"] = 1\n dc_params[\"ld2\"] = 1\n dc_params[\"xbol1\"] = 0.471\n dc_params[\"xbol2\"] = 0.531\n dc_params[\"ybol1\"] = 0\n dc_params[\"ybol2\"] = 0\n\n # third body\n dc_params[\"if3b\"] = 0\n dc_params[\"a3b\"] = 0\n dc_params[\"p3b\"] = 0\n dc_params[\"xincl3b\"] = 0\n dc_params[\"e3b\"] = 0\n dc_params[\"perr3b\"] = 0\n dc_params[\"tc3b\"] = 0\n\n # general dc params\n dc_params[\"isym\"] = 1\n dc_params[\"maglite\"] = 0\n dc_params[\"linkext\"] = 0\n dc_params[\"desextinc\"] = 0\n dc_params[\"n1l\"] = 30\n dc_params[\"n2l\"] = 30\n\n # spot params\n dc_params[\"nomax\"] = 0\n dc_params[\"kspev\"] = 0\n dc_params[\"kspot\"] = 1\n dc_params[\"fspot1\"] = 1\n dc_params[\"fspot2\"] = 1\n dc_params[\"ifsmv1\"] = 0\n dc_params[\"ifsmv2\"] = 0\n\n # which spots to fit?\n dc_params[\"kspa\"] = 1\n dc_params[\"nspa\"] = 1\n dc_params[\"kspb\"] = 0\n dc_params[\"nspb\"] = 0\n\n # params that can remain in their default values\n # lc_params[\"ko\"]\n # lc_params[\"kdisk\"]\n # lc_params[\"ifder\"]\n # lc_params[\"iflcin\"]\n # lc_params[\"ifoc\"]\n\n # params that should not be modified, utility handles these itself\n # lc_params[\"ifvc1\"]\n # lc_params[\"ifvc2\"]\n # lc_params[\"nlc\"]\n # lc_params[\"iftime\"]\n # lc_params[\"nppl\"]\n\n return dc_params",
"def load_experiment_params(parmfile, rasterfs=100, sub_spont=True):\n params = {}\n expt = BAPHYExperiment(parmfile)\n rec = expt.get_recording(rasterfs=rasterfs, resp=True, stim=False)\n resp = rec['resp'].rasterize()\n if sub_spont == True:\n prestimsilence = resp.extract_epoch('PreStimSilence')\n # average over reps(0) and time(-1), preserve neurons\n spont_rate = np.expand_dims(np.nanmean(prestimsilence, axis=(0, -1)), axis=1)\n std_per_neuron = resp._data.std(axis=1, keepdims=True)\n std_per_neuron[std_per_neuron == 0] = 1\n resp = resp._modified_copy(data=(resp._data - spont_rate) / std_per_neuron)\n\n rec['resp'] = rec['resp'].rasterize()\n e = resp.epochs\n expt_params = expt.get_baphy_exptparams() #Using Charlie's manager\n ref_handle = expt_params[0]['TrialObject'][1]['ReferenceHandle'][1]\n\n params['animal'], params['experiment'] = parmfile.split('/')[-3], parmfile.split('/')[-2]\n params['fs'] = resp.fs\n params['PreStimSilence'], params['PostStimSilence'] = ref_handle['PreStimSilence'], ref_handle['PostStimSilence']\n params['Duration'], params['SilenceOnset'] = ref_handle['Duration'], ref_handle['SilenceOnset']\n params['max reps'] = e[e.name.str.startswith('STIM')].pivot_table(index=['name'], aggfunc='size').max()\n params['stim length'] = int(e.loc[e.name.str.startswith('REF')].iloc[0]['end']\n - e.loc[e.name.str.startswith('REF')].iloc[0]['start'])\n params['combos'] = ['Full BG', 'Full FG', 'Full BG/Full FG', 'Half BG/Full FG', 'Half BG', 'Half FG',\n 'Half BG/Half FG', 'Full BG/Half FG']\n params['Background'], params['Foreground'] = ref_handle['Background'], ref_handle['Foreground']\n\n soundies = list(ref_handle['SoundPairs'].values())\n params['pairs'] = [tuple([j for j in (soundies[s]['bg_sound_name'].split('.')[0],\n soundies[s]['fg_sound_name'].split('.')[0])])\n for s in range(len(soundies))]\n params['pairs'] = [(bb.replace(' ', ''), ff.replace(' ', '')) for (bb, ff) in params['pairs']]\n params['units'], params['response'] = resp.chans, resp\n params['rec'] = resp #could be rec, was using for PCA function, might need to fix with spont/std\n\n return params",
"def veg_parameters(config_path):\n # Initialize hru_parameters class\n hru = support.HRUParameters(config_path)\n\n # Open input parameter config file\n inputs_cfg = ConfigParser.ConfigParser()\n try:\n inputs_cfg.readfp(open(config_path))\n except Exception as e:\n logging.error(\n '\\nERROR: Config file could not be read, '\n 'is not an input file, or does not exist\\n'\n ' config_file = {}\\n'\n ' Exception: {}\\n'.format(config_path, e))\n sys.exit()\n\n # Log DEBUG to file\n log_file_name = 'veg_parameters_log.txt'\n log_console = logging.FileHandler(\n filename=os.path.join(hru.log_ws, log_file_name), mode='w')\n log_console.setLevel(logging.DEBUG)\n log_console.setFormatter(logging.Formatter('%(message)s'))\n logging.getLogger('').addHandler(log_console)\n logging.info('\\nGSFLOW Vegetation Parameters')\n\n # Landfire Vegetation Type\n veg_type_orig_path = inputs_cfg.get('INPUTS', 'veg_type_orig_path')\n veg_type_cs = inputs_cfg.getint('INPUTS', 'veg_type_cellsize')\n try:\n veg_type_field = inputs_cfg.get('INPUTS', 'veg_type_field')\n except ConfigParser.NoOptionError:\n veg_type_field = None\n logging.info(\n ' Missing INI parameter, setting {} = {}'.format(\n 'veg_type_field', veg_type_field))\n\n # Landfire Vegetation Cover\n veg_cover_orig_path = inputs_cfg.get('INPUTS', 'veg_cover_orig_path')\n veg_cover_cs = inputs_cfg.getint('INPUTS', 'veg_cover_cellsize')\n\n # Remap\n remap_ws = inputs_cfg.get('INPUTS', 'remap_folder')\n cov_type_remap_name = inputs_cfg.get('INPUTS', 'cov_type_remap')\n covden_sum_remap_name = inputs_cfg.get('INPUTS', 'covden_sum_remap')\n covden_win_remap_name = inputs_cfg.get('INPUTS', 'covden_win_remap')\n snow_intcp_remap_name = inputs_cfg.get('INPUTS', 'snow_intcp_remap')\n srain_intcp_remap_name = inputs_cfg.get('INPUTS', 'srain_intcp_remap')\n wrain_intcp_remap_name = inputs_cfg.get('INPUTS', 'wrain_intcp_remap')\n root_depth_remap_name = inputs_cfg.get('INPUTS', 'root_depth_remap')\n\n # Get remap conversion factors\n try:\n snow_intcp_remap_factor = inputs_cfg.getfloat(\n 'INPUTS', 'snow_intcp_remap_factor')\n except ConfigParser.NoOptionError:\n snow_intcp_remap_factor = 0.01\n logging.info(\n ' Missing INI parameter, setting {} = {}'.format(\n 'snow_intcp_remap_factor', snow_intcp_remap_factor))\n try:\n wrain_intcp_remap_factor = inputs_cfg.getfloat(\n 'INPUTS', 'wrain_intcp_remap_factor')\n except ConfigParser.NoOptionError:\n wrain_intcp_remap_factor = 0.01\n logging.info(\n ' Missing INI parameter, setting {} = {}'.format(\n 'wrain_intcp_remap_factor', wrain_intcp_remap_factor))\n try:\n srain_intcp_remap_factor = inputs_cfg.getfloat(\n 'INPUTS', 'srain_intcp_remap_factor')\n except ConfigParser.NoOptionError:\n srain_intcp_remap_factor = 0.01\n logging.info(\n ' Missing INI parameter, setting {} = {}'.format(\n 'srain_intcp_remap_factor', srain_intcp_remap_factor))\n\n # Check input paths\n if not arcpy.Exists(hru.polygon_path):\n logging.error(\n '\\nERROR: Fishnet ({}) does not exist'.format(\n hru.polygon_path))\n sys.exit()\n # Check that either the original vegetation raster exist\n if not arcpy.Exists(veg_cover_orig_path):\n logging.error(\n '\\nERROR: Vegetation cover raster does not exist')\n sys.exit()\n if not arcpy.Exists(veg_type_orig_path):\n logging.error(\n '\\nERROR: Vegetation type raster does not exist')\n sys.exit()\n # Vegetation cover can be set from another field in the raster\n # This is mostly for US_120EVT\n if not veg_type_field:\n logging.info('\\n Using VALUE field to set vegetation type')\n veg_type_field = 
'VALUE'\n elif len(arcpy.ListFields(veg_type_orig_path, veg_type_field)) == 0:\n logging.info(\n ' veg_type_field {} does not exist\\n Using VALUE '\n 'field to set vegetation type'.format(veg_type_field))\n veg_type_field = 'VALUE'\n elif arcpy.ListFields(veg_type_orig_path, veg_type_field)[0].type not in ['Integer', 'SmallInteger']:\n logging.info(\n ' veg_type_field {} is not an integer type\\n Using VALUE '\n 'field to set vegetation type'.format(veg_type_field))\n veg_type_field = 'VALUE'\n\n # Check that remap folder is valid\n if not os.path.isdir(remap_ws):\n logging.error('\\nERROR: Remap folder does not exist')\n sys.exit()\n # Check that remap files exist\n # Check remap files comment style\n cov_type_remap_path = os.path.join(remap_ws, cov_type_remap_name)\n covden_sum_remap_path = os.path.join(remap_ws, covden_sum_remap_name)\n covden_win_remap_path = os.path.join(remap_ws, covden_win_remap_name)\n snow_intcp_remap_path = os.path.join(remap_ws, snow_intcp_remap_name)\n srain_intcp_remap_path = os.path.join(remap_ws, srain_intcp_remap_name)\n wrain_intcp_remap_path = os.path.join(remap_ws, wrain_intcp_remap_name)\n root_depth_remap_path = os.path.join(remap_ws, root_depth_remap_name)\n remap_path_list = [\n cov_type_remap_path, covden_sum_remap_path, covden_win_remap_path,\n snow_intcp_remap_path, srain_intcp_remap_path,\n wrain_intcp_remap_path, root_depth_remap_path]\n for remap_path in remap_path_list:\n support.remap_check(remap_path)\n\n # Check other inputs\n if veg_type_cs <= 0:\n logging.error('\\nERROR: Veg. type cellsize must be greater than 0')\n sys.exit()\n if veg_cover_cs <= 0:\n logging.error('\\nERROR: Veg. cover cellsize must be greater than 0')\n sys.exit()\n\n # Build output folders if necesssary\n veg_temp_ws = os.path.join(hru.param_ws, 'veg_rasters')\n if not os.path.isdir(veg_temp_ws):\n os.mkdir(veg_temp_ws)\n # Output paths\n veg_cover_path = os.path.join(veg_temp_ws, 'veg_cover.img')\n veg_type_path = os.path.join(veg_temp_ws, 'veg_type.img')\n cov_type_path = os.path.join(veg_temp_ws, 'cov_type.img')\n covden_sum_path = os.path.join(veg_temp_ws, 'covden_sum.img')\n covden_win_path = os.path.join(veg_temp_ws, 'covden_win.img')\n snow_intcp_path = os.path.join(veg_temp_ws, 'snow_intcp.img')\n wrain_intcp_path = os.path.join(veg_temp_ws, 'wrain_intcp.img')\n srain_intcp_path = os.path.join(veg_temp_ws, 'srain_intcp.img')\n root_depth_path = os.path.join(veg_temp_ws, 'root_depth.img')\n rad_trncf_path = os.path.join(veg_temp_ws, 'rad_trncf.img')\n\n # Set ArcGIS environment variables\n arcpy.CheckOutExtension('Spatial')\n env.overwriteOutput = True\n env.pyramid = 'PYRAMIDS -1'\n # env.pyramid = 'PYRAMIDS 0'\n env.workspace = veg_temp_ws\n env.scratchWorkspace = hru.scratch_ws\n\n # Check fields\n logging.info('\\nAdding vegetation fields if necessary')\n support.add_field_func(hru.polygon_path, hru.cov_type_field, 'SHORT')\n support.add_field_func(hru.polygon_path, hru.covden_sum_field, 'DOUBLE')\n support.add_field_func(hru.polygon_path, hru.covden_win_field, 'DOUBLE')\n support.add_field_func(hru.polygon_path, hru.rad_trncf_field, 'DOUBLE')\n support.add_field_func(hru.polygon_path, hru.snow_intcp_field, 'DOUBLE')\n support.add_field_func(hru.polygon_path, hru.srain_intcp_field, 'DOUBLE')\n support.add_field_func(hru.polygon_path, hru.wrain_intcp_field, 'DOUBLE')\n # support.add_field_func(hru.polygon_path, hru.root_depth_field, 'DOUBLE')\n\n # Check that remaps have all necessary values\n logging.info(\n '\\nChecking remap tables against all raster 
cells'\n ' (i.e. even those outside the study area)')\n check_remap_keys(cov_type_remap_path, veg_type_orig_path)\n check_remap_keys(covden_sum_remap_path, veg_cover_orig_path)\n check_remap_keys(root_depth_remap_path, veg_type_orig_path)\n\n # Assume all vegetation rasters will need to be rebuilt\n # Check veg cover and veg type rasters\n # This will check for matching spat. ref., snap point, and cellsize\n\n # Project/clip veg cover to match HRU\n logging.info('\\nProjecting/clipping vegetation cover raster')\n veg_cover_orig_sr = arcpy.sa.Raster(veg_cover_orig_path).spatialReference\n # Remove existing clipped/projected veg cover raster\n if arcpy.Exists(veg_cover_path):\n arcpy.Delete_management(veg_cover_path)\n # Set preferred transforms\n transform_str = support.transform_func(hru.sr, veg_cover_orig_sr)\n logging.debug(' Transform: {}'.format(transform_str))\n logging.debug(' Projection method: NEAREST')\n\n # Project veg cover\n # DEADBEEF - Arc10.2 ProjectRaster does not extent\n support.project_raster_func(\n veg_cover_orig_path, veg_cover_path, hru.sr,\n 'NEAREST', veg_cover_cs, transform_str,\n '{} {}'.format(hru.ref_x, hru.ref_y), veg_cover_orig_sr, hru)\n # env.extent = hru.extent\n # arcpy.ProjectRaster_management(\n # veg_cover_orig_path, veg_cover_path, hru.sr,\n # 'NEAREST', veg_cover_cs, transform_str,\n # '{} {}'.format(hru.ref_x, hru.ref_y),\n # veg_cover_orig_sr)\n # arcpy.ClearEnvironment('extent')\n del transform_str, veg_cover_orig_sr\n\n # Project/clip veg type to match HRU\n logging.info('Projecting/clipping vegetation type raster')\n veg_type_orig_sr = arcpy.sa.Raster(veg_type_orig_path).spatialReference\n # Remove existing clipped/projected veg type raster\n if arcpy.Exists(veg_type_path):\n arcpy.Delete_management(veg_type_path)\n # Set preferred transforms\n transform_str = support.transform_func(hru.sr, veg_type_orig_sr)\n logging.debug(' Transform: {}'.format(transform_str))\n logging.debug(' Projection method: NEAREST')\n # Use a different field to calculate vegetation type\n if veg_type_field != 'VALUE':\n logging.info(\n ' Calculating vegetation type from {} field'.format(\n veg_type_field))\n veg_type_obj = arcpy.sa.Lookup(veg_type_orig_path, veg_type_field)\n else:\n veg_type_obj = arcpy.sa.Raster(veg_type_orig_path)\n\n # Project veg type\n # DEADBEEF - Arc10.2 ProjectRaster does not honor extent\n support.project_raster_func(\n veg_type_obj, veg_type_path, hru.sr,\n 'NEAREST', veg_type_cs, transform_str,\n '{} {}'.format(hru.ref_x, hru.ref_y), veg_type_orig_sr, hru)\n # env.extent = hru.extent\n # arcpy.ProjectRaster_management(\n # veg_type_obj, veg_type_path, hru.sr,\n # 'NEAREST', veg_type_cs, transform_str,\n # '{} {}'.format(hru.ref_x, hru.ref_y),\n # veg_type_orig_sr)\n # arcpy.ClearEnvironment('extent')\n del transform_str, veg_type_orig_sr, veg_type_obj\n\n # Reclassifying vegetation cover type\n logging.info('\\nCalculating COV_TYPE')\n logging.debug(' Reclassifying: {}'.format(cov_type_remap_path))\n cov_type_obj = arcpy.sa.ReclassByASCIIFile(\n veg_type_path, cov_type_remap_path)\n cov_type_obj.save(cov_type_path)\n del cov_type_obj\n\n # Summer cover density\n logging.info('Calculating COVDEN_SUM')\n logging.debug(' Reclassifying: {}'.format(covden_sum_remap_path))\n covden_sum_obj = arcpy.sa.ReclassByASCIIFile(\n veg_cover_path, covden_sum_remap_path)\n covden_sum_obj *= 0.01\n covden_sum_obj.save(covden_sum_path)\n del covden_sum_obj\n\n # Winter cover density\n logging.info('Calculating COVDEN_WIN')\n logging.debug(' Reclassifying: 
{}'.format(covden_win_remap_path))\n covden_win_obj = arcpy.sa.ReclassByASCIIFile(\n cov_type_path, covden_win_remap_path)\n covden_win_obj *= 0.01\n covden_win_obj *= arcpy.sa.Raster(covden_sum_path)\n covden_win_obj.save(covden_win_path)\n del covden_win_obj\n\n # Snow interception storage capacity\n logging.info('Calculating SNOW_INTCP')\n logging.debug(' Reclassifying: {}'.format(snow_intcp_remap_path))\n snow_intcp_obj = arcpy.sa.ReclassByASCIIFile(\n cov_type_path, snow_intcp_remap_path)\n snow_intcp_obj *= snow_intcp_remap_factor\n snow_intcp_obj.save(snow_intcp_path)\n del snow_intcp_obj\n\n # Winter rain interception storage capacity\n logging.info('Calculating WRAIN_INTCP')\n logging.debug(' Reclassifying: {}'.format(wrain_intcp_remap_path))\n wrain_intcp_obj = arcpy.sa.ReclassByASCIIFile(\n cov_type_path, wrain_intcp_remap_path)\n wrain_intcp_obj *= wrain_intcp_remap_factor\n wrain_intcp_obj.save(wrain_intcp_path)\n del wrain_intcp_obj\n\n # Summer rain interception storage capacity\n logging.info('Calculating SRAIN_INTCP')\n logging.debug(' Reclassifying: {}'.format(srain_intcp_remap_path))\n srain_intcp_obj = arcpy.sa.ReclassByASCIIFile(\n cov_type_path, srain_intcp_remap_path)\n srain_intcp_obj *= srain_intcp_remap_factor\n srain_intcp_obj.save(srain_intcp_path)\n del srain_intcp_obj\n\n # Root depth\n logging.info('Calculating ROOT_DEPTH')\n logging.debug(' Reclassifying: {}'.format(root_depth_remap_path))\n root_depth_obj = arcpy.sa.ReclassByASCIIFile(\n veg_type_path, root_depth_remap_path)\n root_depth_obj.save(root_depth_path)\n del root_depth_obj\n\n # Short-wave radiation transmission coefficent\n logging.info('Calculating {}'.format(hru.rad_trncf_field))\n rad_trncf_obj = 0.9917 * arcpy.sa.Exp(\n -2.7557 * arcpy.sa.Raster(covden_win_path))\n rad_trncf_obj.save(rad_trncf_path)\n del rad_trncf_obj\n\n # List of rasters, fields, and stats for zonal statistics\n zs_veg_dict = dict()\n zs_veg_dict[hru.cov_type_field] = [cov_type_path, 'MAJORITY']\n zs_veg_dict[hru.covden_sum_field] = [covden_sum_path, 'MEAN']\n zs_veg_dict[hru.covden_win_field] = [covden_win_path, 'MEAN']\n zs_veg_dict[hru.snow_intcp_field] = [snow_intcp_path, 'MEAN']\n zs_veg_dict[hru.srain_intcp_field] = [srain_intcp_path, 'MEAN']\n zs_veg_dict[hru.wrain_intcp_field] = [wrain_intcp_path, 'MEAN']\n # zs_veg_dict[hru.root_depth_field] = [root_depth_path, 'MEAN']\n zs_veg_dict[hru.rad_trncf_field] = [rad_trncf_path, 'MEAN']\n\n # Calculate zonal statistics\n logging.info('\\nCalculating vegetation zonal statistics')\n support.zonal_stats_func(\n zs_veg_dict, hru.polygon_path, hru.point_path, hru)\n\n # Short-wave radiation transmission coefficient\n # logging.info('\\nCalculating {}'.format(hru.rad_trncf_field))\n # arcpy.CalculateField_management(\n # hru.polygon_path, hru.rad_trncf_field,\n # '0.9917 * math.exp(-2.7557 * !{}!)'.format(hru.covden_win_field),\n # 'PYTHON')\n\n # Clear COV_TYPE values for lake cells (HRU_TYPE == 2)\n if True:\n logging.info('\\nClearing lake nodata vegetation parameters')\n # logging.info(\n # '\\nClearing vegetation parameters for lake and inactive cells')\n hru_polygon_layer = \"hru_polygon_layer\"\n arcpy.MakeFeatureLayer_management(\n hru.polygon_path, hru_polygon_layer)\n arcpy.SelectLayerByAttribute_management(\n hru_polygon_layer, \"NEW_SELECTION\",\n '\"{0}\" = 2 OR (\"{0}\" = 0 AND \"{1}\" = 0)'.format(\n hru.type_field, hru.dem_adj_field))\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.cov_type_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n 
hru_polygon_layer, hru.covden_sum_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.covden_win_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.snow_intcp_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.srain_intcp_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.wrain_intcp_field, 0, 'PYTHON')\n arcpy.CalculateField_management(\n hru_polygon_layer, hru.rad_trncf_field, 0, 'PYTHON')\n arcpy.Delete_management(hru_polygon_layer)\n del hru_polygon_layer",
"def retrieve_data_from_hdf_suitcase(fpath):\n data_dict = {}\n with h5py.File(fpath, \"r+\") as f:\n other_data_list = [v for v in f.keys() if v != \"xrfmap\"]\n if len(other_data_list) > 0:\n f_hdr = f[other_data_list[0]].attrs[\"start\"]\n if not isinstance(f_hdr, str):\n f_hdr = f_hdr.decode(\"utf-8\")\n start_doc = ast.literal_eval(f_hdr)\n other_data = f[other_data_list[0] + \"/primary/data\"]\n\n if start_doc[\"beamline_id\"] == \"HXN\":\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"hxn_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n extra_list = config_data[\"other_list\"]\n fly_type = start_doc.get(\"fly_type\", None)\n subscan_dims = start_doc.get(\"subscan_dims\", None)\n\n if \"dimensions\" in start_doc:\n datashape = start_doc[\"dimensions\"]\n elif \"shape\" in start_doc:\n datashape = start_doc[\"shape\"]\n else:\n logger.error(\"No dimension/shape is defined in hdr.start.\")\n\n datashape = [datashape[1], datashape[0]] # vertical first, then horizontal\n for k in extra_list:\n # k = k.encode('utf-8')\n if k not in other_data.keys():\n continue\n _v = np.array(other_data[k])\n v = _v.reshape(datashape)\n if fly_type in (\"pyramid\",):\n # flip position the same as data flip on det counts\n v = flip_data(v, subscan_dims=subscan_dims)\n data_dict[k] = v\n return data_dict",
"def _from_File(self, fname):\n\n # load_seds - load wavelength and seds\n if self._get_type(fname) == \"fits\":\n with pyfits.open(fname) as f:\n self.seds = f[0].data[:-1]\n self.lamb = f[0].data[-1]\n self.grid = Table(fname)\n\n elif self._get_type(fname) == \"hdf\":\n with HDFStore(fname, mode=\"r\") as s:\n self.seds = s[\"/seds\"].read()\n self.lamb = s[\"/lamb\"].read()\n try:\n self.cov_diag = s[\"/covdiag\"].read()\n except Exception:\n self.cov_diag = None\n try:\n self.cov_offdiag = s[\"/covoffdiag\"].read()\n except Exception:\n self.cov_offdiag = None\n self.grid = Table(fname, tablename=\"/grid\")\n\n self._header = self.grid.header",
"def read_param_def(self, param_file):\n if param_file == None:\n file_string = pkg_resources.resource_string('sonde',\n DEFAULT_ESPEY_PARAM_DEF)\n elif type(param_file) == str:\n with open(param_file, 'rb') as fid:\n file_string = fid.read()\n\n elif type(param_file) == file:\n file_string = param_file.read()\n\n file_string = re.sub(\"\\n\\s*\\n*\", \"\\n\", file_string)\n file_string = re.sub(\";.*\\n*\", \"\", file_string)\n file_string = re.sub(\"\\t\", \"\", file_string)\n file_string = re.sub(\"\\\"\", \"\", file_string)\n self.espey_file_version = int(file_string.splitlines()[0].split('=')[-1])\n self.espey_num_param_in_def = int(\n file_string.splitlines()[1].split('=')[-1])\n self.espey_ecowatch_version = int(\n file_string.splitlines()[2].split('=')[-1])\n dtype = np.dtype([('espey_id', '<i8'),\n ('name', '|S20'),\n ('unit', '|S11'),\n ('shortname', '|S9'),\n ('num_dec_places', '<i8')])\n self.espey_param_def = np.genfromtxt(StringIO(file_string),\n delimiter=',',\n usecols=(0, 1, 3, 5, 7),\n skip_header=3, dtype=dtype)",
"def _load_config(self):\n\n channel_spec = self.spec.channels\n\n _spec_fields = (\n 'name', 'long', 'word_len', 'bit_mask', 'max_range', 'log_max',\n 'log_min', 'gain')\n\n ParamSpec = namedtuple('SPn', _spec_fields)\n\n self._config = {\n num: ParamSpec(*format_attr(self.spec.type_i, **channel_spec[num]))\n for num in channel_spec}\n\n self.par_ids = tuple(sorted(channel_spec.keys()))\n self.names = self.__get_ch_attr('name')\n self.__load_id_maps()",
"def load_param_from_pcs_file(self, pcs_path):\n self.parameter_space = Parameters.load_param_from_pcs_file(pcs_path)",
"def insert_hyperparam_defns(hpo_id, yamlfile):\n with open(yamlfile) as fp:\n s = fp.read()\n y = yaml.load(s)\n for hp in y:\n print(\"hyperparameter '%s' has %2i values\" % (hp, len(y[hp][\"values\"])))\n param_id = DB.insert(\n table=\"hpo_hyperparam_defns\",\n names=[\"hpo_id\", \"name\"],\n values=[q(hpo_id), q(hp)],\n )\n # print(\"param_id \" + str(param_id))\n values = y[hp][\"values\"]\n for p in values:\n print(\" \" + p)\n DB.insert(\n table=\"hpo_hyperparam_values\",\n names=[\"param_id\", \"value\"],\n values=[q(param_id), q(p)],\n )",
"def readHlist(filepath):\n\n #Check to see how many fields in hlist\n with open(filepath, 'r') as fp:\n\n l = fp.readline()\n ls = l.split(' ')\n nfields = len(ls)\n print('Number of fields in hlist {0}: {1}'.format(filepath, nfields))\n\n if nfields == 66:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float), ('Mvir_all',float), ('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,34,17,18,19,35,36,37,38,39,56,57,58,59]\n\n elif nfields == 67:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float),('Mvir_all',float),('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,18,19,20,34,35,36,37,38,39,54,55,56,57]\n\n else:\n print('Unrecognized Hlist format, check file or update readHlist with new format')\n raise Exception\n\n\n halos = np.genfromtxt(filepath,dtype=dtype,usecols=usecols)\n halos = halos[halos['id']!=0]\n\n return halos",
"def adjust_hspfmodel(self, hspfmodel, parameters, ifraction = None, \n evap = None, ccfact = None, lzetp = None, lzsn = None, \n uzsn = None, intfw = None, infilt = None, \n agwrc = None, kvary = None, deepfr = None, irc = None, \n ftable = None):\n \n if ifraction is None: ifraction = parameters.ifraction\n if evap is None: evap_multiplier = parameters.evap\n if ccfact is None: CCFACT = parameters.ccfact\n if lzetp is None: LZETP_multiplier = parameters.lzetp\n if lzsn is None: LZSN_multiplier = parameters.lzsn\n if uzsn is None: UZSN_multiplier = parameters.uzsn\n if intfw is None: INTFW_multiplier = parameters.intfw\n if infilt is None: INFILT_multiplier = parameters.infilt\n if agwrc is None: AGWRC = parameters.agwrc\n if kvary is None: KVARY = parameters.kvary\n if deepfr is None: DEEPFR = parameters.deepfr\n if irc is None: IRC = parameters.irc\n if ftable is None: ftable = parameters.ftable\n\n # update the parameters\n\n hspfmodel.ifraction = ifraction\n hspfmodel.evap_multiplier = evap_multiplier\n\n # set the values for each PERLND -- Note the limits on values\n\n for p in hspfmodel.perlnds:\n if p.VLE == 1: p.monLZETP = [min(l * LZSN_multiplier, 0.99) \n for l in p.monLZETP]\n else: p.LZETP = min(p.LZETP * LZETP_multiplier, 0.99)\n p.LZSN = max(0.26, min(p.LZSN * LZSN_multiplier, 2400))\n p.UZSN = max(0.26, min(p.UZSN * UZSN_multiplier, 240))\n p.INTFW = p.INTFW * INTFW_multiplier\n p.INFILT = max(0.003, min(p.INFILT * INFILT_multiplier, 2400))\n p.AGWRC = AGWRC\n p.KVARY = KVARY\n p.DEEPFR = DEEPFR\n p.IRC = IRC\n\n for o in hspfmodel.perlnds + hspfmodel.implnds:\n o.CCFACT = CCFACT\n\n for r in hspfmodel.rchreses:\n for i in range(len(r.ftable)): \n r.ftable[i][3] = r.ftable[i][3] * ftable",
"def ldsc_h2_part(args, **kwargs):\n \n # handle args\n phname = str(args[0])\n phdesc = str(args[1])\n phsource = str(args[2]).replace(\"'\",\"\") ######### EDITED #########\n n = float(args[3])\n ncas = float(args[4])\n ncon = float(args[5])\n \n # define names\n ss_name = str(phname)+'.tsv.bgz' \n sspath_local = wd+'/'+ss_name\n sspath_cloud = ss_bucket+'/'+ss_name\n \n h2_out = 'h2part.ukbb.'+str(phsource)+'_'+str(phname)\n \n # download sumstats file\n subprocess.call(['gsutil','cp',sspath_cloud,sspath_local])\n \n # run ldsc\n args_h2 = Namespace(out=h2_out, \n bfile=None,\n l2=None,\n extract=None,\n keep=None,\n ld_wind_snps=None,\n ld_wind_kb=None,\n ld_wind_cm=None,\n print_snps=None,\n annot=None,\n thin_annot=False,\n cts_bin=None,\n cts_break=None,\n cts_names=None,\n per_allele=False,\n pq_exp=None,\n no_print_annot=False,\n maf=0.05,\n h2=sspath_local,\n rg=None,\n ref_ld=None,\n ref_ld_chr=ld_ref_panel,\n w_ld=None,\n w_ld_chr=ld_w_panel,\n overlap_annot=True,\n no_intercept=False, ######## CHECK (default: False) ########\n intercept_h2=None,\n intercept_gencov=None,\n M=None,\n two_step=None,\n chisq_max=99999,\n print_cov=False,\n print_delete_vals=False,\n chunk_size=50,\n pickle=False,\n invert_anyway=False,\n yes_really=False,\n n_blocks=200,\n not_M_5_50=False,\n return_silly_things=False,\n no_check_alleles=False,\n print_coefficients=True,\n samp_prev=None,\n pop_prev=None,\n frqfile=None,\n h2_cts=None,\n frqfile_chr=ld_frq_panel,\n print_all_cts=False,\n sumstats_frames=None,\n rg_mat=False)\n \n print \"Launching ldsc for \"+str(phname)\n h2_results = ldsc.sumstats.estimate_h2(args_h2, Logger_to_Logging())\n print \"Completed ldsc for \"+str(phname)\n \n # cleanup sumstats file\n subprocess.call(['rm',sspath_local])\n \n return process_h2_part(h2_results, h2_out+'.results', phname, phdesc, phsource, float(n), float(ncas), float(ncon))",
"def init(filename=None, **kwargs):\n if filename is None:\n filename = os.path.join(hax_dir, 'hax.ini')\n\n # Do NOT move import to top of file, will crash docs building\n global config\n configp = ConfigParser(inline_comment_prefixes='#', strict=True)\n configp.read(filename)\n log.debug(\"Read in hax configuration file %s\" % filename)\n\n # Pick the correct section for this host\n section_to_use = 'DEFAULT'\n full_domain_name = socket.getfqdn()\n for section_name in configp.sections():\n if section_name in full_domain_name:\n section_to_use = section_name\n break\n\n # Evaluate the values in the ini file\n config = {}\n for key, value in configp[section_to_use].items():\n config[key] = eval(value, {'hax_dir': hax_dir, 'os': os})\n\n # Override with kwargs\n config.update(kwargs)\n\n # This import can't be at the top, would be circular\n from hax.runs import update_datasets\n update_datasets()\n\n from hax.minitrees import update_treemakers\n update_treemakers()",
"def load_ek80_raw(self, raw):\n print('%s converting file: %s' % (dt.now().strftime('%H:%M:%S'), os.path.basename(raw)))\n\n with RawSimradFile(raw, 'r') as fid:\n self.config_datagram = fid.read(1)\n self.config_datagram['timestamp'] = np.datetime64(self.config_datagram['timestamp'], '[ms]')\n\n # IDs of the channels found in the dataset\n self.ch_ids = list(self.config_datagram[self.config_datagram['subtype']])\n\n for ch_id in self.ch_ids:\n self.ping_data_dict[ch_id] = defaultdict(list)\n self.ping_data_dict[ch_id]['frequency'] = \\\n self.config_datagram['configuration'][ch_id]['transducer_frequency']\n self.power_dict[ch_id] = []\n self.angle_dict[ch_id] = []\n self.complex_dict[ch_id] = []\n\n # Parameters recorded for each frequency for each ping\n self.parameters[ch_id]['frequency_start'] = []\n self.parameters[ch_id]['frequency_end'] = []\n self.parameters[ch_id]['frequency'] = []\n self.parameters[ch_id]['pulse_duration'] = []\n self.parameters[ch_id]['pulse_form'] = []\n self.parameters[ch_id]['sample_interval'] = []\n self.parameters[ch_id]['slope'] = []\n self.parameters[ch_id]['transmit_power'] = []\n self.parameters[ch_id]['timestamp'] = []\n\n # Read the rest of datagrams\n self._read_datagrams(fid)\n # Remove empty lists\n for ch_id in self.ch_ids:\n if all(x is None for x in self.power_dict[ch_id]):\n self.power_dict[ch_id] = None\n if all(x is None for x in self.complex_dict[ch_id]):\n self.complex_dict[ch_id] = None\n\n if len(self.ch_ids) != len(self.recorded_ch_ids):\n self.ch_ids = self.recorded_ch_ids",
"def import_from_sdc(cls, values: Dict[str, Any]) -> 'SDC':",
"def __init__(\n self,\n approx_neb_wf_uuid,\n end_points_combo,\n mobile_specie,\n n_images,\n selective_dynamics_scheme,\n launch_mode=\"all\",\n db_file=DB_FILE,\n vasp_input_set=None,\n vasp_cmd=VASP_CMD,\n override_default_vasp_params=None,\n handler_group=None,\n parents=None,\n add_additional_fields=None,\n add_tags=None,\n **kwargs,\n ):\n fw_name = f\"hop: {mobile_specie} {end_points_combo}\"\n fw_spec = {\"tags\": [\"approx_neb\", approx_neb_wf_uuid, \"evaluate_path\"]}\n\n t = []\n # apply pathfinder pymatgen function and store outputs in approx_neb collection\n t.append(\n PathfinderToDb(\n db_file=db_file,\n n_images=n_images,\n end_points_combo=end_points_combo,\n approx_neb_wf_uuid=approx_neb_wf_uuid,\n )\n )\n # apply selective dynamics to pathfinder outputs to get images input structures\n t.append(\n AddSelectiveDynamics(\n approx_neb_wf_uuid=approx_neb_wf_uuid,\n pathfinder_key=end_points_combo,\n mobile_specie=mobile_specie,\n selective_dynamics_scheme=selective_dynamics_scheme,\n db_file=db_file,\n )\n )\n # add dynamic firetask that will launch image relaxations as desired\n t.append(\n GetImageFireworks(\n launch_mode=launch_mode,\n images_key=end_points_combo,\n approx_neb_wf_uuid=approx_neb_wf_uuid,\n vasp_cmd=vasp_cmd,\n db_file=db_file,\n vasp_input_set=vasp_input_set,\n override_default_vasp_params=override_default_vasp_params,\n handler_group=handler_group,\n add_additional_fields=add_additional_fields,\n add_tags=add_tags,\n )\n )\n\n super().__init__(tasks=t, spec=fw_spec, name=fw_name, parents=parents, **kwargs)",
"def loadParameters(self, filepath) -> retval:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a cdata XSdFecLdpcParameters version of the given dict
|
def _pack_ldpc_param(param_dict : dict) -> any:
key_lookup = {
'k': 'K',
'n': 'N',
'p': 'PSize',
'nlayers': 'NLayers',
'nqc': 'NQC',
'nmqc': 'NMQC',
'nm': 'NM',
'norm_type': 'NormType',
'no_packing': 'NoPacking',
'special_qc': 'SpecialQC',
'no_final_parity': 'NoFinalParity',
'max_schedule': 'MaxSchedule',
'sc_table': 'SCTable',
'la_table': 'LATable',
'qc_table': 'QCTable',
}
    # Keep only the keys that map onto XSdFecLdpcParameters struct fields; drop the rest
sub_dict = {key_lookup[key]: param_dict[key] for key in param_dict
if key in key_lookup.keys()}
# Pack tables as C arrays
def to_c_array(lst):
# Convert scalars to singleton lists
if not isinstance(lst, list):
lst = [lst]
# Copy to C array
c_arr = _ffi.new('u32[]', len(lst))
for i, x in enumerate(lst):
c_arr[i] = x
return c_arr
for table_key in filter(lambda k: k.endswith('Table'), sub_dict.keys()):
sub_dict[table_key] = to_c_array(sub_dict[table_key])
c_struct = _pack_value('XSdFecLdpcParameters', sub_dict)
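    # Keep the cffi-allocated table arrays alive for as long as the packed
    # struct is referenced; without this the u32[] buffers could be garbage
    # collected while the C side still points at them.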
_c_array_weakkeydict[c_struct] = [sub_dict[table_key]
for table_key in filter(lambda k: k.endswith('Table'), sub_dict.keys())
]
return c_struct
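
# A minimal usage sketch (not part of the driver): the key names below come
# from key_lookup above, but every value is made up purely for illustration.
_example_ldpc_param = {
    'k': 5120,
    'n': 10240,
    'p': 128,
    'nlayers': 4,
    'sc_table': [0, 1, 2, 3],   # lists are packed into u32[] C arrays
    'la_table': 10,             # scalars are first wrapped in a singleton list
    'qc_table': [5, 6, 7, 8],
    'enable': 1,                # not in key_lookup, so it is silently dropped
}
# _packed = _pack_ldpc_param(_example_ldpc_param)  # -> cdata XSdFecLdpcParameters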
|
[
"def make_crds_parameter_dict(self):\n\n parameters = {}\n parameters['INSTRUME'] = self.instrument.upper()\n parameters['DETECTOR'] = self.detector.upper()\n parameters['READPATT'] = self.read_pattern.upper()\n parameters['SUBARRAY'] = self.subarray.upper()\n parameters['DATE-OBS'] = datetime.date.today().isoformat()\n current_date = datetime.datetime.now()\n parameters['TIME-OBS'] = current_date.time().isoformat()\n\n return parameters",
"def encodeCRFparams(self):\n crfParams = {}\n crfParams['unaryWeights'] = self.unaryWeights\n crfParams['binaryWeights'] = self.binaryWeights\n crfParams['epsWeight'] = self.epsWeight\n crfParams['regNorm'] = self.regNorm\n crfParams['regLambda'] = self.regLambda\n crfParams['omega'] = self.omega\n return crfParams",
"def _get_parameters(self):\n parameters = getattr(self, \"CF_PARAMETERS\",\n getattr(self, \"PARAMETERS\", {}))\n\n for var_name, attrs in self.defined_variables().iteritems():\n var_type = attrs.get(\"type\")\n if isinstance(var_type, CFNType):\n cfn_attrs = copy.deepcopy(attrs)\n cfn_attrs[\"type\"] = var_type.parameter_type\n parameters[var_name] = cfn_attrs\n return parameters",
"def to_dict(cxn_params):\n return dict( (p['@key'], p['$']) for p in cxn_params['entry'] )",
"def calibrationParameters(self, f_c, f_sb):\n return self._calibration.calibrationParameters(f_c=f_c, f_sb=f_sb)",
"def available_ldpc_params(self) -> list:\n return list(self._code_params.ldpc.keys())",
"def _crysol_parameters(pdb, dat, p, lm=25, fb=17, sm=1, ns=256, un=1, dns=0.334, dro=0, err=True, cst=True):\n\n\t\t# Define CRYSOL input parameters\n\t\tparameters = {\"pdb\": [\"{}\".format(pdb)],\n\t\t\t\t\t\"dat\": [\"{}\".format(dat)],\n\t\t\t\t\t\"p\": [\"-p\"] + [\"fit_{}\".format(p)],\n\t\t\t\t\t\"lm\": [\"-lm\"] + [\"{}\".format(lm)],\n\t\t\t\t\t\"fb\": [\"-fb\"] + [\"{}\".format(fb)],\n\t\t\t\t\t\"sm\": [\"-sm\"] + [\"{}\".format(sm)],\n\t\t\t\t\t\"ns\": [\"-ns\"] + [\"{}\".format(ns)],\n\t\t\t\t\t\"un\": [\"-un\"] + [\"{}\".format(un)],\n\t\t\t\t\t\"dns\": [\"-dns\"] + [\"{}\".format(dns)],\n\t\t\t\t\t\"dro\": [\"-dro\"] + [\"{}\".format(dro)]}\n\n\t\t# Check if err and cst flags need to be set up\n\t\tif err:\n\t\t\tparameters[\"err\"] = [\"-err\"]\n\t\tif cst:\n\t\t\tparameters[\"cst\"] = [\"-cst\"]\n\n\t\t# Construct CRYSOL call with associated parameters\n\t\tcrysol_command = [\"crysol\"]\n\t\tfor key in parameters.keys():\n\t\t\tcrysol_command += parameters.get(key, []) # return empty list to avoid None addition\n\n\t\treturn crysol_command",
"def load():\n\n dc_params = wd_containers.DCParameterContainer()\n\n # general params\n dc_params[\"jdphs\"] = 2\n dc_params[\"ifcgs\"] = 0\n dc_params[\"mode\"] = 5\n dc_params[\"icor1\"] = 0\n dc_params[\"icor2\"] = 0\n\n # system params\n dc_params[\"hjd0\"] = 54954.534784\n dc_params[\"pzero\"] = 1.655473\n dc_params[\"dpdt\"] = 0\n dc_params[\"pshift\"] = 0.0015983790\n dc_params[\"delph\"] = 0\n dc_params[\"nga\"] = 1\n dc_params[\"e\"] = 0\n dc_params[\"a\"] = 7.51\n dc_params[\"f1\"] = 1\n dc_params[\"f2\"] = 1\n dc_params[\"vga\"] = -16.2446\n dc_params[\"xincl\"] = 70.966\n dc_params[\"tavh\"] = 7000\n dc_params[\"tavc\"] = 4293\n dc_params[\"phsv\"] = 4.9582\n dc_params[\"pcsv\"] = 2.2574\n dc_params[\"rm\"] = 0.21\n dc_params[\"perr\"] = 1.570796327\n dc_params[\"dperdt\"] = 0\n dc_params[\"the\"] = 0\n dc_params[\"vunit\"] = 1\n dc_params[\"abunin\"] = 0\n dc_params[\"dpclog\"] = 1.83714\n\n # surface params\n dc_params[\"ifat1\"] = 1\n dc_params[\"ifat2\"] = 1\n dc_params[\"gr1\"] = 0.320\n dc_params[\"gr2\"] = 0.270\n dc_params[\"ipb\"] = 0\n dc_params[\"mref\"] = 1\n dc_params[\"nref\"] = 1\n dc_params[\"n1\"] = 30\n dc_params[\"n2\"] = 30\n dc_params[\"alb1\"] = 0.5\n dc_params[\"alb2\"] = 0.699\n dc_params[\"ld1\"] = 1\n dc_params[\"ld2\"] = 1\n dc_params[\"xbol1\"] = 0.471\n dc_params[\"xbol2\"] = 0.531\n dc_params[\"ybol1\"] = 0\n dc_params[\"ybol2\"] = 0\n\n # third body\n dc_params[\"if3b\"] = 0\n dc_params[\"a3b\"] = 0\n dc_params[\"p3b\"] = 0\n dc_params[\"xincl3b\"] = 0\n dc_params[\"e3b\"] = 0\n dc_params[\"perr3b\"] = 0\n dc_params[\"tc3b\"] = 0\n\n # general dc params\n dc_params[\"isym\"] = 1\n dc_params[\"maglite\"] = 0\n dc_params[\"linkext\"] = 0\n dc_params[\"desextinc\"] = 0\n dc_params[\"n1l\"] = 30\n dc_params[\"n2l\"] = 30\n\n # spot params\n dc_params[\"nomax\"] = 0\n dc_params[\"kspev\"] = 0\n dc_params[\"kspot\"] = 1\n dc_params[\"fspot1\"] = 1\n dc_params[\"fspot2\"] = 1\n dc_params[\"ifsmv1\"] = 0\n dc_params[\"ifsmv2\"] = 0\n\n # which spots to fit?\n dc_params[\"kspa\"] = 1\n dc_params[\"nspa\"] = 1\n dc_params[\"kspb\"] = 0\n dc_params[\"nspb\"] = 0\n\n # params that can remain in their default values\n # lc_params[\"ko\"]\n # lc_params[\"kdisk\"]\n # lc_params[\"ifder\"]\n # lc_params[\"iflcin\"]\n # lc_params[\"ifoc\"]\n\n # params that should not be modified, utility handles these itself\n # lc_params[\"ifvc1\"]\n # lc_params[\"ifvc2\"]\n # lc_params[\"nlc\"]\n # lc_params[\"iftime\"]\n # lc_params[\"nppl\"]\n\n return dc_params",
"def _read_para_cert(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n _ctgp = self._read_unpack(1)\n _ctct = self._read_unpack(1)\n _ctid = self._read_unpack(1)\n _cttp = self._read_unpack(1)\n _ctdt = self._read_fileng(clen-4)\n\n cert = dict(\n type=desc,\n critical=cbit,\n length=clen,\n group=_GROUP_ID.get(_ctgp),\n count=_ctct,\n id=_ctid,\n cert_type=_CERT_TYPE.get(_cttp),\n certificate=_ctdt,\n )\n\n _plen = length - clen\n if _plen:\n self._read_fileng(_plen)\n\n return cert",
"def getparamexogdict():\n paramssdict = {'ALPHA': 0.3, 'BETA': 0.95, 'DELTA': 0.1, 'RHO': 0.9, 'SIGMA': 0.1, 'ME_c': 0.01, 'ME_y': 0.01}\n return(paramssdict)",
"def fcs_params(self):\n FCS_PARAMS = []\n if self.GENERAL_SNAP:\n for fcs in self.__snap_adapter_list('fcs'):\n fcs_params = self.__snap_stanza_read(self.GENERAL_SNAP, 'lsattr -El ' + fcs)\n if fcs_params:\n FCS_PARAMS.append({'name' : fcs, 'atname_1' : fcs_params[5].split()[0],\n 'atval_1' : fcs_params[5].split()[1], 'atname_2' : fcs_params[9].split()[0],\n 'atval_2' : fcs_params[9].split()[1], 'atname_3' : fcs_params[10].split()[0],\n 'atval_3' : fcs_params[10].split()[1]})\n else:\n return None\n return FCS_PARAMS",
"def create_ccd_code_map(ccd_lyr):\n polys_dict = {}\n for ccd_shp in ccd_lyr:\n ccd_code = ccd_shp.GetField(CCD_CODE_FIELD) \n ccd_geom = ccd_shp.GetGeometryRef()\n polys_dict[ccd_code] = ccd_shp\n ccd_lyr.ResetReading()\n return polys_dict",
"def from_dict(cls, schema_dict):\n cls._validate_dict(schema_dict)\n\n def constraints():\n constraints = schema_dict.get(CONSTRAINTS)\n if constraints is None:\n return\n\n if not isinstance(constraints, list):\n raise constr.InvalidSchemaError(\n _(\"Invalid parameter constraints, expected a list\"))\n\n valid_keys = (DESCRIPTION, LENGTH, RANGE, ALLOWED_VALUES,\n ALLOWED_PATTERN, CUSTOM_CONSTRAINT)\n\n for constraint in constraints:\n cls._check_dict(constraint, valid_keys,\n 'parameter constraints')\n desc = constraint.get(DESCRIPTION)\n if RANGE in constraint:\n cdef = constraint.get(RANGE)\n cls._check_dict(cdef, (MIN, MAX), 'range constraint')\n yield constr.Range(parameters.Schema.get_num(MIN, cdef),\n parameters.Schema.get_num(MAX, cdef),\n desc)\n elif LENGTH in constraint:\n cdef = constraint.get(LENGTH)\n cls._check_dict(cdef, (MIN, MAX), 'length constraint')\n yield constr.Length(parameters.Schema.get_num(MIN, cdef),\n parameters.Schema.get_num(MAX, cdef),\n desc)\n elif ALLOWED_VALUES in constraint:\n cdef = constraint.get(ALLOWED_VALUES)\n yield constr.AllowedValues(cdef, desc)\n elif ALLOWED_PATTERN in constraint:\n cdef = constraint.get(ALLOWED_PATTERN)\n yield constr.AllowedPattern(cdef, desc)\n elif CUSTOM_CONSTRAINT in constraint:\n cdef = constraint.get(CUSTOM_CONSTRAINT)\n yield constr.CustomConstraint(cdef, desc)\n else:\n raise constr.InvalidSchemaError(\n _(\"No constraint expressed\"))\n\n # make update_allowed true by default on TemplateResources\n # as the template should deal with this.\n return cls(schema_dict[cls.TYPE],\n description=schema_dict.get(HOTParamSchema.DESCRIPTION),\n default=schema_dict.get(HOTParamSchema.DEFAULT),\n constraints=list(constraints()),\n hidden=schema_dict.get(HOTParamSchema.HIDDEN, False),\n label=schema_dict.get(HOTParamSchema.LABEL))",
"def parameters_map(self):\n return dict([(p.name, p) for p in self.component.parameters])",
"def get_ecdh_param(self):\r\n # type: () -> Dict[str, str]\r\n d = self._openssl_str_to_dic(self._ssl.get_ecdh_param(), ' ')\r\n d['GroupSize'] = d.pop('ECDSA_Parameters').strip('( bit)')\r\n d['Type'] = \"ECDH\"\r\n if 'Cofactor' in d :\r\n d['Cofactor'] = d['Cofactor'].split(' ')[0]\r\n\r\n for k in d.keys() :\r\n if k.startswith('Generator') :\r\n d['Generator'] = d.pop(k)\r\n d['GeneratorType'] = k.split('_')[1].strip('()')\r\n break\r\n else :\r\n d['GeneratorType'] = 'Unknown'\r\n return d",
"def params_from_dict(self, params):\n return [{'ParameterKey': key, 'ParameterValue': value} for key, value in params.items()] if params else []",
"def loadCTD(ctd):\n\n S = ctd['s']\n T = ctd['t']\n p = ctd['p']\n lat = ctd['lat']\n lon = ctd['lon']\n\n\n return S, T, p, lat, lon",
"def find_parameters(upc):\n return {\n 'OPERATION-NAME': config['find_operation_name'],\n 'SERVICE-VERSION': config['service_version'],\n 'SECURITY-APPNAME': config['app_id'],\n 'GLOBAL-ID': config['global_id'],\n 'RESPONSE-DATA-FORMAT': config['response_format'],\n 'categoryId': config['category_id'],\n 'keywords': upc\n }",
"def from_dict(cls, _dict: Dict) -> 'VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByCRN':\n args = {}\n if 'crn' in _dict:\n args['crn'] = _dict.get('crn')\n else:\n raise ValueError('Required property \\'crn\\' not present in VolumeAttachmentVolumePrototypeInstanceContextVolumeIdentityVolumeIdentityByCRN JSON')\n return cls(**args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper to call C functions, checking if they exist and their return status.
|
def _safe_wrapper(name: str, *args, check_return: bool=True, **kwargs) -> any:
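    # sys_pipes() captures C-level stdout/stderr and forwards it to the Python
    # streams, so any messages printed by the shared library remain visible
    # (for example inside a Jupyter notebook).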
with sys_pipes():
if not hasattr(_lib, name):
raise RuntimeError(f"Function {name} not in library")
ret = getattr(_lib, name)(*args, **kwargs)
if check_return and ret:
raise RuntimeError(f"Function {name} call failed")
return ret
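
# Hedged illustration of calling into the C library through the wrapper;
# XSdFecCfgInitialize is the same symbol used by the driver's __init__, and
# the argument names here are placeholders only.
# _safe_wrapper('XSdFecCfgInitialize', instance_ptr, config_ptr)
# A missing symbol raises "Function ... not in library"; a non-zero return
# value raises "Function ... call failed" unless check_return=False is passed.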
|
[
"def check_func (self, func,\r\n headers=None, include_dirs=None,\r\n libraries=None, library_dirs=None,\r\n decl=0, call=0):\r\n\r\n self._check_compiler()\r\n body = []\r\n if decl:\r\n body.append(\"int %s ();\" % func)\r\n body.append(\"int main () {\")\r\n if call:\r\n body.append(\" %s();\" % func)\r\n else:\r\n body.append(\" %s;\" % func)\r\n body.append(\"}\")\r\n body = string.join(body, \"\\n\") + \"\\n\"\r\n\r\n return self.try_link(body, headers, include_dirs,\r\n libraries, library_dirs)",
"def isAvailable(cls, raise_exception=False):\n \n libc_name = \"libc.so.6\"\n func_name = \"\"\n try:\n if sys.platform.startswith('linux'):\n cls.libc = CDLL(libc_name)\n func_name = \"prctl\"\n cls.prctl = cls.libc.prctl\n func_name = \"syscall\"\n cls.syscall = cls.libc.syscall\n return True\n else:\n return False\n except OSError as e:\n if raise_exception:\n raise LibraryNotFoundError(libc_name, *e.args)\n return False\n except AttributeError as e:\n if raise_exception:\n raise FuncNotFoundError(libc_name, func_name, *e.args)\n return False",
"def check_for_func(lib_names, func_name):\n\n for lib_name in lib_names:\n\n lib_path = find_library(lib_name)\n if not lib_path:\n continue\n\n # Open the lib. Look in the path returned by find_library, but also all\n # the paths returned by pkg-config (since we don't get an absolute path\n # on linux).\n lib_paths = [lib_path]\n lib_paths.extend(\n os.path.join(root, os.path.basename(lib_path))\n for root in set(extension_extra.get('library_dirs', []))\n )\n for lib_path in lib_paths:\n if not os.path.exists(lib_path):\n continue\n lib = ctypes.CDLL(lib_path)\n break\n else:\n print('Could not find', lib_name, 'with ctypes; looked in:')\n print('\\n'.join('\\t' + path for path in lib_paths))\n continue\n\n return hasattr(lib, func_name)\n\n else:\n print('Could not find %r with ctypes.util.find_library' % (lib_names, ))\n print('Some libraries can not be found for inspection; aborting!')\n exit(2)",
"def _check_call(ret):\n if ret != 0:\n raise TreeliteError(_LIB.TreeliteGetLastError().decode(\"utf-8\"))",
"def CHK(return_code, funcname, *args):\n if return_code==0: # call was succesful\n pass\n else:\n buf_size = default_buf_size\n while buf_size < 1000000:\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n try:\n r = libnidaqmx.DAQmxGetExtendedErrorInfo(ctypes.byref(buf), buf_size)\n if r != 0:\n r = libnidaqmx.DAQmxGetErrorString(return_code, ctypes.byref(buf), buf_size)\n except RuntimeError, msg:\n if 'Buffer is too small to fit the string' in str(msg):\n buf_size *= 2\n else:\n raise NIDAQmxRuntimeError(msg)\n else:\n break\n if r:\n if return_code < 0:\n raise NIDAQmxRuntimeError('%s%s failed with error %s=%d: %s'%\\\n (funcname, args, error_map[return_code], return_code, repr(buf.value)))\n else:\n warning = error_map.get(return_code, return_code)\n sys.stderr.write('%s%s warning: %s\\n' % (funcname, args, warning)) \n else:\n text = '\\n '.join(['']+textwrap.wrap(buf.value, 80)+['-'*10])\n if return_code < 0:\n raise NIDAQmxRuntimeError('%s%s:%s' % (funcname,args, text))\n else:\n sys.stderr.write('%s%s warning:%s\\n' % (funcname, args, text))\n return return_code",
"def _call(self, function, *args):\n status = ctypes.c_short(0)\n args = args + (byref(status), )\n function(*args)\n \n if status.value != 0:\n err_str = create_string_buffer(b'', size=256)\n GxFpga.GxFpgaGetErrorString(status, err_str, 256, byref(status))\n raise FpgaError(err_str.value)",
"def safe_call(func, *args, **kwargs):\n\n try:\n return func(*args, **kwargs)\n except APIException as e:\n if e.error_code == 404:\n return None # Not found. Exiting with None\n raise # Other errors are not expected here",
"def _do_call(call):\n try:\n return _CALL_CACHE[call]\n except KeyError:\n if callable(call[0]):\n result = call[0](*call[1:])\n else:\n result = _run(call)\n _CALL_CACHE[call] = result\n return result",
"def _handle_c_result(c_result):\n if c_result == ffi.NULL:\n raise ValueError(\"CResult should not be NULL\")\n error = None\n success = None\n if c_result.error_message != ffi.NULL:\n error = _handle_rust_str(c_result.error_message)\n if c_result.success != ffi.NULL:\n success = c_result.success\n lib.free_c_result(c_result)\n _maybe_raise_error_str(error)\n return success",
"def test_library_mMR_c():\n number = 101 # just a number\n descriptor = [ {'name':'input', 'type':'int', 'value':number},\n {'name':'output', 'type':'int', 'value':None }, ]\n r = call_c_function( mMR_c.echo, descriptor ) \n return r.output == number",
"def test_that_when_checking_if_a_function_exists_and_a_function_does_not_exist_the_function_exists_method_returns_false(\n self,\n ):\n self.conn.list_functions.return_value = {\"Functions\": [function_ret]}\n func_exists_result = boto_lambda.function_exists(\n FunctionName=\"myfunc\", **conn_parameters\n )\n\n self.assertFalse(func_exists_result[\"exists\"])",
"def status_success():\n r = call_c_function( mMR_c.status_success, [{'name':'return_value', 'type':'int', 'value':None}] ) \n return r.return_value",
"def test_searches_for_c_stdlib_and_raises_if_missing(self):\n\n import ctypes.util\n\n # Patch manually since unittest.mock.patch is not available in old Python versions\n old_find_library = ctypes.util.find_library\n\n searched_libraries = set()\n\n try:\n ctypes.util.find_library = lambda library: searched_libraries.add(library)\n\n with self.assertRaises(ImportError):\n reload(fakenewsredis)\n\n self.assertEqual(set(['c', 'msvcrt']), searched_libraries)\n finally:\n ctypes.util.find_library = old_find_library\n\n reload(fakenewsredis)",
"def IsPinvokeImpl(self) -> bool:",
"def check_call(args):\n proc = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd='/tmp')\n stdout, stderr = proc.communicate()\n \n # Print the standard output always.\n print(stdout)\n\n # If an error occurred, then print that information as well.\n if proc.returncode != 0:\n print(stderr)\n raise subprocess.CalledProcessError(\n returncode=proc.returncode,\n cmd=args)",
"def test_ctypes(c):\r\n print(\"Testing ctypes Module\")\r\n invoke.run(\"python3 ctypes_test.py\", pty=True)",
"def _is_empty_function(func, unwrap=False):\n if isinstance(func, (staticmethod, classmethod, types.MethodType)):\n func = six.get_method_function(func)\n if isinstance(func, property):\n func = property.fget\n if unwrap:\n func = _unwrap_function(func)\n try:\n code_obj = six.get_function_code(func)\n except AttributeError:\n # This callable is something else - assume it is OK.\n return True\n\n # quick check\n if code_obj.co_code == b'd\\x00\\x00S' and code_obj.co_consts[0] is None:\n return True\n if code_obj.co_code == b'd\\x01\\x00S' and code_obj.co_consts[1] is None:\n return True\n # convert bytes to instructions\n instructions = _get_instructions(code_obj)\n if len(instructions) < 2:\n return True # this never happens as there is always the implicit return None which is 2 instructions\n assert instructions[-1].opname == 'RETURN_VALUE' # returns TOS (top of stack)\n instruction = instructions[-2]\n if not (instruction.opname == 'LOAD_CONST' and code_obj.co_consts[instruction.arg] is None): # TOS is None\n return False # return is not None\n instructions = instructions[:-2]\n if len(instructions) == 0:\n return True\n # look for raise NotImplementedError\n if instructions[-1].opname == 'RAISE_VARARGS':\n # the thing we are raising should be the result of __call__ (instantiating exception object)\n if instructions[-2].opname == 'CALL_FUNCTION':\n for instr in instructions[:-2]:\n if instr.opname == 'LOAD_GLOBAL' and code_obj.co_names[instr.arg] == 'NotImplementedError':\n return True\n\n return False",
"def try_exec(fn: Callable[..., B], *args: B, **kwargs: V) -> Optional[B]:\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n print(f\" {fn.__name__} failed with exc {e}, returning None\")\n return None",
"def is_call_function(code: CodeType, bytei: ByteCodeIndex) -> bool:\n return any(\n (\n ins.offset == bytei\n and ins.opcode in ScaleneFuncUtils.__call_opcodes\n )\n for ins in dis.get_instructions(code)\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make an SD FEC instance as described by a HWH file snippet
|
def __init__(self, description : dict):
super().__init__(description)
if 'parameters' in description:
populate_params(self, description['parameters'])
else:
warnings.warn("Please use an hwh file with the SD-FEC driver"
" - the default configuration is being used")
self._config = _lib.XSdFecLookupConfig(0)
# TODO consider how we should set default LDPC and Turbo code params
self._instance = _ffi.new("XSdFec*")
self._config.BaseAddress = self.mmio.array.ctypes.data
_lib.XSdFecCfgInitialize(self._instance, self._config)
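
# Hedged usage sketch: on a board whose overlay contains an SD-FEC block the
# driver is normally obtained through pynq.Overlay rather than constructed by
# hand. The bitstream name and IP instance name below are illustrative only.
# from pynq import Overlay
# ol = Overlay('sdfec_example.bit')   # the matching .hwh supplies 'parameters'
# fec = ol.sd_fec_0                   # an instance of this driver class
# fec.available_ldpc_params()         # LDPC code names parsed from the hwh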
|
[
"def __init__(self, name, header):\n\n self.header = header.copy()\n#\n# Check if the file already exists. If it does not, check to see\n# if we were provided with a Primary Header. If not we will need\n# to prepend a default PrimaryHDU to the file before writing the\n# given header.\n#\n if not os.path.exists(name):\n if not self.header.has_key('SIMPLE'):\n hdulist = HDUList([PrimaryHDU()])\n hdulist.writeto(name, 'exception')\n else:\n if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:\n#\n# This will not be the first extension in the file so we\n# must change the Primary header provided into an image\n# extension header.\n#\n self.header.update('XTENSION','IMAGE','Image extension',\n after='SIMPLE')\n del self.header['SIMPLE']\n\n if not self.header.has_key('PCOUNT'):\n dim = self.header['NAXIS']\n \n if dim == 0:\n dim = ''\n else:\n dim = str(dim)\n\n self.header.update('PCOUNT', 0, 'number of parameters',\n after='NAXIS'+dim)\n\n if not self.header.has_key('GCOUNT'):\n self.header.update('GCOUNT', 1, 'number of groups',\n after='PCOUNT')\n\n self._ffo = _File(name, 'append')\n self._ffo.getfile().seek(0,2)\n\n self._hdrLoc = self._ffo.writeHDUheader(self)\n self._datLoc = self._ffo.getfile().tell()\n self._size = self.size()\n\n if self._size != 0:\n self.writeComplete = 0\n else:\n self.writeComplete = 1",
"def create_instance(c_instance):\n return OhmModesHH(c_instance)",
"def generate_modelSED_specphoto_fit(sp=None,imf_type=1,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None, params_val=None,\n\tDL_Gpc=0.0,cosmo='flat_LCDM',H0=70.0,Om0=0.3,interp_filters_waves=[],interp_filters_trans=[]):\n\t\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\tnparams_fsps = len(params_fsps)\n\tfor pp in range(0,nparams_fsps):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\tsp.params['imf_type'] = imf_type\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass \n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type==0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type==1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\t# get central wavelength of all filters:\n\tphoto_cwave = cwave_filters(filters)\n\n\tspec_SED = {}\n\tspec_SED['spec_wave'] = redsh_wave\n\tspec_SED['spec_flux'] = redsh_spec\n\n\tphoto_SED = {}\n\tphoto_SED['photo_wave'] = photo_cwave\n\tphoto_SED['photo_flux'] = photo_SED_flux\n\n\treturn spec_SED,photo_SED",
"def generate_modelSED_spec_fit(sp=None,imf_type=1,sfh_form=4,add_igm_absorption=0,igm_type=0,\n\tparams_fsps=None, params_val=None,DL_Gpc=0.0,cosmo='flat_LCDM',H0=70.0,Om0=0.3):\n\t\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\tnparams_fsps = len(params_fsps)\n\tfor pp in range(0,nparams_fsps):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\tsp.params['imf_type'] = imf_type\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tage=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type==0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type==1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\n\tspec_SED = {}\n\tspec_SED['wave'] = redsh_wave\n\tspec_SED['flux'] = redsh_spec\n\n\treturn spec_SED",
"def __init__(self, cards=[]):\n\n # decide which kind of header it belongs to\n try:\n if cards[0].key == 'SIMPLE':\n if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True:\n self._hdutype = GroupsHDU\n elif cards[0].value == True:\n self._hdutype = PrimaryHDU\n else:\n self._hdutype = _ValidHDU\n elif cards[0].key == 'XTENSION':\n xtension = cards[0].value.rstrip()\n if xtension == 'TABLE':\n self._hdutype = TableHDU\n elif xtension == 'IMAGE':\n self._hdutype = ImageHDU\n elif xtension in ('BINTABLE', 'A3DTABLE'):\n self._hdutype = BinTableHDU\n else:\n self._hdutype = _ExtensionHDU\n else:\n self._hdutype = _ValidHDU\n except:\n self._hdutype = _CorruptedHDU\n\n # populate the cardlist\n self.ascard = CardList(cards)",
"def decode_ccsds_header(self,binary):\n\n\t # pull out CCSDS Version (Bits 0-2)\n version = int(binary[0:3],2)\n\n # pull out CCSDS Type (Bit 3) \n # [0 if TLM, 1 if CMD]\n CCSDStype = int(binary[3],2)\n\n # pull out CCSDS Secondary Packet Header Flag (SHF) (Bit 4) \n # [0 if FALSE, 1 if TRUE]\n sphf = int(binary[4],2)\n\n # pull out CCSDS APID (Bits 5-15) \n apid = int(binary[5:16],2)\n\n # pull out CCSDS Sequence/Grouping Flags (Bits 16-17) \n # [01 1st pkt; 00 cont pkt; 10 last pkt; 11 no group]\n seqflag = int(binary[16:18],2)\n\n # pull out CCSDS Sequence Count (Bits 18-31) \n seqcount = int(binary[18:32],2)\n\n # pull out CCSDS Packet Data Length (Bits 32-47)\n # number ot octets of packet data field minus 1\n datalen = int(binary[32:48],2)\n\n # pull out CCSDS Packet Data Field Data (Bits 48 - end)\n data = binary[49:]\n\n # if DEBUG print out CCSDS data to screen\n if self.debug:\n print(\"CCSDS Version: \",binary[0:3],\"(\",str(version),\")\")\n print(\"CCSDS Type: \",binary[3],\"(\",str(CCSDStype),\")\")\n print(\"CCSDS SPHF: \",binary[4],\"(\",str(sphf),\")\")\n print(\"CCSDS APID: \",binary[5:16],\"(\",str(apid),\")\")\n print(\"CCSDS Sequence Flag: \",binary[16:18],\"(\",str(seqflag),\")\")\n print(\"CCSDS Sequence Count: \",binary[18:32],\"(\",str(seqcount),\")\")\n print(\"CCSDS Data Length: \",binary[32:48],\"(\",str(datalen),\")\")\n print(\"\")\n return data",
"def create_file_sch(flowgate_lines: dict, flowgate_name: str) -> None:\n\n # Create new empty .sch file based on template\n rastr.Save('sech.sch', 'rastr_file_patterns/сечения.sch')\n # Open the created file\n rastr.Load(1, 'sech.sch', 'rastr_file_patterns/сечения.sch')\n\n # Redefining objects RastrWin3\n flow_gate = rastr.Tables('sechen')\n group_line = rastr.Tables('grline')\n\n # Just in case clear rows in .sch\n flow_gate.DelRows()\n group_line.DelRows()\n\n # Create flowgate\n flow_gate.AddRow()\n flow_gate.Cols('ns').SetZ(0, 1)\n # Give a name for the flowgate\n flow_gate.Cols('name').SetZ(0, flowgate_name)\n flow_gate.Cols('sta').SetZ(0, 1)\n\n # Fill a list of transmission lines forms the flowgate\n for i, line in enumerate(flowgate_lines):\n group_line.AddRow()\n group_line.Cols('ns').SetZ(i, 1)\n\n # Start of the transmission line\n start_node = flowgate_lines[line]['ip']\n # End of the transmission line\n end_node = flowgate_lines[line]['iq']\n\n group_line.Cols('ip').SetZ(i, start_node)\n group_line.Cols('iq').SetZ(i, end_node)\n\n # Resave .sch file\n rastr.Save('sech.sch', 'rastr_file_patterns/сечения.sch')",
"def createMachHeader(factory: generic.continues.GenericFactory, provider: ghidra.app.util.bin.ByteProvider, machHeaderStartIndexInProvider: long) -> ghidra.app.util.bin.format.macho.MachHeader:\n ...",
"def setup_hds(self):\n if self.hds_kperk is None or len(self.hds_kperk) == 0:\n return\n from .gw_utils import setup_hds_obs\n # if len(self.hds_kperk) == 2:\n # try:\n # if len(self.hds_kperk[0] == 2):\n # pass\n # except:\n # self.hds_kperk = [self.hds_kperk]\n oc = self.m.get_package(\"OC\")\n if oc is None:\n raise Exception(\"can't find OC package in model to setup hds grid obs\")\n if not oc.savehead:\n raise Exception(\"OC not saving hds, can't setup grid obs\")\n hds_unit = oc.iuhead\n hds_file = self.m.get_output(unit=hds_unit)\n assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\\\n \"couldn't find existing hds file {0} in org_model_ws\".format(hds_file)\n shutil.copy2(os.path.join(self.org_model_ws,hds_file),\n os.path.join(self.m.model_ws,hds_file))\n inact = None\n if self.m.lpf is not None:\n inact = self.m.lpf.hdry\n elif self.m.upw is not None:\n inact = self.m.upw.hdry\n if inact is None:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x\n else:\n skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x\n print(self.hds_kperk)\n setup_hds_obs(os.path.join(self.m.model_ws,hds_file),\n kperk_pairs=self.hds_kperk,skip=skip)\n self.frun_post_lines.append(\"pyemu.gw_utils.apply_hds_obs('{0}')\".format(hds_file))\n self.tmp_files.append(hds_file)",
"def __init__(self, obscode, nsd_data=None, syd_data=None):\n\n if nsd_data is not None:\n foo = struct.unpack(self.NSD_PACK_TEMPLATE, nsd_data)\n field_id = foo[0]\n det_id = None\n det_num = foo[1]\n object_name = 'NS'\n else:\n foo = struct.unpack(self.SYD_PACK_TEMPLATE, syd_data)\n field_id = foo[0]\n det_id = foo[1]\n det_num = None\n object_name = foo[10].strip(' \\x00')[0:9] # strip leading/trailing whitespace, NULLs, first 9 chars\n\n MOPS.Detection.Detection.__init__(self, \n det_id, # _id (detId)\n foo[3], # ra\n foo[4], # dec\n foo[2], # epoch_mjd\n foo[5], # mag\n foo[5], # refMag\n foo[6], # s2n\n foo[7], # raErr\n foo[8], # decErr\n foo[9], # magSigma\n 0.0, # orient\n 0.0, # length\n 'L', # LSD\n field_id, # fieldId\n 'r', # filt, ugh, XXX need to fix this!\n obscode, # MPC obscode\n object_name, # objectName\n det_num # detNum\n )",
"def stego_make_hcm(wav_files_path, mp3_files_path, bitrate, cost=\"2\",\n embed=embedding_file_path, frame_num=\"50\", embedding_rate=\"10\", start_idx=None, end_idx=5000):\n if not os.path.exists(wav_files_path):\n print(\"The wav files path does not exist.\")\n else:\n wav_files_list = get_files_list(file_dir=wav_files_path, file_type=\"wav\", start_idx=start_idx, end_idx=end_idx)\n if not os.path.exists(mp3_files_path):\n os.mkdir(mp3_files_path)\n for wav_file_path in wav_files_list:\n file_name = get_file_name(wav_file_path)\n mp3_file_name = file_name.replace(\".wav\", \".mp3\")\n mp3_file_path = fullfile(mp3_files_path, mp3_file_name)\n if not os.path.exists(mp3_file_path):\n temp_secret_file_path = message_random(embed)\n command = \"encode_HCM.exe -b \" + bitrate + \" -embed \" + temp_secret_file_path + \" -cost \" + cost + \" -er \" + embedding_rate \\\n + \" -framenumber \" + frame_num + \" \" + wav_file_path + \" \" + mp3_file_path\n os.system(command)\n else:\n pass",
"def temp_emsoft_h5ebsd_file(tmpdir, request):\n f = File(tmpdir.join(\"emsoft_h5ebsd_file.h5\"), mode=\"w\")\n\n # Unpack parameters\n map_shape, (dy, dx), example_rotations, n_top_matches, refined = request.param\n ny, nx = map_shape\n map_size = ny * nx\n\n # Create groups used in reader\n ebsd_group = f.create_group(\"Scan 1/EBSD\")\n data_group = ebsd_group.create_group(\"Data\")\n header_group = ebsd_group.create_group(\"Header\")\n phase_group = header_group.create_group(\"Phase/1\") # Always single phase\n\n # Create `header_group` datasets used in reader\n for name, data, dtype in zip(\n [\"nRows\", \"nColumns\", \"Step Y\", \"Step X\"],\n [ny, nx, dy, dx],\n [np.int32, np.int32, np.float32, np.float32],\n ):\n header_group.create_dataset(name, data=np.array([data], dtype=dtype))\n\n # Create `data_group` datasets, mostly quality metrics\n data_group.create_dataset(\"X Position\", data=np.tile(np.arange(nx) * dx, ny))\n # Note that \"Y Position\" is wrongly written to their h5ebsd file by EMsoft\n data_group.create_dataset(\n \"Y Position\",\n data=np.tile(np.arange(nx) * dx, ny), # Wrong\n # data=np.sort(np.tile(np.arange(ny) * dy, nx)), # Correct\n )\n for name, shape, dtype in [\n (\"AvDotProductMap\", map_shape, np.int32),\n (\"CI\", map_size, np.float32),\n (\"CIMap\", map_shape, np.int32),\n (\"IQ\", map_size, np.float32),\n (\"IQMap\", map_shape, np.int32),\n (\"ISM\", map_size, np.float32),\n (\"ISMap\", map_shape, np.int32),\n (\"KAM\", map_shape, np.float32),\n (\"OSM\", map_shape, np.float32),\n (\"Phase\", map_size, np.uint8),\n ]:\n data_group.create_dataset(name, data=np.zeros(shape, dtype=dtype))\n\n # `data_group` with rotations\n # Sample as many rotations from `rotations` as `map_size`\n rot_idx = np.random.choice(np.arange(len(example_rotations)), map_size)\n rot = example_rotations[rot_idx]\n n_sampled_oris = 333227 # Cubic space group with Ncubochoric = 100\n data_group.create_dataset(\"FZcnt\", data=np.array([n_sampled_oris], dtype=np.int32))\n data_group.create_dataset(\n \"TopMatchIndices\",\n data=np.vstack(\n (np.random.choice(np.arange(n_sampled_oris), n_top_matches),) * map_size\n ),\n dtype=np.int32,\n )\n data_group.create_dataset(\n \"TopDotProductList\",\n data=np.vstack((np.random.random(size=n_top_matches),) * map_size),\n dtype=np.float32,\n )\n data_group.create_dataset(\n \"DictionaryEulerAngles\",\n data=np.column_stack(\n (np.random.uniform(low=0, high=2 * np.pi, size=n_sampled_oris),) * 3\n ),\n dtype=np.float32,\n )\n\n if refined:\n data_group.create_dataset(\"RefinedEulerAngles\", data=rot.astype(np.float32))\n data_group.create_dataset(\n \"RefinedDotProducts\", data=np.zeros(map_size, dtype=np.float32)\n )\n\n # Number of top matches kept\n f.create_dataset(\n \"NMLparameters/EBSDIndexingNameListType/nnk\",\n data=np.array([n_top_matches], dtype=np.int32),\n )\n\n # `phase_group`\n for name, data in [\n (\"Point Group\", \"Cubic (Oh) [m3m]\"),\n (\"MaterialName\", \"austenite/austenite\"),\n (\"Lattice Constant a\", \"3.595\"),\n (\"Lattice Constant b\", \"3.595\"),\n (\"Lattice Constant c\", \"3.595\"),\n (\"Lattice Constant alpha\", \"90.000\"),\n (\"Lattice Constant beta\", \"90.000\"),\n (\"Lattice Constant gamma\", \"90.000\"),\n ]:\n phase_group.create_dataset(name, data=np.array([data], dtype=np.dtype(\"S\")))\n\n yield f\n gc.collect()",
"def pd_create(cd):\n\n # check that 'c' or 'd' is passed\n #assert cd == (\n # 'c' or 'd'), 'This must be charge (c) or discharge (d) data'\n\n # number of descriptors it generates\n n_desc = 19\n\n # determines prefix string based on need for a charge or\n # discharge dataframe\n if cd == 'c':\n prefix = 'ch_'\n else:\n prefix = 'dc_'\n\n # generates list of names for the top of the descriptors dataframe\n names = []\n for ch in np.arange(n_desc):\n names.append(prefix + str(int(ch)))\n\n # adds names of error parameters to the end of the descriptor list\n names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']\n\n # creates pandas dataframe with necessary heading\n # print(names)\n desc = pd.DataFrame(columns=names)\n\n return desc",
"def ldsc_h2_part(args, **kwargs):\n \n # handle args\n phname = str(args[0])\n phdesc = str(args[1])\n phsource = str(args[2]).replace(\"'\",\"\") ######### EDITED #########\n n = float(args[3])\n ncas = float(args[4])\n ncon = float(args[5])\n \n # define names\n ss_name = str(phname)+'.tsv.bgz' \n sspath_local = wd+'/'+ss_name\n sspath_cloud = ss_bucket+'/'+ss_name\n \n h2_out = 'h2part.ukbb.'+str(phsource)+'_'+str(phname)\n \n # download sumstats file\n subprocess.call(['gsutil','cp',sspath_cloud,sspath_local])\n \n # run ldsc\n args_h2 = Namespace(out=h2_out, \n bfile=None,\n l2=None,\n extract=None,\n keep=None,\n ld_wind_snps=None,\n ld_wind_kb=None,\n ld_wind_cm=None,\n print_snps=None,\n annot=None,\n thin_annot=False,\n cts_bin=None,\n cts_break=None,\n cts_names=None,\n per_allele=False,\n pq_exp=None,\n no_print_annot=False,\n maf=0.05,\n h2=sspath_local,\n rg=None,\n ref_ld=None,\n ref_ld_chr=ld_ref_panel,\n w_ld=None,\n w_ld_chr=ld_w_panel,\n overlap_annot=True,\n no_intercept=False, ######## CHECK (default: False) ########\n intercept_h2=None,\n intercept_gencov=None,\n M=None,\n two_step=None,\n chisq_max=99999,\n print_cov=False,\n print_delete_vals=False,\n chunk_size=50,\n pickle=False,\n invert_anyway=False,\n yes_really=False,\n n_blocks=200,\n not_M_5_50=False,\n return_silly_things=False,\n no_check_alleles=False,\n print_coefficients=True,\n samp_prev=None,\n pop_prev=None,\n frqfile=None,\n h2_cts=None,\n frqfile_chr=ld_frq_panel,\n print_all_cts=False,\n sumstats_frames=None,\n rg_mat=False)\n \n print \"Launching ldsc for \"+str(phname)\n h2_results = ldsc.sumstats.estimate_h2(args_h2, Logger_to_Logging())\n print \"Completed ldsc for \"+str(phname)\n \n # cleanup sumstats file\n subprocess.call(['rm',sspath_local])\n \n return process_h2_part(h2_results, h2_out+'.results', phname, phdesc, phsource, float(n), float(ncas), float(ncon))",
"def build_header_SN(self):\n\n print(\"Build hdr (SN)\")\n\n nrho = len(self.eqdsk.rhopsi)\n dummy=np.linspace(0,1,nrho)\n \n self.hdr={'nSHOT':0,'tSHOT':0,'modflg':0,'FPPkat':0,'IpiFPP':self.eqdsk.Ip,\\\n 'PFxx':np.array([]),'RPFx':np.array([]),'zPFx':np.array([]),'SSQ':np.array([]), 'devnam':self.devnam,\\\n 'rhoPF':nrho,'PFL':dummy,'Vol':dummy,'Area':dummy,'Qpl':dummy} \n\n #Find x-point\n f = plt.figure()\n ax2d = f.add_subplot(111)\n r,z = self.R_eqd, self.Z_eqd\n ax2d.contour(r,z, self.eqdsk.psi, 50)\n ax2d.set_title('choose x point position')\n ax2d.axis('equal')\n x0 = plt.ginput()\n plt.close(f)\n self.xpoint = self._min_grad(x0=x0)\n self.xflux = self.psi_coeff(self.xpoint[0], self.xpoint[1])*(2*np.pi)\n # find axis\n self.ax = self._min_grad(x0=[self.eqdsk.Raxis, self.eqdsk.Zaxis])\n self.axflux = self.psi_coeff(self.ax[0], self.ax[1])*(2*np.pi)\n print(\"remember: I am multiplying psi axis and x-point times 2pi since in ascot it divides by it!\")\n\n # poloidal flux of the special points. First axis, then edge (i.e. X point)\n self.hdr['PFxx'] = np.array([self.axflux[0], self.xflux[0]])\n print(self.hdr['PFxx'])\n self.hdr['RPFx'] = np.array([self.ax[0], self.xpoint[0]])\n self.hdr['zPFx'] = np.array([self.ax[1], self.xpoint[1]])\n self.hdr['SSQ'] = np.array([self.eqdsk.R0EXP, self.eqdsk.Zaxis, 0, 0])",
"def create_sd_file(name, smiles, save_directory):\n # create sdf file for ligand and save to hit directory\n canon_smiles = Chem.CanonSmiles(smiles)\n mol = Chem.MolFromSmiles(canon_smiles)\n AllChem.Compute2DCoords(mol)\n print(('Generating sdf file and saving to ' + name + ' directory...\\n'))\n sd_file = Chem.SDWriter(save_directory)\n sd_file.write(mol)",
"def prepare_file( self, file ):\n\n try:\n hdul = fits.open( file )\n except:\n return -1\n\n name = hdul[0].header[ 'SRC_NAME' ]\n fe = hdul[0].header[ 'FRONTEND' ]\n mjd = hdul[0].header[ 'STT_IMJD' ]\n if hdul[0].header[ 'OBS_MODE' ] != \"PSR\" or name != self.psr_name:\n hdul.close()\n return -1\n hdul.close()\n\n tmp_fn = \"{0}_{1}_nchan{2}_template.npy\".format( self.psr_name, fe, self.subbands )\n try:\n template = self.load_template( self.temp_dir, tmp_fn )\n except TemplateLoadError:\n print( \"Template not found\" )\n reply = str( input( \"Would you like to make a suitable one? (y / n)\" ) ).lower().strip()\n if reply[0] == 'y':\n temp = FD_Template( self.psr_name, fe, self.subbands, template_dir = \"templates\", verbose = self.verbose, *self.dirs )\n template = temp.make_template()\n else:\n raise TemplateLoadError( \"You can make a suitable template via the following command: python template_builder.py psr_name -b [frontend] -d [dirs]\" )\n\n ar = Archive( file, verbose = self.verbose )\n ar.tscrunch( nsubint = self.epochs )\n ar.fscrunch( nchan = self.subbands )\n\n return ar, template, fe, mjd",
"def createMachHeader(factory: generic.continues.GenericFactory, provider: ghidra.app.util.bin.ByteProvider, machHeaderStartIndexInProvider: long, isRemainingMachoRelativeToStartIndex: bool) -> ghidra.app.util.bin.format.macho.MachHeader:\n ...",
"def makeImageHDU(fd, table_hdr, data_array, name=\"SCI\"):\n\n # Create an image header from the table header.\n imhdr = cosutil.tableHeaderToImage(table_hdr)\n if name == \"DQ\":\n imhdr[\"BUNIT\"] = \"UNITLESS\"\n else:\n imhdr[\"BUNIT\"] = \"count /s\"\n\n if data_array is not None:\n if \"npix1\" in imhdr:\n del(imhdr[\"npix1\"])\n if \"npix2\" in imhdr:\n del(imhdr[\"npix2\"])\n if \"pixvalue\" in imhdr:\n del(imhdr[\"pixvalue\"])\n\n hdu = fits.ImageHDU(data=data_array, header=imhdr, name=name)\n fd.append(hdu)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List the available LDPC code names
|
def available_ldpc_params(self) -> list:
return list(self._code_params.ldpc.keys())
|
[
"def list(cls):\n\n codes = []\n\n for key in cls.options.keys():\n\n opt = {\n 'key': key,\n 'value': cls.options[key]\n }\n\n label = cls.labels.get(key)\n\n if label:\n opt['label'] = label\n\n codes.append(opt)\n\n return codes",
"def return_country_name_list(code_list):\n name_list = []\n for alpha_code in code_list:\n try:\n name_list.append(pycountry.countries.get(alpha_3=alpha_code).name)\n except:\n name_list.append(None)\n return name_list",
"def list_() -> None:\n available_sources = [\n \"Wikipedia (wiki) [with different locales]\",\n \"Accadde Oggi (accadde)\",\n ]\n print(\"\\nAvailable sources:\\n\")\n\n for source in available_sources:\n print(f\" • {source}\")",
"def getDevicesCodenames():\n return config.sections()",
"def do_list(self, args):\n code = args if args else None\n results = self.library.filter_search(code=code)\n\n if not len(results):\n print(\"No cards could be found\")\n return None\n\n if len(results) > self.list_max:\n results = results[:self.list_max]\n\n for codename in results:\n print(\"{Cval}{0}{Csym}: {Cval}{1}\".format(*codename,\n **self.colormap))",
"def getLanguageCodes(self): #$NON-NLS-1$\r",
"def list_countries() -> List[str]:\n nord_output = subprocess.Popen([\"nordvpn\", \"countries\"],\n stdout=subprocess.PIPE)\n countries = re.split(\"[\\t \\n]\",\n nord_output.communicate()[0].decode(\"utf-8\"))\n\n while \"\" in countries:\n countries.remove(\"\")\n countries.remove(\"\\r-\\r\")\n for i, c in enumerate(countries):\n if c.startswith('\\r'):\n countries[i] = c[1:]\n\n return countries",
"def list_symbols(self) -> str:\n pass",
"def get_programs() :\n\n [prog_names, descriptions, cmd_line_prefixes] = db.get_programs()\n\n return [prog_names, descriptions, cmd_line_prefixes]",
"def collect_language_codes(user_code):\n codes = [user_code]\n if '-' in user_code:\n codes.append(user_code.split('-')[0])\n codes.append(settings.LANGUAGE_CODE)\n if '-' in settings.LANGUAGE_CODE:\n codes.append(settings.LANGUAGE_CODE.split('-')[0])\n codes.append('_default')\n return codes",
"def __query_all_pd_names(self):\n cerebro_client = CerebroInterfaceTool()\n pd_names = set()\n arg = ListProtectionDomainsArg()\n ret = cerebro_client.list_protection_domains(arg)\n for pd_name in ret.protection_domain_name:\n pd_names.add(PD(pd_name, False))\n\n # Get the name of the System PD\n system_pd_arg = ListProtectionDomainsArg()\n system_pd_arg.list_system_pd = True\n ret = cerebro_client.list_protection_domains(system_pd_arg)\n for pd_name in ret.protection_domain_name:\n pd_names.add(PD(pd_name, True))\n\n return pd_names",
"def load_country_codes():\n\n url = '/'.join([NAGER_API_BASE, 'AvailableCountries'])\n try:\n response = requests.get(url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n country_codes = response.json()\n country_codes_only = [pair[\"key\"] for pair in country_codes]\n\n return country_codes_only",
"def Pc_methods(CASRN):\n return list_available_methods_from_df_dict(Pc_sources, CASRN, 'Pc')",
"def show_compilers():\r\n # XXX this \"knows\" that the compiler option it's describing is\r\n # \"--compiler\", which just happens to be the case for the three\r\n # commands that use it.\r\n from distutils.fancy_getopt import FancyGetopt\r\n compilers = []\r\n for compiler in compiler_class.keys():\r\n compilers.append((\"compiler=\"+compiler, None,\r\n compiler_class[compiler][2]))\r\n compilers.sort()\r\n pretty_printer = FancyGetopt(compilers)\r\n pretty_printer.print_help(\"List of available compilers:\")",
"def codes():\n\n result = []\n\n for asset_type in ASSET_TYPES:\n result.append(asset_type.code)\n\n return result",
"def List(self):\n col = terminal.Color()\n print col.Color(col.BLUE, 'List of available toolchains (%d):' %\n len(self.toolchains))\n if len(self.toolchains):\n for key, value in sorted(self.toolchains.iteritems()):\n print '%-10s: %s' % (key, value.gcc)\n else:\n print 'None'",
"def available_locales_list(self):\n return list(self.locales.all().values_list(\"code\", flat=True))",
"def get_all_codelists():\n\n codelists = {}\n\n for codelist_file in os.listdir(CODELISTS_DIR):\n codelist_name, _ = os.path.splitext(codelist_file)\n codelist = get_codelist(codelist_name)\n codelists.update({codelist_name: codelist})\n\n return codelists",
"def getList(name):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stub for setting Turbo code parameters
|
def set_turbo_params(self, turbo_params: dict) -> None:
# TODO
pass
|
[
"def test_set_system_param(self):\n pass",
"def do_set(self, args):\n\n split_args = args.split()\n if len(split_args) < 1:\n module_logger.error(\"You must provide at least one argument\".format(args))\n elif len(split_args) == 1:\n if split_args[0] == \"iface\":\n iface = interface.get_first_interface()\n\n if iface is not None:\n self._params.iface = iface\n else:\n module_logger.error(\"There are no wireless interfaces available.\")\n elif split_args[0] == 'macs':\n self._params.macs = []\n else:\n module_logger.error(\"Parameters require a value\".format(split_args[0]))\n elif split_args[0] in meta.Params.VALID_PARAMS:\n try:\n param = split_args[0]\n value = split_args[1]\n # Validate certain parameters\n if split_args[0] == \"iface\":\n self._params.iface = value\n elif param == \"duration\":\n self._params.duration = value\n elif param == \"degrees\":\n self._params.degrees = value\n elif param == \"bearing\":\n self._params.bearing_magnetic = value\n elif param == \"hop_int\":\n self._params.hop_int = value\n elif param == \"hop_dist\":\n self._params.hop_dist = value\n elif param == \"mac\":\n self._params.add_mac(value)\n elif param == \"macs\":\n # Load macs from provided file\n self._params.add_mac(localizer.load_macs(value))\n elif param == \"channel\":\n self._params.channel = value\n elif param == \"capture\":\n self._params.capture = value\n\n print(\"Parameter '{}' set to '{}'\".format(param, value))\n\n except (ValueError, FileNotFoundError) as e:\n module_logger.error(e)\n else:\n module_logger.error(\"Invalid parameter '{}'\".format(split_args[0]))\n\n self._update_prompt()",
"def scriptCtx(setSelectionHeadsUp=\"string\", spring=bool, follicle=bool, plane=bool, surfaceParameterPoint=bool, curve=bool, subdivMeshFace=bool, curveOnSurface=bool, nurbsSurface=bool, toolFinish=\"string\", expandSelectionList=bool, setSelectionCount=int, animOutTangent=bool, animCurve=bool, surfaceKnot=bool, joint=bool, sculpt=bool, collisionModel=bool, particle=bool, editPoint=bool, dimension=bool, ignoreInvalidItems=bool, orientationLocator=bool, curveParameterPoint=bool, lastAutoComplete=bool, history=bool, polymeshEdge=bool, image2=\"string\", latticePoint=bool, finalCommandScript=\"string\", hairSystem=bool, polymeshFreeEdge=bool, toolStart=\"string\", showManipulators=bool, springComponent=bool, imagePlane=bool, locatorUV=bool, nurbsCurve=bool, name=\"string\", scalePivot=bool, fluid=bool, rigidConstraint=bool, surfaceFace=bool, nParticle=bool, isoparm=bool, texture=bool, polymesh=bool, cumulativeLists=bool, handle=bool, setDoneSelectionPrompt=\"string\", particleShape=bool, lattice=bool, baseClassName=\"string\", selectHandle=bool, rigidBody=bool, rotatePivot=bool, curveKnot=bool, light=bool, stroke=bool, subdivMeshEdge=bool, allObjects=bool, nCloth=bool, localRotationAxis=bool, image3=\"string\", exitUponCompletion=bool, polymeshVertex=bool, subdivMeshPoint=bool, setNoSelectionHeadsUp=\"string\", polymeshUV=bool, hull=bool, edge=bool, implicitGeometry=bool, jointPivot=bool, emitter=bool, polymeshFace=bool, objectComponent=bool, setAutoToggleSelection=bool, surfaceUV=bool, animKeyframe=bool, toolCursorType=\"string\", surfaceRange=bool, ikEndEffector=bool, allComponents=bool, subdivMeshUV=bool, vertex=bool, subdiv=bool, nRigid=bool, surfaceEdge=bool, setNoSelectionPrompt=\"string\", setAutoComplete=bool, totalSelectionSets=int, setSelectionPrompt=\"string\", enableRootSelection=bool, locatorXYZ=bool, image1=\"string\", field=bool, forceAddSelect=bool, controlVertex=bool, camera=bool, animBreakdown=bool, facet=bool, exists=bool, cluster=bool, dynamicConstraint=bool, polymeshVtxFace=bool, nonlinear=bool, ikHandle=bool, setAllowExcessCount=bool, animInTangent=bool, locator=bool, title=\"string\", nParticleShape=bool):\n pass",
"def setParam(self, layer, numParam, blob) -> None:\n ...",
"def setParameters():\n ip = '192.168.1.143'\n port = 9559\n myBroker = naoqi.ALBroker(\"myBroker\", \"0.0.0.0\", 0, ip, port)\n connector = RobotConnect(\"Naomi\")\n connector.setPostureProxy()\n connector.setMotionProxy()\n connector.setVideoProxy()\n return connector",
"def set_bits(*args) -> \"void\":\n return _ida_pro.set_bits(*args)",
"def test_set_agent_parameter(self):\n pass",
"def set_parameter(self, param, value, location=3):\n self.reb.set_parameter(param, value, self.stripe, location)\n logging.info(\"Set REB parameter %s to %s at location %d\" % (param, repr(value), location))",
"def set_api(svc=\"blockcypher\", code=\"\"):\n pass",
"def scene_setting_init():\n sce = bpy.context.scene.name\n bpy.data.scenes[sce].render.engine = 'CYCLES'\n bpy.data.scenes[sce].cycles.film_transparent = True\n\n #output\n bpy.data.scenes[sce].render.image_settings.color_mode = 'RGB'\n bpy.data.scenes[sce].render.image_settings.color_depth = '16'\n bpy.data.scenes[sce].render.image_settings.file_format = 'PNG'\n\n #dimensions\n #bpy.data.scenes[sce].render.resolution_x = g_resolution_x\n #bpy.data.scenes[sce].render.resolution_y = g_resolution_y\n #bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage",
"def setparms(hamtrain, spamtrain, hamtest=None, spamtest=None, seed=None):\n global HAMTEST, SPAMTEST, HAMTRAIN, SPAMTRAIN, SEED\n HAMTRAIN, SPAMTRAIN = hamtrain, spamtrain\n if hamtest is None:\n HAMTEST = HAMTRAIN\n else:\n HAMTEST = hamtest\n if spamtest is None:\n SPAMTEST = SPAMTRAIN\n else:\n SPAMTEST = spamtest\n if seed is not None:\n SEED = seed",
"def config(self, param: str, /) -> Any:",
"def init_coupled_parameters(self):\n params=NamedObjects(scenario=self,cast_value=cast_to_parameter)\n # All of the current known options:\n # params['Tau']=1\n # params['TauFlow']=1\n # params['Velocity']=1\n if self.model.mdu.get_bool('physics','Salinity'):\n params['salinity']=1 \n if self.model.mdu.get_bool('physics','Temperature'):\n params['temp']=1 \n params['vwind']=1\n #params['winddir']=1\n #params['rain']=1\n return params",
"def set_turbo_mode(self, enable: params.Toggle, /) -> GoProResp:",
"def setparam(self, param, value):\n\t\treturn self.__command(\"param.set %s %s\" % (param, value))",
"def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value",
"def test_override_ua_parameters(self):\n\n with ThreadedWebServer(\n JavascriptRequestDetector,\n binding_address=self.GetBindingAddress()) as server:\n with self.CreateCobaltRunner(\n url=server.GetURL(file_name='testdata/override_ua_parameters.html'),\n target_params=[\n '--user_agent_client_hints='\\\n 'aux_field=foo.bar.baz.qux/21.2.1.41.0;'\\\n 'brand=Cobalt;'\\\n 'build_configuration=debug;'\\\n 'chipset_model_number=foobar0000;'\\\n 'cobalt_build_version_number=289852;'\\\n 'cobalt_version=21.lts.2;'\\\n 'connection_type=Wireless;'\\\n 'device_type=ATV;'\\\n 'evergreen_type=;'\\\n 'evergreen_version=;'\\\n 'javascript_engine_version=v8/7.7.299.8-jit;'\\\n 'firmware_version=;'\\\n 'model=QUUX;'\\\n 'model_year=2018;'\\\n 'original_design_manufacturer=Quuz;'\\\n 'os_name_and_version=Corge grault-v7a\\\\; Garply 7.1.2\\\\; '\\\n 'Waldo OS 6.0;'\\\n 'starboard_version=Starboard/12;'\\\n 'rasterizer_type=gles'\n ]) as runner:\n runner.WaitForJSTestsSetup()\n self.assertTrue(runner.JSTestsSucceeded())",
"def setup(self, code):\n\t\tServo.classLock.acquire()\n\t\ttry:\n\t\t\tcode.upper()\n\t\t\tif code==\"QQC\":\n\t\t\t\tself.focus=focus.secondary\n\t\t\t\tself.axisNumber=5\n\t\t\t\tself.upperLimit=[100,100,100,100,100]\n\t\t\t\tself.lowerLimit=[-100,-100,-100,-100,-100]\n\t\t\t\tself.offset=[0.0,0.0,0.0,0.0,0.0]\n\t\t\t\tself.polX[0]=0.0\n\t\t\t\tself.polX[1]=0.0\n\t\t\t\tself.polX[2]=0.89\n\t\t\t\tself.polY[0]=8.3689e-4\n\t\t\t\tself.polY[1]=0.152495\n\t\t\t\tself.polY[2]=20.91\n\t\t\t\tself.polZ1[0]=0.00168640 \n\t\t\t\tself.polZ1[1]=-0.271430\n\t\t\t\tself.polZ1[2]=67.55\n\t\t\t\tself.polZ2[0]=0.00168640 \n\t\t\t\tself.polZ2[1]=-0.271430\n\t\t\t\tself.polZ2[2]=84.37\n\t\t\t\tself.polZ3[0]=0.00168640 \n\t\t\t\tself.polZ3[1]=-0.271430\n\t\t\t\tself.polZ3[2]=-57.40\n\t\t\telif code==\"KKC\":\n\t\t\t\tself.focus=focus.secondary\n\t\t\t\tself.axisNumber=5\n\t\t\t\tself.upperLimit=[85,100,85,85,85]\n\t\t\t\tself.lowerLimit=[-85,-85,-85,-85,-85]\n\t\t\t\tself.offset=[0.0,0.0,0.0,0.0,0.0]\n\t\t\t\tself.polX[0]=0.0\n\t\t\t\tself.polX[1]=0.0\n\t\t\t\tself.polX[2]=-1\n\t\t\t\tself.polY[0]=8.3689e-4\n\t\t\t\tself.polY[1]=0.152495\n\t\t\t\tself.polY[2]=-10.4\n\t\t\t\tself.polZ1[0]=0.00128 \n\t\t\t\tself.polZ1[1]=-0.13644\n\t\t\t\tself.polZ1[2]=9.4\n\t\t\t\tself.polZ2[0]=0.00128 \n\t\t\t\tself.polZ2[1]=-0.23394\n\t\t\t\tself.polZ2[2]=10.6\n\t\t\t\tself.polZ3[0]=0.00128 \n\t\t\t\tself.polZ3[1]=-0.23394\n\t\t\t\tself.polZ3[2]=13.4\n\t\t\telse:\n\t\t\t\tnewEx=ManagementErrorsImpl.ConfigurationErrorExImpl()\n\t\t\t\tnewEx.log(self.services.getLogger(),ACSLog.ACS_LOG_DEBUG)\n\t\t\t\traise newEx\n\t\t\tself.currentConf=code\n\t\t\t\"\"\"\n\t\t\t*****************************************\n\t\t\tit should be set when the migration /primary/secondary is completed\n\t\t\t\"\"\"\n\t\t\tself.configured=True\n\t\t\tself.trackingEnabled=True\n\t\tfinally:\n\t\t\tServo.classLock.release()",
"def setpars(self, pars):\n #self.set_branch(pars[0])\n self.set_rc(pars)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the callback function triggered on __setitem__
|
def set_callback(self, callback):
self.callback = callback
|
[
"def setter(self, fn):\n self.cb_set = fn",
"def set_callback(self, data_id, func):\n self.callbacks[data_id] = func",
"def register_change_item_callback(self, callback):\n self.callbacks.append(callback)",
"def set_callback(name, new_callback=None):\n getattr(mujoco, \"set_\" + name)(new_callback)",
"def set_redis_callback(self, function: Callable):\n self._redis_sub_callback = function",
"def add_on_change(self, setting, func):\n self._listeners.setdefault(setting, []).append(func)",
"def __setitem__(self, item, value):\n self._data.__setitem__(item, value)",
"def on_update(self, func):\n self._on_update = func\n return func",
"def _listen_callback(_, key, value, __):\n print(\"{!r} updated: {!r}\".format(key, value))",
"def set_callback(self,cb):\n self.__midiin.set_callback(cb)",
"def set_func(self, func):\n self._func = func",
"def set_callback(self, key_name, callback, silent=False):\n if not silent:\n self.sanity_check_cb(key_name, callback)\n self.keymap[key_name] = callback",
"def __setitem__(self, key, value):\n self.fcmdict[key] = value",
"def addCallback(*args, **kwargs):\n \n pass",
"def register(self, callback):\n self.callbacks.append(callback)",
"def register_derived_callback(self, key, callback):\n if key in self.callbacks:\n self.callbacks[key].append(callback)\n else:\n self.callbacks[key] = [callback]\n\n # Run new callback\n if key in self:\n updates = callback(self[key])\n self.update(updates)",
"def on_change(self, callback, *args, **kwargs):\n self._var.trace(\"w\", lambda *_: callback(*args, **kwargs))",
"def set_mapping_callback(callback):\n assert (dir(callback).count('im_func') is 1)\n InputControlElement._mapping_callback = callback",
"def set_default_callback(self, func):\n self.default_callback = func",
"def setCallback(self, func: 'SbTesselatorCB *', data: 'void *') -> \"void\":\n return _coin.SbTesselator_setCallback(self, func, data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
check for not bigger than L1 size
|
def _l1_buffer_size_check(max_feature_map_l1, fusion_para):
l1_buffer_size = cce_conf.get_soc_spec("L1_SIZE")
l1_fusion_type = fusion_para.get("l1_fusion_type")
if (l1_fusion_type == 1) or (l1_fusion_type == 0):
pass
elif max_feature_map_l1 > l1_buffer_size:
raise RuntimeError(
"Input is too large, the minimum tiling may exceed L1_Buffer")
|
[
"def __gt__(self, other):\n return self.get_size() > int(other)",
"def isFull(self):\n #In this Case the function is Only FOr Understanding because the Linked List \n #Is store the value is Heap Memory Because it is Dyanamic in nature\n #So This Is Only For Understanding Purpose\n pass",
"def __lt__(self, other):\n return self.size < other.size",
"def verify_layer_size(layer_size, l_num):\r\n\r\n if (isinstance(layer_size, int) and layer_size > 0):\r\n return layer_size\r\n else:\r\n raise ValueError(\"'\" + str(layer_size) + \"' (layer \" + str(l_num + 1) + \")\")",
"def is_full(self) -> bool:\n return self.get_size() >= self.size",
"def is_full(self):\n # length of the array is one more than the max size of heap\n return self.counter + 1 == len(self.array)",
"def is_overflow(self):\n return self.count > self.max_size",
"def _is_too_big(self):\n return len(self.signal_list) >= MAX_NUMBER_OF_SIGNALS_IN_GROUP",
"def need_new_stack(self):\n return self.size() >= self.capacity",
"def will_overflow_merge_cap():\n if args.merge_batch_size and len(consecutive_chunks) == args.merge_batch_size:\n return True\n trust_estimations = consecutive_chunks.trust_batch_estimation and 'defrag_collection_est_size' in c\n return (trust_estimations and\n consecutive_chunks.batch_size_estimation + c['defrag_collection_est_size'] >\n (target_chunk_size_kb * 1.20))",
"def __ne__(self, other):\n return self.get_size() != int(other)",
"def minLength(table_split, length):\r\n return len(table_split) >= length",
"def fits_into_dims(block_size):\n for md, bs in zip(max_dims, block_size):\n if md < bs:\n return False\n return True",
"def check_dataset_size(self, minimum_dataset_size):\n pass",
"def is_biallelic(baselist):\n\treturn unique_list_size(baselist) < 3",
"def _check_lengths(self, signal):\n if signal.shape[0] != self.tlet_index.shape[0]:\n missing = signal.shape[0] - self.tlet_index.shape[0]\n for i in range(missing):\n self.tlet_index = np.append(self.tlet_index, self.tlet_index[-1])",
"def __check_size__(self, size):\n # size must be an integer, otherwise raise a TypeError exception\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n # if size is less than 0, raise a ValueError\n if size < 0:\n raise ValueError(\"size must be >= 0\")",
"def check_len(ls, n, name=\"list\"):\n if len(ls) != n:\n raise ValueError(\"{0} must contain {1} elements\".format(name, n))",
"def is_undersized(self, cluster):\n return sum(self.clusters[cluster]['indicator']) < self.d"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get the tensor_map in convparam
|
def get_tensor_map(self):
return self.TENSOR_MAP
|
[
"def tensorflow_param(ckpt_path):\r\n tf_param = {}\r\n reader = tf.train.load_checkpoint(ckpt_path)\r\n for name in reader.get_variable_to_shape_map():\r\n try:\r\n print(name, reader.get_tensor(name).shape)\r\n tf_param[name] = reader.get_tensor(name)\r\n except AttributeError as e:\r\n print(e)\r\n return tf_param",
"def restore_map(self, fine_tune_checkpoint_type='detection'):\n return {var.op.name: var for var in tf.global_variables()}",
"def get_precon_map(self):\n\t\treturn self.__precon_action_map",
"def context(tensor):\n return {'ctx':tensor.context, 'dtype':tensor.dtype}",
"def _get_conv_linear_params(model, layer_to_be_corrected):\n\n bias_tensor = libpymo.TensorParamBiasCorrection()\n\n # get weight tensor\n weight_tensor, _ = get_weight_tensor_with_shape(model, layer_to_be_corrected)\n\n if weight_tensor is None:\n logger.error('Weight tensor extraction failed for layer {%s}', layer_to_be_corrected.name)\n\n bias_tensor.data = BiasUtils.get_bias_as_numpy_data(model, layer_to_be_corrected)\n bias_tensor.shape = BiasUtils.get_shape(layer_to_be_corrected)\n\n return bias_tensor, weight_tensor",
"def type_map_tensor() -> torch.Tensor:\n path = Path().resolve()\n path = path / \"cellseg_models_pytorch/training/tests/data/type_target_batch8.pt\"\n return torch.load(path.as_posix())",
"def __apply_tconv(self, tensor_in, params,\n activation, op_name):\n weights, biases = self.__make_tconv_wb(params[0],op_name)\n tensor_out = activation(\n tf.nn.conv2d_transpose(\n tensor_in, weights, strides=self.pool_strides,\n output_shape=(tf.shape(tensor_in)[0],*params[1]),\n padding=self.pad) + biases, name=op_name)\n return tensor_out",
"def get_map_fn(transformation_list: List[str], param_dict: Dict, n_classes: int):\n def map_fn(image, label):\n label = tf.one_hot(label, n_classes)\n image = augmentations.apply_list_of_transformations(image, transformation_list, param_dict)\n return image, label\n return map_fn",
"def load_conv_layer(input_x, w, b, name='conv_layer'):\n with tf.name_scope(name):\n conv = tf.nn.conv2d(input_x, w, strides=[1, 1, 1, 1], padding=\"SAME\")\n act = tf.nn.relu(conv + b)\n \n return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")",
"def _get_input_tensor_name(): # TODO: only for OID API pretrained\n return 'image_tensor:0'",
"def taskParameters(self, task):\n return (p for n, p in self.named_parameters() if f'taskSpecificLayer.{task}' in n)",
"def __apply_conv_pool(self, tensor_in, params,\n activation, op_name):\n weights, biases = self.__make_conv_wb(params,op_name)\n tensor_out = tf.nn.max_pool(\n activation(tf.nn.conv2d(\n tensor_in, weights, strides=self.conv_strides,\n padding=self.pad) + biases), ksize=self.pool_ksize,\n strides=self.pool_strides, padding=self.pad,\n name=op_name)\n return tensor_out",
"def tensorflow2mindspore(tf_ckpt_dir, param_mapping_dict, ms_ckpt_path):\r\n reader = tf.train.load_checkpoint(tf_ckpt_dir)\r\n new_params_list = []\r\n for name in param_mapping_dict:\r\n param_dict = {}\r\n parameter = reader.get_tensor(name)\r\n if 'conv' in name and 'weight' in name:\r\n # 对卷积权重进行转置\r\n parameter = np.transpose(parameter, axes=[3, 2, 0, 1])\r\n if 'fc' in name and 'kernel' in name:\r\n parameter = np.transpose(parameter, axes=[1, 0])\r\n param_dict['name'] = param_mapping_dict[name]\r\n param_dict['data'] = Tensor(parameter)\r\n new_params_list.append(param_dict)\r\n save_checkpoint(new_params_list, os.path.join(ms_ckpt_path, 'tf2mindspore.ckpt'))",
"def __init_decoder_params_tconv(self):\n filters = np.flipud(self.enc_params_conv)\n output_shapes = np.array(list(reversed(\n [ self.tensors['input'].get_shape().as_list()[1:] ]+\n [ self.__get_tensor('encoder',id).get_shape().\n as_list()[1:] for id in range(1,len(self.layer_cfg))])))\n self.dec_params_tconv = list(zip(filters, output_shapes))",
"def _reshape_param_data(param_data, dev_mat, tensor_map):\n\n device_count = 1\n for dim in dev_mat:\n device_count *= dim\n\n tensor_slices = np.split(param_data.asnumpy(), device_count, axis=0)\n tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)\n\n # get the actual number of slices,as: different devices may load the same slice\n slice_count = 1\n for dim in tensor_strategy:\n slice_count *= dim\n\n # reorder slices and remove duplicates based on device matrix and tensor_map\n tensor_slices_new = list(range(slice_count))\n for i in range(device_count):\n slice_index = _get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i)\n tensor_slices_new[int(slice_index)] = np.array(tensor_slices[i])\n\n # combine slices to generate complete parameter\n dim_len = len(tensor_strategy)\n for i in range(dim_len):\n ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])\n tensor_slices_new_inner = []\n for j in range(ele_count):\n new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]\n for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,\n (j + 1) * tensor_strategy[dim_len - 1 - i]):\n new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)\n\n tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))\n tensor_slices_new = tensor_slices_new_inner\n\n return Tensor(tensor_slices_new[0])",
"def _getConstraintParameterMap(self, tableId):\n if tableId is not None and tableId in self.__attributeConstraintParameterMap:\n return self.__attributeConstraintParameterMap[tableId]\n else:\n return []",
"def _get_output_tensor_name(): # TODO: only for OID API pretrained\n return ['detection_classes:0',\n 'detection_scores:0',\n 'detection_boxes:0']",
"def _get_tensor(self):\n return self._input",
"def list_act_1_neur(layer, neur_x , neur_y , act_map):\n list_act = []\n layer_activ = layers_activations[layer] \n for i in range(layer_activ.shape[0]):\n list_act.append(layer_activ[i, neur_x, neur_y, act_map])\n return(list_act)",
"def conv_layer_idxs(self):\n conv_layers = filter(lambda x: type(x[1]).__name__ == 'Conv2D', enumerate(self.model.layers))\n conv_layers = conv_layers[:-len(self.model.output)]\n return list(zip(*conv_layers)[0])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
translate tvm.shape to list type in python
|
def shape_to_list(shape):
if isinstance(shape, (list, tuple)):
return shape
tmp = []
for i in shape:
tmp.append(i.value)
return tmp
|
[
"def _shape_to_list(shape):\r\n if isinstance(shape, (list, tuple)):\r\n return shape\r\n tmp = []\r\n if shape == \"\":\r\n return ()\r\n for i in shape:\r\n tmp.append(i.value)\r\n return tmp",
"def shape_from_tagged(node: TaggedDict) -> list[int]:\n if \"shape\" in node: # this should not be reached but lets make sure\n return node[\"shape\"]\n return [1] # scalar",
"def _to_shape(shape):\n return tuple(int(sh) for sh in shape)",
"def shp_to_list(shpfile):\n with fiona.open(shpfile) as src:\n return [geometry.shape(rec['geometry']) for rec in src]",
"def tensorshape_to_intlist(tensorshape):\n\treturn list(map(lambda j: 1 if j is None else int(j), tensorshape))",
"def _tolist(ndarray):\n if np.issubdtype(ndarray.dtype, np.number):\n return ndarray\n elem_list = []\n for sub_elem in ndarray:\n if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):\n elem_list.append(_todict(sub_elem))\n elif isinstance(sub_elem, np.ndarray):\n elem_list.append(_tolist(sub_elem))\n else:\n elem_list.append(sub_elem)\n return elem_list",
"def shapes(self):\n return self._shapes",
"def shape(self) -> S:",
"def get_type_shapes(self):\n type_shapes = self.cpp_force.getTypeShapesPy()\n ret = [json.loads(json_string) for json_string in type_shapes]\n return ret",
"def box_shape_to_numpy(box_shape: BoxShape):\n return np.array([box_shape.length, box_shape.width, box_shape.height])",
"def get_shapeof_metatypes() -> List[OperatorMetatype]:",
"def flatten_tuple_annotation_shape(shape: tuple[Any, ...]) -> List[Any]:\n return _flatten(\n shape,\n predicate=bool,\n select=_identity,\n )",
"def list_shape(shape, elem=None):\n\n if (len(shape) == 0):\n return []\n\n def helper(elem, shape, i):\n if len(shape) - 1 == i:\n return [elem] * shape[i]\n return [ helper(elem, shape, i+1) for _ in range(shape[i]) ]\n\n return helper(elem, shape, 0)",
"def _infer_raw_shape(tt_cores):\n num_dims = len(tt_cores)\n num_tensor_shapes = len(tt_cores[0].shape) - 2\n raw_shape = [[] for _ in range(num_tensor_shapes)]\n for dim in range(num_dims):\n curr_core_shape = tt_cores[dim].shape \n for i in range(num_tensor_shapes):\n raw_shape[i].append(curr_core_shape[i+1])\n for i in range(num_tensor_shapes):\n raw_shape[i] = list(raw_shape[i])\n\n return tuple(raw_shape)",
"def flatten_xla_shape(\n xla_shape: xla_client.Shape,\n) -> Sequence[xla_client.Shape]:\n py_typecheck.check_type(xla_shape, xla_client.Shape)\n if xla_shape.is_tuple():\n tensor_shapes = []\n for shape in xla_shape.tuple_shapes():\n tensor_shapes += flatten_xla_shape(shape)\n return tensor_shapes\n else:\n # Must be a tensor (array) type; verify this by probing for dimensions and\n # element_type, since there's no explicit way to check otherwise.\n py_typecheck.check_type(xla_shape.element_type(), np.dtype)\n py_typecheck.check_type(xla_shape.dimensions(), tuple)\n return [xla_shape]",
"def shapes(self):\n shapes = []\n curRes = self\n while curRes is not None:\n shapes.append(curRes.shape)\n curRes = curRes.nReservation\n return shapes",
"def extract_shape(temp):\n tmp=[]\n for i in range(0, len(temp)):\n if type(temp[i]) == np.ndarray:\n a = np.shape(temp[i])\n if len(a) == 1: tmp.append(str(a[0]) + \"x1\")\n else: tmp.append(str(a[0]) + \"x\" + str(a[1]))\n else:\n tmp.append(temp[i])\n return tmp",
"def padded_to_list(padded_tensor, shape_per_tensor):\n return [padded_tensor[[i] + [slice(dim) for dim in shape]]\n for i, shape in enumerate(shape_per_tensor)]",
"def aslistoflist(self):\n class_names = self._keys()\n n = len(class_names)\n\n idx_map = dict()\n for i,k in enumerate(class_names):\n idx_map[k] = i\n\n m = ListOfListMat(n, n)\n\n for actual_class,detected_classes in self.mat.items():\n for detected_class,count in detected_classes.items():\n m[ idx_map[actual_class] ][ idx_map[detected_class] ] = count\n\n return m"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
fmap c0 check value
|
def _fmap_c0_check_value(dtype, optim_dict):
fmap_c0_check_value = 4 if optim_dict["c0_optim_flg"] and \
(is_v200_version() or is_lhisi_version()) else CUBE_MKN[dtype]['mac'][1]
return fmap_c0_check_value
|
[
"def is_scalar_zero(expr):\n return is_scalar_x(expr, 0)",
"def zero_crossings(data):\n pos = data > 0\n npos = ~pos\n return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]",
"def _convert_c_if_args(self, cond_tuple, bit_map):\n if isinstance(cond_tuple[0], Clbit):\n return (bit_map[cond_tuple[0]], cond_tuple[1])\n # ClassicalRegister conditions should already be in the outer circuit.\n return cond_tuple",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageCF3___nonzero__(self)",
"def __check_value__(self, value):\n\t\tcount = 0\n\t\tfor j in range(1, (value / 2) + 1):\n\t\t\t# if value % j == 0:\n\t\t\t# \tcount = count + 1\n\t\t\t# \tif count > 1:\n\t\t\t# \t\tbreak\n\t\t\t#FIXME: this call is not working with NoneType\n\t\t\tcount = self.__iteration__(value, j, count)\n\t\t\tif count > 1:\n\t\t\t\tbreak\n\n\t\tif count <= 1:\n\t\t\treturn value\n\t\treturn 0",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageCF2___nonzero__(self)",
"def get_zero(self, ctype):\n ntype = self(ctype)\n zero = self.default_zero_map.get(ntype, 'FAILURE')\n if zero == 'FAILURE':\n print('{}({!r}): failed to find zero constant value, returning'\n ' None.'.format(type(self).__name__, ntype))\n zero = None\n return zero",
"def positive(x):\r\n return x > 0",
"def cfcheck(**das):\n return True",
"def nonzero_values(x):\n return x[x != 0]",
"def test_cp0(self):\n self.assertAlmostEqual(self.thermodata.Cp0.value_si / constants.R, self.Cp0, 4)",
"def _x_0_validator(self, val):\n if np.any(val == 0):\n raise InputParameterError(\"0 is not an allowed value for x_0\")",
"def zero2one(self, x):\n if x == 0:\n x = 1\n return x",
"def rule_zero_width_nonjoiner(value, offset, ucd):\n assert value[offset] == '\\u200c'\n if ucd.combining_virama(_before(value, offset)):\n return True\n if ucd.valid_jointype(value, offset):\n return True\n return False",
"def isValid(self):\n return self.falsify(\n {},\n lambda alist,no: 0,\n lambda : 1)",
"def test_strip_zeros( self ) :\n\n self.assertEqual( 0, verscmp( self.v1, self.v10 ) )",
"def _required_value(converter: typing.Callable) -> typing.Callable:\n @functools.wraps(converter)\n def main(value: typing.Any) -> typing.Any:\n if value is not None:\n return converter(value)\n raise utils.RequestError(3101)\n return main",
"def transform_bool(col_data):\n return col_data.apply(lambda x: 0 if x == 'f' else 1)",
"def __bool__(self):\n return self.zero.defined and self.zero.value == 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
check L1 fusion fmap select
|
def _fusion_fmap_select(fmap):
valid_shape = ConvParam.fusion_para.get("valid_shape")
offset = ConvParam.fusion_para.get("slice_offset")
input_memory_type = ConvParam.fusion_para.get("input_memory_type")
if offset and input_memory_type != 1 :
if TENSOR_MAP["strideh_opti_flag"]:
# do it in _fmap_ddr2l1
pass
else:
n_offset, c1_offset, h_offset, w_offset, c0_offset = offset
data_res = tvm.compute(valid_shape, lambda n, c1, h, w, c0:
fmap(n + n_offset,
c1 + c1_offset,
h + h_offset,
w + w_offset,
c0 + c0_offset),
name="fusion_fmap_select")
fmap = data_res
TENSOR_MAP['fusion_fmap_select'] = fmap
return fmap
|
[
"def _is_select(self, op):\n return hasattr(op, \"select\") and getattr(op, \"select\") is not None",
"def test_select(self):\n tList = self.tList\n # Should be able to detect regardless of case\n nList = \"transformXOR, Transformadd\"\n transList = select_transformers(\n tList,\n nList,\n yes=1,\n listing=True)\n self.assertTrue(len(transList) == 2)\n # If we add an invalid transformer, it should just be ignored\n nList += \", TransformNoAvailable\"\n transList = select_transformers(\n self.tList,\n nList,\n yes=1,\n listing=True)\n self.assertTrue(len(transList) == 2)\n # name > only > level\n transList = select_transformers(\n tList,\n nList, select=2, level=1,\n yes=1,\n listing=True)\n self.assertTrue(len(transList) == 2)\n # only > level (also check if we get the right amount of\n # trans on the requested level)\n for i in range(0, 3):\n with self.subTest(i=i):\n transList = select_transformers(\n tList,\n select=(i + 1), level=1, listing=True)\n self.assertTrue(len(transList) == len(tList[i]))\n # test if level will get us all transformers from the requested\n # level and below\n transList = select_transformers(\n tList,\n level=2,\n listing=True)\n self.assertTrue(len(transList) == len(tList[0] + tList[1]))\n\n # test default action (uses level = 3)\n transList = select_transformers(tList, listing=True)\n self.assertTrue(len(transList) == len(tList[0] + tList[1] + tList[2]))",
"def ISelectLaminaFaces(arg=None):\n\n\tselectLaminaFaces()",
"def test_select_type(self):\n input_tensor = torch.ones((1, 2, 3))\n with pytest.raises(\n AssertionError,\n match=re.escape(\"'select' must be a Tensor the same length as 'fill' (2)\"),\n ):\n replace_missing(input_tensor, fill=torch.tensor([1, 2]), select={0, 1, 2})",
"def selection(t, f):\n\n selection_table = []\n # Iterates through table 1\n for row in t:\n if f(row):\n selection_table.append(row)\n # If tables only have one schema column\n if len(selection_table) == 1 or len(selection_table) == 0:\n return None\n else:\n selection_table = remove_duplicates(selection_table)\n return selection_table",
"def polySelectCtx(*args, **kwargs):\n\n pass",
"def _of_proc(left, right):\n left = _normalize_type(left)\n if left in ('list', 'tuple', 'array', 'matrix', 'seq'):\n return left\n return None",
"def will_select(\n self, pack_name: str, pack: DataPack, multi_pack: MultiPack\n ) -> bool:\n raise NotImplementedError",
"def L1(v, dfs_data):\n return dfs_data['lowpoint_1_lookup'][v]",
"def test_REESelector(self):\n df = self.df\n tmr = REESelector()\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)",
"def test_TypeSelector(self):\n df = self.df\n tmr = TypeSelector(float)\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)",
"def L_selected_index(index):\n\n\t\tx_img = tomap['ex']['x']\n\t\ty_img = tomap['ex']['y']\n\t\ttimes_img = tomap['ex']['times']\n\n\t\tif index:\n\t\t\tif plot_ex:\n\t\t\t\tex_len = tomap['ex']['x'].shape[0]\n\t\t\telse:\n\t\t\t\tex_len = 0\n\n\t\t\t# ex_len = tomap['ex']['x'].shape[0]\n\t\t\tH_len = tomap['H']['x'].shape[0]\n\n\t\t\tif index[0] < ex_len:\n\t\t\t\tselected = hv.RGB(x_img[index]).opts(width=width, height=height)\n\t\t\t\tlabel = 'No selection'\n\t\t\telse:\n\t\t\t\tx_img = tomap['L']['x']\n\t\t\t\ty_img = tomap['L']['y']\n\t\t\t\ttimes_img = tomap['L']['times']\n\n\t\t\t\tif index[0] < ex_len + H_len:\n\t\t\t\t\tselected = hv.RGB(x_img[index[0] - ex_len]).opts(width=width, height=height)\n\t\t\t\t\tlabel = 'L1 (H1): %s, %f, %d selected' % (y_img[index[0] - ex_len], times_img[index[0] - ex_len], len(index))\n\t\t\t\telse:\n\t\t\t\t\tselected = hv.RGB(x_img[index[0] - (ex_len + H_len)]).opts(width=width, height=height)\n\t\t\t\t\tlabel = 'L1 (L1): %s, %f, %d selected' % (y_img[index[0] - (ex_len + H_len)], times_img[index[0] - (ex_len + H_len)], len(index))\n\t\telse:\n\t\t\tselected = hv.RGB(x_img[index]).opts(width=width, height=height)\n\t\t\tlabel = 'No selection'\n\t\treturn selected.relabel(label).opts(labelled=[])",
"def map_fn(item):\n ctrls = item[1]\n return (item[0], list(filter(filter_fn, ctrls)))",
"def search_select(df, search_cols, callback, mode='flat', search_on_type=True,\n initial_filter='', label=''):\n\n assert mode in ('flat', 'nested', 'all'), \\\n 'Please select \"flat\", \"nested\" or \"all\" for mode'\n\n if len(search_cols) < 2:\n mode = 'flat'\n\n disp_cols = []\n id_cols = []\n for col in search_cols:\n if isinstance(col, tuple):\n disp_col, id_col = col\n else:\n disp_col = id_col = col\n disp_cols.append(disp_col)\n id_cols.append(id_col)\n\n def populate_options(ww):\n if wfilter.value != '':\n s = [False] * len(df)\n for st in wfilter.value.split('|'):\n s |= df[disp_cols].apply(lambda r:\n any([st.lower().strip()\n in c.lower()\n for c in r]),\n axis=1)\n else:\n s = [True] * len(df)\n\n if ww not in sorted_selectors: # flat_selector\n disp_vals = ['--'.join(x) for x in df[s].sort(disp_cols)[disp_cols].values]\n id_vals = [tuple(x) for x in df[s].sort(disp_cols)[id_cols].values]\n\n if len(disp_vals) > 0:\n ww.values = OrderedDict((disp, idx)\n for disp, idx in zip(disp_vals, id_vals))\n else:\n ww.values = {'': -1}\n\n else:\n i = sorted_selectors.index(ww)\n for j in xrange(i):\n wprev = sorted_selectors[j]\n s &= df[id_cols[j]] == wprev.value\n\n vals = sorted(set(tuple(x)\n for x in df[s][[disp_cols[i], id_cols[i]]].values))\n if len(vals) > 0:\n ww.values = OrderedDict((str(disp), idx)\n for disp, idx in vals)\n else:\n ww.values = {'': -1}\n\n if len(ww.values) > 0:\n ww.value = ww.values.values()[0]\n else:\n ww.value = None\n\n sorted_selectors = []\n widgets_to_update_on_search = []\n if mode in ('nested', 'all'):\n def make_callback(w):\n def on_selection():\n i = sorted_selectors.index(w)\n if i == len(search_cols) - 1:\n callback(tuple(ww.value for ww in sorted_selectors))\n else:\n populate_options(sorted_selectors[i+1])\n return on_selection\n\n for col in disp_cols:\n w = widgets.DropdownWidget(description=col)\n w.on_trait_change(make_callback(w), 'value')\n sorted_selectors.append(w)\n\n # only need to update the first - the others will be triggered\n widgets_to_update_on_search.append(sorted_selectors[0])\n\n all_widgets = sorted_selectors[:]\n if mode in ('flat', 'all'):\n if mode == 'flat':\n desc = '' if label is None else label\n else:\n desc = 'All'\n wflat = widgets.DropdownWidget(description=desc)\n wflat.on_trait_change(lambda: callback(wflat.value), 'value')\n all_widgets.append(wflat)\n widgets_to_update_on_search.append(wflat)\n\n wfilter = widgets.TextWidget(description='Filter', value=initial_filter)\n\n def update_options(w=None):\n for w in widgets_to_update_on_search:\n populate_options(w)\n\n if search_on_type:\n all_widgets.append(wfilter)\n def update_on_keystroke():\n global last_keystroke\n now = time.time()\n if now > last_keystroke + 1:\n update_options()\n\n wfilter.on_trait_change(update_on_keystroke, 'value')\n else:\n container = widgets.ContainerWidget()\n container.remove_class('vbox')\n container.add_class('hbox')\n b = widgets.ButtonWidget(description='Search')\n b.on_click(update_options)\n container.children = [wfilter, b]\n all_widgets.append(container)\n\n container = widgets.ContainerWidget()\n display.display(container)\n if mode == 'flat':\n container.remove_class('vbox')\n container.add_class('hbox')\n\n container.children = all_widgets\n update_options()\n\n return container",
"def choose_one(filter_fun: Predicate[T], iterable: Iteratable) -> T:\n return choose(filter_fun, iterable, True)",
"def select_vec(self, evecs):",
"def select_sources(self, selection):\n\n # store selection\n self.selection = selection\n\n # make selection\n self.unit_vector = [self.unit_vector[i] for i in selection]\n self.distance = [self.distance[i] for i in selection]\n\n self.N = len(self.distance)\n\n self.coord = self.coord[selection]\n try:\n self.flux = self.flux[selection]\n self.flux_weight = self.flux_weight[selection]\n except:\n pass",
"def f1_larch(x, _larch=None):\n if _larch is None:\n return x\n else:\n return 2*x",
"def call_select_on_initial_values(\n grid: np.ndarray, candidate_per_constraint, constraint_map_per_cell\n):\n for (row_index, col_index), cell_value in np.ndenumerate(grid):\n if cell_value == 0:\n continue\n select(\n candidate_per_constraint,\n constraint_map_per_cell,\n (row_index, col_index, cell_value),\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
calculate im2col_fractal tvm lambda function
|
def __im2col_fractal_indices(indices, fmap):
block_size = config['mac'][1]
block_size_m = config['mac'][0]
_, howo, _, kernel_h, kernel_w, _ = fmap.shape
batch_size, index_i1, index_j1, index_i0, index_j0 = indices
n_index = batch_size
hw_index = index_i1*block_size_m + index_i0
c1_index = (((index_j1*block_size + index_j0) // block_size) //
kernel_w.value) // kernel_h.value
kh_index = (((index_j1*block_size + index_j0) // block_size) //
kernel_w.value) % kernel_h.value
kw_index = ((index_j1*block_size + index_j0) \
// block_size) % kernel_w.value
c0_index = (index_j1*block_size + index_j0) % block_size
if optim_dict["c0_optim_flg"]:
c1_index = 0
kh_index = (index_j1*4 + index_j0 // 4) // kernel_w.value
kw_index = (index_j1*4 + index_j0 // 4) % kernel_w.value
c0_index = index_j0 % 4
dtype = compute_dtype
return tvm.select( \
tvm.any(hw_index < 0, hw_index > howo.value - 1), \
tvm.const(0.0, dtype), \
fmap(n_index, hw_index, \
c1_index, kh_index, kw_index, c0_index))
|
[
"def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n\t#Normalize columns to length 1 in 3D space\n\tleng = [0, 0, 0]\n\tcosx = [0, 0, 0]\n\tcosy = [0, 0, 0]\n\tcosz = [0, 0, 0]\n\tfor i in range(3):\n\t\tleng[i] = sqrt(MODx[i]*MODx[i] + MODy[i]*MODy[i] + MODz[i]*MODz[i])\n\t\tif not (leng[i] == 0):\n\t\t\tcosx[i] = MODx[i]/leng[i]\n\t\t\tcosy[i] = MODy[i]/leng[i]\n\t\t\tcosz[i] = MODz[i]/leng[i]\n\n\t#translation matrix\n\tif cosx[1] == 0:\n\t\tif cosy[1] == 0:\n\t\t\tif cosz[1] == 0: #2nd color is unspecified\n\t\t\t\tcosx[1] = cosz[0]\n\t\t\t\tcosy[1] = cosx[0]\n\t\t\t\tcosz[1] = cosy[0]\n\n\tif cosx[2] == 0:\n\t\tif cosy[2] == 0:\n\t\t\tif cosz[2] == 0: #3rd color is unspecified\n\t\t\t\t#3rd column will be cross product of first 2\n\t\t\t\t#fiji implementation allows for computation of 3rd color via Ruifroks method\n\t\t\t\t# but this is unnecessary for extracting just H&E \n\t\t\t\tcosx[2] = cosy[0] * cosz[1] - cosz[0] * cosy[1];\n\t\t\t\tcosy[2] = cosz[0] * cosx[1] - cosx[0] * cosz[1];\n\t\t\t\tcosz[2] = cosx[0] * cosy[1] - cosy[0] * cosx[1];\n\n\t#renormalize 3rd column\n\tleng = sqrt(cosx[2]*cosx[2] + cosy[2]*cosy[2] + cosz[2]*cosz[2])\n\tif leng != 0 and leng != 1:\n\t\tcosx[2] = cosx[2]/leng\n\t\tcosy[2] = cosy[2]/leng\n\t\tcosz[2] = cosz[2]/leng\n\n\tCOS3x3Mat = np.matrix([\n\t\t\t\t[cosx[0], cosy[0], cosz[0]], \n\t\t\t\t[cosx[1], cosy[1], cosz[1]],\n\t\t\t\t[cosx[2], cosy[2], cosz[2]]\n\t\t\t\t])\n\n\t#Note: I am skipping lines 390-459 of the matlab code, since\n\t# the determinant of the COS3x3Mat matrix is > 0 (~0.5). 
I think that\n\t# bit of code is trying to make the matrix invertible, but it already is\n\t# for H&E stain matrix \n\t#print(np.linalg.det(COS3x3Mat))\n\n\t#Invert the matrix\n\t# Note that this is done manually in the matlab code.\n\tQ3x3Mat = np.linalg.inv(COS3x3Mat)\n\tQ3x3MatInverted = COS3x3Mat #Just following the matlab code...\n\n\t#Compute transmittance \n\trowR = img.shape[0]\n\tcolR = img.shape[1]\n\n\t#These are the 1 channel transmittances of each dye \n\tDye1_transmittance = np.zeros([rowR, colR])\n\tDye2_transmittance = np.zeros([rowR, colR])\n\tDye3_transmittance = np.zeros([rowR, colR])\n\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\tRGB1 = img[r, c]\n\t\t\tRGB1[RGB1==0] = 1 #Avoid log0\n\t\t\tACC = -np.log(RGB1 / 255)\n\t\t\ttransmittances = 255 * np.exp(-ACC*Q3x3Mat)\n\t\t\ttransmittances = transmittances[0,:]\n\t\t\ttransmittances[transmittances>255] = 255\n\n\t\t\tDye1_transmittance[r,c] = transmittances[0,0]\n\t\t\tDye2_transmittance[r,c] = transmittances[0,1]\n\t\t\tDye3_transmittance[r,c] = transmittances[0,2]\n\n\t#Construct lookup tables to convert 1 channel dye images to \n\t# \t3 channel RGB representations \n\trLUT = np.zeros([256,3])\n\tgLUT = np.zeros([256,3])\n\tbLUT = np.zeros([256,3])\n\n\tfor i in range(3):\n\t\tfor j in range(256):\n\t\t\tif cosx[i] < 0:\n\t\t\t\trLUT[255-j, i] = 255 + (j * cosx[i])\n\t\t\telse:\n\t\t\t\trLUT[255-j, i] = 255 - (j * cosx[i])\n\n\t\t\tif cosy[i] < 0:\n\t\t\t\tgLUT[255-j, i] = 255 + (j * cosy[i])\n\t\t\telse:\n\t\t\t\tgLUT[255-j, i] = 255 - (j * cosy[i])\n\n\t\t\tif cosz[i] < 0:\n\t\t\t\tbLUT[255-j, i] = 255 + (j * cosz[i])\n\t\t\telse:\n\t\t\t\tbLUT[255-j, i] = 255 - (j * cosz[i])\n\n\t#Apply the lookup table to first dye (Hematoxilin)\n\tDye1_color_im = np.zeros(img.shape)\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\t#print(floor(Dye1_transmittance[r,c]))\n\t\t\tDye1_color_im[r,c,0] = rLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,1] = gLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,2] = bLUT[floor(Dye1_transmittance[r,c]),0]\n\n\tDye1_color_im = Dye1_color_im.astype(np.uint8)\n\n\treturn Dye1_transmittance, Dye1_color_im",
"def surface2im(u, v):\n n = u.shape[0]\n e = np.zeros([n-2, n-2])\n for i in range(n-2):\n for j in range(n-2):\n e[i, j] = e1_f(u[i+1, j], u[i+2, j+1], u[i+1, j+2], u[i, j+1], v[0], v[1], v[2], 2/n)\n \n return e",
"def internal_virial(r,f):\n virial = 0\n for i in range(r.shape[0]):\n for dim in range(r.shape[1]):\n virial += r[i,dim]*f[i,dim]\n\n return virial",
"def vectorizar(self):\n img = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n self.vector = img.T.flatten().T\n return None",
"def inverse_transform(self, matrix):\n #return np.fft.ifft(matrix) #just wanted to see what is to be expected\n sx = matrix.shape[0]\n sy = matrix.shape[1]\n N = max(matrix.shape[0], matrix.shape[1])\n newimage = np.zeros((sx,sy),dtype=np.complex)\n for u in range(sx):\n for v in range(sy):\n t = 0\n\n for i in range(sx):\n for j in range(sy):\n t = t + ((matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) - (\n ((1j) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j))))))))\n\n #t = t + (matrix[i,j]*math.exp((1j.imag)*((2*math.pi)/N)*((u*i) +(v*j))))\n\n #t = t + (matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) + (\n #(((1j).imag) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j)))))))\n\n newimage[u, v] = t #round(t)\n\n if (False):\n for u in range(sx):\n for v in range(sy):\n newimage[u,v] = math.floor(math.log(abs(newimage[u,v])))\n\n return newimage",
"def extract_col(pars) :\n data,err,bitmask,cols,models,rad,pix0,back,sigmodels = pars\n spec = np.zeros([len(models),len(cols)])\n sig2 = np.zeros([len(models),len(cols)])\n mask = np.zeros([len(models),len(cols)],dtype=np.uintc)\n ny=data.shape[0]\n ncol=data.shape[1]\n y,x = np.mgrid[0:data.shape[0],0:data.shape[1]]\n pix=np.zeros(data.shape)\n\n for i,model in enumerate(models) :\n\n # center of trace\n ymid=model(cols)+pix0\n\n # calculate distance of each pixel from trace center\n ylo = np.int(np.min(np.floor(ymid-rad)))\n yhi = np.int(np.max(np.ceil(ymid+rad)))\n dist=y[ylo:yhi+1,:]-ymid\n\n # determine contribution of each pixel to boxcar\n contrib = np.zeros(dist.shape,float)\n # full pixel contribution\n iy,ix = np.where( (np.abs(dist)<rad-0.5) )\n contrib[iy,ix] = 1.\n # fractional pixel contribution\n iy,ix = np.where( (np.abs(dist)>rad-0.5) & (np.abs(dist)<rad+0.5) )\n contrib[iy,ix] = 1-(np.abs(dist[iy,ix])-(rad-0.5))\n \n # add the contributions\n spec[i,:] = np.sum( data[ylo:yhi+1,:]*contrib, axis=0)\n sig2[i,:] = np.sum(err[ylo:yhi+1,:]**2*contrib**2, axis=0)\n # for bitmask take bitwise_or of pixels that have full contribution\n mask[i,:] = np.bitwise_or.reduce(\n bitmask[ylo:yhi+1,:]*contrib.astype(int),axis=0) \n\n # background\n background = np.empty_like(data)\n background[:] = np.nan\n background_err = copy.copy(background)\n if len(back) > 0 :\n dist = y - ymid\n\n nback=0\n for bk in back :\n iy,ix = np.where( (dist>bk[0]) & (dist<bk[1]) )\n background[iy,ix] = data[iy,ix]\n background_err[iy,ix] = err[iy,ix]**2\n nback+=np.abs(bk[1]-bk[0])\n\n spec[i,:] -= np.nanmedian(background,axis=0)*2*rad\n sig2[i,:] += np.nansum(background_err,axis=0)/nback*(2*rad)\n\n return spec, np.sqrt(sig2), mask",
"def dftImage(d,uvw,px,res,mask=False):\n nants=uvw.shape[0]\n im=numpy.zeros((px[0],px[1]),dtype=complex)\n mid_k=int(px[0]/2.)\n mid_l=int(px[1]/2.)\n u=uvw[:,:,0]\n v=uvw[:,:,1]\n w=uvw[:,:,2]\n u/=mid_k\n v/=mid_l\n start_time=time.time()\n for k in range(px[0]):\n for l in range(px[1]):\n im[k,l]=dft2(d,(k-mid_k),(l-mid_l),u,v)\n if mask: #mask out region beyond field of view\n rad=(((k-mid_k)*res)**2 + ((l-mid_l)*res)**2)**.5\n if rad > mid_k*res: im[k,l]=0\n #else: im[k,l]=dft2(d,(k-mid_k),(l-mid_l),u,v)\n print time.time()-start_time\n return im",
"def itkScalarImageToTextureFeaturesFilterIUS2_cast(*args):\n return _itkScalarImageToGreyLevelCooccurrenceMatrixGeneratorPython.itkScalarImageToTextureFeaturesFilterIUS2_cast(*args)",
"def matrix(img):\n return img[1]",
"def DFT2(image):\n dim_im = np.ndim(image)\n if dim_im == 2:\n # (M.N) matrix as an image case, we will work on it\n im_orig = image.astype(np.complex128)\n else:\n # (M.N.1) matrix as an image case, we will divide it to 2d matrix and work on it\n im_orig = image[:, :, 0]\n im_orig = im_orig.astype(np.complex128)\n # DFT for rows\n im_orig = DFT(im_orig)\n # DFT for cols\n im_orig = DFT(im_orig.T)\n im_orig = im_orig.T\n if dim_im == 2:\n # no need to return to 3d so return 2d array.\n return im_orig\n else:\n # need to return to 3d.\n image = image.astype(np.complex128)\n image[:, :, 0] = im_orig\n return image",
"def conv_der(im):\n conv_x = signal.convolve2d(im, np.asarray([[0.5, 0, -0.5]]), \"same\")\n conv_y = signal.convolve2d(im, np.asarray([[0.5], [0], [-0.5]]), \"same\")\n magnitude = np.sqrt(np.abs(conv_x)**2 + np.abs(conv_y)**2)\n return magnitude",
"def vectorize(img):\n r = img[:,:,0].flatten()\n g = img[:,:,1].flatten()\n b = img[:,:,2].flatten()\n vect = np.concatenate((r,g,b))\n return vect",
"def applyNormalisation(image):\n #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n #image[:,:,3] = clahe.apply(image[:,:,3])\n return image / 255.",
"def itkScalarImageToTextureFeaturesFilterIUC2_cast(*args):\n return _itkScalarImageToGreyLevelCooccurrenceMatrixGeneratorPython.itkScalarImageToTextureFeaturesFilterIUC2_cast(*args)",
"def fourier_der(im):\n\n shift_v = calculate_shift_u_or_v(im, V)\n shift_u = calculate_shift_u_or_v(im, U)\n\n fourier_im = DFT2(im)\n fourier_im_trans = fourier_im.transpose()\n\n square_sum = calculate_square_sum(im, shift_u, shift_v, fourier_im,\n fourier_im_trans)\n\n return np.sqrt(square_sum).astype(FLOAT_TYPE)",
"def perspective_transform_values(tl=-60, tr=60, bl=-10, br=40, img_size=(1280, 720)):\n src = np.float32(\n [[(img_size[0] / 2) + tl, img_size[1] / 2 + 100],\n [((img_size[0] / 6) + bl), img_size[1]],\n [(img_size[0] * 5 / 6) + br, img_size[1]],\n [(img_size[0] / 2 + tr), img_size[1] / 2 + 100]])\n dst = np.float32(\n [[(img_size[0] / 4), 0],\n [(img_size[0] / 4), img_size[1]],\n [(img_size[0] * 3 / 4), img_size[1]],\n [(img_size[0] * 3 / 4), 0]])\n return src, dst",
"def FI(image):\n a = iulib.floatarray()\n iulib.narray_of_numpy(a,transpose(image[::-1,...]))\n return a",
"def forward_transform(self, matrix):\n #return np.fft.fft(matrix) #just wanted to see what is to be expected\n sx = matrix.shape[0]\n sy = matrix.shape[1]\n N = max(matrix.shape[0],matrix.shape[1])\n newimage = np.zeros((sx,sy),dtype=np.complex)\n\n\n W = np.exp(-1j * ((2*math.pi)/N))\n for u in range(sx):\n for v in range(sy):\n t = 0\n\n for i in range(sx):\n for j in range(sy):\n #t = t + (matrix[i,j]*math.exp((-1j.imag)*((2*math.pi)/N)*((u*i) +(v*j))))\n t = t + ((matrix[i,j]*(math.cos(((math.pi*2)/N)*((u*i)+(v*j))) - (((1j)*math.sin(((math.pi*2)/N)*((u*i)+(v*j))))))))\n #((1j).imag)\n newimage[u,v] = t\n\n\n\n return newimage",
"def f2c(t):\n\treturn int(round(float(5*(t-32))/9))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
merge im2col_row_major axis of input_C1, filter_h, filter_w, input_C0
|
def _im2col_row_major_reshape(fmap_im2col_shape, \
fmap_row_major, compute_dtype):
_, howo, input_c1, filter_h, filter_w, input_c0 = fmap_row_major.shape
row_major_reshape = tvm.compute(
fmap_im2col_shape, lambda i, j, k: tvm.select(
tvm.all(k < input_c1*filter_h*filter_w*input_c0, j < howo),
fmap_row_major(i, j, k // (filter_h*filter_w*input_c0),
k // (filter_w*input_c0) % filter_h,
k // (input_c0) % (filter_w),
k % input_c0), tvm.const(0.0, compute_dtype)),
name="row_major_reshape",
tag=OP_TAG + 'row_major_reshape')
return row_major_reshape
|
[
"def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):",
"def conv_forward_im2col(x, w, b, conv_param):\n N, C, H, W = x.shape\n num_filters, _, filter_height, filter_width = w.shape\n stride, pad = conv_param['stride'], conv_param['pad']\n\n # Check dimensions\n assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'\n assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'\n\n # Create output\n out_height = (H + 2 * pad - filter_height) // stride + 1\n out_width = (W + 2 * pad - filter_width) // stride + 1\n out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)\n\n # x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)\n x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)\n res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)\n\n out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])\n out = out.transpose(3, 0, 1, 2)\n\n cache = (x, w, b, conv_param, x_cols)\n return out, cache",
"def _postprocess_conv2d_output(x, data_format):\n\n if data_format == 'channels_first':\n x = tf.transpose(x, (0, 3, 1, 2))\n\n if K.floatx() == 'float64':\n x = tf.cast(x, 'float64')\n return x",
"def convolve(im, kernel):\n if (len(im.shape)==2):\n im = np.expand_dims(im, 2)\n H, W, B = im.shape\n imc = np.zeros((H, W, B))\n for band in range(B):\n imc[:, :, band] = sps.correlate2d(im[:, :, band], kernel, mode='same')\n return imc",
"def _postprocess_conv2d_output(x, data_format):\n\tif data_format == 'channels_first':\n\t\tx = tf.transpose(x, (0, 3, 1, 2))\n\n\tif floatx() == 'float64':\n\t\tx = tf.cast(x, 'float64')\n\treturn x",
"def coordinate_image(num_rows,num_cols,r0,r1,c0,c1):\n rval=np.linspace(r0,r1,num_rows)\n cval=np.linspace(c0,c1,num_cols)\n c,r=np.meshgrid(cval,rval)\n M = np.stack([r,c,np.ones(r.shape)],-1)\n return M",
"def _preprocess_conv2d_input(x, data_format):\n\tif dtype(x) == 'float64':\n\t\tx = tf.cast(x, 'float32')\n\tif data_format == 'channels_first':\n\t\tx = tf.transpose(x, (0, 2, 3, 1))\n\treturn x",
"def conv_backward_im2col(dout, cache):\n x, w, b, conv_param, x_cols = cache\n stride, pad = conv_param['stride'], conv_param['pad']\n\n db = np.sum(dout, axis=(0, 2, 3))\n\n num_filters, _, filter_height, filter_width = w.shape\n dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)\n dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)\n\n dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)\n # dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)\n dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],\n filter_height, filter_width, pad, stride)\n\n return dx, dw, db",
"def from_nhwc_to_nchw(input_data):\n return np.moveaxis(input_data, -1, -3)",
"def max_pool_forward_im2col(x, pool_param):\n\tN, C, H, W = x.shape\n\tpool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n\tstride = pool_param['stride']\n\n\tassert (H - pool_height) % stride == 0, 'Invalid height'\n\tassert (W - pool_width) % stride == 0, 'Invalid width'\n\n\tout_height = (H - pool_height) / stride + 1\n\tout_width = (W - pool_width) / stride + 1\n\n\tx_split = x.reshape(N * C, 1, H, W)\n\tx_cols = im2col_indices(x_split, pool_height, pool_width, padding=0, stride=stride)\n\tx_cols_argmax = np.argmax(x_cols, axis=0)\n\tx_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]\n\tout = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)\n\n\tcache = (x, x_cols, x_cols_argmax, pool_param)\n\treturn out, cache",
"def _preprocess_conv2d_kernel(kernel, data_format):\n\tif dtype(kernel) == 'float64':\n\t\tkernel = tf.cast(kernel, 'float32')\n\tif data_format == 'channel_first':\n\t\tkernel = tf.transpose(kernel, (2, 3, 1, 0))\n\treturn kernel",
"def transpose_input(from_cudnn):\n order = 'F' if from_cudnn else 'C'\n\n\n def transform(kernel):\n return kernel.T.reshape(kernel.shape, order=order)\n\n\n return transform",
"def convolution2d(image, kernel):\n\n # get the height/width of the image, kernel, and output\n im_h, im_w = image.shape\n ker_h, ker_w = kernel.shape\n out_h = im_h - ker_h + 1\n out_w = im_w - ker_w + 1\n\n # create an empty matrix in which to store the output\n output = np.zeros((out_h, out_w))\n\n # iterate over the different positions at which to apply the kernel,\n # storing the results in the output matrix\n for out_row in range(out_h):\n for out_col in range(out_w):\n # overlay the kernel on part of the image\n # (multiply each element of the kernel with some element of the image, then sum)\n # to determine the output of the matrix at a point\n current_product = 0\n for i in range(ker_h):\n for j in range(ker_w):\n current_product += image[out_row + i, out_col + j] * kernel[i, j]\n\n output[out_row, out_col] = current_product\n\n return output",
"def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n\t#Normalize columns to length 1 in 3D space\n\tleng = [0, 0, 0]\n\tcosx = [0, 0, 0]\n\tcosy = [0, 0, 0]\n\tcosz = [0, 0, 0]\n\tfor i in range(3):\n\t\tleng[i] = sqrt(MODx[i]*MODx[i] + MODy[i]*MODy[i] + MODz[i]*MODz[i])\n\t\tif not (leng[i] == 0):\n\t\t\tcosx[i] = MODx[i]/leng[i]\n\t\t\tcosy[i] = MODy[i]/leng[i]\n\t\t\tcosz[i] = MODz[i]/leng[i]\n\n\t#translation matrix\n\tif cosx[1] == 0:\n\t\tif cosy[1] == 0:\n\t\t\tif cosz[1] == 0: #2nd color is unspecified\n\t\t\t\tcosx[1] = cosz[0]\n\t\t\t\tcosy[1] = cosx[0]\n\t\t\t\tcosz[1] = cosy[0]\n\n\tif cosx[2] == 0:\n\t\tif cosy[2] == 0:\n\t\t\tif cosz[2] == 0: #3rd color is unspecified\n\t\t\t\t#3rd column will be cross product of first 2\n\t\t\t\t#fiji implementation allows for computation of 3rd color via Ruifroks method\n\t\t\t\t# but this is unnecessary for extracting just H&E \n\t\t\t\tcosx[2] = cosy[0] * cosz[1] - cosz[0] * cosy[1];\n\t\t\t\tcosy[2] = cosz[0] * cosx[1] - cosx[0] * cosz[1];\n\t\t\t\tcosz[2] = cosx[0] * cosy[1] - cosy[0] * cosx[1];\n\n\t#renormalize 3rd column\n\tleng = sqrt(cosx[2]*cosx[2] + cosy[2]*cosy[2] + cosz[2]*cosz[2])\n\tif leng != 0 and leng != 1:\n\t\tcosx[2] = cosx[2]/leng\n\t\tcosy[2] = cosy[2]/leng\n\t\tcosz[2] = cosz[2]/leng\n\n\tCOS3x3Mat = np.matrix([\n\t\t\t\t[cosx[0], cosy[0], cosz[0]], \n\t\t\t\t[cosx[1], cosy[1], cosz[1]],\n\t\t\t\t[cosx[2], cosy[2], cosz[2]]\n\t\t\t\t])\n\n\t#Note: I am skipping lines 390-459 of the matlab code, since\n\t# the determinant of the COS3x3Mat matrix is > 0 (~0.5). 
I think that\n\t# bit of code is trying to make the matrix invertible, but it already is\n\t# for H&E stain matrix \n\t#print(np.linalg.det(COS3x3Mat))\n\n\t#Invert the matrix\n\t# Note that this is done manually in the matlab code.\n\tQ3x3Mat = np.linalg.inv(COS3x3Mat)\n\tQ3x3MatInverted = COS3x3Mat #Just following the matlab code...\n\n\t#Compute transmittance \n\trowR = img.shape[0]\n\tcolR = img.shape[1]\n\n\t#These are the 1 channel transmittances of each dye \n\tDye1_transmittance = np.zeros([rowR, colR])\n\tDye2_transmittance = np.zeros([rowR, colR])\n\tDye3_transmittance = np.zeros([rowR, colR])\n\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\tRGB1 = img[r, c]\n\t\t\tRGB1[RGB1==0] = 1 #Avoid log0\n\t\t\tACC = -np.log(RGB1 / 255)\n\t\t\ttransmittances = 255 * np.exp(-ACC*Q3x3Mat)\n\t\t\ttransmittances = transmittances[0,:]\n\t\t\ttransmittances[transmittances>255] = 255\n\n\t\t\tDye1_transmittance[r,c] = transmittances[0,0]\n\t\t\tDye2_transmittance[r,c] = transmittances[0,1]\n\t\t\tDye3_transmittance[r,c] = transmittances[0,2]\n\n\t#Construct lookup tables to convert 1 channel dye images to \n\t# \t3 channel RGB representations \n\trLUT = np.zeros([256,3])\n\tgLUT = np.zeros([256,3])\n\tbLUT = np.zeros([256,3])\n\n\tfor i in range(3):\n\t\tfor j in range(256):\n\t\t\tif cosx[i] < 0:\n\t\t\t\trLUT[255-j, i] = 255 + (j * cosx[i])\n\t\t\telse:\n\t\t\t\trLUT[255-j, i] = 255 - (j * cosx[i])\n\n\t\t\tif cosy[i] < 0:\n\t\t\t\tgLUT[255-j, i] = 255 + (j * cosy[i])\n\t\t\telse:\n\t\t\t\tgLUT[255-j, i] = 255 - (j * cosy[i])\n\n\t\t\tif cosz[i] < 0:\n\t\t\t\tbLUT[255-j, i] = 255 + (j * cosz[i])\n\t\t\telse:\n\t\t\t\tbLUT[255-j, i] = 255 - (j * cosz[i])\n\n\t#Apply the lookup table to first dye (Hematoxilin)\n\tDye1_color_im = np.zeros(img.shape)\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\t#print(floor(Dye1_transmittance[r,c]))\n\t\t\tDye1_color_im[r,c,0] = rLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,1] = gLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,2] = bLUT[floor(Dye1_transmittance[r,c]),0]\n\n\tDye1_color_im = Dye1_color_im.astype(np.uint8)\n\n\treturn Dye1_transmittance, Dye1_color_im",
"def th_affine2d(x, matrix, output_img_width, output_img_height):\n assert (matrix.ndim == 2)\n matrix = matrix[:2, :]\n transform_matrix = matrix\n src = x\n\n # cols, rows, channels = src.shape\n dst = cv2.warpAffine(src, transform_matrix, (output_img_width, output_img_height),\n cv2.INTER_AREA, cv2.BORDER_CONSTANT, borderValue=(0, 0, 0))\n # for gray image\n if dst.ndim == 2:\n dst = np.expand_dims(np.asarray(dst), axis=2)\n\n return dst",
"def th_affine2d(x, matrix, output_img_width, output_img_height):\n assert matrix.ndim == 2\n matrix = matrix[:2, :]\n transform_matrix = matrix\n src = x\n\n # cols, rows, channels = src.shape\n dst = cv2.warpAffine(\n src,\n transform_matrix,\n (output_img_width, output_img_height),\n cv2.INTER_AREA,\n cv2.BORDER_CONSTANT,\n borderValue=(0, 0, 0),\n )\n # for gray image\n if dst.ndim == 2:\n dst = np.expand_dims(np.asarray(dst), axis=2)\n\n return dst",
"def _conv3x3(\n in_channel: int,\n out_channel: int,\n stride: int = 1,\n) -> nn.Conv2d:\n return nn.Conv2d(\n in_channel,\n out_channel,\n kernel_size=3,\n stride=stride,\n padding=1,\n pad_mode='pad',\n )",
"def _conv_layers(self):\n layers, activations = [], []\n\n # The first out_channels should be the second to last filter size\n tmp = self.filters.pop()\n\n # self.output_shape[0] Needs to be the last out_channels to match the input matrix\n for i, (filter_, kernel, stride) in enumerate(\n zip(\n (*self.filters, self.output_shape[0]),\n self.kernels,\n self.strides,\n )\n ):\n shape = self.encoder_shapes[-1 * i - 1]\n\n # TODO: this is a quick fix but might not generalize to some architectures\n if stride == 1:\n padding = same_padding(shape[1:], kernel, stride)\n else:\n padding = tuple(\n int(dim % 2 == 0) for dim in self.encoder_shapes[-1 * i - 2][1:]\n )\n\n layers.append(\n nn.ConvTranspose2d(\n in_channels=shape[0],\n out_channels=filter_,\n kernel_size=kernel,\n stride=stride,\n padding=padding,\n )\n )\n\n # TODO: revist padding, output_padding, see github issue.\n # This code may not generalize to other examples. Needs testing.\n # this also needs to be addressed in conv_output_dim\n\n activations.append(get_activation(self.activation))\n\n # Overwrite output activation\n activations[-1] = get_activation(self.output_activation)\n\n # Restore invariant state\n self.filters.append(tmp)\n\n return nn.ModuleList(layers), activations",
"def __im2col_fractal_indices(indices, fmap):\n block_size = config['mac'][1]\n block_size_m = config['mac'][0]\n _, howo, _, kernel_h, kernel_w, _ = fmap.shape\n batch_size, index_i1, index_j1, index_i0, index_j0 = indices\n n_index = batch_size\n\n hw_index = index_i1*block_size_m + index_i0\n\n c1_index = (((index_j1*block_size + index_j0) // block_size) //\n kernel_w.value) // kernel_h.value\n\n kh_index = (((index_j1*block_size + index_j0) // block_size) //\n kernel_w.value) % kernel_h.value\n\n kw_index = ((index_j1*block_size + index_j0) \\\n // block_size) % kernel_w.value\n\n c0_index = (index_j1*block_size + index_j0) % block_size\n if optim_dict[\"c0_optim_flg\"]:\n c1_index = 0\n kh_index = (index_j1*4 + index_j0 // 4) // kernel_w.value\n kw_index = (index_j1*4 + index_j0 // 4) % kernel_w.value\n c0_index = index_j0 % 4\n dtype = compute_dtype\n\n return tvm.select( \\\n tvm.any(hw_index < 0, hw_index > howo.value - 1), \\\n tvm.const(0.0, dtype), \\\n fmap(n_index, hw_index, \\\n c1_index, kh_index, kw_index, c0_index))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get fmap_shape_nc1hwc0 for dsl interface
|
def _get_dsl_fmap_shape_nc1hwc0():
valid_shape = ConvParam.fusion_para.get("valid_shape")
if valid_shape:
fmap_shape_nc1hwc0 = tuple(shape_to_list(valid_shape))
else:
fmap_shape_nc1hwc0 = tuple(shape_to_list(data.shape))
return fmap_shape_nc1hwc0
|
[
"def shape(self) -> S:",
"def shape(name):\n return Formex(pattern(Pattern[name]))",
"def shape_from_config_jungfrau(co):\n return (co.numberOfModules(), co.numberOfRowsPerModule(), co.numberOfColumnsPerModule())",
"def output_shape(self):\n return None",
"def get_image_shape(self) -> Tuple[int, int]:",
"def shape_from_args(self):\r\n return u.Shape(self.rows, self.cols)",
"def shape(input, name=None):\n return array_ops.shape(input, name=name)",
"def _internal_weight_shapes(self):\n coeff = 4 if self._use_lstm else 1\n shapes = []\n\n # Initial fully-connected layers.\n prev_dim = self._n_in\n for n_fc in self._fc_layers_pre:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n # Recurrent layers.\n for n_rec in self._rnn_layers:\n # Input-to-hidden\n shapes.append([n_rec*coeff, prev_dim])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n # Hidden-to-hidden\n shapes.append([n_rec*coeff, n_rec])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n if not self._use_lstm:\n # Hidden-to-output\n shapes.append([n_rec, n_rec])\n if self._use_bias:\n shapes.append([n_rec])\n\n prev_dim = n_rec\n\n # Fully-connected layers.\n for n_fc in self._fc_layers:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n return shapes",
"def image_shape(fidelity=None):\n return [2 * Bridge.HEIGHT, Bridge.WIDTH]",
"def canonical_bias_shapes(self):\n return self._canonical_bias_shape(0) * self._num_layers",
"def this_shape(self):\n _logger.debug('%s', where_am_i())\n return self._metadata['instance']['shape']",
"def get_data_shape(self):\n raise NotImplementedError",
"def raw_shape_types(self: Fdef) -> Optional[dict[str, Any]]:\n self._resolve_if_needed()\n return self._raw_shape_types",
"def shape_p0(self):\n return self.topology.n_elements[0], self.index",
"def shape(x):\n\treturn tf.shape(x)",
"def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3",
"def localShapeOutAttr(*args, **kwargs):\n \n pass",
"def _create_shapely(self):\n\n pass",
"def layer_shapes(image_shape, model):\n shape = {model.layers[0].name: (None,) + image_shape,}\n\n for layer in model.layers[1:]:\n nodes = layer._inbound_nodes\n for node in nodes:\n inputs = [shape[lr.name] for lr in node.inbound_layers]\n if not inputs:\n continue\n shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)\n\n return shape"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test all option max_of combinations.
|
def test_option_max():
    for t1 in tipes:
        o1 = rawOptionType(t1)
        assert_max(t1, rawSomeType(), t1)
        assert_max(o1, rawSomeType(), o1)
        for t2 in tipes:
            o2 = rawOptionType(t2)
            assert_max(o1, t2, rawOptionType(t1.max_of(t2)))
            assert_max(o1, o2, rawOptionType(t1.max_of(t2)))
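The assert_max helper is not shown in this snippet; the sketch below is a hypothetical shape for it, assuming max_of returns the expected "maximum" (least upper bound) of the two type objects. The helper name and signature mirror the test above but are not taken from the source.

# Hypothetical helper, assuming max_of computes the join of two type objects.
def assert_max(type_a, type_b, expected):
    assert type_a.max_of(type_b) == expected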
|
[
"def test_result_has_max_requested_or_less(self):\n pass",
"def test_maximum():\n test_maximum_case(0, [0, 0, 0], 0)\n test_maximum_case(1, [2, 0, 0], 2)\n test_maximum_case(2, [1, 2, 1], 2)\n test_maximum_case(3, [4, 5, 6], 6)\n test_maximum_case(4, [4.5, 5.1, 6.7], 6.7)\n test_maximum_case(5, [], None)",
"def hasMax(*args, **kwargs):\n \n pass",
"def test_maxend(self):\n self.assertEqual(max_integer([4, 3, 9]), 9)",
"def _check_max_num(self, obj):\n\n if obj.max_num is None:\n return []\n elif not isinstance(obj.max_num, int):\n return must_be(\"an integer\", option=\"max_num\", obj=obj, id=\"admin.E204\")\n else:\n return []",
"def test__validate_max_presences__0():\n for input_value, expected_output in (\n (None, MAX_PRESENCES_DEFAULT),\n (0, 0),\n (1, 1),\n ):\n output = validate_max_presences(input_value)\n vampytest.assert_eq(output, expected_output)",
"def maxValue(max=None):",
"def test_multiselect_option_over_max_selections(app: Page):\n app.locator(\".stCheckbox\").first.click()\n expect(app.locator(\".element-container .stException\")).to_contain_text(\n \"Multiselect has 2 options selected but max_selections\\nis set to 1\"\n )",
"def test_only_min_max(self):\n self.assertEqual(1, solution(12, 12, 12))",
"def test__validate_max_presences__1():\n for input_value in (\n -1,\n ):\n with vampytest.assert_raises(ValueError):\n validate_max_presences(input_value)",
"def test_maximum_case(num_test, array, expected):\n if maximum(array) == expected:\n print(\"Test\", num_test, \"OK\")\n return\n\n print(\"Test\", num_test, \"FAIL\")",
"def test_options_limit(self):\n\n assert query.QueryOptions(limit=50).limit == 50",
"def maxIterations(number):\n return lambda iterationNumber, corrections, values, datasetSize: iterationNumber < number",
"def test_find_max():\n \n another_test_list = [41, 26, 66, 5, 40]\n \n assert find_max(another_test_list) == 66",
"def test_scale_max(self):\n u = self.abUsage([1,3])\n self.assertEqual(u.scale_max(12), self.abUsage([4.0, 12.0]))\n self.assertEqual(u.scale_max(1), self.abUsage([1/3.0,1.0]))\n #default is max to 1\n self.assertEqual(u.scale_max(), self.abUsage([1/3.0,1.0]))",
"def test_filtering_of_more_than_max_cap_count_instances(self):\n self.assertEqual(\n maximal_suffix_match(\n 'ATAGCATTA',\n 'CAGTCAGACCCATACCAATAGCATTAATAGCATTA',\n max_cap_count=1\n ),\n None\n )",
"def _check_list_max_show_all(self, obj):\n\n if not isinstance(obj.list_max_show_all, int):\n return must_be(\n \"an integer\", option=\"list_max_show_all\", obj=obj, id=\"admin.E119\"\n )\n else:\n return []",
"def get_max_with_many_arguments(*args):\n result = args[0]\n for num in args:\n if (num > result):\n result = num\n return result",
"def max_validation(\n property_schema: 'OnticProperty',\n value: [str, int, float, date, datetime, time]) -> bool:\n if property_schema.max:\n if property_schema.type in BOUNDABLE_TYPES:\n if len(value) > property_schema.max:\n return False\n if property_schema.type in COMPARABLE_TYPES:\n if value > property_schema.max:\n return False\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test all option compatible_with combinations.
|
def test_option_compatible():
    for t1 in tipes:
        o1 = rawOptionType(t1)
        assert_compatible(o1, rawSomeType(), True)
        for t2 in tipes:
            o2 = rawOptionType(t2)
            assert_compatible(o1, t2, t1.compatible_with(t2))
            assert_compatible(o1, o2, t1.compatible_with(t2))
|
[
"def match_options(self): # pragma: no cover",
"def test_get_options_expirations(self):\n pass",
"def test_get_options(self):\n pass",
"def ValidateOptions(self, opt, args):",
"def test_get_option_expirations_realtime(self):\n pass",
"def test_get_options_chain_eod(self):\n pass",
"def _check_option_support(options):\n for opt in options:\n if _is_option_supported(opt) is None:\n try:\n cmd = ipmitool_command_options[opt]\n out, err = utils.execute(*cmd)\n except processutils.ProcessExecutionError:\n _is_option_supported(opt, False)\n else:\n _is_option_supported(opt, True)\n\n return",
"def test_get_options_chain(self):\n pass",
"def test_opts_type():\n type_check = re.compile(r'oslo_config\\.cfg\\.(\\w+Opt)')\n for opt in opts:\n match = type_check.search(str(opt))\n assert match, str(\"{} is not recognized as a oslo_config.cfg.*\"\n \" object!\").format(opt)\n assert hasattr(cfg, match.group(1)), \\\n str(\"{} is not a subclass of oslo_config.cfg\").format(opt)",
"def _check_options(self, p, idx, feature):\n if self.options.has_feature(feature):\n return\n self._add_error('Feature \"%s\" is not allowed by options' % feature,\n p.lineno(idx), p.lexpos(idx))",
"def toolHasOptions():\n pass",
"def test_get_option_strikes_realtime(self):\n pass",
"def validate_options():\n if os.environ.get(\"POAP_PHASE\", None) == \"USB\" and options[\"mode\"] == \"personality\":\n abort(\"POAP Personality is not supported via USB!\")\n \n os.system(\"rm -rf /bootflash/poap_files\")\n os.system(\"rm -rf /bootflash_sup-remote/poap_files\")\n # Compare the list of what options users have to what options we actually support.\n supplied_options = set(options.keys())\n # Anything extra shouldn't be there\n invalid_options = supplied_options.difference(valid_options)\n for option in invalid_options:\n poap_log(\"Invalid option detected: %s (check spelling, capitalization, and underscores)\" %\n option)\n if len(invalid_options) > 0:\n abort()",
"def test_get_all_options_tickers(self):\n pass",
"def _check_dataset_options(datasets):\n for dataset in datasets:\n if not dataset.get('opts'):\n continue\n\n valid_options = [\n 'row_pattern',\n 'add_rows',\n 'base_row_index',\n 'increment'\n ]\n if not all(val in list(dataset['opts'].keys()) for val in valid_options[:2]): # noqa\n raise OptionError(f'Options must include {valid_options[:2]}')\n for key in dataset['opts'].keys():\n if key not in valid_options:\n raise OptionError(f'Invalid option {key}.') # noqa\n if (dataset['opts']['row_pattern'] not in ['copy']\n and\n not callable(dataset['opts']['row_pattern'])):\n raise OptionError(f'row_pattern option must be one of [\\'copy\\'] or a function') # noqa",
"def _task_submit_check_options():\n update_personid = bibtask.task_get_option(\"update_personid\")\n disambiguate = bibtask.task_get_option(\"disambiguate\")\n merge = bibtask.task_get_option(\"merge\")\n\n record_ids = bibtask.task_get_option(\"record_ids\")\n all_records = bibtask.task_get_option(\"all_records\")\n from_scratch = bibtask.task_get_option(\"from_scratch\")\n\n commands = bool(update_personid) + bool(disambiguate) + bool(merge)\n\n if commands == 0:\n bibtask.write_message(\"ERROR: At least one command should be specified!\"\n , stream=sys.stdout, verbose=0)\n return False\n\n if commands > 1:\n bibtask.write_message(\"ERROR: The options --update-personid, --disambiguate \"\n \"and --merge are mutually exclusive.\"\n , stream=sys.stdout, verbose=0)\n return False\n\n assert commands == 1\n\n if update_personid:\n if any((from_scratch,)):\n bibtask.write_message(\"ERROR: The only options which can be specified \"\n \"with --update-personid are --record-ids and \"\n \"--all-records\"\n , stream=sys.stdout, verbose=0)\n return False\n\n options = bool(record_ids) + bool(all_records)\n if options > 1:\n bibtask.write_message(\"ERROR: conflicting options: --record-ids and \"\n \"--all-records are mutually exclusive.\"\n , stream=sys.stdout, verbose=0)\n return False\n\n if record_ids:\n for iden in record_ids:\n if not iden.isdigit():\n bibtask.write_message(\"ERROR: Record_ids expects numbers. \"\n \"Provided: %s.\" % iden)\n return False\n\n if disambiguate:\n if any((record_ids, all_records)):\n bibtask.write_message(\"ERROR: The only option which can be specified \"\n \"with --disambiguate is from-scratch\"\n , stream=sys.stdout, verbose=0)\n return False\n\n if merge:\n if any((record_ids, all_records, from_scratch)):\n bibtask.write_message(\"ERROR: There are no options which can be \"\n \"specified along with --merge\"\n , stream=sys.stdout, verbose=0)\n return False\n\n return True",
"def test_expected_options_have_default_values(self):\n\n skip_option_classes = [\n eo.HelpOption,\n eo.IgnoreOption,\n eo.UnsupportedOption,\n eo.BuildScriptImplOption,\n ]\n\n missing_defaults = set()\n for option in eo.EXPECTED_OPTIONS:\n if option.__class__ in skip_option_classes:\n continue\n\n if option.dest not in eo.EXPECTED_DEFAULTS:\n missing_defaults.add(option.dest)\n\n if len(missing_defaults) > 0:\n self.fail('non-exhaustive default values for options, missing: {}'\n .format(missing_defaults))",
"def test_extract_options():\n options = extract_options(CONF)\n assert options == OPTIONS",
"def test_autotools_args_from_conditional_variant(config, mock_packages):\n s = Spec(\"autotools-conditional-variants-test\").concretized()\n assert \"example\" not in s.variants\n assert len(s.package._activate_or_not(\"example\", \"enable\", \"disable\")) == 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Production line for correcting and estimating CPOL radar data parameters. The naming convention for these parameters is assumed to be DBZ, ZDR, VEL, PHIDP, KDP, SNR, RHOHV, and NCP. KDP, NCP, and SNR are optional and can be recalculated.
|
def production_line(radar_file_name, sound_dir, is_cpol=True, use_unravel=True):
    # !!! READING THE RADAR !!!
    if is_cpol:
        radar = pyart.io.read(radar_file_name)
    else:
        radar = radar_codes.read_radar(radar_file_name)
    # Correct data type manually
    try:
        radar.longitude['data'] = radar.longitude['data'].filled(0).astype(np.float32)
        radar.latitude['data'] = radar.latitude['data'].filled(0).astype(np.float32)
        radar.altitude['data'] = radar.altitude['data'].filled(0).astype(np.int32)
    except Exception:
        pass
    if is_cpol:
        if radar.nsweeps < 10:
            raise ValueError(f'Problem with CPOL PPIs, only {radar.nsweeps} elevations.')
    # Check if radar reflectivity field is correct.
    if not radar_codes.check_reflectivity(radar):
        raise TypeError(f"Reflectivity field is empty in {radar_file_name}.")
    if not radar_codes.check_azimuth(radar):
        raise TypeError(f"Azimuth field is empty in {radar_file_name}.")
    if not radar_codes.check_year(radar):
        print(f'{radar_file_name} date probably wrong. Had to correct century.')
    new_azimuth, azi_has_changed = radar_codes.correct_azimuth(radar)
    if azi_has_changed:
        radar.azimuth['data'] = new_azimuth
    # Getting radar's date and time.
    radar_start_date = netCDF4.num2date(radar.time['data'][0], radar.time['units'].replace("since", "since "))
    radar.time['units'] = radar.time['units'].replace("since", "since ")
    # Get radiosoundings:
    if sound_dir is not None:
        radiosonde_fname = radar_codes.get_radiosoundings(sound_dir, radar_start_date)
    # Correct Doppler velocity units.
    try:
        radar.fields['VEL']['units'] = "m/s"
        vel_missing = False
    except KeyError:
        vel_missing = True
    # Check if the nyquist velocity is present in the radar parameters.
    if not vel_missing:
        velocity.check_nyquist_velocity(radar)
    # Looking for RHOHV field
    # For CPOL, season 09/10, there are no RHOHV fields before March!!!!
    try:
        radar.fields['RHOHV']
        fake_rhohv = False  # Don't need to delete this field cause it's legit.
    except KeyError:
        # Creating a fake RHOHV field.
        fake_rhohv = True  # We delete this fake field later.
        rho = pyart.config.get_metadata('cross_correlation_ratio')
        rho['data'] = np.ones_like(radar.fields['DBZ']['data'])
        radar.add_field('RHOHV', rho)
        radar.add_field('RHOHV_CORR', rho)
    # Compute SNR and extract radiosounding temperature.
    # Requires radiosoundings
    if sound_dir is not None:
        try:
            height, temperature, snr = radar_codes.snr_and_sounding(radar, radiosonde_fname)
            radar.add_field('temperature', temperature, replace_existing=True)
            radar.add_field('height', height, replace_existing=True)
        except ValueError:
            traceback.print_exc()
            print(f"Impossible to compute SNR {radar_file_name}")
            return None
        # Looking for SNR
        try:
            radar.fields['SNR']
        except KeyError:
            radar.add_field('SNR', snr, replace_existing=True)
    # Correct RHOHV
    if not fake_rhohv:
        rho_corr = radar_codes.correct_rhohv(radar)
        radar.add_field_like('RHOHV', 'RHOHV_CORR', rho_corr, replace_existing=True)
    # Correct ZDR
    corr_zdr = radar_codes.correct_zdr(radar)
    radar.add_field_like('ZDR', 'ZDR_CORR', corr_zdr, replace_existing=True)
    # GateFilter
    if is_cpol:
        gatefilter = filtering.do_gatefilter_cpol(radar,
                                                  refl_name='DBZ',
                                                  phidp_name="PHIDP",
                                                  rhohv_name='RHOHV_CORR',
                                                  zdr_name="ZDR")
    else:
        gatefilter = filtering.do_gatefilter(radar,
                                             refl_name='DBZ',
                                             phidp_name="PHIDP",
                                             rhohv_name='RHOHV_CORR',
                                             zdr_name="ZDR")
    # Check if NCP exists.
    try:
        radar.fields['NCP']
        fake_ncp = False
    except KeyError:
        fake_ncp = True
        ncp = pyart.config.get_metadata('normalized_coherent_power')
        ncp['data'] = np.zeros_like(radar.fields['RHOHV']['data'])
        ncp['data'][gatefilter.gate_included] = 1
        radar.add_field('NCP', ncp)
    phidp, kdp = phase.valentin_phase_processing(radar, gatefilter, phidp_name='PHIDP')
    radar.add_field('PHIDP_VAL', phidp)
    radar.add_field('KDP_VAL', kdp)
    kdp_field_name = 'KDP_VAL'
    phidp_field_name = 'PHIDP_VAL'
    # Unfold VELOCITY
    if not vel_missing:
        # Dealias velocity.
        unfvel_tick = time.time()
        if use_unravel:
            vdop_unfold = velocity.unravel(radar, gatefilter)
        else:
            vdop_unfold = velocity.unfold_velocity(radar, gatefilter)
        radar.add_field('VEL_UNFOLDED', vdop_unfold, replace_existing=True)
        print('Doppler velocity unfolded in %0.2f s.' % (time.time() - unfvel_tick))
    # Correct Attenuation ZH
    zh_corr = attenuation.correct_attenuation_zh_pyart(radar, phidp_field=phidp_field_name)
    radar.add_field('DBZ_CORR', zh_corr, replace_existing=True)
    # radar.add_field('specific_attenuation_reflectivity', atten_spec, replace_existing=True)
    # Correct Attenuation ZDR
    zdr_corr = attenuation.correct_attenuation_zdr(radar, gatefilter=gatefilter, phidp_name=phidp_field_name, zdr_name='ZDR_CORR')
    radar.add_field('ZDR_CORR_ATTEN', zdr_corr)
    # Hydrometeors classification
    hydro_class = hydrometeors.hydrometeor_classification(radar,
                                                          gatefilter,
                                                          kdp_name=kdp_field_name,
                                                          zdr_name='ZDR_CORR_ATTEN')
    radar.add_field('radar_echo_classification', hydro_class, replace_existing=True)
    # Rainfall rate
    rainfall = hydrometeors.rainfall_rate(radar, gatefilter, kdp_name=kdp_field_name,
                                          refl_name='DBZ_CORR', zdr_name='ZDR_CORR_ATTEN')
    radar.add_field("radar_estimated_rain_rate", rainfall)
    # DSD retrieval
    nw_dict, d0_dict = hydrometeors.dsd_retrieval(radar, gatefilter, kdp_name=kdp_field_name, zdr_name='ZDR_CORR_ATTEN')
    radar.add_field("D0", d0_dict)
    radar.add_field("NW", nw_dict)
    # Removing fake and useless fields.
    if fake_ncp:
        radar.fields.pop('NCP')
    if fake_rhohv:
        radar.fields.pop("RHOHV")
        radar.fields.pop("RHOHV_CORR")
    # Remove obsolete fields:
    for obsolete_key in ["Refl", "PHI_UNF", "PHI_CORR", "height", 'TH', 'TV', 'ZDR_CORR',
                         'RHOHV']:
        try:
            radar.fields.pop(obsolete_key)
        except KeyError:
            continue
    # Rename fields to pyart defaults.
    fields_names = [('VEL', 'raw_velocity'),
                    ('VEL_UNFOLDED', 'velocity'),
                    ('DBZ', 'total_power'),
                    ('DBZ_CORR', 'reflectivity'),
                    ('RHOHV_CORR', 'cross_correlation_ratio'),
                    ('ZDR', 'differential_reflectivity'),
                    ('ZDR_CORR_ATTEN', 'corrected_differential_reflectivity'),
                    ('PHIDP', 'differential_phase'),
                    ('PHIDP_BRINGI', 'bringi_differential_phase'),
                    ('PHIDP_GG', 'giangrande_differential_phase'),
                    ('PHIDP_VAL', 'corrected_differential_phase'),
                    ('KDP', 'specific_differential_phase'),
                    ('KDP_BRINGI', 'bringi_specific_differential_phase'),
                    ('KDP_GG', 'giangrande_specific_differential_phase'),
                    ('KDP_VAL', 'corrected_specific_differential_phase'),
                    ('WIDTH', 'spectrum_width'),
                    ('SNR', 'signal_to_noise_ratio'),
                    ('NCP', 'normalized_coherent_power'),
                    ('DBZV', 'reflectivity_v'),
                    ('WRADV', 'spectrum_width_v'),
                    ('SNRV', 'signal_to_noise_ratio_v'),
                    ('SQIV', 'normalized_coherent_power_v')]
    for old_key, new_key in fields_names:
        try:
            radar.add_field(new_key, radar.fields.pop(old_key), replace_existing=True)
        except KeyError:
            continue
    hardcode_keys = ["reflectivity",
                     "radar_echo_classification",
                     "corrected_differential_reflectivity",
                     "region_dealias_velocity",
                     "D0", "NW"]
    for mykey in hardcode_keys:
        try:
            radar.fields[mykey]['data'] = filtering.filter_hardcoding(radar.fields[mykey]['data'], gatefilter)
        except KeyError:
            continue
    goodkeys = ["radar_echo_classification", "D0", "NW", "velocity", "total_power", "raw_velocity",
                "reflectivity", "cross_correlation_ratio", "corrected_differential_reflectivity", "radar_estimated_rain_rate",
                "corrected_differential_phase", "corrected_specific_differential_phase", "spectrum_width"]
    # Delete working variables.
    for k in list(radar.fields.keys()):
        if k not in goodkeys:
            radar.fields.pop(k)
    # TODO: Set Deflate Level
    return radar
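A minimal usage sketch for the pipeline above, assuming Py-ART is installed and that production_line is importable from a cpol_processing-style module (the module name, input file, sounding directory, and output name are illustrative, not taken from the source):

# Hypothetical driver for the production_line function shown above.
import pyart
import cpol_processing  # assumed module name

radar = cpol_processing.production_line(
    "cpol_20170101_0000.nc",       # illustrative input radar file
    sound_dir="/data/soundings",   # radiosonde directory, or None to skip SNR/temperature
    is_cpol=True,
    use_unravel=True,
)
# production_line returns None when the SNR computation fails.
if radar is not None:
    # Standard Py-ART CF/Radial writer; output file name is illustrative.
    pyart.io.write_cfradial("cpol_20170101_0000_corrected.nc", radar)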
|
[
"def _fetch_radar_params(self):\n resp = self._send_command('GRPS')\n if resp != Response.OK:\n raise KLD7Exception(\"GRPS command failed: {}\".format(resp))\n code, payload = self._read_packet()\n if code != 'RPST':\n raise KLD7Exception(\"GRPS data has wrong packet type\")\n\n values = struct.unpack(_RPS_FORMAT, payload)\n for index, cmd in _param_struct_fields.items():\n self._param_dict[cmd] = values[index]",
"def evaluateReconciliationParams(eng):\n # select data\n strWorkingDir = \"../../data/evaluation/reconciliation/\"\n strFileNamePattern= None\n lsFilePath = cf.getFileList(strWorkingDir, strFileNamePattern)\n \n # parameter \n strCoder = ecc.CODER_RS\n lsM = [4,]\n lsR = range(1, 8)\n dRectWnd = 2.0\n dSMWnd = 2.0\n dSCWnd = 0.15\n \n # evaluate\n lsResult = []\n if (strCoder == ecc.CODER_RS): \n for m in lsM:\n for r in lsR:\n n = 2**m - 1\n k = n - 2*r\n if(k<1 or n*m>=500):\n break\n \n print \"testing m=%d, r=%d...\" % (m, r)\n for fn in lsFilePath:\n lsDataResult = sd.evaluateSingleData(strWorkingDir, fn,\n dRectDuration=dRectWnd, dSMDuration=dSMWnd,\n dSCDuration=dSCWnd,\n eng=eng, strCoder=strCoder, n=n, k=k, m=m, r=r)\n lsResult.extend(lsDataResult)\n elif strCoder == ecc.CODER_GOLAY:\n n = 23\n k = 12\n m = 1\n r = 2\n for fn in lsFilePath:\n lsDataResult = sd.evaluateSingleData(strWorkingDir, fn, \n dRectDuration=dRectWnd, dSMDuration=dSMWnd,\n dSCDuration=dSCWnd,\n eng=eng, strCoder=strCoder, n=n, k=k, m=m, r=r)\n lsResult.extend(lsDataResult)\n\n # result\n dfResult = pd.DataFrame(lsResult)\n dcMatchingRate = {}\n for r in lsR:\n nMatchedKey = (dfResult[sd.ERR_USER_EC][ (dfResult[sd.R]==r) & \\\n (dfResult[sd.ERR_USER_EC]==0) ]).count()\n nTotalKey = dfResult[sd.ERR_USER_EC][dfResult[sd.R]==r].count()\n dMatchingRate = nMatchedKey * 1.0 / nTotalKey\n dcMatchingRate[r] = dMatchingRate\n srMatchingRate = pd.Series(dcMatchingRate)\n \n return dfSummary, dfResult, srMatchingRate",
"def __init__(self, props, data):\n name = 'PVGrad'\n super(STJPV, self).__init__(name=name, props=props, data=data)\n # Some config options should be properties for ease of access\n self.pv_lev = self.props['pv_value']\n self.fit_deg = self.props['fit_deg']\n self.min_lat = self.props['min_lat']\n\n if self.props['poly'].lower() in ['cheby', 'cby', 'cheb', 'chebyshev']:\n self.pfit = poly.chebyshev.chebfit\n self.pder = poly.chebyshev.chebder\n self.peval = poly.chebyshev.chebval\n\n elif self.props['poly'].lower() in ['leg', 'legen', 'legendre']:\n self.pfit = poly.legendre.legfit\n self.pder = poly.legendre.legder\n self.peval = poly.legendre.legval\n\n elif self.props['poly'].lower() in ['poly', 'polynomial']:\n self.pfit = poly.polynomial.polyfit\n self.pder = poly.polynomial.polyder\n self.peval = poly.polynomial.polyval\n\n # Initialise latitude & theta output dicts\n self.out_data = {}",
"def get_NPVs( self,\r\n leg,\r\n value_date,\r\n instrument,\r\n disc_cv_details ):\r\n \"\"\" Step one cal of leg1 \r\n leg1 = { \"currency\":...,\r\n \"balance_tb\":...,\r\n \"acc_cpn_detail\":...,\r\n \"pay_convention\":....,\r\n \"day_convention\":....,}\r\n \"\"\"\r\n leg1 = instrument[leg]\r\n Day_Counter = Day_Count.Day_Counter(leg1[\"day_convention\"])\r\n currency = leg1[\"currency\"]\r\n convention = self.convention[currency]\r\n cv_instrument = self.curve_instrument[currency]\r\n fx_instrument = self.cv_fx_instrument[currency]\r\n \"\"\" Discounting Curve settings below\r\n \"\"\"\r\n if disc_cv_details[\"type\"].upper() == \"XCS\":\r\n \"\"\" For XCS calculation we have to use \r\n dual curves method libor curve for \r\n coupon calculation and basis adjusted\r\n curve for discounting \r\n \"\"\"\r\n cv_dis = self.gen_swap_curve( value_date,\r\n convention,\r\n fx_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n disc_cv_details[\"type\"] = \"SWAP\"\r\n cv_fwd = self.gen_swap_curve( value_date,\r\n convention,\r\n cv_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n disc_cv_details[\"type\"] = \"XCS\"\r\n else:\r\n Day_Counter.set_convention_by_ccy(currency)\r\n cv_fwd = self.gen_swap_curve( value_date,\r\n convention,\r\n cv_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n \r\n cv_dis = cv_fwd\r\n cf_tb = CF_Gen( leg1, \r\n cv_fwd,\r\n self.cv_keeper,\r\n Day_Counter )\r\n INT_flow = [[ele[\"End_Time\"],ele[\"Interests\"]] for ele in cf_tb]\r\n NPV_INT = Tools.cal_npv( INT_flow, cv_dis, Day_Counter )\r\n PRI_flow = [[ele[\"End_Time\"],ele[\"Principal\"]] for ele in cf_tb]\r\n NPV_PRI = Tools.cal_npv( PRI_flow, cv_dis, Day_Counter )\r\n return NPV_INT,NPV_PRI",
"def process_correct_phidp0(procstatus, dscfg, radar_list=None):\n\n if procstatus != 1:\n return None, None\n\n for datatypedescr in dscfg['datatype']:\n radarnr, datagroup, datatype, dataset, product = get_datatype_fields(\n datatypedescr)\n if datatype == 'dBZ':\n refl_field = 'reflectivity'\n if datatype == 'dBZc':\n refl_field = 'corrected_reflectivity'\n if datatype == 'PhiDP':\n psidp_field = 'differential_phase'\n if datatype == 'PhiDPc':\n psidp_field = 'corrected_differential_phase'\n if datatype == 'uPhiDP':\n psidp_field = 'uncorrected_differential_phase'\n\n ind_rad = int(radarnr[5:8])-1\n if radar_list[ind_rad] is None:\n warn('No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n if (refl_field not in radar.fields) or (psidp_field not in radar.fields):\n warn('Unable to correct PhiDP system offset. Missing data')\n return None, None\n\n ind_rmin = np.where(radar.range['data'] > dscfg['rmin'])[0][0]\n ind_rmax = np.where(radar.range['data'] < dscfg['rmax'])[0][-1]\n r_res = radar.range['data'][1]-radar.range['data'][0]\n min_rcons = int(dscfg['rcell']/r_res)\n\n if psidp_field.startswith('corrected_'):\n phidp_field = psidp_field\n elif psidp_field.startswith('uncorrected_'):\n phidp_field = psidp_field.replace('uncorrected_', 'corrected_', 1)\n else:\n phidp_field = 'corrected_'+psidp_field\n\n phidp = pyart.correct.correct_sys_phase(\n radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, min_rcons=min_rcons,\n zmin=dscfg['Zmin'], zmax=dscfg['Zmax'], psidp_field=psidp_field,\n refl_field=refl_field, phidp_field=phidp_field)\n\n # prepare for exit\n new_dataset = deepcopy(radar)\n new_dataset.fields = dict()\n new_dataset.add_field(phidp_field, phidp)\n\n return new_dataset, ind_rad",
"def getSensPolar(pfrac,ston,area,mode='optimal',goal=False,silent=False):\n \n # Check the input parameters\n if (mode != 'optimal'):\n print(\"Observing mode parameter is not in allowed values\")\n print(\"Allowed values are [optimal]\")\n return\n \n # There is ambiguity in how the user will understand the polarisation fraction \n # so I make a test\n if (pfrac >= 1.0):\n print(\"Assuming polarisation fraction has been provided in %\")\n pfrac = pfrac/100.\n\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n # We will need as well diam4 and eefD4Band\n data = getGeomQuant(pixSizeBand)\n diam4 = data[1]\n eefD4Band = data[2]\n\n if (mode == 'optimal'):\n # get the total NEP, with the detector contribution \n # Though we do not need it for the sensitivity, let's get the background\n forePower = [getForeground(1),getForeground(2),getForeground(3)]\n forePower = np.asarray(forePower)\n if not(silent):\n bandStr = str(bandWave[0]*1e6)+' '+str(bandWave[1]*1e6)+' '+str(bandWave[2]*1e6)\n print('Foreground power at '+bandStr+' are: {0:8.3g}, {1:8.3g} and {2:8.3g} W.'\\\n .format(forePower[0],forePower[1],forePower[2]))\n # 12/05/2020 the comments below do not apply anymore. Same NEP in polar\n # or total power mode as all grids always contribute to the measurement.\n # the polar option divides the background contribution by sqrt(2)\n # because only 1/2 of the pixel is sensitive to a given polarisation.\n if not(silent):\n if (goal):\n print('NEP includes GOAL detector NEP')\n else:\n print('NEP uses REQuirement detector NEP')\n pixNEP = [getNEP(1,withDet=True,goal=goal,silent=True),\\\n getNEP(2,withDet=True,goal=goal,silent=True),\\\n getNEP(3,withDet=True,goal=goal,silent=True)]\n pixNEP = np.asarray(pixNEP)\n # get the timing information\n timings = getObsTime(scanSpeed,area,mode=mode)\n # Note that in the case of a polarisation observation, we must consider\n # that we have 2 kinds of pixels and that we need both information to\n # derive the polarisation fraction and angle so the on-source time per\n # kind of pixels is divided by two\n onSourceTime = timings[0]/2.\n # In the case of a \"polarisation\" measurement we have a number of elements\n # to consider:\n # As we express the s/n on the polarisation fraction, this is not the \n # S/N that enters the NEP equation, and as the source is only one \n # component of the incident power on the pixel, there is a complex\n # relation between the two leading to the presence in the limiting \n # flux equation of s/n.(1/sqrt(2)).sqrt(1/p^2 + 2)\n # These terms alreay take into account the fact that a background subtraction\n # must occur to get to the source component.\n \n # Extended source case\n # Contrary to the total power case I cannot simply break down this \n # expression into components. See the sensitivity note for\n # explanations\n extSrcPow = ston*math.sqrt(2)*math.sqrt(1/pfrac**2+2)*pixNEP/math.sqrt(2*onSourceTime)\n # convert to MJy/sr\n extSrcFlux = [compPixPowSrc(extSrcPow[0],1,extended=True,silent=True),\\\n compPixPowSrc(extSrcPow[1],2,extended=True,silent=True),\\\n compPixPowSrc(extSrcPow[2],3,extended=True,silent=True)]\n \n # Point source case.\n # Further considerations apply.\n # First a point source measurement is always the result of the integration\n # in an aperture. 
We assume that the s/n specified by the user on the polarisation\n # fraction is that in the integrated flux and thus that the s/n on the \n # polarisation fraction per pixel in the aperture can be sqrt(Naper) lower\n # where Naper is the number of pixels in the aperture.\n # Compute the number of pixels in the aperture for which we know the\n # encircled energy fraction\n # Here I need the array of fwhm. This is what getBeamProfile provides\n fwhm = getBeamProfile(fwhm=1)\n nPixAper = (math.pi/4) * (diam4*fwhm)**2 / pixSizeBand**2\n # As with the extended case it is too complex to break down the limiting flux \n # equation into its component\n pointSrcPow = math.sqrt(2)*np.sqrt(nPixAper)*ston*math.sqrt(2+1/pfrac**2)*pixNEP/math.sqrt(2*onSourceTime)\n # recall that we only have a certain EEF in that aperture\n pointSrcPow /= eefD4Band\n # convert to mJy - Warning: if I use compPixPowSrc I am assuming that\n # pointSrcPow is a power falling on a single pixel (and the routine will make\n # the correction). But this has already been taken into accound in the formula\n # above so now I just need to convert something that is in W into mJy using\n # the collecting area and the filter width.\n pointSrcFlux = [convPow2Flux(pointSrcPow[0],1),\\\n convPow2Flux(pointSrcPow[1],2),\\\n convPow2Flux(pointSrcPow[2],3)]\n \n if (silent == False):\n # prints the result\n print(\"Sensitivity figures derived under foreground case \"+foreCases[useFGCase])\n print(\"Observing parameters: p = {0:.2f}, S/N = {1:.1f}, area = {2:.2f}\".format(pfrac,ston,area))\n print(\"Observing time = {0:.2f} (s) for an efficiency of {1:.2f}\".format(timings[1],timings[2]))\n print(\"Number of scan legs = \",timings[3])\n print(\"pixel size used = \",pixSizeBand)\n print(\"Wavelengths: {0:8.1f} {1:8.1f} {2:8.1f}\".format(1e6*bandWave[0],1e6*bandWave[1],1e6*bandWave[2]))\n print(\"Sensitivity to extended sources (MJy/sr)\")\n print(\" {0:.2e} {1:.2e} {2:.2e}\".format(extSrcFlux[0],extSrcFlux[1],extSrcFlux[2]))\n print(\"Sensitivity to point sources (mJy)\")\n print(\" {0:.2e} {1:.2e} {2:.2e}\".format(pointSrcFlux[0],pointSrcFlux[1],pointSrcFlux[2]))\n \n return [extSrcFlux,pointSrcFlux,timings]",
"def evaluateShapeCodingParams(eng):\n # select data\n strWorkingDir = \"../../data/evaluation/BER/\"\n strFileNamePattern= None\n lsFilePath = cf.getFileList(strWorkingDir, strFileNamePattern)\n \n # params\n lsSCWnd = np.arange(0.05, 0.3, 0.05)\n dRectWnd = 2.0\n dSMWnd = 2.0\n strCoder = ecc.CODER_GOLAY\n m = 1\n n = 23\n k = 12\n r = 2\n nInterleaving = 25\n print \"%s: n=%d, k=%d, m=%d, r=%d, interleave=%d\" % \\\n (strCoder, n, k, m, r, nInterleaving)\n \n # test\n lsResult = []\n for dCodingWnd in lsSCWnd:\n print \"evalauting SCWnd=%.2f...\" % dCodingWnd\n for fn in lsFilePath:\n lsDataResult = sd.evaluateSingleData(strWorkingDir, fn,\n dRectDuration = dRectWnd,\n dSMDurction = dSMWnd,\n dSCDuration = dCodingWnd,\n eng=eng, strCoder=strCoder, \n n=n, k=k, m=m, r=r,\n nInterleaving=nInterleaving)\n lsResult.extend(lsDataResult)\n dfResult = pd.DataFrame(lsResult)\n gp = dfResult.groupby(dfResult[sd.WND_SC])\n dfMean = gp.mean()\n return dfMean, dfResult",
"def _process_line_vals(self, line, pricelist, ppi_vals):\n\t\tppi_vals['applied_on'] = self._get_applied_on(line,ppi_vals)\n\t\tppi_vals['compute_price'] = self._get_compute_price(line,ppi_vals)\n\n\t\tall_fields = self._field_methods\n\t\trequired_fields = [x for x in all_fields\n\t\t\t\t\t\t if all_fields[x].get('required')]\n\t\tfor rf in required_fields:\n\t\t\tif rf not in ppi_vals:\n\t\t\t\tmsg = _(\"The '%s' field is a required field \"\n\t\t\t\t\t\t\"that must be correctly set.\") % rf\n\t\t\t\tself._log_line_error(line, msg)",
"def rvs(self) -> Parameter:\n # Sample a parameter vector from the KDE\n val = self.kde.rvs()\n # If any of the parameter values are negative, then resample them \n while val['asymptomatic']<0 or val['secondary_school']<0 or val['primary_school']<0 or val['retail']<0 or val['presymptomatic']<0 or val['symptomatic']<0 or val['work']<0:\n val = self.kde.rvs()\n return val",
"def radar_scan_precip():\n request_velocity = DwdRadarValues(\n parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,\n start_date=DwdRadarDate.MOST_RECENT,\n site=DwdRadarSite.ESS,\n fmt=DwdRadarDataFormat.HDF5,\n subset=DwdRadarDataSubset.POLARIMETRIC,\n settings=Settings(cache_disable=True),\n )\n request_reflectivity = DwdRadarValues(\n parameter=DwdRadarParameter.SWEEP_PCP_REFLECTIVITY_H,\n start_date=DwdRadarDate.MOST_RECENT,\n site=DwdRadarSite.ESS,\n fmt=DwdRadarDataFormat.HDF5,\n subset=DwdRadarDataSubset.POLARIMETRIC,\n settings=Settings(cache_disable=True),\n )\n\n log.info(f\"Acquiring radar SWEEP_PCP data for {DwdRadarSite.ESS} at \" f\"{request_velocity.start_date}\")\n\n # Submit requests.\n results = chain(request_velocity.query(), request_reflectivity.query())\n\n # Collect list of buffers.\n files = [item.data for item in results]\n\n # Decode data using xradar odim backend\n data = xr.open_mfdataset(files, engine=\"odim\")\n\n # Output debug information.\n print(data)\n\n # Plot and display data.\n plot(data)\n if \"PYTEST_CURRENT_TEST\" not in os.environ:\n plt.show()",
"def set_equation_sides(self, clp):\n\t\tif '=' in clp:\n\t\t\tself.l_side, _, self.r_side = map(lambda x: x.strip(),\n\t\t\t clp.partition('='))\n\t\telse:\n\t\t\tself.r_side = clp",
"def prepare_to_calc_EndOfPrdvP(self):\n\n if self.zero_bound:\n # if zero is BoroCnstNat, do not evaluate at 0.0\n aNrmNow = self.aXtraGrid\n\n if self.IndepDstnBool:\n bNrmNext = np.append(\n aNrmNow[0] * self.RiskyDstn.atoms.min(),\n aNrmNow * self.RiskyDstn.atoms.max(),\n )\n wNrmNext = np.append(\n bNrmNext[0] / (self.PermGroFac * self.PermShkDstn.atoms.max()),\n bNrmNext / (self.PermGroFac * self.PermShkDstn.atoms.min()),\n )\n else:\n # add zero to aNrmNow\n aNrmNow = np.append(self.BoroCnstArt, self.aXtraGrid)\n\n if self.IndepDstnBool:\n bNrmNext = aNrmNow * self.RiskyDstn.atoms.max()\n wNrmNext = bNrmNext / (self.PermGroFac * self.PermShkDstn.atoms.min())\n\n self.aNrmNow = aNrmNow\n\n if self.IndepDstnBool:\n # these grids are only used if the distributions of income and\n # risky asset are independent\n self.bNrmNext = bNrmNext\n self.wNrmNext = wNrmNext\n\n return self.aNrmNow",
"def unconsolidated_sand_line(Vs):\r\n\r\n a = 2.3311\r\n b = -0.2886\r\n c = 6.05\r\n d = 4.09\r\n\r\n g = a + b*Vs\r\n Vp = 2**g + Vs**g *(c**g - 2**g)/(d**g)\r\n Vp = Vp**(1.0/g)\r\n\r\n return Vp",
"def rational_parameterization(self):\n if self.genus() != 0:\n raise TypeError(\"this curve must have geometric genus zero\")\n if not is_RationalField(self.base_ring()):\n raise TypeError(\"this curve must be defined over the rational field\")\n singular.lib(\"paraplanecurves.lib\")\n R = singular.paraPlaneCurve(self.defining_polynomial())\n singular.setring(R)\n param = singular('PARA').sage().gens()\n R = R.sage()\n C = self.change_ring(R.base_ring())\n H = Hom(ProjectiveSpace(R.base_ring(), 1, R.gens()), C)\n return H(param)",
"def test_get_params_carn(self):\n assert Carnivore.parameters[\"beta\"] == 0.75\n assert Carnivore.parameters[\"a_half\"] == 60.0\n assert Carnivore.parameters[\"lambda\"] == 1.0\n assert Carnivore.parameters[\"F\"] == 50.0",
"def __init__(self, props, data):\n name = 'KangPolvani'\n super(STJKangPolvani, self).__init__(name=name, props=props, data=data)\n self.wh_200 = 20000.0 / self.data.cfg[\"pfac\"]\n self.wh_1000 = 100000.0 / self.data.cfg[\"pfac\"]",
"def get_solar_rad_pres_switch(self):\n return self.get_abstract_item(\n \"General\",\n \"Solar radiation pressure switch\")",
"def data_process(data_point_dict, line, oldline, thedate, error_dir, qc_dir, bad_data_val=6999) :\n # current data types for processing:\n # num = normal float\n # therm = thermistor... specify the coefficients in the coefficient table.\n # poly = polynomial... specify the coefficients in the coefficient table.\n # net = net radiation... specify in the coefficients table the windspeed column so net can be corrected if needed\n # precip = Could do a totalize down the road but for present, maybe check the air temperature (column specified in the coefficients table again)\n \n\n\n \n old_line_str = oldline.split(',')\n line_str = line.split(',')\n ### Okay, before ramping up... need to account for \"NAN\" of Table based loggers right here.\n temp_de = line_str[ int( data_point_dict[ 'Input_Array_Pos' ] ) ]\n temp_ode = old_line_str[ int( data_point_dict[ 'Input_Array_Pos' ] ) ] \n # .isdigit() was failing testing the whole floating point number so now we're just looking at the last character / digit.\n if temp_de[-1].isdigit() :\n data_element = float( temp_de )\n else :\n data_element = float(bad_data_val)\n \n if len(temp_ode)>1:\n if temp_ode[-1].isdigit() :\n old_data_element = float( temp_ode)\n else :\n old_data_element = float(bad_data_val)\n else:\n old_data_element = float(bad_data_val)\n\n if data_point_dict['Data_Type'] == 'num' or data_point_dict['Data_Type'] == 'net' or data_point_dict['Data_Type'] == 'precip':\n # process as a number, no number crunching to do.\n processed_value = qc_check(data_element, \\\n old_data_element, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n\n elif data_point_dict['Data_Type'] == 'therm' :\n processed_value = thermistor(data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n bad_data_val)\n old_processed_value = thermistor(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n float(bad_data_val))\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n \n elif data_point_dict['Data_Type'] == 'thermF' :\n processed_value = thermistor(data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n bad_data_val)\n old_processed_value = thermistor(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n float(bad_data_val))\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n if processed_value != float(bad_data_val) :\n processed_value = processed_value * 9 / 5 + 32\n\n elif data_point_dict['Data_Type'] == 'poly' :\n processed_value = poly(data_element, \\\n 
float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n float(data_point_dict['Coef_5']), \\\n float(data_point_dict['Coef_6']), \\\n float(data_point_dict['Coef_7']), \\\n bad_data_val)\n old_processed_value = poly(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n float(data_point_dict['Coef_4']), \\\n float(data_point_dict['Coef_5']), \\\n float(data_point_dict['Coef_6']), \\\n float(data_point_dict['Coef_7']), \\\n bad_data_val)\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n elif data_point_dict['Data_Type'] == 'flux':\n \n \n processed_value = flux(data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n old_processed_value = flux(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n elif data_point_dict['Data_Type'] == 'netrad':\n #data_point_dict['Coef_3'] indicates the column of the windspeed in the input data\n #NOTE: windspeed is considered to be zero if speed is less than .3 m/s\n if data_point_dict['Coef_3']!=0:\n\n wind_speed = line_str[int(data_point_dict['Coef_3'])]\n\n processed_value = netrad(data_element,\\\n float(wind_speed), \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n old_processed_value = netrad(old_data_element,\\\n float(wind_speed), \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n else:\n processed_value = flux(data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n old_processed_value = flux(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n bad_data_val)\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) )\n \n elif data_point_dict['Data_Type'] == 'rt_sensor' :\n\n processed_value = rt_sensor(data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n bad_data_val)\n old_processed_value = rt_sensor(old_data_element, \\\n float(data_point_dict['Coef_1']), \\\n float(data_point_dict['Coef_2']), \\\n float(data_point_dict['Coef_3']), \\\n bad_data_val)\n processed_value = qc_check(processed_value, \\\n old_processed_value, \\\n thedate, \\\n qc_dir, \\\n data_point_dict['d_element'], \\\n 
data_point_dict['Qc_Param_High'], \\\n data_point_dict['Qc_Param_Low'], \\\n data_point_dict['QC_Param_Step'], \\\n float(bad_data_val) ) \n else:\n processed_value = bad_data_val\n return (processed_value)\n # end of data_process function",
"def set_calibration_input_params(self):\n self.cparams.parameters['blur'] = self.calibration_params['blur']\n self.cparams.parameters['morph'] = self.calibration_params['morph']\n self.cparams.parameters['H'] = self.calibration_params['H']\n self.cparams.parameters['S'] = self.calibration_params['S']\n self.cparams.parameters['V'] = self.calibration_params['V']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a pair of interfaces.
|
def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None,
                  node1=None, node2=None, deleteIntfs=True ):
    # Leave this as a class method for now
    assert cls
    return makeIntfPair( intfname1, intfname2, addr1, addr2, node1, node2,
                         deleteIntfs=deleteIntfs )
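As the body shows, this classmethod simply delegates to the module-level makeIntfPair() helper. A short usage sketch, assuming Mininet is installed and the call runs with root privileges (the interface names are illustrative):

from mininet.link import Link

# Creates the veth pair 'veth-a' <-> 'veth-b' in the root namespace,
# removing any stale interfaces with the same names first.
Link.makeIntfPair('veth-a', 'veth-b', deleteIntfs=True)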
|
[
"def test_create_interface_two_times(self):\n h1 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i1 = self.plugin.createAndAddInterface(h1, \"1.2.3.4\")\n\n h2 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i2 = self.plugin.createAndAddInterface(h2, \"1.2.3.4\")\n\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n self.assertTrue(len(self._model_controller.getAllHosts()) == 1, \"The controller should have just one host\")\n self.assertTrue(len(self._model_controller.getHost(h1).getAllInterfaces()) == 1, \"The host should have just one interface\")",
"def _create_peering_interface(asys: UserAS, ixp: IXP):\n br = _get_peering_br(asys)\n ip = IXPMember.objects.filter(ixp=ixp, host=br.host).values_list('public_ip', flat=True)[0]\n port = _find_free_port(asys, ipaddress.ip_address(ip), 50000, 51000)\n return br.interfaces.create(public_ip=str(ip), public_port=port)",
"def __init__(self, fromInterfaceName, toInterfaceName) :\n self.fromSwitchInterface = fromInterfaceName\n f = re.findall(r\"\\d+\", fromInterfaceName.split(\".\")[0])\n if len(f) == 3 :\n self.fromFPC = int(f[0])\n self.fromPIC = int(f[1])\n self.fromPort = int(f[2])\n else:\n raise ValueError(\"FromInterface name is invalid\")\n self.toSwitchInterface = toInterfaceName\n t = re.findall(r\"\\d+\", toInterfaceName.split(\".\")[0])\n if len(t) == 3 :\n self.toFPC = int(t[0])\n self.toPIC = int(t[1])\n self.toPort = int(t[2])\n else:\n raise ValueError(\"ToInterface name is invalid\")",
"def make_interface(backend):\n return contract_interface.ContractInterface(\n {\"path\": contract_path, \"ctor\": [genesis, m, k]},\n backend=backend,\n profiler=profiler,\n )",
"def _create_link(as1: UserAS, as2: UserAS, ixp: IXP) -> IXPLink:\n if1 = _create_peering_interface(as1, ixp)\n if2 = _create_peering_interface(as2, ixp)\n return IXPLink.objects.create(Link.PEER, if1, if2, ixp)",
"def create(module):\n module.node.api('interfaces').create(module.attributes['name'])",
"def addInterface(interface): #@NoSelf",
"def test_create_two_services_same_names_different_port(self):\n h = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i = self.plugin.createAndAddInterface(h, \"1.2.3.4\")\n s1 = self.plugin.createAndAddServiceToInterface(h, i, \"unknown\", protocol=\"tcp\", ports=['80'])\n s2 = self.plugin.createAndAddServiceToInterface(h, i, \"unknown\", protocol=\"tcp\", ports=['443'])\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n host = self._model_controller.getHost(h)\n interface = host.getInterface(i)\n self.assertNotEqual(s1, s2, \"Both services should have the same id\")\n self.assertTrue(len(interface.getAllServices()) == 2, \"The interface should have two services\")",
"def registerInterface(interface): #@NoSelf",
"def _create_interface(name, ip, route_dst=None):\n\n logging.debug(\"Creating %s interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"dummy\")\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)",
"def get_construction_steps(\n target_interface: Interface) -> Tuple[ConstructionStep, ...]:\n\n # For brevity later, get the list of protocols as a local\n protocols = target_interface.protocol.protocols.protocols\n\n # Helper map for constructing human readable instance names\n base_human_readable_name_map = (\n wayland_protocol_identifiers.get_base_human_readable_name_map(\n target_interface.protocol.protocols.protocols))\n\n # Globals that will be needed (not ordered)\n global_steps = {}\n\n # Non-global instances that will be needed (ordered)\n instance_steps = []\n\n # To help ensure unique instance names\n uniquifier = collections.Counter()\n\n def unique_instance_name(prefix: str, name: str) -> str:\n def dedupe_words(name: str) -> str:\n # Otherwise a generated name might be \"parent_surface_surface\"\n # for a wl_surface passed as a parent_surface argument.\n words = name.split(\"_\")\n if len(words) == 1:\n return name\n words = [\n w for i, w in enumerate(words[:-1]) if w not in words[i + 1]\n ] + [words[-1]]\n return \"_\".join(words)\n\n name = prefix + base_human_readable_name_map.get(name, name)\n name = dedupe_words(name)\n suffix = str(uniquifier.get(name, ''))\n uniquifier[name] += 1\n return name + suffix + \"_\"\n\n def recursive_construction_steps(current_target: Interface, prefix: str,\n minimum_version: int):\n ctor_message = get_constructor_for_interface(current_target)\n ctor = None\n\n if ctor_message is not None:\n # If we have a message, we have to use another interface to\n # create the current target.\n ctor_interface = recursive_construction_steps(\n ctor_message.interface, prefix,\n max(minimum_version,\n ctor_message.since if ctor_message.since else 1))\n ctor_object_args = []\n\n if not ctor_message.is_event:\n # We may also have to construct other interfaces as well to\n # pass as object arguments. Those interfaces can be part of\n # any protocol, though normally it is either in the same\n # protocol or the core Wayland protocol.\n for arg in ctor_message.args:\n arg_step = None\n if arg.type == 'object':\n arg_interface = get_interface_for_name(\n protocols, arg.interface)\n arg_step = recursive_construction_steps(\n arg_interface, f'{prefix}{arg.name}_', 1)\n ctor_object_args.append(arg_step)\n\n ctor = ConstructionStepCtor(ctor_interface, ctor_message,\n tuple(ctor_object_args))\n\n # Construct the step\n step = ConstructionStep(interface=current_target,\n instance_name=unique_instance_name(\n prefix if ctor is not None else '',\n current_target.name),\n ctor=ctor,\n minimum_version=minimum_version)\n\n if ctor is None:\n # For a global, interface, we only make/get one instance\n step = global_steps.setdefault(current_target.name, step)\n else:\n # Otherwise store each individual step\n instance_steps.append(step)\n\n return step\n\n recursive_construction_steps(target_interface, '', 1)\n\n return tuple([global_steps[name]\n for name in sorted(global_steps)] + instance_steps)",
"def create_interface_specs(class_name, params=None, BaseClass=TraitedSpec):\n attr = {}\n if params is not None:\n for p in params:\n name, dipy_type, desc = p[0], p[1], p[2]\n is_file = bool(\"files\" in name or \"out_\" in name)\n traits_type, is_mandatory = convert_to_traits_type(dipy_type, is_file)\n # print(name, dipy_type, desc, is_file, traits_type, is_mandatory)\n if BaseClass.__name__ == BaseInterfaceInputSpec.__name__:\n if len(p) > 3:\n attr[name] = traits_type(\n p[3], desc=desc[-1], usedefault=True, mandatory=is_mandatory\n )\n else:\n attr[name] = traits_type(desc=desc[-1], mandatory=is_mandatory)\n else:\n attr[name] = traits_type(\n p[3], desc=desc[-1], exists=True, usedefault=True\n )\n\n newclass = type(str(class_name), (BaseClass,), attr)\n return newclass",
"def generate_new_interface(self, params):\n # Kill existing iface\n try:\n self.iface.stop()\n except (AttributeError, IOError):\n pass\n self.iface = PiPinInterface(params)\n return self.iface",
"def create_interface(self, name, peer_id=None):\n try:\n if self.ipdb_controller:\n name_iface_peer = name+'_'+peer_id\n self.ipdb_controller.create(ifname=name, kind='veth', peer=name_iface_peer).commit()\n return name, name_iface_peer\n except Exception:\n logging.error('Cannot create interface')\n return '',''",
"def create_interfaces_by_id(interfaces): # noqa: E501\n if connexion.request.is_json:\n interfaces = InterfacesSchema.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'",
"def test_create_two_services_different_names_equal_port(self):\n h = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i = self.plugin.createAndAddInterface(h, \"1.2.3.4\")\n s1 = self.plugin.createAndAddServiceToInterface(h, i, \"unknown\", protocol=\"tcp\", ports=['80'])\n s2 = self.plugin.createAndAddServiceToInterface(h, i, \"test\", protocol=\"tcp\", ports=['80'])\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n host = self._model_controller.getHost(h)\n interface = host.getInterface(i)\n self.assertEqual(s1, s2, \"Both services should have the same id\")\n self.assertTrue(len(interface.getAllServices()) == 1, \"The interface should have just one service\")",
"def interface(comp_cls):\n class MyInterface(Interface):\n pass\n MyInterface.__name__ = 'I' + comp_cls.__name__\n return MyInterface",
"def create_interface(self, device, data):\n raise NotImplementedError()",
"def __init__(self):\n self.interface = \\\n {'initialization variables': None,\n 'input variables': None,\n 'input events': None,\n 'output events': None}",
"def _create_tunnel(name, ip, gre_local, gre_remote, route_dst=None):\n\n logging.debug(\"Creating %s interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"gre\",\n gre_local=gre_local,\n gre_remote=gre_remote,\n gre_ttl=255)\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate a serializer and raise an error on bad validation.
|
def validate_serializer(serializer):
    if not serializer.is_valid():
        raise ValueError(serializer.errors)
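For Django REST Framework serializers specifically, much the same behaviour is available through the built-in raise_exception flag, which raises rest_framework.exceptions.ValidationError (rendered as a 400 response inside a view) rather than a plain ValueError; a short sketch, assuming DRF is in use:

# Equivalent check using DRF's built-in flag instead of a custom ValueError.
def validate_serializer(serializer):
    # Raises rest_framework.exceptions.ValidationError carrying serializer.errors.
    serializer.is_valid(raise_exception=True)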
|
[
"def test_serializer_validation(self):\n serializer = self.serializer_class(data={})\n serializer.is_valid()\n\n expected_errors = {\n 'email': ['This field cannot be blank.'],\n }\n\n self.assertEqual(serializer.errors, expected_errors)",
"def test_invalid_datatype(self):\r\n serializer = self.message_serializer(data=[{\"text\": \"Some test text\"}])\r\n assert not serializer.is_valid()\r\n assert serializer.validated_data == {}\r\n assert serializer.data == {}\r\n assert serializer.errors == {\r\n \"non_field_errors\": [\r\n \"Invalid data. Expected a dictionary, but got list.\"\r\n ]\r\n }",
"def validate(self, data):\n if ('blob_id' in data) == ('zarr_id' in data):\n raise serializers.ValidationError(\n {'blob_id': 'Exactly one of blob_id or zarr_id must be specified.'}\n )\n if 'path' not in data['metadata'] or not data['metadata']['path']:\n raise serializers.ValidationError({'metadata': 'No path specified in metadata.'})\n\n # Validate the asset path. If this fails, it will raise a django ValidationError, which\n # will be caught further up the stack and be converted to a DRF ValidationError\n validate_asset_path(data['metadata']['path'])\n\n data['metadata'].setdefault('schemaVersion', settings.DANDI_SCHEMA_VERSION)\n return data",
"def test_serializer_validation(self):\n serializer = self.serializer_class(data={})\n serializer.is_valid()\n\n expected_errors = {\n 'current_password': ['This field is required.'],\n 'new_password': ['This field is required.'],\n 'new_password_confirmation': ['This field is required.']\n }\n\n self.assertEqual(serializer.errors, expected_errors)",
"def test_validate_write_only(self):\n data = {'write_only': 123, 'readable': 456}\n validated = self.serializer.validate(data)\n assert validated == {'write_only': 123, 'readable': 456}",
"def test_validate_none_data(self):\r\n data = None\r\n serializer = self.message_serializer(data=data)\r\n assert not serializer.is_valid()\r\n assert serializer.errors == {\"non_field_errors\": [\"No data provided\"]}",
"def test_invalid_serializer_with_pk(self):\n record = Scope.get(self.uuid)\n serializer = ScopeSerializer(record)\n self.assertEqual(serializer.valid(self.uuid), False)",
"def test_serializer_should_return_expected_error_invalid_token(self):\n self.data['token'] = 'invalidtoken'\n self.data['new_password'] = '123456'\n serializer = ResetPasswordSerializer(data=self.data)\n serializer.is_valid()\n\n expected_error = {\n 'token': ['Invalid password reset token.']\n }\n\n self.assertEqual(serializer.errors, expected_error)\n self.assertEqual(serializer.object, None)\n self.assertEqual(serializer.data, {})",
"def test_validate_invalid_object(self):\n G.VALIDATOR.validate(Job('test_invalid_object.json'))",
"def test_validate_invalid_credentials():\n data = {\"email\": \"test@example.com\", \"password\": \"password\"}\n\n serializer = serializers.TokenSerializer(data=data)\n\n assert not serializer.is_valid()",
"def test_delta_failure(self):\n query_params = {\"delta\": \"bad_delta\"}\n serializer = OCPInventoryQueryParamSerializer(data=query_params)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def test_invalid_serializer_without_pk(self):\n record = Scope.get(self.uuid)\n serializer = ScopeSerializer(record)\n self.assertEqual(serializer.valid(None), False)",
"def test_validate_wrong_format_data(self):\n self.user_data[\"dependents\"] = \"wrong format\"\n serializer = QuoteSerializer(data=self.user_data)\n assert serializer.is_valid() == False",
"def test_ser_invalid(self, podcast_data) -> None:\n del podcast_data[\"name\"]\n podcast_serd = PodcastSerializer(data=podcast_data)\n assert not podcast_serd.is_valid()\n assert \"name\" in podcast_serd.errors",
"def test_invalid(schema, exception):\n with pytest.raises(exception):\n object_._convert_read_only(schema=schema, value=mock.MagicMock())",
"def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n order = Order()\n self.assertRaises(DataValidationError, order.deserialize, data)",
"def test_infrastructure_field_validation_failure(self):\n query_params = {\"infrastructures\": \"notaws\"}\n serializer = FilterSerializer(data=query_params)\n self.assertFalse(serializer.is_valid())",
"def test_deserialize_with_bad_available(self):\n data = PetFactory().serialize()\n data[\"available\"] = \"foo\"\n pet = Pet()\n self.assertRaises(DataValidationError, pet.deserialize, data)",
"def test_serializer_should_validate_username(self):\n self.create_user()\n self.create_another_user()\n\n data = {\n 'first_name': self.user.first_name,\n 'last_name': self.user.last_name,\n 'username': self.user.email,\n 'email': self.user.email\n }\n\n serializer = self.serializer_class(data=data, instance=self.user)\n serializer.is_valid()\n\n expected_error = {\n 'username': ['Invalid username.']\n }\n\n self.assertEqual(serializer.errors, expected_error)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method is called when the spider is opened.
|
def open_spider(self, spider):
_log.info('open_spider[%s]....' % spider.name)
|
[
"def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)",
"def open_spider(self, spider):\n logging.info('open spider')",
"def open(self, spider):\n self.spider = spider\n self.file_system = S3Hook()\n return super(ManifestFeedStorage, self).open(spider)",
"def customize_close_spider(self, **kwargs):\n pass",
"def onSite(self):\n self.callHandlers(\"site\")",
"def configure_request(self, request, link, spider):",
"def close_spider(self, spider):\n self.pool.close()",
"def open_spider(self, spider):\n\n index_dir = os.path.expanduser('~/.sitesearcher/index')\n if not os.path.exists(index_dir):\n os.makedirs(index_dir)\n\n self.indexname = spider.allowed_domains[0]\n if index.exists_in(index_dir, indexname=self.indexname):\n self.index = index.open_dir(index_dir, indexname=self.indexname)\n else:\n self.index = index.create_in(\n index_dir,\n indexname=self.indexname,\n schema=schema,\n )\n self.writer = AsyncWriter(self.index)",
"def define_spider_process_handler(self):\n # Create the SpiderProcessHandler if first time upload\n self.spider_handler = XnatUtils.SpiderProcessHandler(\n self.spider_path,\n self.suffix,\n self.xnat_project,\n self.xnat_subject,\n self.xnat_session,\n time_writer=self.time_writer)",
"def define_spider_process_handler(self):\n # Create the SpiderProcessHandler if first time upload\n self.spider_handler = XnatUtils.SpiderProcessHandler(\n self.spider_path,\n self.suffix,\n self.xnat_project,\n self.xnat_subject,\n self.xnat_session,\n self.xnat_scan,\n time_writer=self.time_writer)",
"def go(self):\n \n self.setprop('crawl', crol.Crawl({\n 'seed_url' : self.registration.site,\n 'crawl_report' : crol.CrawlReport({'seed_url':self.registration.site}),\n 'log' : self.log,\n 'nofollow_patterns' : self.registration.nofollow_patterns,\n 'ignore_patterns' : self.registration.ignore_patterns\n }))\n \n self.log.filename = self.registration.department.name\n self.crawl.start(self.crawl.crawl_report.reportnode)\n self.log.reporttofile(self.crawl.crawl_report)\n if self.crawl.crawl_report.statistics['broken_count'] > 0: self.applyactions()",
"def on_show_view(self) -> None:\n self.setup()",
"def test_scraping(self):\n self._scraper.scrape()",
"def site_complete(self):\n pass",
"def close_spider(self, spider):\n for exporter in self.year_to_exporter.values():\n exporter.finish_exporting()\n exporter.stream.close()",
"def ChooseScraper(self, url):",
"def open(self):\n self.state = _OpenState(self)\n logger.debug(\"Opened\")",
"def setup(self):\n self.site = SiteFactory(is_default_site=True)",
"def _openWebsite(self):\n webbrowser.open(websiteUrl, 2, True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reset count matrices to fresh values.
|
def reset_mats(self, init_base=None, init_oracle=None):
if init_base is None:
self.seen_states = 1 # NB. this keeps track of number of states visited so far
self.base_counts = Matrix() # NB. `base_counts[s,t]` records number of times we've performed s->t transition
self.base_counts[0,0] += 1
else:
self.seen_states = max(init_base.shape[0], init_base.shape[1])
self.base_counts = Matrix(init_base)
if init_oracle is None:
self.oracle_counts = Matrix() # NB. there should only be one column in the oracle counts, i.e. it's vector-shaped
# initialize with full probability mass on first state:
self.oracle_counts[0,0] += 1
else:
self.oracle_counts = Matrix(init_oracle)
|
[
"def clear_summaries(self):\n\n\t\tself.count = 0\n\t\tmemset(self.counts, 0, self.n*sizeof(double))",
"def reset_all(self):\n self.reset_memory()\n self.reset_traces()\n self.reset_tags()\n\n self.prev_obs = np.zeros(self.nx_inst)\n self.prev_qa = 0\n self.prev_max = 0.",
"def reset_macs_count(self):\n add_batch_counter_variables_or_reset(self)\n self.apply(add_macs_counter_variable_or_reset)",
"def finalize(self):\n for i in range(1, len(self._local_counts)):\n self.counts[i].append(self._local_counts[i])\n self.counts.pop(0)\n\n for i in range(len(self.counts)):\n self.counts[i] = np.array(self.counts[i])",
"def reset(self):\n self.matrix.fill(True)\n for cat in range(len(self.categories)):\n s = self.cat_slice(cat)\n self.matrix[s, s] = False\n np.fill_diagonal(self.matrix, True)\n self.assertions.clear()\n self._log(f'Reset matrix with {self.edges} edges\\n')",
"def reset_plots(self):\n self.rh.reset_count_lists(range(len(self.rh.ROIs)))\n for p in self.plots:\n try:\n for l in p['counts']: l.setData([1])\n except TypeError:\n p['counts'].setData([1])",
"def ResetCutCounters(self):\n for step in self.listOfAnalysisSteps:\n for counterLabel in self.dictOfCutCounters:\n self.dictOfCutCounters[counterLabel][step] = 0",
"def reset ( self ) :\n for h in self._histos : self._histos[h].reset()",
"def reset(self):\n self.metric_vals = {}\n for tracker in self.metric_trackers.values():\n tracker.reset()",
"def reset(self):\r\n self.counter = 0\r\n self.distdict.clear()",
"def Reset(self):\r\n self.grid = self.EmptyGrid()\r\n self.count = self.EmptyGrid()",
"def reset_counting(checkers={}):\n for ch in checkers.values():\n ch.reset()",
"def reset_stats(self):\n self.train_stats[\"nbatches\"] = 0\n self.train_stats[\"acc\"] = 0\n self.train_stats[\"loss\"] = 0\n self.val_stats[\"nbatches\"] = 0\n self.val_stats[\"acc\"] = 0\n self.val_stats[\"loss\"] = 0\n self.val_stats[\"wer\"] = 0",
"def reset(self) -> None:\n for _, sb in self._scoreboxes.items():\n sb.reset()",
"def reset(self):\n self.csr.data[:] = 0",
"def reset(self):\n if self.parallel:\n from pyannote.metrics import manager_\n self.accumulated_ = manager_.dict()\n self.results_ = manager_.list()\n self.uris_ = manager_.dict()\n else:\n self.accumulated_ = dict()\n self.results_ = list()\n self.uris_ = dict()\n for value in self.components_:\n self.accumulated_[value] = 0.",
"def reset(self):\n self.confusion_matrix.reset()",
"def _reset_uncovered_mat(self):\n self.row_uncovered[:] = True\n self.col_uncovered[:] = True",
"def reset():\n global GROUPS, NODES, PIPES, JOBS, _ID\n GROUPS = {}\n NODES = {}\n PIPES = {}\n JOBS = {}\n _ID = count(1)\n logger.info(\"Cleared cache and reset counter.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a new state j given that we're currently in a state i. Running this method updates the underlying count tables (self.{base,oracle}_counts); use `HDP.probas(state)` to get the probability over all visited states `j`.
|
def sample(self, state):
# get probabilities for next state over all states observed so far, plus oracle proba in final index:
base_probas = self.base_probas(state)
# sample one of the states (or oracle query):
next_state = np.random.choice(range(len(base_probas)), p=base_probas)
# update tables and return state if our choice is not oracle:
if next_state < self.seen_states:
self.base_counts[state,next_state] += 1
return next_state
# otherwise if we choose final state, sample from oracle (also updating count tables/num_states):
else:
oracle_probas = self.oracle_probas()
next_oracle_state = np.random.choice(range(len(oracle_probas)), p=oracle_probas)
# update both counts:
self.base_counts[state,next_oracle_state] += 1
self.oracle_counts[next_oracle_state,0] += 1
# update num_states if new state seen:
if next_oracle_state == (oracle_probas.shape[0]-1):
self.seen_states += 1
# return:
return next_oracle_state
|
[
"def state(self, i):\n return self.basis[i]",
"def creation(i,state_in):\n coef = np.sqrt(state_in[i]+1)\n state_out=state_in.copy()\n state_out[i] = state_out[i]+1\n return state_out,coef",
"def successors(self, new_state):\n return self.graph[new_state]",
"def index(self, state):\n return self.basis.index(state)",
"def state2idx(self, state: list) -> int:\n idx = 0\n N = 1\n for i in range(self.num_cofactor):\n idx += state[i] * N\n N *= (self.siteCapacity[i] + 1)\n \n return idx",
"def index_to_state(self, n):\n return self.lookup_table[n]",
"def get_probability(self, state, observation):\n return 0 if state in self.closed_states else 1",
"def index(self, state):\n try:\n idx = self.basis_lut[tuple(state)]\n return idx\n except:\n return -1",
"def compute_state(self, observation):\n return observation",
"def oracle_probas(self):\n n_js = np.array(self.oracle_counts[:self.seen_states,0], dtype=np.float64)\n denominator = np.reciprocal(np.sum(n_js) + self.gamma)\n new_state_proba = self.gamma * denominator\n existing_state_probas = n_js * denominator\n combined_probas = np.concatenate((existing_state_probas, [new_state_proba]), axis=0)\n return (combined_probas / combined_probas.sum())",
"def incremental_score(old_state, i, n, new_state, arrows):\n narrows = not(arrows)\n score = old_state[SCORE]\n # remove i score (new score is 0)\n score -= State.score_at(old_state, i, arrows)\n\n if narrows: # neighbors of i have arrows and might have no more other piles around\n # n will be evaluated as one of these\n for neighbor in State.NEIGHBORS[i]:\n score -= State.score_at(old_state, neighbor, narrows)\n score += State.score_at(new_state, neighbor, narrows)\n else:\n # evaluate n\n score -= State.score_at(old_state, n, narrows)\n score += State.score_at(new_state, n, narrows)\n\n return score",
"def getProbability(self, state, action):\n return self.policyFunction.predict(np.array([state]))[0][action + 1]",
"def generate_state_table(p):\n # generate list of state_numbers which are allowed by the symmetries\n state_table = []\n for i in range(int(2**p['N'])):\n state_table.append(i)\n return state_table",
"def lookup_transition_prob_matrix(self, action, nextState):\n curState = deepcopy(self)\n action = tuple(action)\n if (curState, action, nextState) in GameState.tpm:\n return GameState.tpm[(curState, action, nextState)]\n else:\n prob = self.transition_prob(curState, action, nextState)\n GameState.tpm[(curState, action, nextState)] = prob\n return prob",
"def get_probability(self, state, observation):\n return 1",
"def NN_State_Probability(J_Beta,_State_Energy):\n return math.exp(-1 * J_Beta * _State_Energy)",
"def get_probability_of_individual_state_transitions(transition_table, policy_table):\n states = list(transition_table)\n num_states = len(states)\n\n # Dictionary to store Marginal Probabilities of the Next State given the Current State, Goal and Environment\n marginal_probability_next_state = dict()\n\n # For each state\n for s in range(num_states):\n actions = list(transition_table[states[s]])\n num_actions = len(actions)\n\n probability_next_state = dict()\n possible_next_states = []\n\n # For each action\n for a in range(num_actions):\n\n state_prime = list(transition_table[states[s]][actions[a]])\n num_states_prime = len(state_prime)\n\n # For each next-state\n for sp in range(num_states_prime):\n # If Next State is in the list of possible next states\n if state_prime[sp] in possible_next_states:\n # Accumulation of repeated next state probabilities\n probability_next_state[state_prime[sp]] = \\\n transition_table[states[s]][actions[a]][state_prime[sp]] * policy_table[states[s]][actions[a]] \\\n + probability_next_state[state_prime[sp]]\n\n else:\n # Add this to the list of possible next states\n possible_next_states.append(state_prime[sp])\n\n probability_next_state[state_prime[sp]] = \\\n transition_table[states[s]][actions[a]][state_prime[sp]] * policy_table[states[s]][actions[a]]\n\n # Store in the (Marginal Probabilities of the Next State) Dictionary\n marginal_probability_next_state[states[s]] = probability_next_state\n\n return marginal_probability_next_state",
"def measure(self):\r\n # Calculate probabilities\r\n probabilities = np.zeros(self.n_states)\r\n for i in range(self.n_states):\r\n probabilities[i] = norm(self.base_states[i]) ** 2\r\n\r\n # Choose a random state\r\n n = int(self.n_states)\r\n state = int (np.random.choice(n, p=probabilities) )\r\n\r\n return state",
"def apply_state(self, state):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an array of probabilities based on the current configuration of `self.oracle_counts`. The returned 1d array of type np.float64 has size `self.seen_states+1`, representing probabilities for returning an existing state, with an additional value at the end representing the probability of transitioning to a new, unseen state.
|
def oracle_probas(self):
n_js = np.array(self.oracle_counts[:self.seen_states,0], dtype=np.float64)
denominator = np.reciprocal(np.sum(n_js) + self.gamma)
new_state_proba = self.gamma * denominator
existing_state_probas = n_js * denominator
combined_probas = np.concatenate((existing_state_probas, [new_state_proba]), axis=0)
return (combined_probas / combined_probas.sum())
|
[
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n probs = []\n for state, action in zip(states, actions):\n probs.append(1 if self.sample_action(state) == action else 0)\n return np.array(probs)",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n probs = 0.5 * np.ones(len(states))\n return probs",
"def state_probability(self, num_customers):\n raise NotImplementedError()",
"def sample(self, state):\n # get probabilities for next state over all states observed so far, plus oracle proba in final index:\n base_probas = self.base_probas(state)\n # sample one of the states (or oracle query):\n next_state = np.random.choice(range(len(base_probas)), p=base_probas)\n # update tables and return state if our choice is not oracle:\n if next_state < self.seen_states:\n self.base_counts[state,next_state] += 1\n return next_state\n # otherwise if we choose final state, sample from oracle (also updating count tables/num_states):\n else:\n oracle_probas = self.oracle_probas()\n next_oracle_state = np.random.choice(range(len(oracle_probas)), p=oracle_probas)\n # update both counts:\n self.base_counts[state,next_oracle_state] += 1\n self.oracle_counts[next_oracle_state,0] += 1\n # update num_states if new state seen:\n if next_oracle_state == (oracle_probas.shape[0]-1):\n self.seen_states += 1\n # return:\n return next_oracle_state",
"def create_observation_probabilities(self, state_features, timeseries_observations, cycle_states):\n\n observation_state_container = []\n\n for state in state_features:\n parameter_set = state_features[state]\n\n state_obs_probabilities = []\n for ind in range(len(timeseries_observations)):\n obs = timeseries_observations[ind]\n cycle = cycle_states[ind]\n\n mu = parameter_set.loc[parameter_set['CYCLE'] == cycle, 'SEASONALITY_MU'].item()\n sigma = parameter_set.loc[parameter_set['CYCLE'] == cycle, 'SEASONALITY_SIGMA'].item()\n obs_state_probability = norm.pdf(obs, loc=mu, scale=sigma)\n state_obs_probabilities.append(obs_state_probability)\n\n observation_state_container.append(state_obs_probabilities)\n\n observation_state_probabilities = np.array(observation_state_container).T\n\n return observation_state_probabilities",
"def measure(self):\r\n # Calculate probabilities\r\n probabilities = np.zeros(self.n_states)\r\n for i in range(self.n_states):\r\n probabilities[i] = norm(self.base_states[i]) ** 2\r\n\r\n # Choose a random state\r\n n = int(self.n_states)\r\n state = int (np.random.choice(n, p=probabilities) )\r\n\r\n return state",
"def test_probability_by_state_sequence(self):\n observations = [0,1,1]\n probabilities = Algs.analysis_of_state_sequences(self.model3, observations)\n total_probability = sum(prob for sequence, prob in probabilities)\n self.assertAlmostEquals(total_probability,\n Algs.probability_of_observations(self.model3, observations))",
"def getProbability(self, state, action):\n return self.policyFunction.predict(np.array([state]))[0][action + 1]",
"def get_binding_probabilities(self, state):\n cnt_s, cnt_r = state.energies.shape\n subs = range(cnt_s) #< all possible substrate indices\n\n if self.temperature == 0:\n # determine maximal energies for each substrate\n Emax = state.energies.max(axis=1)\n weights = (state.energies == Emax[:, np.newaxis]).astype(np.double)\n \n else:\n # calculate Boltzmann factors\n weights = np.exp(state.energies/self.temperature)\n \n # iterate over all substrate combinations\n probs = np.empty((self.get_input_dim(state), cnt_r))\n for k, sub in enumerate(itertools.combinations(subs, self.num)):\n # calculate interaction probabilities\n probs[k, :] = weights[sub, :].sum(axis=0)\n \n # normalize for each substrate across all receptors\n # => scenario in which sub_ids binds to exactly one receptor\n probs /= np.sum(probs, axis=1)[:, None]\n # TODO: check whether this is the natural normalization\n \n return probs",
"def get_probability(self, state, observation):\n return 0 if state in self.closed_states else 1",
"def get_probability(self, state, observation):\n return 1",
"def __transitions(self):\n # Initialize the transition probailities tensor (S,S,A)\n dimensions = (self.n_states,self.n_states,self.n_actions)\n transition_probabilities = np.zeros(dimensions)\n\n # Compute the transition probabilities. Note that the transitions\n # are deterministic.\n for s in range(self.n_states):\n for a in range(self.n_actions):\n n = 0\n next_s_vec = list()\n for a_batman in self.actions_batman:\n next_s, caught = self.__move(s, a, a_batman)\n \n if caught:\n n = 1\n next_s_vec = [next_s]\n break\n\n elif next_s != None:\n n += 1\n next_s_vec.append(next_s)\n \n for next_s in next_s_vec:\n transition_probabilities[next_s, s, a] = 1/n\n return transition_probabilities",
"def action_probs(self, state):\n pass",
"def get_probability_of_individual_state_transitions(transition_table, policy_table):\n states = list(transition_table)\n num_states = len(states)\n\n # Dictionary to store Marginal Probabilities of the Next State given the Current State, Goal and Environment\n marginal_probability_next_state = dict()\n\n # For each state\n for s in range(num_states):\n actions = list(transition_table[states[s]])\n num_actions = len(actions)\n\n probability_next_state = dict()\n possible_next_states = []\n\n # For each action\n for a in range(num_actions):\n\n state_prime = list(transition_table[states[s]][actions[a]])\n num_states_prime = len(state_prime)\n\n # For each next-state\n for sp in range(num_states_prime):\n # If Next State is in the list of possible next states\n if state_prime[sp] in possible_next_states:\n # Accumulation of repeated next state probabilities\n probability_next_state[state_prime[sp]] = \\\n transition_table[states[s]][actions[a]][state_prime[sp]] * policy_table[states[s]][actions[a]] \\\n + probability_next_state[state_prime[sp]]\n\n else:\n # Add this to the list of possible next states\n possible_next_states.append(state_prime[sp])\n\n probability_next_state[state_prime[sp]] = \\\n transition_table[states[s]][actions[a]][state_prime[sp]] * policy_table[states[s]][actions[a]]\n\n # Store in the (Marginal Probabilities of the Next State) Dictionary\n marginal_probability_next_state[states[s]] = probability_next_state\n\n return marginal_probability_next_state",
"def states(self):\n return np.array(self.state[:self.last_n])",
"def get_probability_heights(self):\n \n sums = [sum(x) for x in self.configurations]\n df = pd.DataFrame({\"Configurations\": self.configurations,\"Heights\":sums})\n count_heights =pd.DataFrame(df[\"Heights\"].value_counts().sort_index().reset_index())\n count_heights.columns = [\"height\",\"count\"]\n \n heights = count_heights[\"height\"].to_list()\n \n counts = np.array(count_heights[\"count\"].to_list())\n N_observed = len(df) # Number of all configurations observed\n probs = counts/N_observed\n# print(count_heights)\n\n return [heights,probs]",
"def _calc_ProbabilityMatrix(self,trials):\r\n Psa = np.mat(np.ones((self.m,self.n)))\r\n for s_index in range(len(trials)):\r\n if s_index+1 < len(trials):\r\n a_index = s_index +1\r\n # convert state and action to indices in probabiilty matrix\r\n s_pos = list(self.S).index(list(trials[s_index]))\r\n a_pos = list(self.A).index(list(trials[a_index]))\r\n Psa[s_pos,a_pos] += 1\r\n\r\n Psa /= np.sum(Psa,axis=1)\r\n return Psa",
"def getAvgProbability(self, states, actions):\n r = 0\n\n for i in range(len(states)):\n r += self.getProbability(states[i], actions[i])\n\n return r / len(states)",
"def _calc_state(self):\n return np.average(a=self.particles, axis=0, weights=self.weights) # for each column"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reset hyperparameters for HDPs.
|
def reset_params(self, t_alpha, t_beta, t_gamma, e_beta, e_gamma):
self.t_hdp.reset_params(t_alpha, t_beta, t_gamma)
self.e_hdp.reset_params(0., e_beta, e_gamma)
|
[
"def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise pooling weights.\n nn.init.normal_(self.pooling_weights, mean=0.0, std=0.02)",
"def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise lookup and target weights.\n nn.init.normal_(self.lookup_weights, mean=0.0, std=0.02)\n if self.target_weights is not None:\n nn.init.normal_(self.target_weights, mean=0.0, std=0.02)",
"def set_hyperparams(self, hp):\n n_step = self.get_last_step()\n self.hyperparams[n_step] = hp\n self.pipeline_description = None #Recreate it again",
"def reset_params(self):\n pass",
"def reset_tuning(self):\n return",
"def reset(self):\n self._lnprob = []\n self._chain = []\n self._epsilon = 0.",
"def reset_optim(self):\r\n\t\tself.optimizer.state = defaultdict(dict)",
"def clear_parameters(self):\n self._solver.clear_parameters()\n self._iter = 0",
"def reset_grads(self):\n for dparam in self.dparams:\n dparam.set_value(0.0 * dparam.get_value())",
"def reset(self):\n self.rrt.reset()\n\tself.bestPath = None\n\tself.bestPathCost = None\n self.lastPruneCost = None\n\tself.updateBestCost()",
"def reset(self):\n self.epsilon = self.initial_epsilon",
"def reset(self):\r\n # reset PID values\r\n self.proportional, self.integral, self.derivative = 0, 0, 0\r\n\r\n # reset previous time and error variables\r\n self.previous_time, self.previous_error = 0, 0",
"def reset(self):\n\n self.model.load_state_dict(self.retrieve(\"model\"))\n self.optimizer.load_state_dict(self.retrieve(\"optimizer\"))\n self.model.to(self.model_device)",
"def reset(self):\n # Noise scaling\n self.noise.reset()\n # Episode parameter\n self._initial_states = None",
"def reset_pp(self):\n try:\n pvc.reset_pp(self.__handle)\n except:\n raise RuntimeError('Failed to reset post-processing settings.')",
"def reset_rescale(self):\n\n for name in self.names:\n self.rescale_parameters[name] = None",
"def set_hyperparameters(self, hyperparameters):\n raise NotImplementedError(\"Can't call this method\")",
"def reset(self) -> None:\n self.epsilon = self.max_epsilon",
"def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_output = None\n self._last_input = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Change the color of this string, but cap it so that added characters will not be colored.
|
def color_cap(color_letter, string):
return C(color_letter)+string+C('N')
|
[
"def cyan(string):\n if not PrintingOptions().is_colored():\n return string\n return colored(string, 'cyan')",
"def colorize(string: str, color: str, bold: bool = False) -> str:\n color_escape = getattr(colorama.Fore, color.upper(), None)\n if not color_escape:\n return string\n elif not bold:\n return color_escape + string + colorama.Fore.RESET\n else:\n return colorama.Style.BRIGHT + color_escape + string + colorama.Style.RESET_ALL",
"def rainbow(self) -> BetterString:\r\n return BetterString(Color.rainbow(text=self.string))",
"def _blue(self, string):\n return ircutils.mircColor(string, \"blue\")",
"def colorize_text(color, text):\n\n return f\"{COLOR_STYLES[color]}{text}{COLOR_STYLES['REGULAR_TEXT']}\"",
"def blue(string):\n if not PrintingOptions().is_colored():\n return string\n return colored(string, 'blue')",
"def shorten_color(self, color):\n\n if color.lower() not in [\"red\", \"blue\"]:\n return \"Please enter a valid color, \\\"red\\\" or \\\"blue\\\" only.\"\n\n short_color = \"B\"\n if color.lower() == \"red\":\n short_color = \"R\"\n\n return short_color",
"def green(string):\n if not PrintingOptions().is_colored():\n return string\n return colored(string, 'green')",
"def red(string):\n if not PrintingOptions().is_colored():\n return string\n return colored(string, 'red')",
"def __str__(self):\n return f\"#{self.color:06x}\"",
"def color_str(color, raw_str):\n if color == 'r':\n fore = 31\n elif color == 'g':\n fore = 32\n elif color == 'b':\n fore = 36\n elif color == 'y':\n fore = 33\n else:\n fore = 37\n color = \"\\x1B[%d;%dm\" % (1, fore)\n return \"%s%s\\x1B[0m\" % (color, raw_str)",
"def color(self, text, color, on_color=None):\n if self.color_enabled:\n return colored(text, color, on_color)\n return text",
"def changeCharColor(self, ws, row, col, color):\n ws.Cells(row, col).Font.Color = color",
"def _color(string, ansi):\n return \"\\x1b[0;{}m{}\\x1b[0m\".format(ansi, string)",
"def colorize(lead, num, color):\n if num != 0 and ANSIBLE_COLOR and color is not None:\n return \"%s%s%-15s\" % (stringc(lead, color),\n stringc(\"=\", color), stringc(str(num), color))\n else:\n return \"%s=%-4s\" % (lead, str(num))",
"def b(s):\n return \"\\033[1m%s\\033[0m\" % s",
"def with_color(s, colors):\n return colors + s + Color.END",
"def test_make_str_white(self):\n self.assertEqual(ColorStr(\"colored\", COLOR_MAP[\"w\"], force_seq=True), \"\\033[97mcolored\\033[0m\")",
"def colored(msg, color):\n return '{}{}{}'.format(color, msg, RESET)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a string and removes all of the characters in removers.
|
def remove_chars(string, removers):
    new_string = string  # String to edit
    for char in removers:  # Iterate through characters
        new_string = new_string.replace(char, '')  # Remove chars one by one, keeping earlier removals
    return new_string
|
[
"def strip_chars(string, chars):\r\n return \"\".join(c for c in string if c not in chars)",
"def _strip_chars(self, word, chars_to_remove):\n for char in chars_to_remove:\n word = word.replace(char, '')\n return word",
"def remove_letters(letter, string):\r\n \r\n new_string = \"\"\r\n \r\n for i in string:\r\n \r\n if i != letter:\r\n \r\n new_string += i\r\n \r\n return new_string",
"def strip_chars(chars_to_strip, raw_string):\n return ''.join(ch for ch in raw_string if ch not in set(chars_to_strip))",
"def deletechars(orig_str, del_chars):\r\n\tgen = (i for i in orig_str if i not in del_chars)\r\n\tnew_str = ''\r\n\tfor i in gen: new_str += i\r\n\treturn new_str",
"def filter(string):\n\n filtered_string = \"\"\n for char in string:\n if char not in filtered_string:\n filtered_string += char\n\n return filtered_string",
"def remove_reserved_chars(word):\n return \"\".join(i for i in word if i not in r'\\/:*?\"<>|')",
"def remove_special_characters(string):\r\n s = re.sub('[^A-Za-z0-9\\s]+', '', string)\r\n s = re.sub('\\s+', ' ', s)\r\n return s",
"def _clean(string):\n\n grammar_tokens = [\".\", \",\", \"<\", \">\", \"?\", \"!\", \":\", \";\", \"\\\"\", \"(\", \")\", \"{\", \"}\", \"~\", \"|\", \"/\" ] \n\n for g in grammar_tokens: \n string = string.replace(g, \"\")\n\n string = string.replace(\"\\s+\",\" \")\n string = string.lower()\n return string",
"def remove_odd_chars(s: str):\n return \"\".join(v for v in s if v in ascii_letters + digits + \" \")",
"def removeNonAscii(s):\n output = \"\".join(i for i in s if ord(i) < 128)\n return output",
"def __clean_string(self, raw_string, sub_string):\n cleans = re.sub(\"[^0-9a-zA-Z]\", sub_string, raw_string)\n return cleans.lower()",
"def remove_vowels(string):\n # YOUR CODE GOES HERE #\n vowels = {\n 'a', 'e', 'i', 'o', 'u',\n 'A', 'E', 'I', 'O', 'U'\n }\n\n output = ''\n for char in string:\n if char not in vowels:\n output = output + char\n\n return output",
"def remove_control_characters(s):\n return \"\".join(ch for ch in s if unicodedata.category(ch)[0] != \"C\")",
"def strip_vowels(string):\n\treturn ''.join([char for char in string if char.lower() not in 'aeiou'])",
"def remove_symbols(word):\n result = word\n bad_characters = []\n\n for c in result:\n if c not in allowed:\n bad_characters.append(c)\n\n for c in bad_characters:\n result = result.replace(c, '')\n\n return result",
"def cleanForIRI(string):\n \n iri = \"\"\n \n for c in string:\n if c.isalnum() or c in [\"-\", \".\", \"_\", \"~\"]:\n iri+=c\n return iri",
"def remove_substrs(s, remove_list):\n for r in remove_list:\n s = s.replace(r, '')\n return s",
"def stripCreditCardNumber(str):\n legalChars = (' ', '-',)\n for char in legalChars:\n str = str.replace(char, '')\n return str"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Replace one section of a string with another.
|
def replace(string, section, replacement):
    found_spot = string.find(section)
    if found_spot == -1:  # section not present; return the string unchanged
        return string
    dist = len(section)
    newstring = string[:found_spot] + replacement + string[found_spot + dist:]
    return newstring
|
[
"def strReplace( x, idx1, idx2, y):\n\n b0 = x[0:idx1]\n b1 = y\n b2 = x[idx2:]\n b = b0+b1+b2\n return str(b)",
"def transform_string(source: str, s1: str, s2: str) -> str:\n for index in range(len(source)):\n\n # if character is in s1, inserts character in s2 at same index\n if source[index] in s1:\n s1_index = s1.index(source[index])\n source = source[:index] + s2[s1_index] + source[index + 1:]\n\n # all these elif statements check for target values and insert desired character using slice.\n elif source[index].isupper():\n source = source[:index] + ' ' + source[index + 1:]\n\n elif source[index].islower():\n source = source[:index] + '#' + source[index + 1:]\n\n elif source[index].isdigit():\n source = source[:index] + '!' + source[index + 1:]\n\n else:\n source = source[:index] + '=' + source[index + 1:]\n\n return source",
"def replace_string(sequence, motif, index, nofail=False):\n # raise an error if index is outside of the string\n if not nofail and index not in range(len(sequence)):\n raise ValueError(\"index outside given string\")\n\n # if not erroring, but the index is still not in the correct range..\n if index < 0: # add it to the beginning\n return motif + sequence\n if index > len(sequence): # add it to the end\n return sequence + motif\n\n # insert the new string between \"slices\" of the original\n return sequence[:index] + motif + sequence[index + 1:]",
"def replacer(string, bad, good):\n\n for (b, g) in zip(bad, good):\n string = string.replace(b, g)\n return string",
"def slice_replace(word, sl, repl):\n return word[:sl[0]] + repl + word[sl[1]:]",
"def _replace(self, str_to_delete='', str_to_insert='', pre_str=None, post_str=None, pre_ind=None, post_ind=None):\n\n\t\tif str_to_delete:\n\t\t\tpos = self.source.find(str_to_delete)\n\t\t\tif pos == -1:\n\t\t\t\tprint(\"Can't find str_to_delete. Source not replaced.\")\n\t\t\t\treturn\n\t\t\tself.source = self.source.replace(str_to_delete, str_to_insert)\n\t\telse:\n\t\t\tif pre_str is not None:\n\t\t\t\tpos = self.source.find(pre_str)\n\t\t\t\tif pos == -1:\n\t\t\t\t\tprint(\"Can't find pre_str. Source not replaced.\")\n\t\t\t\t\treturn\n\t\t\t\tpre_ind = pos + len(pre_str)\n\t\t\telif pre_ind is None:\n\t\t\t\tpre_ind = len(self.source)\n\n\t\t\tif post_str is not None:\n\t\t\t\tpos = self.source[pre_ind:].find(post_str)\n\t\t\t\tif pos == -1:\n\t\t\t\t\tprint(\"Can't find post_str. Source not replaced.\")\n\t\t\t\t\treturn\n\t\t\t\tpost_ind = pre_ind + pos\n\t\t\telif post_ind is None:\n\t\t\t\tpost_ind = len(self.source)\n\t\t\t\n\t\t\tself.source = self.source[:pre_ind] + str_to_insert + self.source[post_ind:]",
"def replacements(in_string,old_substrings,new_substrings):\n for (old,new) in zip(old_substrings,new_substrings):\n in_string = in_string.replace(old, new)\n return in_string",
"def ireplace(text, old, new):\n assert(isinstance(text, str) and isinstance(old, str))\n use_string_format = '%s' in new\n\n old_len = len(old)\n to_replace = []\n for match in iter_find(text.lower(), old.lower()):\n match = text[match:match+old_len]\n if match not in to_replace:\n if use_string_format:\n to_replace.append((match, new % match))\n else:\n to_replace.append((match, new))\n for rule in to_replace:\n text = text.replace(*rule)\n return text",
"def replace(string, sub_function=google_maps_link()):\n\n def do_replace(match):\n original_string = match.group()\n (latitude, longitude) = _convert(*_cleanup(match.groupdict()))\n return sub_function(MapLink(original_string, latitude, longitude))\n\n string = _normalize_string(string)\n return parser_re.sub(do_replace, string)",
"def replace_all_strings(s, str1):\n for str2, str2_replacement in str1:\n s = s.replace(str2, str2_replacement)\n return s",
"def replace(self, string, substitutions):\n\t\tsubstrings = sorted(substitutions, key=len, reverse=True)\n\t\tregex = re.compile('|'.join(map(re.escape, substrings)))\n\t\treturn regex.sub(lambda match: substitutions[match.group(0)], string)",
"def _Substitute(str, **more):\n subst = _COMMON_SUBSTITUTES.copy()\n subst.update(more.items())\n str = str.format(**subst);\n return str",
"def replace(self, old: str, new: str = \"\", count: int = FULL_SIZE, regex: bool = False) -> BetterString:\r\n # Getting the full size\r\n if count == FULL_SIZE:\r\n count = len(self.string)\r\n\r\n if regex:\r\n ret = sub(old, new, self.string, count)\r\n else:\r\n ret = self.string.replace(old, new, count)\r\n\r\n return BetterString(ret)",
"def format_replace(self, format_string, spec_num, old, new):\n # split into list\n flist = format_string.split(\"%\")\n # make sure we're not looking past the list\n if spec_num + 1 >= len(flist):\n return None\n # replace token\n flist[spec_num + 1] = flist[spec_num + 1].replace(old, new, 1)\n # rejoin string\n return \"%\".join(flist)",
"def insert(a_string, sub_string, index):\n return a_string[:index] + sub_string + a_string[index:]",
"def substitute(head1, replacements):\n headline = str(head1) \n index = random.randint(0, len(replacements)-1)\n sub = replacements[index]\n found = headline.find(sub[0])\n while(found != -1):\n headline = headline[:found] + sub[1] + headline[found + len(sub[0]):]\n found = headline.find(sub[0])\n return headline\n return headline",
"def replace(settings, pattern, pattern_type=None, with_value=None):\n\tfilter = settings.format(settings.content)\n\tfilter.replace(pattern, with_value, pattern_type)\n\tsettings.content = filter.content",
"def test_overlapping_replace(self):\n self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],\n allowoverlap=False,\n site=self.site),\n '2121')\n self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],\n allowoverlap=True,\n site=self.site),\n '2221')\n self.assertEqual(textlib.replaceExcept('1\\n= 1 =\\n', '1', ' \\n= 1 =\\n',\n ['header'],\n allowoverlap=True,\n site=self.site),\n ' \\n= 1 =\\n\\n= 1 =\\n')",
"def replStr(str_in, replace_dict=None, **kwargs):\n\n full_kwargs = {**{\"replace_dict\": replace_dict}, **kwargs} # Rebuild kwargs by merging dicts\n\n if replace_dict:\n for t in re.findall(yaml_reserved[\"tag\"][0]+'(.+?)'+yaml_reserved[\"tag\"][1], str_in): # <<<< tag >>>>\n if t in replace_dict.keys():\n str_in = replaceTag(str_in, t, replace_dict[t], parse=True, **full_kwargs)\n\n return str(str_in)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks to see if pending edits remain. Returns whether or not to end the program.
|
def edit_check():
    if n_edits > 0:
        print("You still have pending edits. Do you want to save them?")
        choice = input("")
        if choice in confirmdict:
            if UI.confirm():
                return True
            else:
                return False
        else:
            print("Choices not confirmed. Do you still want to proceed?")
            choice2 = input("")
            if choice2 not in confirmdict:
                return False
            return True
    else:
        return True
|
[
"def check_modified(self, ):\n if not cmds.file(q=1, modified=1):\n return True\n curfile = cmds.file(q=1, sceneName=1)\n r = cmds.confirmDialog( title='Save Changes', message='Save changes to %s?' % curfile,\n button=['Save', 'Don\\'t Save' ,'Cancel'],\n defaultButton='Save', cancelButton='Cancel',\n dismissString='Cancel')\n if r == 'Cancel':\n return False\n if r == 'Save':\n cmds.file(save=True, force=True)\n return True",
"def checkChanged(self):\n if self.txt.edit_modified():\n self.msg = messagebox.askyesnocancel('Save Data?',\n 'Text is not saved. Save it?')\n if self.msg == None:\n return 'cancel'\n elif self.msg:\n self.__saveHandler()\n return 'ok'",
"def has_pending_changes(self):\n status = self._execute(['git', 'status', '--porcelain',\n '--untracked-files=no',\n '--ignore-submodules=dirty'])\n return status != ''",
"def dirty(self):\n if os.path.exists(self.file_path):\n return False\n else:\n raise RuntimeError(\"Source file missing: %s\" % self.file_path)",
"def is_finished(self):\n return len(self.legalMoves) == 0",
"def check_modified(self):\n return bool(self._modified)",
"def test_ok_to_apply(self):\n self.update_docstrings(self.EDIT_DATA_1)\n doc = self.get_docstring('docs/a')\n doc.ok_to_apply = True\n self.failUnless(doc.ok_to_apply == True)\n\n self.edit_docstring('docs/a', 'test edit')\n doc = self.get_docstring('docs/a')\n self.failUnless(doc.ok_to_apply == False)\n\n doc.ok_to_apply = True\n self.failUnless(doc.ok_to_apply == True)\n\n self.edit_docstring('docs/a', 'test edit 2')\n doc = self.get_docstring('docs/a')\n self.failUnless(doc.ok_to_apply == False)",
"def is_end_state(self):\n actions = self.get_legal_actions()\n if not actions:\n return True\n return False",
"def finished(self):\n return (self.pc >= len(self.program))",
"def has_changes(self):\n return self._repo.is_dirty()",
"def checkAndProceedWithUnsavedChanges(self, test=None, what=\"continue\"):\n if (test is not None and test) or (test is None and self.dw.isChanged):\n dlg = wx.MessageDialog(self.toolFrame, 'Unsaved changes might be lost.\\nAre you sure you want to %s?' % what, style=wx.YES_NO|wx.NO_DEFAULT|wx.ICON_EXCLAMATION, caption='Unsaved changes!')\n if dlg.ShowModal() == wx.ID_NO:\n return False\n return True",
"def dirty(self, keep = True):\n if keep == False:\n self.is_dirty = False\n return self.is_dirty",
"def can_update_commitments(self):\n return # boolean",
"def exit(self):\n\n clearTerminal()\n quitting = True\n if self._modified:\n print('You have made unsaved changes to the ' + self._plural + '. Are you sure you want to exit without saving?\\n')\n quitting = getConfirmation()\n\n if quitting:\n self._running = False\n print('Have a nice day.\\n')",
"def was_edited(self):\n return self.editor_id is not None",
"def NeedCommit(self):\n return self._NeedCommit",
"def canRedo(self):\n return self.index != len(self.commands)",
"def finished(self):\n return (self._curr_date >= self._to_date) and not self._buffer",
"def is_finished(self):\n return self.lives == 0 or all(char in self.guesses for char in self.word)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate fastq_dataframe for pipeline input.
|
def make_fastq_dataframe(file_path, barcode_version, output_path=None):
barcode_version = barcode_version.upper()
if barcode_version == 'V1':
parser = _parse_v1_fastq_path
elif barcode_version == 'V2':
parser = _parse_v2_fastq_path
else:
raise ValueError(f'Primer Version can only be V1 or V2, got {barcode_version}.')
if isinstance(file_path, str) and ('*' in file_path):
file_path = [str(pathlib.Path(p).absolute()) for p in glob.glob(file_path)]
elif isinstance(file_path, list):
pass
else:
with open(file_path) as f:
file_path = [line.strip() for line in f]
log.info(f'{len(file_path)} FASTQ file paths in input')
fastq_data = []
for path in file_path:
name_series = parser(path)
fastq_data.append(name_series)
fastq_df = pd.DataFrame(fastq_data)
log.info(f'{fastq_df.shape[0]} valid fastq names.')
if fastq_df.shape[0] == 0:
log.info('No fastq name remained, check if the name pattern is correct.')
return None
# make sure UID is unique
for _, df in fastq_df.groupby(['lane', 'read_type']):
if df['uid'].unique().size != df['uid'].size:
raise ValueError(f'UID column is not unique.')
if output_path is not None:
fastq_df.to_csv(output_path, index=False)
return fastq_df
|
[
"def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df",
"def write_1d_fastq(self, fastq, attributes):\n basecall_1d_group = self.analysis_grp.create_group(self.__default_basecall_1d__)\n template_data_group = basecall_1d_group.create_group(self.__default_basecalled_template__)\n dt = h5py.special_dtype(vlen=bytes)\n template_data_group.create_dataset(\"Fastq\", data=fastq, dtype=dt)\n attrs = basecall_1d_group.attrs\n for k, v in attributes.items():\n attrs[k] = v",
"def qsf_to_dataframe(qsf_data):\n if hasattr(qsf_data, 'lower'):\n with open(qsf_data) as f:\n return qsf_to_dataframe(f)\n elif hasattr(qsf_data, 'readlines'):\n qsf_data = json.load(qsf_data)\n df = pd.merge(_blocks_to_df(qsf_data), _flows_to_df(qsf_data),\n on='block_id')\n df.sort_values(['flow_idx', 'question_idx'], inplace=True)\n df = pd.merge(df, _questions_to_df(qsf_data), how='inner')\n df.set_index('sub_question_id', inplace=True)\n return df",
"def build_pandas_pipeline():\n categorical_columns = ['category_1', 'category_2']\n numerical_columns = ['number_1', 'number_2', 'number_3']\n\n categorical_transformer = OneHotEncoder(handle_unknown='ignore')\n numerical_transformer = StandardScaler()\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"numerical\", numerical_transformer, numerical_columns),\n (\"categorical\", categorical_transformer, categorical_columns),\n ])\n pipeline = Pipeline(\n steps=[(\"preprocessor\",\n preprocessor), (\"classifier\", linear_model.SGDRegressor())])\n data = pandas_dataframe()\n labels = data['label']\n pipeline.fit(data, labels)\n return pipeline",
"def to_data_frame(self, num_records: int = 0) -> PandasDataFrame:",
"def raw_data(self) -> pd.DataFrame:\n\n min_date = \"2016-01-01\"\n max_date = \"2019-12-13\"\n raw_data = [\n self.generate_data_for_one_customer(i, min_date, max_date)\n for i in range(100)\n ]\n raw_data = pd.concat(raw_data, axis=0)\n for i in range(10):\n raw_data[f\"feat_{i}\"] = np.random.randn(raw_data.shape[0])\n return raw_data",
"def generate_pandas_data(fit_results):\n data = {}\n data[\"q\"] = fit_results.q\n for par in fit_results.parameter:\n data[str(par.values)] = fit_results.parameters.loc[par].values\n pd_data_frame = pd.DataFrame(data = data)\n return pd_data_frame",
"def prepare_data(df, num_features, cat_features, target=None):\r\n\r\n\talgo_df = pd.DataFrame()\r\n\t\r\n\tfor feature in num_features:\r\n\t\talgo_df[feature] = df[feature]\r\n\t\r\n\tfor f in cat_features:\r\n\t\tdf_dummy = pd.get_dummies(df[f], prefix=f)\r\n\t\talgo_df = pd.concat((algo_df, df_dummy), axis=1)\r\n\r\n\treturn algo_df",
"def _bcf_to_df(self):\n dict_list = [v.resume for v in self.variants]\n df = pd.DataFrame.from_records(dict_list)\n try:\n df = df[Filtered_freebayes._col_index]\n except (ValueError, KeyError):\n df = df[Filtered_freebayes._col_index[: len(df.columns)]]\n return df",
"def _gen_df(self, data, columns_num=10):\n\n if len(data) < columns_num:\n data *= columns_num // len(data)\n data.extend(data[:(columns_num % len(data))])\n columns_seq = zip(string.ascii_uppercase[:columns_num], data)\n return pandas.DataFrame(dict(columns_seq))",
"def __call__(self, inputs: DataFrame) -> DataFrame:\n if not self._scheduler:\n raise AttributeError('Pipeline not registered to a Scheduler.')\n\n graph_ctxs = []\n for n in range(self._parallelism):\n graph_ctx = GraphContext(n, self._graph_repr)\n self._scheduler.register(graph_ctx)\n graph_ctxs.append(graph_ctx)\n\n self._outputs = DataFrame()\n\n # Weave inputs into each GraphContext.\n # TODO(fzliu): there are better ways to maintain ordering.\n for n, row in enumerate(inputs.map_iter()):\n idx = n % len(graph_ctxs)\n graph_ctxs[idx](row[0])\n\n for graph_ctx in graph_ctxs:\n graph_ctx.inputs.seal()\n graph_ctx.outputs.wait_sealed()\n\n # TODO(fzliu): Create an operator to merge multiple `DataFrame` instances.\n idx = 0\n merge = True\n while merge:\n for graph_ctx in graph_ctxs:\n elem = graph_ctx.outputs.get(idx, 1)\n if not elem:\n merge = False\n break\n self._outputs.put(elem[0])\n idx += 1\n\n return self._outputs",
"def _build_data_frame(self, company_id):\n company = self.companies[company_id]\n heap = company['heap']\n data_frame = company['data_frame']\n for day in xrange((self.end_date - self.start_date).days + 1):\n next_date = self.start_date + timedelta(days=day)\n date_stamp = \"{}/{}/{}\".format(next_date.month, next_date.day, next_date.year % 1000)\n\n # Assuming '0' value for auto-filled entries.\n if heap:\n next_row = heappop(heap)[1] if heap[0][0] == (next_date - self.start_date).days else \\\n [company_id, date_stamp, 0]\n else:\n next_row = [company_id, date_stamp, 0]\n\n n_val = None if day < self.n else int(next_row[2]) - int(data_frame[day - self.n][2])\n next_row.append(n_val)\n next_row[0] = self.companies[company_id]['name']\n data_frame.append(next_row)",
"def prepare_raw_data(*, df):\n print(f'Preparing the raw data...')\n # Safety measure: drop NaNs\n if df.isna().sum().max() > 0:\n print(f'Dropping {df.isna().sum().max()} NaN rows in raw data')\n df = df.dropna()\n\n df.columns = ['Timestamp', 'Price', 'VolumeBTC']\n\n # In the pandas world, minute is 'T'\n tempres = config.input_data.tempres.lower().replace('m', 'T')\n # Can handle 1s tempres efficiently, since raw data is 1s tempres\n if tempres == '1s':\n df = df.groupby('Timestamp').agg(\n Open=('Price', 'first'),\n High=('Price', 'max'),\n Low=('Price', 'min'),\n Close=('Price', 'last'),\n VolumeBTC=('VolumeBTC', 'sum'))\n # Ensuring order in data\n df = df.sort_index()\n\n df = df.reset_index(drop=False)\n df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')\n\n # For tempres higher than 1s, use resample. NOTE: very memory intensive for tempres = n seconds\n elif pd.Timedelta(tempres) > pd.Timedelta('1s'):\n df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')\n df = df.set_index('Timestamp', append=False)\n df = df.resample(tempres).agg({'Price': ['first', 'min', 'max', 'last'], 'VolumeBTC': 'sum'})\n df = df.dropna(how='any')\n df.columns = ['_'.join(col).strip() for col in df.columns.values]\n\n df = df.rename(columns={'Price_first': 'Open',\n 'Price_min': 'Low',\n 'Price_max': 'High',\n 'Price_last': 'Close',\n 'VolumeBTC_sum': 'VolumeBTC'})\n df = df.sort_index()\n df = df.reset_index(drop=False)\n\n # Add some metadata\n df['Symbol'] = config.input_data.asset\n\n print(f'Done.')\n return df",
"def make_df_efficient(ds):\n data = {}\n for v in ds.data_vars:\n data[v] = ds[v].to_pandas()\n return pd.DataFrame(data)",
"def price_statistic_train(a_freq=[1, 2, 5, 10, 20, 60, 120, 240, 500, 750], past=10, q_step=5, df=DB.get_stock_market_all()):\n df_result = pd.DataFrame()\n # for future in a_freq:\n # df[f\"tomorrow{future}\"] = df[\"close\"].shift(-future) / df[\"close\"]\n # df[f\"past{future}\"] = df[\"close\"] / df[\"close\"].shift(future)\n\n for key, df_filtered in LB.custom_quantile(df=df, column=f\"past{past}\", p_setting=[x/100 for x in range(0, 101, q_step)]).items():\n df_result.at[key, \"count\"] = len(df_filtered)\n df_result.at[key, \"q1\"] ,df_result.at[key, \"q2\"] ,df_result.at[key, \"q1_val\"] ,df_result.at[key, \"q2_val\"]= [float(x) for x in key.split(\",\")]\n for future in a_freq:\n # df_result.at[f\"{from_price,to_price}\", f\"tomorrow{future}_mean\"] = (df_filtered[f\"tomorrow{future}\"].mean())\n # df_result.at[f\"{from_price,to_price}\", f\"tomorrow{future}_std\"] = (df_filtered[f\"tomorrow{future}\"].std())\n df_result.at[key, f\"tomorrow{future}gmean\"] = gmean(df_filtered[f\"tomorrow{future}\"].dropna())\n\n # a_path=LB.a_path(f\"Market/CN/Atest/seasonal/all_date_price_statistic_past_{past}\")\n # LB.to_csv_feather(df_result,a_path,skip_feather=True)\n return df_result",
"def _pybytes_to_dataframe(source):\n reader = pa.RecordBatchStreamReader(source)\n return reader.read_pandas()",
"def to_df(value: Any) -> pd.DataFrame:\n if value is None:\n # This return the empty dataframe, which atm is the same as\n # the empty file object in the CAS.\n # However this is not ensured as pyarrow changes\n return pd.DataFrame()\n\n if isinstance(value, pd.DataFrame):\n return value.copy(deep=True)\n\n if isinstance(value, (pd.Series, pd.Index)):\n if value.name is not None:\n return pd.DataFrame(value)\n\n return pd.DataFrame({\"Result\": value})\n\n if isinstance(value, (Number, str, bool, datetime.datetime)):\n return pd.DataFrame({\"Result\": value}, index=[0])\n\n if isinstance(value, np.ndarray):\n try:\n out_df = pd.DataFrame(value)\n except ValueError:\n squeezed = np.squeeze(value)\n if squeezed.shape == ():\n # must be a scalar\n out_df = pd.DataFrame({\"Result\": squeezed}, index=[0])\n else:\n out_df = pd.DataFrame(squeezed)\n\n if out_df.columns.tolist() == [0]:\n out_df.columns = [\"Result\"]\n\n return out_df\n\n raise ValueError(\"Must return a primitive, pd.DataFrame, pd.Series or numpy array.\")",
"def to_dataframe(directory,im,frame,field):\n #making the dataframe in tidy format\n\n sx, dx = Faster.faster(im)\n dx[\"side\"] = \"dx\"\n sx[\"side\"] = \"sx\"\n df = pd.concat([dx,sx])\n df[\"frame\"] = frame\n df[\"field\"] = field\n df[\"experiment\"] = directory\n df.to_csv(\"coordinates.txt\",index = True,header = None, sep = \" \", mode = \"a\")",
"def sample_dataframe():\n return read_sample_dataframe()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
An RBAC policy is making a network visible to users in a specific tenant. Previously this network was not visible to users in that tenant. We will want to add this tenant to the members list, and also add the RBAC policy.
|
def rbac_create(self, event_type, payload, timestamp):
valid_types = ["network"]
event_type = payload['rbac_policy']['object_type']
action = payload['rbac_policy']['action']
if action not in RBAC_VALID_ACTIONS or event_type not in valid_types:
# I'm bored. Nothing that concerns nor interests us.
return
network_id = payload['rbac_policy']['object_id']
target_tenant = payload['rbac_policy']['target_tenant']
policy_id = payload['rbac_policy']['id']
LOG.debug("Adding RBAC policy for network %s with tenant %s",
network_id, target_tenant)
# Read, modify, write an existing network document. Grab and modify
# the admin version of the document. When saving the document it will
# be indexed for both admin and user.
doc = self.index_helper.get_document(network_id, for_admin=True)
if not doc or not doc['_source']:
LOG.error(_LE('Error adding rule to network. Network %(id)s '
'does not exist.') % {'id': network_id})
return
body = doc['_source']
# Update network with RBAC policy.
add_rbac(body, target_tenant, policy_id)
# Bump version for race condition prevention. Use doc and not
# body, since '_version' is outside of '_source'.
version = doc['_version'] + 1
self.index_helper.save_document(body, version=version)
return pipeline.IndexItem(self.index_helper.plugin,
event_type,
payload,
body)
|
[
"def grant_rbac_policy(self, project_id, object_id, object_type='network'):\n policy = self.get_rbac_policies(retrieve_all=True,\n object_type=object_type,\n object_id=object_id,\n target_tenant=project_id)\n if policy['rbac_policies']:\n self.debug_log(f'{object_type} rbac policy for {project_id} exists')\n return False\n body = { 'rbac_policy': {\n 'target_tenant': project_id,\n 'object_type': object_type,\n 'object_id': object_id,\n 'action': 'access_as_shared' }\n }\n self.debug_log(f'create rbac policy for project {body}')\n if not self.dry_run:\n try:\n self.client.create_rbac_policy(body=body)\n except exceptions.ServiceUnavailable:\n self.log_error('Neutron: Service unavailable!')\n return True",
"async def _addmember(self, ctx, member: discord.Member):\n async with self.config.guild(ctx.guild).tribes() as tribes:\n if tribes:\n for tribe in tribes:\n if ctx.author.id == tribes[tribe][\"owner\"]:\n tribes[tribe][\"allowed\"].append(member.id)\n channel = ctx.guild.get_channel(tribes[tribe][\"channel\"])\n await channel.set_permissions(member, read_messages=True)\n await ctx.send(f\"{member.mention} has been added to the tribe logs\")\n else:\n await ctx.send(f\"You arent set as owner of any tribes to add people on.\")\n else:\n await ctx.send(\"No tribe data available\")",
"def create_rbac_policy(self, **attrs):\n return self._create(_rbac_policy.RBACPolicy, **attrs)",
"def update_rbac_policy(self, rbac_policy, **attrs):\n return self._update(_rbac_policy.RBACPolicy, rbac_policy, **attrs)",
"def deploy_unique_rbac_resources(self):\n corev1 = kube_client.CoreV1Api()\n rbacv1 = kube_client.RbacAuthorizationV1Api()\n\n serviceaccount = corev1.create_namespaced_service_account(\n body=load_resource_yaml(AGENT_SERVICEACCOUNT_PATH), namespace=self.namespace\n )\n\n clusterrole_base = load_resource_yaml(AGENT_CLUSTERROLE_PATH)\n clusterrole_base[\"metadata\"][\"name\"] = f\"signalfx-agent-{self.namespace}\"\n clusterrole = rbacv1.create_cluster_role(body=clusterrole_base)\n\n crb_base = load_resource_yaml(AGENT_CLUSTERROLEBINDING_PATH)\n # Make the binding refer to our testing namespace's role and service account\n crb_base[\"metadata\"][\"name\"] = f\"signalfx-agent-{self.namespace}\"\n crb_base[\"roleRef\"][\"name\"] = clusterrole.metadata.name\n crb_base[\"subjects\"][0][\"namespace\"] = self.namespace\n crb = rbacv1.create_cluster_role_binding(body=crb_base)\n\n configmaprole_base = load_resource_yaml(AGENT_CONFIGMAPROLE_PATH)\n configmaprole_base[\"metadata\"][\"name\"] = f\"signalfx-agent-{self.namespace}\"\n configmaprole_base[\"metadata\"][\"namespace\"] = self.namespace\n configmaprole = rbacv1.create_namespaced_role(self.namespace, body=configmaprole_base)\n\n configmap_rolebinding_base = load_resource_yaml(AGENT_CONFIGMAPROLEBINDING_PATH)\n # Make the binding refer to our testing namespace's role and service account\n configmap_rolebinding_base[\"metadata\"][\"name\"] = f\"signalfx-agent-{self.namespace}\"\n configmap_rolebinding_base[\"metadata\"][\"namespace\"] = self.namespace\n configmap_rolebinding_base[\"roleRef\"][\"name\"] = configmaprole.metadata.name\n configmap_rolebinding_base[\"subjects\"][0][\"namespace\"] = self.namespace\n configmap_rolebinding = rbacv1.create_namespaced_role_binding(self.namespace, body=configmap_rolebinding_base)\n\n try:\n yield\n finally:\n delete_opts = kube_client.V1DeleteOptions(grace_period_seconds=0, propagation_policy=\"Background\")\n\n rbacv1.delete_cluster_role_binding(crb.metadata.name, body=delete_opts)\n rbacv1.delete_cluster_role(clusterrole.metadata.name, body=delete_opts)\n rbacv1.delete_namespaced_role(configmaprole.metadata.name, self.namespace, body=delete_opts)\n rbacv1.delete_namespaced_role_binding(configmap_rolebinding.metadata.name, self.namespace, body=delete_opts)\n corev1.delete_namespaced_service_account(\n serviceaccount.metadata.name, namespace=self.namespace, body=delete_opts\n )\n print(\"Deleted RBAC resources\")",
"def update_account_policy(self, user, account, policy, replace=False):\n return",
"def modify_policy_add_role(\n crm_service: str, project_id: str, role: str, member: str\n) -> None:\n\n policy = get_policy(crm_service, project_id)\n\n binding = None\n for b in policy[\"bindings\"]:\n if b[\"role\"] == role:\n binding = b\n break\n if binding is not None:\n binding[\"members\"].append(member)\n else:\n binding = {\"role\": role, \"members\": [member]}\n policy[\"bindings\"].append(binding)\n\n set_policy(crm_service, project_id, policy)",
"def test_add_permission(self):\n \n self.role1.give_to_permittee(self.u1)\n self.role1.give_to_permittee(self.u2)\n self.assertFalse(has_permission(self.u1, self.project, \"perm2\"))\n self.assertFalse(has_permission(self.u2, self.project, \"perm2\"))\n \n self.role1.add_permission(self.obj_perm2)\n self.assertTrue(has_permission(self.u1, self.project, \"perm2\"))\n self.assertTrue(has_permission(self.u2, self.project, \"perm2\"))",
"def rbac_delete(self, event_type, payload, timestamp):\n policy_id = payload['rbac_policy_id']\n\n # Read, modify, write an existing network document. For both the\n # admin and user version of the document.\n\n # Find all documents (admin and user) with the policy ID.\n docs = self.index_helper.get_docs_by_nested_field(\n \"rbac_policy\", \"rbac_id\", policy_id, version=True)\n\n if not docs or not docs['hits']['hits']:\n return\n\n for doc in docs['hits']['hits']:\n if doc['_id'].endswith(USER_ID_SUFFIX):\n # We only want to use the admin document.\n continue\n body = doc['_source']\n\n target_tenant = None\n policies = body['rbac_policy']\n for p in policies:\n if p.get('rbac_id') == policy_id:\n target_tenant = p['target_tenant']\n\n # Remove target_tenant from members list.\n members_list = (body['members'])\n if target_tenant in members_list:\n members_list.remove(target_tenant)\n body['members'] = members_list\n\n # Remove RBAC policy.\n new_list = [p for p in policies if p.get('rbac_id') != policy_id]\n body['rbac_policy'] = new_list\n\n # Bump version for race condition prevention. Use doc and not\n # body, since '_version' is outside of '_source'.\n version = doc['_version'] + 1\n self.index_helper.save_document(body, version=version)\n return pipeline.IndexItem(self.index_helper.plugin,\n event_type,\n payload,\n body)",
"def perform_create(self, serializer):\n user = self.request.user\n new_sub = serializer.save()\n new_sub.moderators.add(user)\n UserSubMembership.objects.create(user=user, sub=new_sub)",
"def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):\n # MANAGE_ORGANIZATION_AUTH setting checked in RoleAccess\n if isinstance(sub_obj, Role):\n if sub_obj.content_object is None:\n raise PermissionDenied(_(\"The {} role cannot be assigned to a team\").format(sub_obj.name))\n\n if isinstance(sub_obj.content_object, ResourceMixin):\n role_access = RoleAccess(self.user)\n return role_access.can_attach(sub_obj, obj, 'member_role.parents', *args, **kwargs)\n if self.user.is_superuser:\n return True\n\n # If the request is updating the membership, check the membership role permissions instead\n if relationship in ('member_role.members', 'admin_role.members'):\n rel_role = getattr(obj, relationship.split('.')[0])\n return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)\n\n return super(TeamAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)",
"def get_rbac_policy(self, rbac_policy):\n return self._get(_rbac_policy.RBACPolicy, rbac_policy)",
"def add_access_list(self, loadbalancer, access_list):\r\n return loadbalancer.add_access_list(access_list)",
"def _add_acls(self, username, project_id, project_root):\n logger.info('Adding ACLs for %s in project %s', username, project_id)\n client = service_account()\n job = client.jobs.submit(body={\n \"name\": \"{username}-{project_id}-acls\".format(\n username=username,\n project_id=project_id\n ),\n \"appId\": settings.PORTAL_PROJECTS_PEMS_APP_ID,\n \"archive\": False,\n \"parameters\": {\n \"projectId\": project_id,\n \"username\": username,\n \"action\": \"add\",\n \"root_dir\": project_root,\n }\n })\n logger.info('Add ACLs job id: %s', job.id)",
"def statement_deny_put_boundary(self) -> Statement:\n return Statement(\n Action=[\n awacs.iam.PutRolePermissionsBoundary,\n awacs.iam.PutUserPermissionsBoundary,\n ],\n Condition=Condition(\n StringNotEquals(\n {\"iam:PermissionsBoundary\": self.approved_boundary_policies}\n )\n ),\n Effect=Deny,\n Resource=[\n Sub(\"arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*\"),\n Sub(\"arn:${AWS::Partition}:iam::${AWS::AccountId}:user/*\"),\n ],\n Sid=\"DenyPutUnapprovedBoundary\",\n )",
"def add_acl(group: str, permission: str, scope: str, profile: str):\n # Add the acl\n acl_query = 'databricks secrets put-acl'\n acl_query += f' --profile {profile}'\n acl_query += f' --scope {scope}'\n acl_query += f' --principal {group}'\n acl_query += f' --permission {permission}'\n\n # Run and enforce success\n logging.info(f'Adding {permission} to {scope} for {group}')\n sp = subprocess.run(acl_query, capture_output=True)\n sp.check_returncode()",
"def advapi32_AddAccessAllowedAce(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pAcl\", \"dwAceRevision\", \"AccessMask\", \"pSid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def set_balance_policy_for_bonding_device(self, bond_port, policy):\n self.dut.send_expect(\"set bonding balance_xmit_policy %d %s\" % (bond_port, policy), \"testpmd> \")\n new_policy = self.get_bond_balance_policy(bond_port)\n policy = \"BALANCE_XMIT_POLICY_LAYER\" + policy.lstrip('l')\n self.verify(new_policy == policy, \"Set bonding balance policy failed\")",
"def add_acls(user: str, task: str, topic: str, zookeeper_endpoint: str, env_str=None):\n\n _add_role_acls(\"producer\", user, task, topic, zookeeper_endpoint, env_str)\n _add_role_acls(\"consumer --group=*\", user, task, topic, zookeeper_endpoint, env_str)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
 An RBAC policy is making a network invisible to users in a specific tenant. Previously this network was visible to users in that tenant. We will remove this tenant from the members list and also remove the RBAC policy.
|
def rbac_delete(self, event_type, payload, timestamp):
policy_id = payload['rbac_policy_id']
# Read, modify, write an existing network document. For both the
# admin and user version of the document.
# Find all documents (admin and user) with the policy ID.
docs = self.index_helper.get_docs_by_nested_field(
"rbac_policy", "rbac_id", policy_id, version=True)
if not docs or not docs['hits']['hits']:
return
for doc in docs['hits']['hits']:
if doc['_id'].endswith(USER_ID_SUFFIX):
# We only want to use the admin document.
continue
body = doc['_source']
target_tenant = None
policies = body['rbac_policy']
for p in policies:
if p.get('rbac_id') == policy_id:
target_tenant = p['target_tenant']
# Remove target_tenant from members list.
members_list = (body['members'])
if target_tenant in members_list:
members_list.remove(target_tenant)
body['members'] = members_list
# Remove RBAC policy.
new_list = [p for p in policies if p.get('rbac_id') != policy_id]
body['rbac_policy'] = new_list
# Bump version for race condition prevention. Use doc and not
# body, since '_version' is outside of '_source'.
version = doc['_version'] + 1
self.index_helper.save_document(body, version=version)
return pipeline.IndexItem(self.index_helper.plugin,
event_type,
payload,
body)
|
[
"def revoke_rbac_policy(self, project_id, object_id, object_type='network'):\n policy = self.get_rbac_policies(retrieve_all=True,\n object_type=object_type,\n object_id=object_id,\n target_tenant=project_id)\n if not len(policy['rbac_policies']) > 0:\n self.debug_log(f'{object_type} rbac policy for {project_id} do not exists')\n return False\n policy_id = policy['rbac_policies'][0]['id']\n self.debug_log(f'remove rbac policy {policy_id}')\n if not self.dry_run:\n try:\n self.client.delete_rbac_policy(policy_id)\n except exceptions.ServiceUnavailable:\n self.log_error('Neutron: Service unavailable!')\n return True",
"def _revoke_permissions(self: typing.Any) -> None:\n logger.info(\"*** Revoking no longer needed permissions...\")\n if not self.is_drs_enabled:\n EnvironmentUtils.revoke_permissions_to_the_bucket(\n self.sql_service_account,\n self.gcs_bucket_name,\n self.worker_pod_namespace,\n self.worker_pod_name,\n self.worker_container_name,\n )",
"def do_clear_acls(self, args):\n lb = self.findlb(args.loadbalancer, readonly=False)\n lb_acl = lb.accesslist()\n lb_acl.delete()",
"def revoke_access(self, user_uuid):\r\n permission = self.get_permission(user_uuid)\r\n permission.delete()",
"def revoke_access(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"DELETE FROM labcontrol.labmanager_access\n WHERE email = %s\"\"\"\n TRN.add(sql, [self.id])\n TRN.execute()",
"def thb_remove_restrict(self, chat_id, member_id, member_name):\n\n bot = self.bot\n logging.info('User is human')\n bot.restrict_chat_member(\n chat_id, member_id,\n can_send_messages=True,\n can_send_media_messages=True,\n can_send_other_messages=True,\n can_add_web_page_previews=True,\n )\n bot.send_message(\n chat_id,\n BOT_MSGS['allowed'].format(member_name),\n parse_mode=ParseMode.HTML,\n )",
"def modify_policy_remove_member(\n crm_service: str, project_id: str, role: str, member: str\n) -> None:\n\n policy = get_policy(crm_service, project_id)\n\n binding = next(b for b in policy[\"bindings\"] if b[\"role\"] == role)\n if \"members\" in binding and member in binding[\"members\"]:\n binding[\"members\"].remove(member)\n\n set_policy(crm_service, project_id, policy)",
"def statement_deny_remove_boundary_policy(self) -> Statement:\n return Statement(\n Action=[\n awacs.iam.DeleteRolePermissionsBoundary,\n awacs.iam.DeleteUserPermissionsBoundary,\n ],\n Condition=Condition(\n StringEquals({\"iam:PermissionsBoundary\": self.policy_arn})\n ),\n Effect=Deny,\n Resource=[\"*\"],\n Sid=\"DenyRemovalOfBoundaryFromUserOrRole\",\n )",
"def delete_access_list(self, loadbalancer):\r\n return loadbalancer.delete_access_list()",
"def remove_tac_member(partner, member, cursor):\n\n if not g.user.may_perform(Action.UPDATE_TAC_COMMENTS,\n partner=partner):\n raise InvalidUsage(message='You are not allowed to update members of {partner}'\n .format(partner=partner),\n status_code=403)\n\n sql = '''\nDELETE FROM PiptUserTAC\nWHERE\n PiptUser_Id = (SELECT PiptUser_Id FROM PiptUser WHERE Username = %s)\n AND\n Partner_Id = (SELECT Partner_Id FROM Partner WHERE Partner_Code = %s)\n'''\n params = (member, partner)\n cursor.execute(sql, params)",
"def _removeFromAccessCSEBaseACP(self, originator: str) -> None:\n\t\tif (res := CSE.dispatcher.retrieveResource(Configuration.get('cse.security.csebaseAccessACPI'))).resource is not None:\n\t\t\tres.resource.removePermissionForOriginator(originator)\n\t\t\tres.resource.dbUpdate()",
"def revoke_dataset_access(self):\n self.is_active = False\n self.save()",
"def statement_deny_put_boundary(self) -> Statement:\n return Statement(\n Action=[\n awacs.iam.PutRolePermissionsBoundary,\n awacs.iam.PutUserPermissionsBoundary,\n ],\n Condition=Condition(\n StringNotEquals(\n {\"iam:PermissionsBoundary\": self.approved_boundary_policies}\n )\n ),\n Effect=Deny,\n Resource=[\n Sub(\"arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*\"),\n Sub(\"arn:${AWS::Partition}:iam::${AWS::AccountId}:user/*\"),\n ],\n Sid=\"DenyPutUnapprovedBoundary\",\n )",
"def ex_destroy_balancer_access_rule(self, balancer, rule):\r\n accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule)\r\n if not accepted:\r\n msg = 'Delete access rule not accepted'\r\n raise LibcloudError(msg, driver=self)\r\n\r\n return self._get_updated_balancer(balancer)",
"async def RemoveStaticRole(self, user: User): \n await user.remove_roles(self.memberRole)",
"async def rmrank ( self , ctx , member : discord.Member = None , * rankName : str ):\n rank = discord.utils.get (ctx.guild.roles, name = ' ' .join (rankName))\n if member is not None :\n await member.remove_roles (rank)\n await ctx.send ( f ' : white_check_mark: Role ** { rank.name } ** has been removed from ** { member.name } ** ' )\n else :\n await ctx.send ( ' : no_entry: You must specify a user! ' )",
"def test_remove_from_permittee_no_conflict(self):\n \n self.role1.give_to_permittee(self.u1)\n self.assertTrue(has_permission(self.u1, self.project, \"perm1\"))\n\n self.role1.remove_from_permittee(self.u1)\n self.assertFalse(has_permission(self.u1, self.project, \"perm1\"))",
"def RoleRevokePermission(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def abandon_bet(self, abandoner):\n self.bets = [bet for bet in self.bets if bet.user != abandoner]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
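Read alongside the rbac_delete document above: a minimal, self-contained sketch of the core transformation it performs on a network document body, i.e. dropping the deleted policy's target tenant from the members list and filtering that policy out of rbac_policy. The function name and sample values are illustrative only, not part of the searchlight plugin.

def strip_rbac_policy(doc_source, policy_id):
    """Remove the RBAC policy with policy_id from a network document body
    and drop its target_tenant from the members list (illustrative sketch)."""
    policies = doc_source.get('rbac_policy', [])
    # Find the tenant that was granted access by the policy being deleted.
    target_tenant = next(
        (p['target_tenant'] for p in policies if p.get('rbac_id') == policy_id),
        None)
    members = [m for m in doc_source.get('members', []) if m != target_tenant]
    remaining = [p for p in policies if p.get('rbac_id') != policy_id]
    return dict(doc_source, members=members, rbac_policy=remaining)

# Example: 'tenant-b' loses visibility once its policy is removed.
body = {'members': ['tenant-a', 'tenant-b'],
        'rbac_policy': [{'rbac_id': 'p1', 'target_tenant': 'tenant-b'}]}
assert strip_rbac_policy(body, 'p1') == {'members': ['tenant-a'], 'rbac_policy': []}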
 Check the status of the submission. Retry until the status is "Valid", or until the request to get the submission envelope returns an error.
|
def wait_for_valid_status(envelope_url, http_requests):
def log_before(envelope_url):
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print('{0} Getting status for {1}'.format(now, envelope_url))
def keep_polling(response):
# Keep polling until the status is "Valid/Complete" or "Invalid"
envelope_js = response.json()
status = envelope_js.get('submissionState')
print('submissionState: {}'.format(status))
return status not in ('Valid', 'Complete', 'Invalid')
response = http_requests.get(
envelope_url,
before=log_before(envelope_url),
retry=retry_if_result(keep_polling),
)
return response.json()
|
[
"def check_submission(submission):\n desc = sfn.describe_execution(executionArn=submission.execution_arn)\n status = desc[\"status\"]\n result = None\n if status == \"SUCCEEDED\":\n sfn_output = json.loads(desc.get(\"output\", \"{}\"))\n result_s3uri = sfn_output.get(\"Output\", {}).get(\"S3Uri\")\n if result_s3uri:\n results[submission.ix] = result_s3uri\n else:\n pbar.write(\n f\"Doc processed but missing output S3 URI: \"\n f\"{submission.input_doc}\"\n )\n results[submission.ix] = desc\n pbar.update(1)\n elif status != \"RUNNING\":\n results[submission.ix] = desc\n pbar.write(\n \"Doc failed to process - see results for details: \"\n f\"{submission.input_doc}\"\n )\n pbar.update(1)\n else:\n result = submission\n time.sleep(0.08)\n return result",
"def validate_submission(self, submission_path):\n ...",
"def get_status():\n # Only accept JSON\n if not request.json:\n resp = create_response(\"Input should be specified in valid JSON format only\",400)\n return resp\n \n validate_get_status_input(request.json)\n\n name, email_address = MailerUtils.get_name_email_tuple(request.json.get('email'))\n\n # Get the job associated with the given ID from the Queue\n job_id = request.json['id']\n job = q.fetch_job(job_id)\n \n if(job is None):\n resp = create_response(\"Cannot find result for supplied ID and email\", 404)\n return resp\n\n # Get relevant metadata from the job\n mailer_name = job.meta['handled_by'] # Which mailer was used\n messages_info = job.meta['messages_info'] # Info about all recepients and underlying provider specific ID for the request\n\n # Get info about the relevant message\n single_message_info = next(message_info for message_info in messages_info if message_info.get('email_address') == email_address)\n if(single_message_info is None):\n resp = create_response(\"Cannot find message sent to {0} during request with ID {1}\".format(email_address,job_id),404)\n return resp\n\n relevant_mailer = available_mailers[mailer_name]\n status_info = relevant_mailer.get_message_status(single_message_info)\n \n if(status_info is None):\n # Must have timed out\n resp = create_response(\"This request cannot be served right now. Please try again.\", 503)\n return resp\n\n resp = create_response(None, 200, status_info)\n return resp",
"def checkJobState(self, jobSpecId):\n # //\n # // Should we actually submit the job?\n #// The Racers settings in the JobStates DB define how many\n # //times the same identical job can be submitted in parallel\n # // So we check to see how many jobs have been submitted\n #// for this JobSpecID, and if there are too many, it doesnt\n # // get submitted, we send a SubmissionFailed Event\n # //\n #//\n try:\n stateInfo = JobState.general(jobSpecId)\n except StandardError, ex:\n # //\n # // Error here means JobSpecID is unknown to \n #// JobStates DB.\n msg = \"Error retrieving JobState Information for %s\\n\" % jobSpecId\n msg += \"Aborting submitting job...\\n\"\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return {}\n except ProdAgentException, ex:\n # //\n # // Error here means JobSpecID is unknown to \n #// JobStates DB.\n msg = \"Error retrieving JobState Information for %s\\n\" % jobSpecId\n msg += \"Aborting submitting job...\\n\"\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return {}\n\n cacheDir = stateInfo.get('CacheDirLocation', 'UnknownCache')\n if not os.path.exists(cacheDir):\n msg = \"Cache Dir does not exist for job spec id: %s\\n\" % jobSpecId\n msg += \"JobState reports Cache as:\\n %s\\n\" % cacheDir\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit() \n return {}\n \n numRacers = stateInfo['Racers'] # number of currently submitted\n maxRacers = stateInfo['MaxRacers'] # limit on parallel jobs\n\n if numRacers >= maxRacers:\n # //\n # // To many submitted jobs for this JobSpecID already\n #// Abort submission\n msg = \"Too many submitted jobs for JobSpecID: %s\\n\" % jobSpecId\n msg += \"Current Jobs: %s\\n\" % numRacers\n msg += \"Maximum Jobs: %s\\n\" % maxRacers\n logging.warning(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return {}\n\n return stateInfo",
"def check_generation_status(submission, file_type):\n return generation_handler.check_generation(submission, file_type)",
"def _retry_failed_submissions(self):\n\n still_failing = []\n for create_func, batch_data in self._submission_fails:\n try:\n self._submit_batches.submit_update(create_func, batch_data)\n except SubmitBatchesException:\n still_failing.append((create_func, batch_data))\n if self._print_verbose_activated:\n if len(self._submission_fails) > 0:\n print(\"Of\", len(self._submission_fails), \"/\", len(still_failing),\n \"are still failing.\")\n self._submission_fails = still_failing",
"def _check_v2_job_status_on_queue(\n self,\n auth_type,\n auth_value,\n export_status_controller,\n export_status_action,\n export_job_id,\n request_retry=None,\n ):\n request_label = \"TMC v2 Advertiser Stats: Check Export Status\"\n\n v2_export_status_request_url = \\\n self.tune_mat_request_path(\n mat_api_version=\"v2\",\n controller=export_status_controller,\n action=export_status_action\n )\n\n request_params = {auth_type: auth_value, \"job_id\": export_job_id}\n\n self.logger.info(\n \"TMC v2 Advertiser Stats: Check Job Status\",\n extra={\n 'action': export_status_action,\n 'job_id': export_job_id,\n 'request_url': v2_export_status_request_url,\n 'request_params': safe_dict(request_params)\n }\n )\n\n tries = 60 # -1 (indefinite)\n delay = 10\n jitter = 10\n max_delay = 60\n\n if request_retry is not None:\n if 'delay' in request_retry:\n delay = request_retry['delay']\n if 'jitter' in request_retry:\n jitter = request_retry['jitter']\n if 'max_delay' in request_retry:\n max_delay = request_retry['max_delay']\n\n if 'tries' in request_retry:\n tries = request_retry['tries']\n else:\n request_retry.update({'tries': 60})\n else:\n request_retry = {'tries': 60, 'delay': 10, 'timeout': 60}\n\n self.logger.debug(msg=(\"TMC v2 Advertiser Stats: Check Job Status: \" \"Request Retry\"), extra=request_retry)\n\n report_url = None\n _attempts = 1\n export_percent_complete = 0\n\n time.sleep(10)\n\n _tries, _delay = tries, delay\n while True:\n try:\n response = self.mv_request.request(\n request_method=\"GET\",\n request_url=v2_export_status_request_url,\n request_params=request_params,\n request_label=request_label,\n request_retry_func=self.tune_v2_request_retry_func\n )\n\n except TuneRequestBaseError as tmc_req_ex:\n self.logger.error(\n \"TMC v2 Advertiser Stats: Check Job Status: Failed\",\n extra=tmc_req_ex.to_dict(),\n )\n raise\n\n except TuneReportingError as tmc_rep_ex:\n self.logger.error(\n \"TMC v2 Advertiser Stats: Check Job Status: Failed\",\n extra=tmc_rep_ex.to_dict(),\n )\n raise\n\n except Exception as ex:\n print_traceback(ex)\n\n self.logger.error(\"TMC v2 Advertiser Stats: Check Job Status: {}\".format(get_exception_message(ex)))\n raise\n\n http_status_successful = is_http_status_type(\n http_status_code=response.status_code, http_status_type=HttpStatusType.SUCCESSFUL\n )\n\n if not http_status_successful:\n raise TuneReportingError(\n error_message=(\"Failed to get export status on queue: {}\").format(response.status_code),\n error_code=TuneReportingErrorCodes.REP_ERR_REQUEST\n )\n\n if hasattr(response, 'url'):\n self.logger.info(\n \"TMC v2 Advertiser Stats: Reporting API: Status URL\", extra={'response_url': response.url}\n )\n\n json_response = response.json()\n\n if not json_response:\n request_status_successful = False\n\n elif 'status_code' not in json_response:\n request_status_successful = False\n\n else:\n status_code = json_response['status_code']\n\n request_status_successful = is_http_status_type(\n http_status_code=status_code, http_status_type=HttpStatusType.SUCCESSFUL\n )\n\n errors = None\n if 'errors' in json_response:\n errors = json_response['errors']\n\n if not request_status_successful:\n error_message = (\"TMC v2 Advertiser Stats: Check Job Status: GET '{}', Failed: {}, {}\").format(\n v2_export_status_request_url, status_code, errors\n )\n\n if (status_code == TuneReportingError.EX_SRV_ERR_500_INTERNAL_SERVER):\n self.logger.error(error_message)\n\n elif (status_code == TuneReportingError.EX_SRV_ERR_503_SERVICE_UNAVAILABLE):\n 
self.logger.error(error_message)\n\n elif (status_code == TuneReportingError.EX_SRV_ERR_504_SERVICE_TIMEOUT):\n self.logger.error(error_message)\n continue\n\n elif (status_code == TuneReportingError.EX_CLT_ERR_408_REQUEST_TIMEOUT):\n self.logger.error(\n \"GET '{}' request timeout, Retrying: {}\".format(v2_export_status_request_url, status_code)\n )\n continue\n\n else:\n raise TuneReportingError(error_message=error_message, error_code=status_code)\n\n if tries >= 0 and _tries <= 1:\n if (status_code == HttpStatusCode.GATEWAY_TIMEOUT):\n raise TuneReportingError(\n error_message=error_message, error_code=TuneReportingErrorCodes.GATEWAY_TIMEOUT\n )\n elif (status_code == HttpStatusCode.REQUEST_TIMEOUT):\n raise TuneReportingError(\n error_message=error_message, error_code=TuneReportingErrorCodes.REQUEST_TIMEOUT\n )\n else:\n raise TuneReportingError(error_message=error_message, error_code=status_code)\n else:\n self.logger.warning(error_message)\n\n export_percent_complete = 0\n if 'data' in json_response and json_response['data']:\n json_data = json_response['data']\n\n if \"percent_complete\" in json_data:\n export_percent_complete = \\\n safe_int(json_data[\"percent_complete\"])\n\n self.logger.info(\n msg=(\"TMC v2 Advertiser Stats: \"\n \"Check Job Export Status: \"\n \"Response Success\"),\n extra={\n 'job_id': export_job_id,\n 'export_status': json_data[\"status\"],\n 'export_percent_complete': safe_int(export_percent_complete),\n 'attempt': _attempts\n }\n )\n\n if (export_status_action == TuneV2AdvertiserStatsStatusAction.STATUS):\n if (export_percent_complete == 100 and json_data[\"status\"] == \"complete\" and json_data[\"url\"]):\n report_url = json_data[\"url\"]\n\n self.logger.debug(\n \"TMC v2 Advertiser Stats: Check Job Export Status: Completed\",\n extra={\n 'job_id': export_job_id,\n 'action': export_status_action,\n 'report_url': report_url,\n 'request_label': request_label\n }\n )\n\n break\n\n elif (export_status_action == TuneV2AdvertiserStatsStatusAction.DOWNLOAD):\n if (export_percent_complete == 100 and\n json_data[\"status\"] == \"complete\" and\n json_data[\"data\"][\"url\"]):\n report_url = json_data[\"data\"][\"url\"]\n\n self.logger.debug(\n \"TMC v2 Advertiser Stats: Check Job Export Status: Completed\",\n extra={\n 'job_id': export_job_id,\n 'action': export_status_action,\n 'report_url': report_url,\n 'request_label': request_label\n }\n )\n\n break\n else:\n self.logger.debug(\"TMC v2 Advertiser Stats: \" \"Check Job Export Status: \" \"No Data Available\")\n\n if tries >= 0:\n _tries -= 1\n if _tries == 0:\n self.logger.error(\n \"TMC v2 Advertiser Stats: Check Job Export Status: Exhausted Retries\",\n extra={\n 'attempt': _attempts,\n 'tries': _tries,\n 'action': export_status_action,\n 'request_label': request_label,\n 'export_percent_complete': safe_int(export_percent_complete),\n 'job_id': export_job_id\n }\n )\n\n raise TuneReportingError(\n error_message=(\n \"TMC v2 Advertiser Stats: \"\n \"Check Job Export Status: \"\n \"Exhausted Retries: \"\n \"Percent Completed: {}\"\n ).format(safe_int(export_percent_complete)),\n error_code=TuneReportingErrorCodes.REP_ERR_RETRY_EXHAUSTED\n )\n\n _attempts += 1\n\n self.logger.info(\n \"TMC v2 Advertiser Stats: Check Job Status\",\n extra={'attempt': _attempts,\n 'job_id': export_job_id,\n 'delay': _delay,\n 'action': export_status_action}\n )\n\n time.sleep(_delay)\n\n _delay += jitter\n _delay = min(_delay, max_delay)\n\n if export_percent_complete == 100 and not report_url:\n raise TuneReportingError(\n 
error_message=(\"TMC v2 Advertiser Stats: Check Job Export Status: \"\n \"Download report URL: Undefined\"),\n error_code=TuneReportingErrorCodes.REP_ERR_UNEXPECTED_VALUE\n )\n\n self.logger.info(\n \"TMC v2 Advertiser Stats: Check Job Export Status: Finished\",\n extra={\n 'attempt': _attempts,\n 'action': export_status_action,\n 'report_url': report_url,\n 'request_label': request_label,\n 'export_percent_complete': export_percent_complete,\n 'job_id': export_job_id\n }\n )\n\n return report_url",
"def check_status(domain, org_key, job_id, headers):\n url = f\"{domain}/api/investigate/v1/orgs/{org_key}/processes/search_jobs/{job_id}\"\n contacted = \"\"\n completed = \"1\"\n print(\"Checking to see if query has completed...\")\n while contacted != completed:\n response = requests.get(url, headers=headers).json()\n contacted = response.get(\"contacted\")\n completed = response.get(\"completed\")\n print(\"Query has completed.\")\n return True",
"def test_submitter_status_waiting(self):\n\n # login to the website\n self.utils.account.login_as(self.username,self.password)\n\n # submit a ticket\n po = self.catalog.load_pageobject('SupportTicketNewPage')\n po.goto_page()\n problem_text = 'hubcheck test ticket\\n%s' % (self.fnbase)\n po.submit_ticket({'problem' : problem_text})\n\n po = self.catalog.load_pageobject('SupportTicketSavePage')\n self.ticket_number = po.get_ticket_number()\n po.goto_logout()\n\n\n\n assert self.ticket_number is not None, \"no ticket number returned\"\n assert int(self.ticket_number) > 0, \"Submitting a support ticket\" \\\n + \" returned ticket number: %s\" % (self.ticket_number)\n\n # login to the website as a ticket submitter\n self.utils.account.login_as(self.username,self.password)\n\n # change the ticket status\n # we also add a comment so the status change\n # is not hidden from the ticket submitter\n po = self.catalog.load_pageobject('SupportTicketViewPage',\n self.ticket_number)\n po.goto_page()\n comment_data = {\n 'comment' : 'comment',\n 'status' : 'Awaiting user action'\n }\n po.add_comment(comment_data)\n\n # get the ticket status from the comment form.\n current_url = po.current_url()\n status = po.get_ticket_status()\n assert status == \"Open\", \\\n \"After changing the status of support ticket\" \\\n + \" #%s (%s) status = '%s', expected '%s'\" \\\n % (self.ticket_number,current_url,status,comment_data['status'])\n\n # retrieve the last comment\n # check the ticket comment's changelog for the status change\n comment = po.get_nth_comment(-1)\n assert comment.is_new_status_waiting() is False, \\\n \"After changing the status of support ticket\" \\\n + \" #%s (%s) comment status = '%s', expected 'accepted'\" \\\n % (self.ticket_number,current_url,comment.get_status_changes()[1])",
"def validate_status(self, value):\n if self.context['view'].action == 'create':\n if not value == THREAD_INVITE_PENDING:\n raise serializers.ValidationError(\"status must be PENDING for new invite\")\n return value",
"def is_valid(cls, status):\n\n return status == cls.WORKING or status == cls.PUBLISHED or status == cls.ALL",
"def test_sandbox_submission_polling(mocker):\n mocker.patch.object(\n demisto,\n \"demistoVersion\",\n return_value={\"version\": \"6.2.0\", \"buildNumber\": \"12345\"},\n )\n mocker.patch(\n \"TrendMicroVisionOneV3.Client.http_request\",\n mock_sandbox_submission_polling_response,\n )\n mocker.patch(\"CommonServerPython.ScheduledCommand.raise_error_if_not_supported\", lambda: None)\n client = Client(\"https://apimock-dev.trendmicro.com\", api_key, proxy, verify)\n args = {\"task_id\": \"8559a7ce-2b85-451b-8742-4b943ad76a22\"}\n result = get_sandbox_submission_status(args, client)\n assert result.outputs[\"report_id\"] == \"8559a7ce-2b85-451b-8742-4b943ad76a22\"\n assert isinstance(result.outputs[\"type\"], str)\n assert isinstance(result.outputs[\"digest\"], dict)\n assert isinstance(result.outputs[\"arguments\"], str)\n assert isinstance(result.outputs[\"analysis_completion_time\"], str)\n assert isinstance(result.outputs[\"risk_level\"], str)\n assert isinstance(result.outputs[\"detection_name_list\"], list)\n assert isinstance(result.outputs[\"threat_type_list\"], list)\n assert isinstance(result.outputs[\"file_type\"], str)",
"def test_issue_form(self):\n with self.client:\n api_response = issue_form(self)\n response_data = json.loads(api_response.data.decode())\n\n self.assertTrue(response_data['status'] == \"prediction completed\")\n self.assertEqual(api_response.status_code, 200)",
"def fulfill(self):\n self.send_email_sc()\n self.status = self.FULFILLED\n self.save()",
"def _send_response(self):\n for identifier in self._identifiers:\n if identifier in self._responses and self._was_updated(identifier):\n response = requests.post(self._submit_url, {\n \"identifier\": identifier,\n \"api_key\": self._api_key,\n \"notebook\": str(self._notebook),\n \"response\": str(self._responses[identifier]),\n })\n assert response.text != \"SUBMISSION UNSUCCESSFUL\" and response.text == \"SUBMISSION SUCCESSFUL\", \\\n \"submission was not sent successfully\"\n self._updated_since_last_post[identifier] = False",
"def isValidSubmission():\n global scriptDialog\n\n errors = \"\"\n warnings = \"\"\n\n # Check if SU file exists\n sceneFile = scriptDialog.GetValue( \"SceneBox\" )\n if not os.path.isfile( sceneFile ):\n errors += 'SketchUp file \"%s\" does not exist.\\n\\n' % sceneFile\n elif PathUtils.IsPathLocal( sceneFile ) and not scriptDialog.GetValue( \"SubmitSceneBox\" ):\n warnings += 'SketchUp file \"%s\" is local.\\n\\n' % sceneFile\n\n # Check Output\n exportDirectory = scriptDialog.GetValue( \"ExportDirectoryBox\" ).strip()\n if not exportDirectory:\n errors += \"An output directory was not specific.\\n\\n\"\n elif not os.path.isdir( exportDirectory ):\n errors += 'The directory of the output file does not exist: \"%s\"\\n\\n' % exportDirectory\n\n isVray = scriptDialog.GetEnabled( \"VrayBox\" ) and scriptDialog.GetValue( \"VrayBox\" )\n vrayVersion = int( scriptDialog.GetValue( \"VrayVersionBox\" ) )\n vrayFrames = scriptDialog.GetValue( \"VrayFramesBox\" ).strip()\n # Check if a valid frame range has been specified for V-Ray 3 or later\n if isVray and vrayVersion >= 3 and is2dAnimation() and not FrameUtils.FrameRangeValid( vrayFrames ):\n errors += 'Frame range \"%s\" is not valid.\\n\\n' % vrayFrames\n\n if errors:\n scriptDialog.ShowMessageBox( \"The following errors occurred, you must fix these before continuing.\\n\\n%s\" % errors.strip(), \"Error\" )\n return False\n elif warnings:\n result = scriptDialog.ShowMessageBox( \"The following warnings occurred, are you sure you want to continue?\\n\\n%s\" % warnings.strip(), \"Warning\", ( \"Yes\", \"No\" ) )\n if result == \"No\":\n return False\n\n return True",
"def _set_verification_status(self, status):\n attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)\n\n if status in [\"submitted\", \"approved\", \"expired\", \"denied\", \"error\"]:\n attempt.mark_ready()\n attempt = self.submit_attempt(attempt)\n\n if status in [\"approved\", \"expired\"]:\n attempt.approve()\n elif status == \"denied\":\n attempt.deny(\"Denied!\")\n elif status == \"error\":\n attempt.system_error(\"Error!\")\n\n if status == \"expired\":\n days_good_for = settings.VERIFY_STUDENT[\"DAYS_GOOD_FOR\"] # lint-amnesty, pylint: disable=unused-variable\n attempt.expiration_date = now() - timedelta(days=1)\n attempt.save()",
"async def _do_work_claim(self) -> bool:\n # 1. Ask the LTA DB for the next TransferRequest to be picked\n # configure a RestClient to talk to the LTA DB\n lta_rc = ClientCredentialsAuth(address=self.lta_rest_url,\n token_url=self.lta_auth_openid_url,\n client_id=self.client_id,\n client_secret=self.client_secret,\n timeout=self.work_timeout_seconds,\n retries=self.work_retries)\n self.logger.info(\"Asking the LTA DB for a TransferRequest to work on.\")\n pop_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\"\n }\n response = await lta_rc.request('POST', f'/TransferRequests/actions/pop?source={self.source_site}&dest={self.dest_site}', pop_body)\n self.logger.info(f\"LTA DB responded with: {response}\")\n tr = response[\"transfer_request\"]\n if not tr:\n self.logger.info(\"LTA DB did not provide a TransferRequest to work on. Going on vacation.\")\n return False\n # process the TransferRequest that we were given\n try:\n await self._do_work_transfer_request(lta_rc, tr)\n except Exception as e:\n await self._quarantine_transfer_request(lta_rc, tr, f\"{e}\")\n raise e\n # if we were successful at processing work, let the caller know\n return True",
"async def get_status(self):\n\n while True:\n self.logger.info(\"Task \" + self.pipelineId + \" waiting \" + str(self.nextTry) + \" s\")\n await asyncio.sleep(self.nextTry)\n\n response = await self.httpSession.post(self.TASKING_API_URL, {\"pipelineId\": self.pipelineId}, ret_type=\"JSON\")\n\n if response[\"status\"] == \"RESOLVED\":\n break\n else:\n assert(response[\"status\"] == \"PROCESSING\"), \"Remote task has some unexpected status\"\n\n self.nextTry = response[\"nextTry\"]\n self.logger.info(\"Task \" + self.pipelineId + \" status: \" + response[\"status\"])\n\n await self.retrieve()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
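The wait_for_valid_status document above leans on an HttpRequests wrapper with before/retry hooks; as a rough stand-in, the same polling pattern can be written with plain requests and time.sleep. The function name, poll interval, and retry cap below are assumptions for illustration, not the wrapper's API.

import time
import requests

def wait_for_valid_status_simple(envelope_url, poll_interval=5, max_polls=60):
    """Poll the submission envelope until submissionState leaves the pending states."""
    for _ in range(max_polls):
        response = requests.get(envelope_url)
        response.raise_for_status()  # stop retrying if the GET itself fails
        envelope = response.json()
        status = envelope.get('submissionState')
        print('submissionState: {}'.format(status))
        if status in ('Valid', 'Complete', 'Invalid'):
            return envelope
        time.sleep(poll_interval)
    raise TimeoutError('Envelope never reached a terminal state')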
Overrides handle so that the environ is set.
|
def handle(self):
self.environ = self.server._environ.copy()
BaseHTTPRequestHandler.handle(self)
|
[
"def _config(\n self,\n environ,\n start_response,\n set_response\n ):\n self._environ = environ\n self._start_response = start_response\n self._set_response = set_response\n # set default headers\n self.set_headers('content-type', \"application/json\")\n return self",
"def __call__(environ, start_response):",
"def setHandle(self, handle):\n self.__handle = handle",
"def OnosEnvSetup(self, handle):\n self.Gensshkey(handle)\n self.home = self.GetEnvValue(handle, 'HOME')\n self.AddKnownHost(handle, self.OC1, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC2, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC3, \"karaf\", \"karaf\")\n self.DownLoadCode(handle,\n 'https://github.com/wuwenbin2/OnosSystemTest.git')\n # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos')\n if self.masterusername == 'root':\n filepath = '/root/'\n else:\n filepath = '/home/' + self.masterusername + '/'\n self.OnosRootPathChange(filepath)\n self.CopyOnostoTestbin()\n self.ChangeOnosName(self.agentusername, self.agentpassword)\n self.InstallDefaultSoftware(handle)\n self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword)",
"def _base_environ(self, **request):\n environ = {\n 'HTTP_COOKIE': self.cookies.output(header='', sep='; '),\n 'PATH_INFO': '/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'wsgi.version': (1, 0),\n 'wsgi.url_scheme': 'http',\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n }\n environ.update(self.defaults)\n environ.update(request)\n return environ",
"def set_environ(envval):\n if overwrite:\n return lambda k, v: envval.update({k: str(v)})\n return lambda k, v: envval.setdefault(k, str(v))",
"def set_handle(self, handle = \"anon\"):\n Client.handle = handle;\n print '[Client] Handle set to ', handle",
"def _patch_env(**environs: str):\n # Adapted loosely from https://stackoverflow.com/a/34333710\n # Capture the original environ values\n original_environs = {k: os.environ.get(k) for k in environs}\n\n # Patch the environment\n for k, v in environs.items():\n os.environ[k] = v\n try:\n # Run the context manager\n yield\n finally:\n # Restore the original environ values\n for k, v in original_environs.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v",
"def environInject(shellName):",
"def test__EnvGetter__handle__entered():\n def generator_function():\n yield RETURN_TYPE_EXCEPTION, 'koishi'\n \n env = EnvGetter()\n \n env.__enter__()\n \n try:\n env._handle(generator_function(), None)\n except RuntimeError:\n raised = True\n else:\n raised = False\n \n vampytest.assert_false(raised)\n \n vampytest.assert_eq(env._captured, [(RETURN_TYPE_EXCEPTION, 'koishi')])",
"def __attrs_post_init__(self):\n self._env = {} # environment which would be in-effect only for this session\n self._env_permanent = {} # environment variables which would be in-effect in future sessions if resource is persistent",
"def prepare_environment(self) -> None:\n pass",
"def unset_environ_credentials_for_testing() -> None:\n with override_environ(**MockBoto3Session.AWS_CREDENTIALS_ENVIRON_NAME_OVERRIDES):\n yield",
"def patch_using_env(self):\n if self.cred_properties:\n credentials_config = self.cred_properties\n\n user = getenv(\"HERE_USER_ID\") or credentials_config[\"user\"]\n client = getenv(\"HERE_CLIENT_ID\") or credentials_config[\"client\"]\n key = (\n getenv(\"HERE_ACCESS_KEY_ID\")\n or getenv(\"HERE_ACCESS_KEY\")\n or credentials_config[\"key\"]\n )\n secret = (\n getenv(\"HERE_ACCESS_KEY_SECRET\")\n or getenv(\"HERE_ACCESS_SECRET\")\n or credentials_config[\"secret\"]\n )\n endpoint = (\n getenv(\"HERE_TOKEN_ENDPOINT_URL\")\n or getenv(\"HERE_TOKEN_ENDPOINT\")\n or credentials_config[\"endpoint\"]\n )\n credentials_config[\"user\"] = user\n credentials_config[\"client\"] = client\n credentials_config[\"key\"] = key\n credentials_config[\"secret\"] = secret\n credentials_config[\"endpoint\"] = endpoint",
"def init_environ(self):\n\t\t#workdir = wpre + projectname + '/' + setname + '/'\n\t\tself.config['pdict'] = {}\n\t\t#self.config['workdir'] = workdir\n\n\t\tself.config['solvent'] = 'water'\n\t\tself.config['interface'] = 'electronic'\n\t\tself.config['diffuse'] = 'none'",
"def setup_environment():",
"def test_env_Windows(self):\n if not platform.isWindows():\n raise SkipTest('Windows-only test')\n \n r = _spawnDefaultArgs('exec')\n self.assertEqual(r['env'], os.environ)\n \n r = _spawnDefaultArgs('exec', env=None)\n self.assertEqual(r['env'], os.environ)\n \n r = _spawnDefaultArgs('exec', env={'foo': 'bar'})\n e = os.environ.copy()\n e.update({'foo': 'bar'})\n self.assertEqual(r['env'], e)",
"def updated_environ(prepend, overwrite):\n env = os.environ.copy()\n for key, value in prepend.items():\n env[key] = \"{0}{1}{2}\".format(value,\n os.pathsep,\n env.get(key, \"\"))\n\n env.update(overwrite)\n\n old_environ = os.environ\n os.environ = env\n\n try:\n yield env\n finally:\n os.environ = old_environ",
"def setRT_wsgienv(self, env):\n\t\tfrom wsgifront import read_body\n\t\t\n\t\tself.RT.wsgi_env = env\n\t\tself.RT.wsgi_body = read_body(env)",
"def _create_environ(self, url, method, data, refer, content_type=None):\n environ_args = dict(self._wsgi_server, method=method)\n base_url = self._referrer if refer else self._base_url\n environ_args.update(self._canonicalize_url(url, base_url))\n environ_args.update(self._prep_input(method, data, content_type))\n environ = create_environ(**environ_args)\n if refer and self._referrer:\n environ['HTTP_REFERER'] = self._referrer\n environ.setdefault('REMOTE_ADDR', '127.0.0.1')\n self._cookie_jar.export_to_environ(environ)\n return environ"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
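To make the one-line handle override above concrete, here is a hedged sketch of a server/handler pair in which the server carries an _environ dict (an assumed attribute mirroring the snippet, not part of http.server) that each request copies onto the handler before delegating to the stock handle().

from http.server import BaseHTTPRequestHandler, HTTPServer

class EnvironAwareHandler(BaseHTTPRequestHandler):
    def handle(self):
        # Take a per-request copy so request handlers can mutate it freely.
        self.environ = self.server._environ.copy()
        BaseHTTPRequestHandler.handle(self)

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(repr(sorted(self.environ)).encode())

# Usage sketch (attribute name is an assumption carried over from the snippet):
# server = HTTPServer(('localhost', 8000), EnvironAwareHandler)
# server._environ = {'APP_MODE': 'test'}
# server.serve_forever()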
 Create a multiselector form. The form consists of a label for each selector, either taken from a list of labels or else consisting of a prefix and a number. The buttons are labeled with the selector state.
|
def __init__(self, num_selectors,
label_text = [],
label_template = "Channel",
button_text = [],
button_template = "Port",
buttons = 1,
title="MultiSwitch"):
super(MultiSelectorForm, self).__init__()
self.num_selectors = num_selectors
self.label_text = label_text
self.label_template = label_template
self.button_template = button_template
if button_text:
self.button_text = button_text
else:
self.button_text = [""]*buttons
self.title=title
self.state = {}
self.signal = SignalMaker()
|
[
"def select_labels(self) -> List[Label]:",
"def make_choose_control(field_name,\n included_label,\n included_items,\n excluded_label,\n excluded_items,\n item_to_text=str,\n item_to_value=str,\n ordered=0):\n \n # We'll construct an array of buttons. Each element is an HTML\n # input control.\n buttons = []\n # Construct the encoding for the items initially included.\n initial_value = string.join(map(item_to_value, included_items), \",\")\n # The hidden control that will contain the encoded representation of\n # the included items.\n hidden_control = '<input type=\"hidden\" name=\"%s\" value=\"%s\">' \\\n % (field_name, initial_value)\n # Construct names for the two select controls.\n included_select_name = \"_inc_\" + field_name\n excluded_select_name = \"_exc_\" + field_name\n\n # The select control for included items. When the user selects an\n # item in this list, deselect the selected item in the excluded\n # list, if any.\n included_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (included_select_name, excluded_select_name)\n # Build options for items initially selected.\n for item in included_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n included_select = included_select + option\n included_select = included_select + '</select>\\n'\n\n # The select control for excluded items. When the user selects an\n # item in this list, deselect the selected item in the included\n # list, if any.\n excluded_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (excluded_select_name, included_select_name)\n # Build options for items initially excluded.\n for item in excluded_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n excluded_select = excluded_select + option\n excluded_select = excluded_select + '</select>\\n'\n\n # The Add button.\n button = '''\n <input type=\"button\"\n value=\" << Add \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />\n ''' % (excluded_select_name, included_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n # The Remove button.\n button = '''\n <input\n type=\"button\"\n value=\" Remove >> \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" /> \n ''' % (included_select_name, excluded_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n if ordered:\n # The Move Up button.\n button = '''\n <input type=\"button\"\n value=\" Move Up \"\n onclick=\"swap_option(document.form.%s, -1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n\n buttons.append(button)\n\n # The Move Down button.\n button = '''\n <input type=\"button\"\n value=\" Move Down \"\n onclick=\"swap_option(document.form.%s, 1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n buttons.append(button)\n\n # Arrange everything properly.\n buttons = string.join(buttons, \"\\n<br />\\n\")\n return '''\n %(hidden_control)s\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">\n <tr valign=\"center\">\n <td>\n %(included_label)s:\n <br />\n %(included_select)s\n </td>\n <td 
align=\"center\">\n %(buttons)s\n </td>\n <td>\n %(excluded_label)s:<br />\n %(excluded_select)s\n </td>\n </tr>\n </table>\n ''' % locals()",
"def generate_label_inputs(self):\n\n # check that number of labels is not empty\n if self.numLabelsInput.text().strip() != '':\n\n # convert string (number of labels) to integer\n self.num_labels = int(self.numLabelsInput.text())\n\n # delete previously generated widgets\n for input, headline in zip(self.label_inputs, self.label_headlines):\n input.deleteLater()\n headline.deleteLater()\n\n # initialize values\n self.label_inputs = []\n self.label_headlines = [] # labels to label input fields\n margin_top = 400\n\n # show headline for this step\n self.groupBox.setTitle('4. Fill in the labels and click \"Next\"')\n self.groupBox.setStyleSheet('font-weight: bold')\n\n # diplsay input fields\n for i in range(self.num_labels):\n # append widgets to lists\n self.label_inputs.append(QtWidgets.QLineEdit(self))\n self.label_headlines.append(QLabel(f'label {i + 1}:', self))\n self.formLayout.addRow(self.label_headlines[i], self.label_inputs[i])\n\n self.groupBox.setLayout(self.formLayout)\n self.scroll.setWidget(self.groupBox)\n self.scroll.setWidgetResizable(True)",
"def create_labels(self):\n for name in self.names:\n temp_label = Label(text=name)\n self.root.ids.main.add_widget(temp_label)",
"def popup(self, index, dummy):\n mylogger.debug(\"multi-selector form popup(%d) invoked\",index)\n self.dialog = Selector_Form(index)\n mylogger.debug(\"dialog is type %s\", type(self.dialog))\n self.dialog.setupUi(self.button_text, label_default=\"Port\", cols=2)\n self.dialog.setWindowTitle(\"IF \"+str(index))\n self.dialog.show()\n self.dialog.signal.stateChanged.connect(\n slotgen(index,self.update_selector))\n mylogger.debug(\"multi-selector form popup(%d) completed\",index)",
"def create_labels(self):\n for name in self.names:\n label_name = Label(text=name, id=name)\n self.root.ids.entries_box.add_widget(label_name)",
"def labels(self, ids: Iterable[AnyId]) -> LabelSelection:\n return LabelSelection(\n replace(\n self._selection,\n ids=[str(l) for l in ids],\n data=None, # use the edge (parent) data\n parentkey=\"labels\",\n parent=self._selection,\n )\n )",
"def setup_prefixes_ui(self, parent, layout):\n prefixes = self.names_config.get(\"prefixes\", {})\n\n btn_grid = QtWidgets.QGridLayout()\n btn_grid.setObjectName(\"prefixBtnGrid\")\n\n if prefixes:\n # create button for all prefixes\n x = 0\n y = 0\n for prefix in prefixes:\n name = prefix[\"name\"]\n btn = QtWidgets.QPushButton()\n btn.setText(name)\n btn.setCheckable(True)\n btn.clicked.connect(self._on_prefix_or_suffix_clicked)\n btn_grid.addWidget(btn, y, x, 1, 1)\n self.prefix_btns[name] = btn\n\n x += 1\n if x > 1:\n x = 0\n y += 1\n\n layout.addLayout(btn_grid)\n\n else:\n no_names_label = QtWidgets.QLabel(parent)\n no_names_label.setText(\"no prefixes\")\n no_names_label.setProperty(\"cssClasses\", \"help\")\n layout.addWidget(no_names_label)",
"def setup_suffixes_ui(self, parent, layout):\n suffixes = self.names_config.get(\"suffixes\", {})\n\n btn_grid = QtWidgets.QGridLayout()\n btn_grid.setObjectName(\"suffixBtnGrid\")\n\n if suffixes:\n # create button for all suffixes\n x = 0\n y = 0\n for suffix in suffixes:\n name = suffix[\"name\"]\n btn = QtWidgets.QPushButton()\n btn.setText(name)\n btn.setCheckable(True)\n btn.clicked.connect(self._on_prefix_or_suffix_clicked)\n btn_grid.addWidget(btn, y, x, 1, 1)\n self.suffix_btns[name] = btn\n\n x += 1\n if x > 1:\n x = 0\n y += 1\n\n layout.addLayout(btn_grid)\n\n else:\n no_names_label = QtWidgets.QLabel(parent)\n no_names_label.setText(\"no suffixes\")\n no_names_label.setProperty(\"cssClasses\", \"help\")\n layout.addWidget(no_names_label)",
"def render(self, name, value, attrs=None, choices=()):\n\n if value is None:\n value = []\n has_id = attrs and attrs.has_key('id')\n final_attrs = self.build_attrs(attrs, name=name)\n\n # normalize to strings.\n str_values = set([smart_unicode(v) for v in value])\n is_checked = lambda value: value in str_values\n\n # set container fieldset and list\n output = [u'<fieldset id=\"id_%s\">\\n <ul class=\"pick_multi\">' % name]\n\n # add numbered checkboxes wrapped in list items\n chained_choices = enumerate(chain(self.choices, choices))\n for i, (option_value, option_label) in chained_choices:\n option_label = escape(smart_unicode(option_label))\n\n # If an ID attribute was given, add a numeric index as a suffix,\n # so that the checkboxes don't all have the same ID attribute.\n if has_id:\n final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))\n\n cb = widgets.CheckboxInput(final_attrs, check_test=is_checked)\n rendered_cb = cb.render(name, option_value)\n cb_label = (rendered_cb, option_label)\n\n output.append(u' <li><label>%s %s</label></li>' % cb_label)\n\n output.append(u' </ul>\\n</fieldset>')\n return u'\\n'.join(output)",
"def create_package_selector(self, packageList):\n\n # Create group box\n self.packageGroup = QGroupBox()\n self.packageLayout = QGridLayout()\n\n # Create explanation label\n self.packageSelectLabel = QLabel(\"Please select the packages you \" + \\\n \"wish to install. Note, all packages are required to run \" + \\\n \"the program.\")\n self.packageSelectLabel.setStyleSheet(\"font-weight: bold;\")\n self.packageLayout.addWidget(self.packageSelectLabel, 0, 0, 1, 2)\n\n self.packagesChecks = {}\n\n # Iterate over packages, adding all required\n for n, package in enumerate(packageList.keys()):\n self.packagesChecks[package] = QCheckBox(package)\n self.packageLayout.addWidget(self.packagesChecks[package], \\\n 1 + int(n/2), n % 2)\n self.packagesChecks[package].setChecked(True)\n\n # Set group box layout\n self.packageGroup.setLayout(self.packageLayout)\n\n return self.packageGroup",
"def gen_range_selector_labels(total_range: float, min_diff: float, include_standard: bool = True):\n n = len(_LABELS)\n # get index of first button that shows at least 10 min diff data points. Note that\n # the maximum value is n - 2\n min_diff_10 = min_diff / 86400 * 10 # convert to days\n first = n - 2\n\n for i, (width, _) in enumerate(_LABELS[:-1]):\n if width > min_diff_10:\n first = i\n break\n\n # get index of the last button that is at least double the data range. Note that\n # the minimum value is first + 1\n total_range_2 = total_range / 86400 * 2 # convert to days\n last = n - 1\n\n for i, (width, _) in enumerate(_LABELS[first + 1 :]):\n if width > total_range_2:\n last = first + i\n break\n\n # get evenly spaced selection of labels between first and last indices\n labels = [_LABELS[i][1] for i in range(first, last + 1)]\n\n # add special buttons\n if labels[-1][-1] in {\"m\", \"y\"}:\n labels.append(\"ytd\")\n\n if include_standard:\n labels.insert(0, \"fit\")\n labels.insert(0, \"reset\")\n\n return labels",
"def select_controls(**kwargs):\n\n raise NotImplementedError('Function select_controls not implemented for current DCC!')",
"def __init__(\n self,\n button_labels: List[str],\n lines: Optional[List[mpl_lines.Line2D]] = None,\n margin_right: int = 0,\n fontsize: int = 13,\n ):\n for label in button_labels:\n try:\n int(label[:-1])\n assert label[-1] in {\"s\", \"M\", \"h\", \"d\", \"w\", \"m\", \"y\"}\n except ValueError:\n assert label in {\"ytd\", \"fit\", \"reset\"}\n\n if label == \"fit\" and lines is None:\n raise ValueError(\"The 'fit' button requires the 'lines' argument to be defined.\")\n except AssertionError:\n raise ValueError(f\"Invalid range selector button label '{label}'\")\n\n self.dict_ = {\n \"type\": \"range_selector_buttons\",\n \"button_labels\": button_labels,\n \"line_ids\": None if lines is None else get_line_ids(lines),\n \"margin_right\": margin_right,\n \"fontsize\": fontsize,\n }",
"def set_objects(self, nodes_list, widget=\"select\", empty_label=True):\n # Set the widget\n if widget == \"radio\":\n self.fields[\"nodes\"].widget = forms.RadioSelect()\n else:\n self.fields[\"nodes\"].widget = forms.Select()\n\n # Populate the form with all the name of the direct nodes\n elements_list = []\n for element in nodes_list:\n # Get the name of the nodes\n element_name_index = (element.name).index(\"#\")\n element_name = (element.name)[element_name_index + 1 :]\n elements_list.append((element.id, element_name))\n\n self.fields[\"nodes\"].choices = elements_list\n\n # Create an empty label is needed\n if not empty_label:\n self.fields[\"nodes\"].empty_label = None\n else:\n self.fields[\"nodes\"].empty_label = \"Select a database object...\"",
"def createCollapseButtons(self):\n with formLayout(nd=100) as form:\n expBtn = button(l='Expand All', h=20, bgc=[0.26, 0.26, 0.26], c=Callback(self.collapseAll, False))\n cllBtn = button(l='Collapse All', h=20, bgc=[0.26, 0.26, 0.26], c=Callback(self.collapseAll, True))\n formLayout(form, e=True,\n af=[(expBtn, 'left', 1), (cllBtn, 'right', 1)],\n ap=[(expBtn, 'right', 1, 50), (cllBtn, 'left', 1, 50)]\n )",
"def multichoice(prompt,options):\n\t\tform.append([sg.Text(prompt)])\n\t\tfor i in range(0,len(options)):\n\t\t\tform.append([sg.Radio(options[i],prompt,key=prompt)])",
"def create_controls_for_selected(shape_data, link=True):\n result = []\n sel = pm.selected()\n if not sel:\n ctl = create_control(shape_data)\n result.append(ctl)\n else:\n for node in sel:\n if meta.has_metaclass(node, CONTROL_SHAPE_METACLASS):\n # update shape\n replace_shapes(node, shape_data)\n result.append(node)\n else:\n # create new control\n ctl = create_control(shape_data, target_node=node, link=link)\n result.append(ctl)\n pm.select(result)\n return result",
"def create_selected(self):\n curves = []\n sel = cmds.ls(sl=True)\n target = sel[0] if sel else None\n for item in self.control_list.selectedItems():\n text = item.text()\n control_file = os.path.join(CONTROLS_DIRECTORY, '{0}.json'.format(text))\n fh = open(control_file, 'r')\n data = json.load(fh)\n fh.close()\n curve = create_curve(data)\n if target:\n cmds.delete(cmds.parentConstraint(target, curve))\n curves.append(curve)\n if curves:\n cmds.select(curves)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
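The MultiSelectorForm constructor above only stores the label text and templates; the following is a purely illustrative guess at how the per-selector labels described in the query could later be derived (explicit text where given, otherwise prefix plus number). The helper name and numbering scheme are assumptions, not taken from the form class.

def derive_labels(num_selectors, label_text=None, label_template="Channel"):
    """Return one label per selector: explicit text if provided, else '<prefix> <n>'."""
    labels = []
    for index in range(num_selectors):
        if label_text and index < len(label_text):
            labels.append(label_text[index])
        else:
            labels.append("{} {}".format(label_template, index + 1))
    return labels

# Example: two explicit names, the rest filled in from the template.
print(derive_labels(4, label_text=["RF 1", "RF 2"]))
# ['RF 1', 'RF 2', 'Channel 3', 'Channel 4']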
Pop up a 1xN selector if a button is pressed
|
def popup(self, index, dummy):
mylogger.debug("multi-selector form popup(%d) invoked",index)
self.dialog = Selector_Form(index)
mylogger.debug("dialog is type %s", type(self.dialog))
self.dialog.setupUi(self.button_text, label_default="Port", cols=2)
self.dialog.setWindowTitle("IF "+str(index))
self.dialog.show()
self.dialog.signal.stateChanged.connect(
slotgen(index,self.update_selector))
mylogger.debug("multi-selector form popup(%d) completed",index)
|
[
"def buttons(self, state):\n pass",
"def selectEveryNth(self, n): \n\t\tif not n:\n\t\t\tfor i in range(len(self.buttonList)):\n\t\t\t\tself.selectedFrames[i] = 0\n\t\t\t\tself.setButtonState(self.buttonList[i], 0)\n\t\t\treturn\n\t\tfor i, btn in enumerate(self.buttonList):\n\t\t\tif not (i) % n:\n\t\t\t\tself.selectedFrames[i] = 1\n\t\t\t\tself.setButtonState(btn, 1)\n\t\t\telse:\n\t\t\t\tself.selectedFrames[i] = 0\n\t\t\t\tself.setButtonState(btn, 0)",
"def button_press(self, x, y, button):\n base._Widget.button_press(self, x, y, button)\n if button == 1:\n icon = self.get_icon_in_position(x, y)\n if icon is not None:\n cmd = self.progs[icon][\"cmd\"]\n if cmd.startswith(\"qshell:\"):\n exec(cmd[7:].lstrip())\n else:\n self.qtile.spawn(cmd)\n self.draw()",
"def refresh_screen(self):\r\n self.choice = \"\"\r\n self.choice_label.set(\"Choose a letter:\" + self.choice)\r\n self.ok_buttons = [button for sublist in self.buttons for button in sublist]\r\n [button.config(bg=self.BG_DEFAULT, state='normal') for button in self.ok_buttons]",
"def _switch_popup(self, *args):\n self.logger.debug(\" _switch_popup: invoked with %s\",str(args))\n frame, rowname, key, switch, condition = args\n self.logger.debug(\" _switch_popup: switch is %s\", switch)\n selector = Selector_Form(key, parent=self)\n selector.switch = switch\n self.logger.debug(\" _switch_popup: selector is type %s\", type(selector))\n selector.setupUi(switch.inputs, label_default=\"Port\", cols=2)\n selector.setWindowTitle(\"IF selection\")\n selector.show()\n selector.signal.stateChanged.connect(\n slotgen((selector,key,rowname), selector.update_selector))\n self.logger.debug(\n \" _switch_popup: multi-selector form popup(%s) completed\",\n key)",
"def generate_line_selector(start_x=5, start_y=5, max_x=-1, max_y=-1):\n \n def get_line_drawer(x,y,w,h,size):\n def draw_line():\n graphics.set_line_width(size)\n graphics.set_color(0,0,0,1)\n draw.line(x+15,y+10, x+w-15, y+h-10)\n return draw_line\n \n def get_line_setter(size):\n def set_line_size():\n graphics.user_line_size = size\n return set_line_size\n \n line_group = gui.ButtonGroup()\n w, h = resources.SquareButton.width, resources.SquareButton.height\n if max_x < 0: max_x = min(resources.SquareButton.width*6,controlspace.max_x)\n if max_y < 0: max_y = min(resources.SquareButton.height,controlspace.max_y)\n steps = int(max_x/w)\n current_width = 1.0\n max_width = 20.0\n width_inc = (max_width-current_width)/float(steps)\n size_set = False\n newbutton = None\n for x in xrange(start_x, start_x+max_x, w):\n newbutton = gui.Button(text=\"\", image=resources.SquareButton,\n action=get_line_setter(current_width), x=x, y=start_y, \n more_draw=get_line_drawer(x, start_y, w, h, current_width),\n parent_group=line_group)\n controlspace.add(newbutton)\n if graphics.user_line_size <= current_width and not size_set:\n newbutton.action()\n newbutton.select()\n size_set = True\n current_width += width_inc\n if not size_set: newbutton.select()\n return line_group",
"def __choose_player(self, lanel, player, choose):\r\n\r\n def clicking():\r\n \"\"\"\r\n Function that occurs when you press Select.\r\n Coverage of the image is more relevant, and updating of the\r\n dictionary is under review.\r\n If you have already selected two - close the window.\r\n \"\"\"\r\n self.__dict_player[player] = choose\r\n\r\n white = tk.PhotoImage(file=\"ex12//white.png\")\r\n caver = tk.Button(lanel, image=white)\r\n caver.grid(row=(choose % 2) + 1, column=player)\r\n\r\n if len(self.__dict_player) == 2:\r\n self.__root.after(1000, self.__root.destroy)\r\n\r\n return clicking",
"def press(self, button, wait=0.0, port=0):",
"def buttonClickedCallback(self, button, number):\n\t\tshift = wx.GetKeyState(wx.WXK_SHIFT)\n\t\tnumbers = []\n\t\tminNum, maxNum = number, number\n\t\tif shift and self.lastNumber >= 0:\n\t\t\tminNum = min(number, self.lastNumber)\n\t\t\tmaxNum = max(number, self.lastNumber)\n\t\tfor num in range(minNum, maxNum+1):\n\t\t\tflag = not self.selectedFrames.get(num,False)\n\t\t\tif num == self.lastNumber: flag = not flag\n\t\t\tnumbers.append((num,flag))\n\n\t\tfor num,flag in numbers:\n\t\t\tdo_cmd = self.parentPath + \".timepointSelection.setTimepoint(%d, %s)\" % (num, str(flag))\n\t\t\tundo_cmd = self.parentPath + \".timepointSelection.setTimepoint(%d, %s)\" % (num, str(not flag))\n\t\t\n\t\t\tif flag:\n\t\t\t\tdescstr = \"Select timepoint %d for processing\" % num\n\t\t\telse:\n\t\t\t\tdescstr = \"Unselect timepoint %d for processing\" % num\n\t\t\tcmd = lib.Command.Command(lib.Command.GUI_CMD, None, None, do_cmd, undo_cmd, desc = descstr)\n\t\t\tcmd.run()\n\n\t\tself.lastNumber = number",
"def showFileSelector(button):\n if button['new']:\n StudyArea.children = [StudyArea_description, self.user_preference, self.file_selector, self.imageProcessing_Button]\n else:\n StudyArea.children = [StudyArea_description, self.user_preference,self.imageProcessing_Button]",
"def play(self):\n self.__show_bignum('---')\n\n # Get the selected value from the Button Group\n value = self.group.getSelectedButton()['value'][0]\n if value == 'A':\n big = 0 # All small\n else:\n big = int(value) # The number of \"big numbers\"\n\n # Get the random selection and update the display\n sample = self.get_random_selection(big)\n for x in range(6):\n canvas = self.__nums[x]\n canvas.delete(f'num-{x}')\n canvas.drawText(str(sample[x]), 32, 28, font=(\"Arial\", 36, 'bold'), tag=f'num-{x}')",
"def generate_brush_selector(start_x=5,start_y=5,max_x=-1,max_y=-1):\n \n def get_brush_drawer(x,y,w,h,size):\n if size < 1.5: size = 1.5\n def draw_brush():\n graphics.set_color(0,0,0,1)\n graphics.set_line_width(size)\n draw.points((x+w/2,y+h/2))\n return draw_brush\n \n def get_brush_setter(size):\n def set_brush_size():\n graphics.brush_size = size\n return set_brush_size\n \n brush_group = gui.ButtonGroup()\n w, h = resources.SquareButton.width, resources.SquareButton.height\n if max_x < 0: max_x = min(resources.SquareButton.width*6,controlspace.max_x)\n if max_y < 0: max_y = min(resources.SquareButton.height,controlspace.max_y)\n steps = int(max_x/w)\n current_width = 1.0\n max_width = 48.0\n width_inc = (max_width-current_width)/steps\n size_set = False\n newbutton = None\n for x in xrange(start_x, start_x+max_x, w):\n newbutton = gui.Button(text=\"\", image=resources.SquareButton,\n action=get_brush_setter(current_width), x=x, y=start_y, \n more_draw=get_brush_drawer(x, start_y, w, h, current_width),\n parent_group=brush_group)\n controlspace.add(newbutton)\n if graphics.brush_size <= current_width and not size_set:\n newbutton.action()\n newbutton.select()\n size_set = True\n current_width += width_inc\n if not size_set: newbutton.select()\n return brush_group",
"def ace_restriction_select():\n x, y = properties.SCREEN_WIDTH / 2, properties.SCREEN_HEIGHT / 2\n width, height = SUITS[0][1].width, SUITS[0][1].height\n SUITS[0][1].center = (x - width / 2, y - height / 2)\n SUITS[1][1].center = (x + width / 2, y - height / 2)\n SUITS[2][1].center = (x - width / 2, y + height / 2)\n SUITS[3][1].center = (x + width / 2, y + height / 2)\n\n for index, card_suit in enumerate(makao.CardSuit):\n button(None, SUITS[0][1].center[0] - width / 2, SUITS[0][1].center[1] - height / 1.45,\n 2 * width, height / 5, properties.FRAME_COLOR, properties.FRAME_COLOR)\n\n button('Choose suit', SUITS[0][1].center[0] - width / 2 + 5,\n SUITS[0][1].center[1] - height / 1.45 + 5, 2 * width - 10,\n height / 5 - 5, properties.TABLE_CAPTION_COLOR, properties.TABLE_CAPTION_COLOR)\n\n button(None, SUITS[index][1].center[0] - width / 2, SUITS[index][1].center[1] - height / 2,\n width, height, properties.FRAME_COLOR, properties.FRAME_COLOR)\n\n button(None, SUITS[index][1].center[0] - width / 2 + 5,\n SUITS[index][1].center[1] - height / 2 + 5, width - 10, height - 10,\n properties.BUTTON_COLOR, properties.OVER_BUTTON_COLOR)\n\n SCREEN.blit(SUITS[index][0], SUITS[index][1])\n if SUITS[index][1].collidepoint(pygame.mouse.get_pos()):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n return card_suit\n\n pygame.display.update()",
"def _new_button_clicked(self):\n current_widget = self._get_selected_widget()\n current_widget.create_new()",
"def btn2_on(self):\n\n dialog = NewSupplier(self)\n self.dialogs.append(dialog)\n dialog.show()",
"def __init__(self, num_selectors,\n label_text = [],\n label_template = \"Channel\",\n button_text = [],\n button_template = \"Port\",\n buttons = 1,\n title=\"MultiSwitch\"):\n super(MultiSelectorForm, self).__init__()\n self.num_selectors = num_selectors\n self.label_text = label_text\n self.label_template = label_template\n self.button_template = button_template\n if button_text:\n self.button_text = button_text\n else:\n self.button_text = [\"\"]*buttons\n self.title=title\n self.state = {}\n\n self.signal = SignalMaker()",
"def flag_square(self, event, loc):\n row = loc[0]\n col = loc[1]\n flag = Button(self.fieldFrame,text=\" \",bg=\"red\")\n flag.grid(row=row,column=col,sticky=E)\n flag.bind('<Button-3>',lambda event,r=row,c=col:\n self.unflag_square(event,(r,c)))\n self.GUIField[row][col].destroy() # destroy button there previously\n self.GUIField[row][col] = flag # store new button in GUIField\n self.mines -= 1\n self.minesLabel.configure(\n text=(\"Mines Remaining: %03d\" % self.mines) + \"\\n\\n\"\n )",
"def button_dialog(message: str = 'Please select an option.',\n choices: Sequence[str] = ['Cancel', 'OK'],\n **kwargs) -> int:\n return li.button_dialog(\n message, choices,\n icon=[config.root_folder + '/kineticstoolkit/logo.png',\n config.root_folder + '/kineticstoolkit/logo_hires.png'],\n **kwargs)",
"def jack_restriction_select():\n x, y = properties.SCREEN_WIDTH / 2, properties.SCREEN_HEIGHT / 2\n width, height = 120, 120\n\n button(None, x - width * 2, y - height * 1.5, width * 4, height / 2,\n properties.FRAME_COLOR, properties.FRAME_COLOR)\n button('Choose value', x - width * 2 + 5, y - height * 1.5 + 5, width * 4 - 8, height / 2 - 8,\n properties.TABLE_CAPTION_COLOR, properties.TABLE_CAPTION_COLOR)\n\n buttons = []\n\n ii = -2\n for num in range(5, 9):\n button(None, x + width * ii, y - height, width, height,\n properties.FRAME_COLOR, properties.FRAME_COLOR)\n buttons.append(button(str(num), x + width * ii + 5, y - height + 5, width - 8, height - 8,\n properties.BUTTON_COLOR, properties.OVER_BUTTON_COLOR))\n ii += 1\n\n ii = -2\n for num in range(9, 13):\n if num == 11:\n num = 'Q'\n elif num == 12:\n num = 'Nothing'\n\n button(None, x + width * ii, y, width, height,\n properties.FRAME_COLOR, properties.FRAME_COLOR)\n buttons.append(button(str(num), x + width * ii + 5, y + 5, width - 8, height - 8,\n properties.BUTTON_COLOR, properties.OVER_BUTTON_COLOR))\n ii += 1\n\n for index, value in enumerate(buttons):\n if value.collidepoint(pygame.mouse.get_pos()):\n if index != len(buttons) - 1:\n change_value = makao.NOT_SPECIAL_VALUES[index]\n else:\n # change to None\n change_value = 11\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n return change_value\n\n pygame.display.update()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the state of a selector in a group. This will update the selector button text if a new state is provided; otherwise it will open a window to allow the user to select a new state. If a state is obtained either way, the button text will be set to that of the new state. Otherwise, the state is set to -1 and the text to "Unknown".
|
def update_selector(self, index, new_state=-1):
mylogger.debug("update_selector invoked for switch %d",index)
if new_state > -1:
self.state[index] = new_state
else:
try:
self.state[index] = self.dialog.state
self.dialog.close()
except AttributeError:
# program has not yet set the state
self.state[index] = new_state
self.set_button_text(index,-1,text="Unknown")
self.set_button_text(index, self.state[index])
mylogger.debug("new state for switch %d is %d",
index, self.state[index])
self.current_selector = index
self.signal.stateChanged.emit()
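The `SignalMaker` object that provides `stateChanged` is assumed to be a thin Qt signal holder; a possible sketch under that assumption (using PyQt5, not necessarily the project's actual class):

# Hypothetical sketch: a minimal QObject exposing a parameterless
# stateChanged signal, as used via self.signal.stateChanged.emit() above.
from PyQt5.QtCore import QObject, pyqtSignal

class SignalMaker(QObject):
    stateChanged = pyqtSignal()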
|
[
"def setGroupingActive( self, state ):\n self.uiGroupBTN.setChecked(state)",
"def update_state(self):\n state = self.bridge.get_group(self.group_id)\n\n logger.debug(\"group state: %s\", pformat(state))\n\n self._on = state['state']['all_on']\n if self._on or state['action']['bri'] != 1 or self._brightness is None:\n self._brightness = state['action']['bri']\n\n self._state = state",
"def set_default(self, item, state=False):\n for i in self.group:\n if i.label == item:\n i.set_state(state)",
"def set_state(self, state: bool) -> None:\n # Send EasyRemote update_element event for this button\n # with the given state.\n self.er.s.sendto((f\"action=update_element&id={self.id}\"\n f\"&page={self.page}&value={int(state)}\"\n \"&type=btn&event=up\").encode(), self.er.addr)",
"def _set_switch_button_text(self, switch, state,\n button_template=\"Sel \", text=None):\n self.logger.debug(\n \"_set_switch_button_text: setting button %s to text for state %s\",\n switch, state)\n if text:\n pass\n elif switch.inputs:\n self.logger.debug(\n \"_set_switch_button_text: text will be selected from %s\",\n switch.inputs)\n if state != None:\n if switch.inputs[state]:\n text = switch.inputs[state]\n else:\n text = button_template+\" \"+str(state)\n else:\n text = button_template+\" \"+str(state)\n else:\n text = button_template+\" \"+str(state)\n switch.setText(text)",
"def buttons(self, state):\n pass",
"def state(self, val):\n if isinstance(self._state, Button.State):\n self._state = val",
"def _on_selector_change(self, selected: tuple, value: str):\n self._update_from_selection(value)",
"def buttonsState(self, oldCount, newCount):\n\n if newCount <= 0:\n # print(\"View Widget Entering buttonsState 0 rows ...\")\n self.btnGrid.itemAt(_Button.CLEARSELECTION).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.PRINT).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SHOWINFO).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SELECTALL).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SHOWOUTPUT).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SHOWOUTPUTERRORS).widget().setEnabled(False)\n if oldCount < 0:\n totalRows = self.tableView.model.rowCount()\n if totalRows > 0:\n self.btnGrid.itemAt(_Button.SELECTALL).widget().setEnabled(True)\n else:\n totalRows = self.tableView.model.rowCount()\n totalSelectedRows = self.tableView.selectedRowsCount()\n\n # print(\n # (\n # f\"View Widget Entering buttonsState total rows {totalRows} \"\n # f\"total selected rows {totalSelectedRows} selected ...\"\n # )\n # )\n\n if totalRows == 0:\n self.buttonsState(0, 0)\n else:\n self.btnGrid.itemAt(_Button.PRINT).widget().hide()\n self.btnGrid.itemAt(_Button.SHOWINFO).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SHOWOUTPUT).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SHOWOUTPUTERRORS).widget().setEnabled(False)\n if totalSelectedRows == 0:\n self.btnGrid.itemAt(_Button.CLEARSELECTION).widget().setEnabled(\n False\n )\n self.btnGrid.itemAt(_Button.PRINT).widget().setEnabled(False)\n self.btnGrid.itemAt(_Button.SELECTALL).widget().setEnabled(False)\n elif totalSelectedRows == 1:\n self.btnGrid.itemAt(_Button.CLEARSELECTION).widget().setEnabled(\n True\n )\n self.btnGrid.itemAt(_Button.SHOWINFO).widget().setEnabled(True)\n self.btnGrid.itemAt(_Button.SHOWOUTPUT).widget().setEnabled(True)\n self.btnGrid.itemAt(_Button.SHOWOUTPUTERRORS).widget().setEnabled(\n True\n )\n if totalSelectedRows == totalRows:\n self.btnGrid.itemAt(_Button.SELECTALL).widget().setEnabled(False)\n else:\n self.btnGrid.itemAt(_Button.SELECTALL).widget().setEnabled(True)",
"def set_selected(self, state = 1):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.selected\", self._node._eco_id, state)\r\n p2e._app.Exec(arg_str)",
"def initUIState(self):\n\n\t\tself.initButtonGroup(self.outlinerButtonGrp)\n\t\tself.initButtonGroup(self.wireframeButtonGrp)",
"def apply_state_to_menu(menu, state):\n if menu['id'] == 'new_game':\n character = state['character']\n # top level menu\n if 'sex' in character:\n get_item(menu, 'choose_sex')['label'] = 'Choose Sex ({})'.format(strings[character['sex']])\n if 'name' in character:\n get_item(menu, 'choose_name')['label'] = 'Choose Name ({})'.format(character['name'])",
"def set_selected(self, state = True):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.selected\", \r\n self._object._eco_id, state)\r\n p2e._app.Exec(arg_str)\r\n \r\n #Note: Once you finish selecting the individual objects you want, you \r\n #should then call the doc.selection.update function. \r\n p2e.model.selection.update()",
"def refresh_screen(self):\r\n self.choice = \"\"\r\n self.choice_label.set(\"Choose a letter:\" + self.choice)\r\n self.ok_buttons = [button for sublist in self.buttons for button in sublist]\r\n [button.config(bg=self.BG_DEFAULT, state='normal') for button in self.ok_buttons]",
"def check_selected(self, sender, args):\n self._set_states(state=True, selected=True)",
"def update_state(self, dialog):\n\n changed = False\n parent = self.parent\n parent_id = parent.get_named_id('group', None) if isinstance(parent, Group) else None\n awaiting_listeners = AwaitingListeners()\n\n if self._enabled_temp != self._enabled:\n awaiting_listeners = self.send_event('enabling-changed', self)[0]\n changed = True\n self._enabled = self._enabled_temp\n for v in self._allocated_ids:\n dialog.Enable(v, self._enabled)\n if self._visible_temp != self._visible:\n awaiting_listeners = self.send_event('visibility-changed', self)[0]\n changed = True\n self._visible = self._visible_temp\n for v in self._allocated_ids:\n dialog.HideElement(v, not self._visible)\n if parent_id is None: # Notify the elements themselves\n dialog.queue_layout_changed(v)\n\n if changed and parent_id is not None:\n dialog.queue_layout_changed(parent_id)\n\n if awaiting_listeners:\n dialog.widgets.queue(next, awaiting_listeners)",
"def update_selected(self, caller, value):\n for index, node in enumerate(self.data):\n if value == node[\"text\"]:\n self.layout_manager.select_node(index)",
"def _switch_popup(self, *args):\n self.logger.debug(\" _switch_popup: invoked with %s\",str(args))\n frame, rowname, key, switch, condition = args\n self.logger.debug(\" _switch_popup: switch is %s\", switch)\n selector = Selector_Form(key, parent=self)\n selector.switch = switch\n self.logger.debug(\" _switch_popup: selector is type %s\", type(selector))\n selector.setupUi(switch.inputs, label_default=\"Port\", cols=2)\n selector.setWindowTitle(\"IF selection\")\n selector.show()\n selector.signal.stateChanged.connect(\n slotgen((selector,key,rowname), selector.update_selector))\n self.logger.debug(\n \" _switch_popup: multi-selector form popup(%s) completed\",\n key)",
"def display_state(self, running_state):\n if not running_state in [\"running_continuous\",\n \"running_single\",\n \"paused\",\n \"stopped\"]:\n raise ValueError(\"Na running_state should be either \"\n \"running_continuous, \"\n \"running_single, \"\n \"paused or \"\n \"stopped\")\n if running_state==\"running_continuous\":\n self.button_single.setEnabled(False)\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_continuous.setText(\"Pause\")\n return\n if running_state== \"running_single\":\n self.button_single.setEnabled(True)\n self.button_single.setText(\"Pause\")\n self.button_continuous.setEnabled(False)\n self.button_continuous.setText(\"Run continuous\")\n return\n if running_state == \"paused\":\n self.button_continuous.setText(\"Resume continuous\")\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_single.setEnabled(False)\n return\n if running_state == \"stopped\":\n self.button_continuous.setText(\"Run continuous\")\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_single.setEnabled(True)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve data from self.sources into self.directory / 'raw' and validate against checksum.
|
def retrieve(self):
target_dir = self.directory / "raw"
os.makedirs(target_dir, exist_ok=True) # create directory if it doesn't exist
for url, filename, md5_checksum in self.sources:
if utils.is_url(url):
processing_fn = partial(
utils.fetch_url, url=url, filename=filename, target_dir=target_dir
)
else:
processing_fn = partial(
utils.get_and_copy, identifier=url, filename=filename, target_dir=target_dir
)
utils.retrieve_from_cache_if_exists(
filename=filename,
target_dir=target_dir,
processing_fn=processing_fn,
md5_checksum=md5_checksum,
caching_enabled=self.cache,
verbose=self.verbose,
)
            if not self.retrieve_all:  # retrieve just the first dataset, so stop after one successful retrieval
                return
        if self.retrieve_all:  # all datasets retrieved
            return
        else:  # retrieving first dataset only but all fallbacks failed
            raise RuntimeError(f"Unable to download {self.verbose_name} data.")
|
[
"def check_raws(self, _location, _date, _date_raw_data):\n try:\n # raw file names\n _raws = [_s for _s in _date_raw_data if re.match(re.escape(self.id), _s) is not None]\n # deleted?! unset pipelined as well\n if len(_raws) == 0:\n self.db_entry['raw_data']['location'] = []\n self.db_entry['raw_data']['data'] = []\n self.db_entry['raw_data']['last_modified'] = utc_now()\n return {'status': 'error', 'message': 'raw files for {:s} not available any more'.format(self.id),\n 'db_record_update': ({'_id': self.id},\n {\n '$set': {\n # 'exposure': None,\n # 'magnitude': None,\n # 'fits_header': {},\n # 'coordinates': {},\n 'raw_data.location': [],\n 'raw_data.data': [],\n 'raw_data.last_modified':\n self.db_entry['raw_data']['last_modified']\n },\n '$unset': {\n 'pipelined': 1\n }\n })\n }\n # time tags. use the 'freshest' time tag for 'last_modified'\n time_tags = [datetime.datetime.utcfromtimestamp(os.stat(os.path.join(_location, _date, _s)).st_mtime)\n for _s in _raws]\n time_tag = max(time_tags)\n\n # init/changed? the archiver will have to update database entry then:\n if (len(self.db_entry['raw_data']['data']) == 0) or \\\n (abs((time_tag - self.db_entry['raw_data']['last_modified']).total_seconds()) > 1.0):\n self.db_entry['raw_data']['location'] = ['{:s}:{:s}'.format(\n self.config['server']['analysis_machine_external_host'],\n self.config['server']['analysis_machine_external_port']),\n _location]\n self.db_entry['raw_data']['data'] = sorted(_raws)\n self.db_entry['raw_data']['last_modified'] = time_tag\n\n # additionally, fetch FITS header and parse some info from there\n # grab header from the last file, as it's usually the smallest.\n fits_header = get_fits_header(os.path.join(self.db_entry['raw_data']['location'][1],\n self.db_entry['date_utc'].strftime('%Y%m%d'),\n self.db_entry['raw_data']['data'][-1]))\n # save fits header\n self.db_entry['fits_header'] = fits_header\n\n # separately, parse exposure and magnitude\n self.db_entry['exposure'] = float(fits_header['EXPOSURE'][0]) if ('EXPOSURE' in fits_header) else None\n self.db_entry['magnitude'] = float(fits_header['MAGNITUD'][0]) if ('MAGNITUD' in fits_header) else None\n\n # Get and parse coordinates\n _ra_str = fits_header['TELRA'][0] if 'TELRA' in fits_header else None\n _objra = fits_header['OBJRA'][0] if 'OBJRA' in fits_header else None\n if _objra is not None:\n for letter in ('h, m'):\n _objra = _objra.replace(letter, ':')\n _objra = _objra[:-1]\n # print(_objra)\n # TELRA not available? try replacing with OBJRA:\n if _ra_str is None:\n _ra_str = _objra\n\n _dec_str = fits_header['TELDEC'][0] if 'TELDEC' in fits_header else None\n _objdec = fits_header['OBJDEC'][0] if 'OBJDEC' in fits_header else None\n if _objdec is not None:\n for letter in ('d, m'):\n _objdec = _objdec.replace(letter, ':')\n _objdec = _objdec[:-1]\n # print(_objdec)\n # TELDEC not available? 
try replacing with OBJDEC:\n if _dec_str is None:\n _dec_str = _objdec\n\n _az_str = str(fits_header['AZIMUTH'][0]) if 'AZIMUTH' in fits_header else None\n _el_str = str(fits_header['ELVATION'][0]) if 'ELVATION' in fits_header else None\n _epoch = float(fits_header['EQUINOX'][0]) if 'EQUINOX' in fits_header else 2000.0\n\n if None in (_ra_str, _dec_str):\n _azel = None\n _radec = None\n _radec_str = None\n _radec_deg = None\n else:\n if not ('9999' in _ra_str or '9999' in _dec_str):\n # string format: H:M:S, D:M:S\n _radec_str = [_ra_str, _dec_str]\n # the rest are floats [rad]\n _ra, _dec = radec_str2rad(_ra_str, _dec_str)\n _radec = [_ra, _dec]\n # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)\n _radec_deg = [_ra * 180.0 / np.pi - 180.0, _dec * 180.0 / np.pi]\n if (None not in (_az_str, _el_str)) and ('9999' not in (_az_str, _el_str)):\n _azel = [float(_az_str) * np.pi / 180., float(_el_str) * np.pi / 180.]\n else:\n _azel = None\n else:\n _azel = None\n _radec = None\n _radec_str = None\n _radec_deg = None\n\n self.db_entry['coordinates']['epoch'] = _epoch\n self.db_entry['coordinates']['radec_str'] = _radec_str\n self.db_entry['coordinates']['radec_geojson'] = {'type': 'Point', 'coordinates': _radec_deg}\n self.db_entry['coordinates']['radec'] = _radec\n self.db_entry['coordinates']['azel'] = _azel\n\n # DB updates are handled by the main archiver process\n # we'll provide it with proper query to feed into pymongo's update_one()\n return {'status': 'ok', 'message': 'raw files changed',\n 'db_record_update': ({'_id': self.id},\n {\n '$set': {\n 'exposure': self.db_entry['exposure'],\n 'magnitude': self.db_entry['magnitude'],\n 'fits_header': self.db_entry['fits_header'],\n 'coordinates': self.db_entry['coordinates'],\n 'raw_data.location': self.db_entry['raw_data']['location'],\n 'raw_data.data': self.db_entry['raw_data']['data'],\n 'raw_data.last_modified': time_tag\n },\n '$unset': {\n 'pipelined.*': 1\n }\n }\n )\n }\n else:\n return {'status': 'ok', 'message': None}\n\n except Exception as _e:\n traceback.print_exc()\n return {'status': 'error', 'message': str(_e)}",
"def verify(self) -> None:\n for filename, sha256sum in self.config.sha256.items():\n digest = _sha256_digest(os.path.join(self.raw_dataset_dir, filename))\n if digest != sha256sum:\n raise ValueError(f\"Checksum mismatch for file {filename} of {self.config.name} dataset\")\n if not self.config.sha256:\n logger.warning(f\"No sha256 digest provided for dataset {self.config.name}, cannot verify.\")\n logger.info(\"Contents:\")\n for filename in os.listdir(self.raw_dataset_dir):\n path = os.path.join(self.raw_dataset_dir, filename)\n if not os.path.isdir(path):\n digest = _sha256_digest(path)\n logger.info(f\" {filename}: {digest}\")",
"def test_sources_result(self):\n for source in self.sources:\n with self.subTest(source.__name__):\n name = join(\"reference_data\", f\"{source.__name__}.npy\")\n actual_data = get_source_result(source)\n if WRITE_NEW_REFERENCE_DATA:\n np.save(name, actual_data)\n ref_data = np.load(name)\n np.testing.assert_allclose(actual_data, ref_data, rtol=1e-5, atol=1e-8)",
"def _validate_random_hashes(self):\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True",
"def scan_files(self, datasource = None):\n fileList = os.listdir(datasource)\n\n # Now remove directories:\n fileList = [item for item in fileList if not os.path.isdir(item)]\n # Generate filename\n outFile = \"{}.sav\".format(hashlib.sha1(os.path.abspath(datasource).encode()).hexdigest())\n\n self._save_and_compress(os.path.join(self._get_local_repo_base_path(), \"meta\", \"dir\", outFile), data = {\"fileList\": fileList})",
"def _validate_random_hashes(self) -> bool:\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True",
"def checksource(overwrite=True, verbose=False, subdir='', splitcal_vis=''):\n # Read the dataset(s) and get properties\n if (splitcal_vis == ''):\n vislist = glob.glob('*.cal')\n else:\n if (type(splitcal_vis) == str):\n vislist = splitcal_vis.split(',')\n else:\n vislist = splitcal_vis\n print(\"Checking datasets: \", vislist)\n mymsmd = au.createCasaTool(msmdtool)\n if (len(subdir) > 0):\n if (os.path.exists(subdir)):\n if (subdir[-1] != '/'): \n subdir += '/'\n else:\n os.mkdir(subdir)\n if (subdir[-1] != '/'): \n subdir += '/'\n pnglist = []\n textfiles = []\n for vis in vislist:\n mymsmd.open(vis)\n freq=mymsmd.meanfreq(0,unit='GHz')\n # Check Source\n check=mymsmd.fieldsforintent('OBSERVE_CHECK_SOURCE*',True)[0]\n checkid=mymsmd.fieldsforintent('OBSERVE_CHECK_SOURCE*',False)[0]\n checkpos=mymsmd.phasecenter(checkid)\n # Phase calibrator\n phase=mymsmd.fieldsforintent('CALIBRATE_PHASE*',True)[0]\n phaseid=mymsmd.fieldsforintent('CALIBRATE_PHASE*',False)[0]\n phasepos=mymsmd.phasecenter(phaseid)\n if ('OBSERVE_TARGET#ON_SOURCE' in mymsmd.intents()):\n nScienceFields= len(mymsmd.fieldsforintent('OBSERVE_TARGET*',False))\n science = mymsmd.fieldsforintent('OBSERVE_TARGET*',True)[0]\n scienceid = mymsmd.fieldsforintent('OBSERVE_TARGET*',False)[0]\n else:\n nScienceFields = 0\n mymsmd.done()\n\n floatcell = au.pickCellSize(vis, maxBaselinePercentile=99, \n verbose=verbose)\n cell = au.pickCellSize(vis, maxBaselinePercentile=99, cellstring=True, \n verbose=verbose)\n# imsize = int(au.nextValidImsize(int(5.0/floatcell))) # valid when we only had checksources for synthBeam < 0.25\n imsize = int(au.nextValidImsize(int(np.max([5.0,5.0*au.estimateSynthesizedBeam(vis)])/floatcell))) \n print(\"imsize = \", imsize)\n region='circle[[%dpix , %dpix], 15pix ]' % (int(imsize/2),int(imsize/2))\n\n if False:\n # original method (for bands 3-6 only)\n cell = str(np.round(0.015*(100/freq),3))+'arcsec'\n if freq < 116.0:\n imsize = [320,320]\n region='circle[[160pix , 160pix] ,15pix ]'\n else:\n imsize = [680,680]\n region='circle[[340pix , 340pix] ,15pix ]'\n\n ###################################\n # IMAGE \n ###################################\n weighting = 'briggs'\n robust = 0.5\n niter = 50\n threshold = '0.0mJy'\n spw=''\n separation = au.angularSeparationOfTwoFields(vis,checkid,phaseid)\n if (nScienceFields > 0):\n separation_pcal_science = au.angularSeparationOfTwoFields(vis,scienceid,phaseid)\n separation_check_science = au.angularSeparationOfTwoFields(vis,scienceid,checkid)\n\n fieldtype = ['checksource','phasecal']\n field = [check,phase]\n for i,cal in enumerate(field):\n if (not os.path.exists(cal+'_'+vis+'.image') or overwrite):\n os.system('rm -rf '+cal+'_'+vis+'.*')\n if verbose:\n print(\"Running tclean('%s', field='%s', cell=%s, imsize=%s, ...)\" % (vis, cal, str(cell), str(imsize)))\n tclean(vis=vis,\n imagename=cal+'_'+vis,\n field=cal,spw=spw,\n specmode='mfs',\n deconvolver='hogbom',\n imsize = imsize, \n cell= cell, \n weighting = weighting, \n robust = robust,\n niter = niter, \n threshold = threshold, \n interactive = False,\n mask = region,\n gridder = 'standard')\n png = subdir+fieldtype[i]+'_'+cal+'_'+vis+'.image.png'\n pnglist.append(png)\n au.imviewField(cal+'_'+vis+'.image',radius=30*floatcell,\n contourImage=cal+'_'+vis+'.mask',levels=[1],\n plotfile=png)\n\n\n ###################################\n # ANALYZE\n ###################################\n ###########\n # PHASE\n ###########\n imagename=phase+'_'+vis\n if verbose:\n print(\"Running imfit('%s', region='%s')\" % 
(imagename+'.image', region))\n # Fit the phase source to get position and flux\n imagefit=imfit(imagename=imagename+'.image',\n region=region) \n fitresults=au.imfitparse(imagefit)\n\n # Compare the Positions\n phasepos_obs=au.direction2radec(phasepos)\n if fitresults is not None:\n phasepos_fit=','.join(fitresults.split()[:2])\n phasepos_diff=au.angularSeparationOfStrings(phasepos_obs,phasepos_fit,verbose=False)*3600.\n\n # Compare the Flux densities\n peakIntensity = au.imagePeak(imagename+'.image')\n selffluxfile=glob.glob('*.fluxscale')[0]\n fluxscaleResult = au.fluxscaleParseLog(selffluxfile,field=phase)\n if fluxscaleResult is not None:\n selfflux = fluxscaleResult[0][0]\n phaseflux_fit=float(fitresults.split()[2])\n phaseCoherence = 100*peakIntensity/phaseflux_fit\n phaseflux_diff=100*(selfflux-phaseflux_fit)/selfflux\n\n # Print the final results and save to file\n textfile = subdir+'calimage_results_'+vis+'.txt'\n textfiles.append(textfile)\n f = open(textfile,'w')\n f.write('\\n*************************************************************\\n\\n')\n line = 'CHECK_SOURCE IMAGE ANALYSIS REPORT (version %s)\\n' % version(short=True)\n writeOut(f,line)\n info = au.getFitsBeam(imagename+'.image')\n synthBeam = (info[0]*info[1])**0.5\n if fitresults is None:\n line = \"Phasecal %s: imfit failed\" % (phase)\n elif fluxscaleResult is not None:\n line= \"Phasecal %s: Position difference = %s arcsec = %s synth.beam, Flux %% difference = %s\"%(phase,au.roundFiguresToString(phasepos_diff,3), au.roundFiguresToString(phasepos_diff/synthBeam,3), au.roundFiguresToString(phaseflux_diff,3))\n writeOut(f,line)\n line = \" coherence = peakIntensity/fittedFluxDensity = %s%%\" % (au.roundFiguresToString(phaseCoherence,3))\n else:\n line = \"Phasecal %s: Position difference = %s arcsec = %s synth.beam\" % (phase,au.roundFiguresToString(phasepos_diff,3), au.roundFiguresToString(phasepos_diff/synthBeam,3))\n writeOut(f,line)\n f.close()\n if fluxscaleResult is None:\n print(\"Full checksource analysis is not supported if there is no flux calibrator\")\n return textfiles, pnglist\n\n ###########\n # CHECK\n ###########\n imagename=check+'_'+vis\n # Fit the check source to get position and flux\n if verbose:\n print(\"Running imfit('%s', region='%s')\" % (imagename+'.image', region))\n imagefit=imfit(imagename=imagename+'.image',\n region=region) \n fitresults=au.imfitparse(imagefit, deconvolved=True)\n info = au.getFitsBeam(imagename+'.image')\n synthMajor, synthMinor = info[0:2]\n synthBeam = (info[0]*info[1])**0.5\n\n # Compare the Positions\n checkpos_obs=au.direction2radec(checkpos)\n if fitresults is not None:\n checkpos_fit=','.join(fitresults.split()[:2])\n checkpos_diff=au.angularSeparationOfStrings(checkpos_obs,checkpos_fit,\n verbose=False)*3600.\n\n # Compare the Flux densities\n selffluxfile=glob.glob('*.fluxscale')[0]\n results = au.fluxscaleParseLog(selffluxfile,field=check)\n peakIntensity = au.imagePeak(imagename+'.image')\n if (results is not None and fitresults is not None):\n selfflux=results[0][0] \n checkflux_fit=float(fitresults.split()[2])\n\n checkflux_diff=100*(selfflux-checkflux_fit)/selfflux\n checkCoherence = 100*peakIntensity/checkflux_fit\n if fitresults is not None:\n if verbose: \n print(\"Checksource fitresults: \", fitresults)\n deconvolvedMajor = float(fitresults.split()[5])\n deconvolvedMinor = float(fitresults.split()[7])\n\n # Print the final results and save to file\n f=open(textfile,'a')\n if fitresults is None:\n line = \"Checksource %s: imfit failed\" % (phase)\n 
else:\n if (results is not None):\n line= \"\\nChecksource %s: Position difference = %s arcsec = %s synth.beam, Flux %% difference = %s\"%(check ,au.roundFiguresToString(checkpos_diff,3),au.roundFiguresToString(checkpos_diff/synthBeam,3),au.roundFiguresToString(checkflux_diff,3))\n writeOut(f,line)\n line = \" coherence = peakIntensity/fittedFluxDensity = %s%%\" % (au.roundFiguresToString(checkCoherence,3))\n else:\n line= \"\\nChecksource %s: Position difference = %s arcsec = %s synth.beam\" % (check ,au.roundFiguresToString(checkpos_diff,3),au.roundFiguresToString(checkpos_diff/synthBeam,3))\n writeOut(f,line)\n line = \" beam size = %s x %s arcsec\" % (au.roundFiguresToString(synthMajor,3), au.roundFiguresToString(synthMinor,3))\n writeOut(f,line)\n line = \" apparent deconvolved size = %s x %s arcsec = %s synth.beam area\" % (au.roundFiguresToString(deconvolvedMajor,2), au.roundFiguresToString(deconvolvedMinor,2), au.roundFiguresToString(deconvolvedMajor*deconvolvedMinor/(synthBeam**2),2))\n writeOut(f,line)\n line = \" angular separation of phasecal to checksource = %s degree\" % (au.roundFiguresToString(separation,3))\n writeOut(f,line)\n if (nScienceFields > 0):\n if (nScienceFields > 1):\n modifier = 'first'\n else:\n modifier = 'only'\n line = \" angular separation of phasecal to %s science field (%d) = %s degree\" % (modifier,scienceid,au.roundFiguresToString(separation_pcal_science,3))\n writeOut(f,line)\n line = \" angular separation of checksource to %s science field (%d) = %s degree\" % (modifier,scienceid,au.roundFiguresToString(separation_check_science,3))\n writeOut(f,line)\n f.close()\n # end 'for' loop over vislist\n return textfiles, pnglist",
"def cache_sources(self):\n import shutil\n import os\n\n data = self.filesystem.build_path('data')\n\n cache = self.filesystem.source_store\n\n if not os.path.exists(data):\n os.makedirs(data)\n\n for k, v in self.metadata.sources.items():\n fn = self.filesystem.download(k)\n\n base = os.path.basename(fn)\n dest = os.path.join(data, base)\n\n cache_key = self.source_store_cache_key(base)\n\n shutil.copyfile(fn, dest)\n\n if cache and not cache.has(cache_key):\n self.log(\"Putting: {}\".format(cache_key))\n cache.put(fn,cache_key,metadata=dict(vname=self.identity.vname))",
"def _checksum_local_file(self, source_path):\n with open(source_path, 'r') as img_file:\n hasher = hashlib.md5()\n block_size = 0x10000\n buf = img_file.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = img_file.read(block_size)\n source_cksum = hasher.hexdigest()\n return source_cksum",
"def read_sources(self):\n\n sf_fn = self.filesystem.path(self.SOURCES_FILE)\n\n if os.path.exists(sf_fn):\n from sources import SourcesFile\n\n sf = SourcesFile(sf_fn, self.metadata)\n\n sf.read()",
"def validate_checksums(self, dest_dir=THIRDPARTY_DIR):\n real_checksums = self.get_checksums(dest_dir)\n for csk in (\"md5\", \"sha1\", \"sha256\"):\n csv = getattr(self, csk)\n rcv = real_checksums.get(csk)\n if csv and rcv and csv != rcv:\n return False\n return True",
"def _check_sources(self):\n for source_name, source in self.sources.items():\n if \"data\" not in source or \"ref_column\" not in source:\n raise ValueError(\n \"Each source needs to have a `data` and a `ref_column` property\"\n )\n if not isinstance(source[\"data\"], pd.DataFrame):\n raise ValueError(\n \"The `data` property of each source must contain a DatFrame\"\n )\n if not isinstance(source[\"data\"].index, pd.DatetimeIndex):\n raise ValueError(\n \"The `data` DataFrame must have a pd.DatetimeIndex for each source\"\n )\n if source[\"data\"].index.duplicated().any():\n raise ValueError(\n \"The input dataframe must not have duplicate index values, \"\n \"convert the data into a normalized wide format\"\n )\n if (\n not isinstance(source[\"ref_column\"], str)\n or source[\"ref_column\"] not in source[\"data\"].columns\n ):\n raise ValueError(\n \"Each source must have a string specifying the reference column, and the reference\"\n \"column must be available in the source's DataFrame\"\n )\n if self.ref_source_name not in self.sources.keys():\n raise ValueError(\n \"The reference source name must be available in the source dict\"\n )",
"def get_sources(self):\n\n self.sources = []\n cur = self.settings['conn'].cursor()\n cur.execute(\"SELECT id, name, fulltext, mediapath, memo, owner, date FROM source\")\n results = cur.fetchall()\n for r in results:\n guid = self.create_guid()\n suffix = \"txt\"\n if r[3] is not None:\n suffix = r[3].split('.')[-1]\n else:\n if '.' in r[1]:\n suffix = r[1].split('.')[-1]\n if suffix == 'transcribed':\n suffix = 'txt'\n filename = guid + '.' + suffix\n\n plaintext_filename = None\n if r[2] is not None:\n plaintext_filename = self.create_guid() + \".txt\"\n source = {'id': r[0], 'name': r[1], 'fulltext': r[2], 'mediapath': r[3],\n 'memo': r[4], 'owner': r[5], 'date': r[6].replace(' ', 'T'), 'guid': guid,\n 'filename': filename, 'plaintext_filename': plaintext_filename,\n 'external': None}\n if source['mediapath'] is not None:\n fileinfo = os.stat(self.settings['path'] + source['mediapath'])\n if fileinfo.st_size >= 2147483647:\n source['external'] = self.settings['directory']\n self.sources.append(source)",
"def prepare_data(self) -> None:\n if not os.path.exists(self.data_dir):\n os.mkdir(self.data_dir)\n\n urlretrieve(\n url=f'https://raw.githubusercontent.com/Ninarehm/attack/master/Fairness_attack/data/{self.file_name}',\n filename=self.path_to_file\n )",
"def check_file_hashes(self):\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if filename != \"datapackage.json\" and filename != \"datapackage-digest.json\":\n file = open(filepath, \"rb\").read()\n hash = support_hash_file(self.hash_type, file)\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n res = None\n for item in self.datapackage[\"resources\"]:\n if item[\"path\"] == file:\n res = item\n if res == None or (res[\"hash\"] != hash):\n print(\n \"\\nfile %s's hash does not match the hash listed in the datapackage\"\n % file\n )\n return False\n return True",
"def checksum(self):\n checksums = {\n 'slug': hashlib.sha256(\n '{self.name}.{self.dtype}'.encode('utf-8')\n ).hexdigest(),\n 'files': {}\n }\n def file_hash(filepath):\n running_hash = hashlib.sha256()\n with open(filepath, 'rb') as IN:\n while True:\n # Read file in as little chunks.\n buf = IN.read(4096)\n if not buf:\n break\n running_hash.update(buf)\n return running_hash.hexdigest()\n # iterate over the direcory and calucalte the hash\n for root,dirs,files in os.walk(self.thawed_dir):\n for file_path in sorted(files):\n full_path = str(Path(root)/file_path)\n # Calculate a relative path to the freezable object\n rel_path = full_path.replace(str(self.thawed_dir)+'/','')\n # calculate and store the checksums\n phash = file_hash(full_path)\n checksums['files'][rel_path] = phash\n # calculate the total\n total = hashlib.sha256(checksums['slug'].encode('utf-8'))\n # Iterate over filenames AND hashes and update checksum\n for filename,csum in checksums['files'].items():\n total.update(filename.encode('utf-8'))\n total.update(csum.encode('utf-8'))\n checksums['total'] = total.hexdigest()\n return checksums",
"def validate(self):\r\n\t\tfrom ..nrml import NRMLError\r\n\r\n\t\tsource_ids = []\r\n\t\tfor source in self.sources:\r\n\t\t\tif not source.source_id in source_ids:\r\n\t\t\t\tsource_ids.append(source.source_id)\r\n\t\t\telse:\r\n\t\t\t\traise NRMLError(\"Duplicate source id found: %s\" % source.source_id)",
"def __load_sources(self):\r\n for root, _, files in os.walk(os.sep.join(['modules', self.module, 'sources'])):\r\n for file_ in files:\r\n path = os.path.join(root, file_)\r\n ext = os.path.splitext(path)[1]\r\n if ext in ('.yaml', '.yml'):\r\n Ruleset.logger.debug('Loading source from file \"%s\".', path)\r\n with open(path) as stream:\r\n self.sources.append(Source(yaml.safe_load(stream)))",
"def write_data_source_files(self) -> None:\n data_sources_dir = self.ids.additional_output_dir / f\"{self.ids.short_polarity}_data_sources\"\n if len(list(data_sources_dir.glob(\"*\"))) >= 4:\n logger.warning(\n (\n \"Data sources directory already populated from previous work on this analysis. \"\n \"Not overwriting.\"\n )\n )\n else:\n shutil.rmtree(data_sources_dir, ignore_errors=True)\n logger.info(\"Writing data source files to %s.\", data_sources_dir)\n ma_data.make_data_sources_tables(\n self.ids.groups, self.atlas, self.ids.additional_output_dir, self.ids.short_polarity\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load polling data for UK General Elections.
|
def load_polling_data(self):
polls = {}
for geo in self.geos:
poll_df = pd.read_csv(
self.directory / "raw" / f"general_election-{geo}-polls.csv", parse_dates=["to"]
).sort_values("to")
poll_df.columns = utils.sanitise(
poll_df.columns,
replace={"ulster_unionist_party": "uup", "sinn_fein": "sf", "alliance": "apni"},
)
polls[geo] = poll_df
return polls
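`utils.sanitise` is assumed to normalise column names to snake_case and then apply the explicit replacements passed in `replace`; a rough sketch under that assumption (the real helper may differ):

# Hypothetical sketch of a column sanitiser in the spirit of utils.sanitise:
# lower-case, convert to snake_case, then apply explicit renames such as
# {"sinn_fein": "sf"}.
import re

def sanitise(columns, replace=None):
    replace = replace or {}
    out = []
    for col in columns:
        name = re.sub(r"[^0-9a-z]+", "_", str(col).strip().lower()).strip("_")
        out.append(replace.get(name, name))
    return out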
|
[
"def load_poll_data():\n polls = []\n \n with open('./cogs/polls.json', 'r', encoding='utf-8') as poll_file:\n try:\n polls = json.load(poll_file)\n except json.JSONDecodeError:\n pass\n return polls",
"def fetch_data(self):\r\n print(\"Fetching Data from USGS Water Services API\")\r\n self.response = requests.get(self.complete_url)\r\n self.response.raise_for_status()",
"def get_regional_and_national_poll_of_polls(self, polls):\n election_day = self.now_date\n one_week_before = election_day - pd.Timedelta(days=7)\n one_month_before = election_day - pd.Timedelta(days=30)\n\n # Use single last poll from each pollster in final week of polling then average out\n final_polls = {}\n for geo in self.geos:\n period_before = one_week_before if geo == \"uk\" else one_month_before\n final_polls[geo] = self.calculate_poll_of_polls(\n polls=polls[geo], from_date=period_before, to_date=election_day\n )\n # Consider MRPs equivalent to a large poll\n final_polls[geo].loc[final_polls[geo].method == \"MRP\", \"sample_size\"] = (\n final_polls[geo].query('method != \"MRP\"').sample_size.max()\n )\n # Handle missing sample sizes\n mean_sample_size = final_polls[geo].query('method != \"MRP\"').sample_size.mean()\n if pd.isnull(mean_sample_size):\n mean_sample_size = 1\n final_polls[geo][\"sample_size\"] = final_polls[geo].sample_size.fillna(mean_sample_size)\n\n # Calculate regional polling\n regional_polling_missing = any(final_polls[geo].empty for geo in self.geos)\n\n # Regional polling is missing, just calculate UK-level polling only.\n if regional_polling_missing:\n # TODO: Check how this affects 2015/2017 models\n parties = [\"con\", \"lab\", \"ld\", \"ukip\", \"grn\", \"chuk\", \"bxp\", \"snp\"]\n # Create new polls dictionary by geo containing simple average across all pollsters\n national_polling = final_polls[\"uk\"].mean().loc[parties]\n # We don't yet have regional polling in 2015 for Scotland, Wales, NI, London - add as other.\n national_polling[\"other\"] = 1 - national_polling.sum()\n poll_of_polls = {\"uk\": national_polling}\n # Turn into dataframe\n polls_df_list = []\n for geo in poll_of_polls:\n polls_df_list.append(\n pd.DataFrame(\n {\n \"geo\": geo,\n \"party\": poll_of_polls[geo].index,\n \"voteshare\": poll_of_polls[geo],\n }\n ).reset_index(drop=True)\n )\n polls_df = pd.concat(polls_df_list, axis=0)\n\n # We have polling for all regions.\n else:\n parties = {\n # TODO: Add [\"chuk\", \"bxp\", \"ukip\"] to uk, scotland, wales, london\n \"uk\": [\"con\", \"lab\", \"ld\", \"grn\", \"snp\"],\n \"scotland\": [\"con\", \"lab\", \"ld\", \"snp\", \"grn\"],\n \"wales\": [\"con\", \"lab\", \"ld\", \"pc\", \"grn\"],\n \"ni\": [\"dup\", \"uup\", \"sf\", \"sdlp\", \"apni\", \"grn\", \"con\"],\n \"london\": [\"con\", \"lab\", \"ld\", \"grn\"],\n \"england_not_london\": [\"con\", \"lab\", \"ld\", \"grn\"],\n }\n all_parties = set(x for y in parties.values() for x in y)\n poll_of_polls = {}\n for geo in self.geos:\n sample_size_weights = (\n final_polls[geo].sample_size / final_polls[geo].sample_size.sum()\n )\n weighted_poll_of_polls = (\n final_polls[geo][parties[geo]]\n .multiply(sample_size_weights, axis=0)\n .sum()\n .reindex(all_parties, fill_value=0.0)\n )\n poll_of_polls[geo] = weighted_poll_of_polls\n\n # Estimate polling for England excluding London\n # survation_wts from http://survation.com/wp-content/uploads/2017/06/Final-MoS-Post-BBC-Event-Poll-020617SWCH-1c0d4h9.pdf\n survation_wts = pd.Series({\"scotland\": 85, \"england\": 881, \"wales\": 67, \"ni\": 16})\n survation_wts[\"uk\"] = survation_wts.sum()\n survation_wts[\"london\"] = 137\n survation_wts[\"england_not_london\"] = survation_wts.england - survation_wts.london\n\n england_not_london = poll_of_polls[\"uk\"] * survation_wts[\"uk\"]\n for geo in [\"scotland\", \"wales\", \"ni\", \"london\"]:\n england_not_london = england_not_london.sub(\n poll_of_polls[geo] * survation_wts[geo], 
fill_value=0.0\n )\n england_not_london /= survation_wts[\"england_not_london\"]\n england_not_london.loc[[\"pc\", \"snp\"]] = 0.0\n poll_of_polls[\"england_not_london\"] = england_not_london\n\n # Fix PC (Plaid Cymru) for UK\n poll_of_polls[\"uk\"][\"pc\"] = (\n poll_of_polls[\"wales\"][\"pc\"] * survation_wts[\"wales\"] / survation_wts[\"uk\"]\n )\n\n # Add Other & normalise\n for geo in self.geos + [\"england_not_london\"]:\n poll_of_polls[geo][\"other\"] = max(\n 1 - poll_of_polls[geo].sum(), 0\n ) # weighted means can sum > 1\n poll_of_polls[geo] = poll_of_polls[geo] / poll_of_polls[geo].sum()\n\n # Export\n polls_df_list = []\n for geo in poll_of_polls:\n polls_df_list.append(\n pd.DataFrame(\n {\n \"geo\": geo,\n \"party\": poll_of_polls[geo].index,\n \"voteshare\": poll_of_polls[geo],\n }\n ).reset_index(drop=True)\n )\n polls_df = pd.concat(polls_df_list, axis=0)\n\n return polls_df",
"def load_new_data(self):\n r = requests.get(self.STATUS_URL)\n raw_data = self._received_data_processor(r.text)\n soup = BeautifulSoup(raw_data, 'lxml')\n self.status_data = soup.find(\"service\").find(\"subway\").findAll(\"line\")",
"def load_current_elections(self, bucket_name, blob_name): \n filepath = os.path.join(self.path, blob_name)\n \n #load data from Google Cloud Storage \n self.client.download_file(filepath, bucket_name, blob_name)\n data = self.client.load_tmp_json(filepath)\n\n try: \n elections = data['elections']\n logging.info(\"Successfully loaded current elections data.\")\n return elections\n except KeyError as error: \n logging.error(f\"There are no current elections stored in file: 'gs://' {bucket_name} + '/' + {blob_name}\")\n raise\n except Exception as error: \n logging.error(f\"Error loading current elections from file: 'gs://' {bucket_name} + '/' + {blob_name}\")\n logging.error(error)\n raise",
"def company_info_loader(self):\n\n\n self.data_retriever()\n self.exp_type_loc_table()",
"def load_data_from_slack():\n\n Channel.load_from_slack()\n User.load_from_slack()",
"def loadResources(self):\n AbstractSelection.loadResources(self)\n if self.resource_pool.has_key(data_sources.CVFOLDS):\n #fs = self.resource_pool[data_sources.CVFOLDS]\n #self.folds = fs.readFolds()\n self.folds = self.resource_pool[data_sources.CVFOLDS]\n elif self.resource_pool.has_key(data_sources.TRAIN_QIDS):\n self.folds = self.resource_pool[data_sources.TRAIN_QIDS]\n #self.folds = qsource.readFolds()",
"def loadData(self):\r\n\r\n dbName = self.db_info['name']\r\n hostname = self.db_info['hostname']\r\n user = self.db_info['user']\r\n pwd = self.db_info['pwd']\r\n label_coll_name = self.db_info['label_coll_name']\r\n history_coll_name = self.db_info['history_coll_name']\r\n port = self.db_info['port']\r\n\r\n try:\r\n print(\"Trying connection...\")\r\n client = MongoClient(hostname)\r\n client[dbName].authenticate(user, pwd)\r\n db = client[dbName]\r\n print(\"Connected to mongodb @ {0}:[{1}]\".format(\r\n hostname, port))\r\n except Exception as E:\r\n print(\"Fail to connect mongodb @ {0}:{1}, {2}\".format(\r\n hostname, port, E))\r\n exit()\r\n\r\n # Read label collection\r\n collection = db[label_coll_name]\r\n num_urls = collection.count()\r\n data = {}\r\n if num_urls > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_urls):\r\n wid = dataDB[i]['idna']\r\n data[wid] = dataDB[i]['value']\r\n if 'url' not in data[wid]:\r\n data[wid]['url'] = wid\r\n\r\n # Read history\r\n collection = db[history_coll_name]\r\n num_events = collection.count()\r\n labelhistory = {}\r\n if num_events > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_events):\r\n wid = dataDB[i]['idna']\r\n labelhistory[wid] = dataDB[i]['value']\r\n\r\n df_labels, df_preds = self.get_df(data, labelhistory)\r\n\r\n # In the current version, predictions are not being stored in the\r\n # mongo db. They must be loaded from files.\r\n if os.path.isfile(self.datapreds_file):\r\n # Load prediction dataframes stored in pickle files\r\n df_preds = pd.read_pickle(self.datapreds_file)\r\n\r\n return df_labels, df_preds, labelhistory",
"def _load(self):\n self.get_table()\n self._get_pole()\n self._get_winners()",
"async def load_data(self):\n await self.bot.wait_until_ready()\n guild = discord.utils.get(\n self.bot.guilds,\n name='Hatventures Community'\n )\n\n self.guild = guild",
"def load_gas_consumption():\n _load_consumption_from_api(settings.GAS_CONSUMPTION_URL, models.GasConsumption)",
"def refresh_gics():\n\n # Delete all records\n TGICSIndustry.delete_all_data()\n TGICSIndustryGroup.delete_all_data()\n TGICSSector.delete_all_data()\n TGICSSubIndustry.delete_all_data()\n\n # Get all objects in static database\n # and pass all data to insert function\n # Sector\n d = GICSSector.objects.all()\n TGICSSector.populate_data(d)\n\n # Sub industry group\n d = GICSIndustryGroup.objects.all()\n TGICSIndustryGroup.populate_data(d)\n\n # Sub industry group\n d = GICSIndustry.objects.all()\n TGICSIndustry.populate_data(d)\n\n # Sub industry group\n d = GICSSubIndustry.objects.all()\n TGICSSubIndustry.populate_data(d)",
"async def cog_load(self) -> None:\n await self.bot.wait_until_guild_available()\n await self.refresh_inventories()",
"def test_load_data(self):\n\n load_data(example_agency)\n\n # Check that agency elements are loaded\n a = Agency.objects.get(name='Environmental Protection Agency')\n self.assertEqual('environmental-protection-agency', a.slug)\n self.assertEqual('The mission of EPA is to protect', a.description)\n self.assertEqual(['Acid Rain', 'Agriculture'], a.keywords)\n self.assertEqual(['common request 1'], a.common_requests)\n self.assertEqual(['no records about 1'], a.no_records_about)\n\n # Check that elements from top-level (sub_agency) offices are loaded\n sub_a = Agency.objects.get(\n name='Region 10 (States: AK, ID, OR, WA)')\n self.assertEqual(\n 'region-10-states-ak-id-or-wa', sub_a.slug)\n self.assertEqual(['keyword 1', 'keyword 2'], sub_a.keywords)\n self.assertEqual(a, sub_a.parent)\n # Ensure that abbreviations are not overwritten\n self.assertEqual('R9', sub_a.abbreviation)\n self.assertEqual(['common request 1'], sub_a.common_requests)\n self.assertEqual(['no records about 1'], sub_a.no_records_about)\n self.assertEqual(\n 'The mission of this sub is...', sub_a.description)\n\n # Check that elements from regular offices are loaded\n o = Office.objects.get(\n name='Region 9 (States: AZ, CA, HI, NV, AS, GU)')\n self.assertEqual(\n 'environmental-protection-agency-' +\n '-region-9-states-az-ca-hi-nv-as-gu', o.slug)",
"async def load_eu_data():\n eu_url = \"https://sourceforge.net/projects/miuimix/rss?path=/\"\n async with ClientSession() as session:\n stable = eT.fromstring(await fetch(session, f'{eu_url}/weekly'))\n weekly = eT.fromstring(await fetch(session, f'{eu_url}/stable'))\n stable_links = [i.find('link').text for i in stable[0].findall('item')]\n weekly_links = [i.find('link').text for i in weekly[0].findall('item')]\n return [*stable_links, *weekly_links]",
"def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()",
"def _download_data(self):\n self.raw_data = requests.get(self.api_address).json()\n self.age = datetime.now()",
"async def test_load_standard_empty(self):\n async with LOCK:\n mgr = pub.getDefaultTopicMgr()\n mgr.delTopic(ALL_LINK_RECORD_RESPONSE)\n aldb = ModemALDB(random_address())\n aldb.read_write_mode = ReadWriteMode.STANDARD\n pub.subscribe(send_nak_response, SEND_FIRST_TOPIC)\n\n response = await aldb.async_load()\n _LOGGER.debug(\"Done LOAD function.\")\n _LOGGER.debug(\"Status: %s\", response.name)\n assert aldb.is_loaded\n _LOGGER.debug(\"ALDB Record Count: %d\", len(aldb))\n assert len(aldb) == 0\n pub.unsubscribe(send_nak_response, SEND_FIRST_TOPIC)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a straight average across each pollster's final poll in the last week prior to election day. Repeats for regions if regional polling is available.
|
def get_regional_and_national_poll_of_polls(self, polls):
election_day = self.now_date
one_week_before = election_day - pd.Timedelta(days=7)
one_month_before = election_day - pd.Timedelta(days=30)
# Use single last poll from each pollster in final week of polling then average out
final_polls = {}
for geo in self.geos:
period_before = one_week_before if geo == "uk" else one_month_before
final_polls[geo] = self.calculate_poll_of_polls(
polls=polls[geo], from_date=period_before, to_date=election_day
)
# Consider MRPs equivalent to a large poll
final_polls[geo].loc[final_polls[geo].method == "MRP", "sample_size"] = (
final_polls[geo].query('method != "MRP"').sample_size.max()
)
# Handle missing sample sizes
mean_sample_size = final_polls[geo].query('method != "MRP"').sample_size.mean()
if pd.isnull(mean_sample_size):
mean_sample_size = 1
final_polls[geo]["sample_size"] = final_polls[geo].sample_size.fillna(mean_sample_size)
# Calculate regional polling
regional_polling_missing = any(final_polls[geo].empty for geo in self.geos)
# Regional polling is missing, just calculate UK-level polling only.
if regional_polling_missing:
# TODO: Check how this affects 2015/2017 models
parties = ["con", "lab", "ld", "ukip", "grn", "chuk", "bxp", "snp"]
# Create new polls dictionary by geo containing simple average across all pollsters
national_polling = final_polls["uk"].mean().loc[parties]
# We don't yet have regional polling in 2015 for Scotland, Wales, NI, London - add as other.
national_polling["other"] = 1 - national_polling.sum()
poll_of_polls = {"uk": national_polling}
# Turn into dataframe
polls_df_list = []
for geo in poll_of_polls:
polls_df_list.append(
pd.DataFrame(
{
"geo": geo,
"party": poll_of_polls[geo].index,
"voteshare": poll_of_polls[geo],
}
).reset_index(drop=True)
)
polls_df = pd.concat(polls_df_list, axis=0)
# We have polling for all regions.
else:
parties = {
# TODO: Add ["chuk", "bxp", "ukip"] to uk, scotland, wales, london
"uk": ["con", "lab", "ld", "grn", "snp"],
"scotland": ["con", "lab", "ld", "snp", "grn"],
"wales": ["con", "lab", "ld", "pc", "grn"],
"ni": ["dup", "uup", "sf", "sdlp", "apni", "grn", "con"],
"london": ["con", "lab", "ld", "grn"],
"england_not_london": ["con", "lab", "ld", "grn"],
}
all_parties = set(x for y in parties.values() for x in y)
poll_of_polls = {}
for geo in self.geos:
sample_size_weights = (
final_polls[geo].sample_size / final_polls[geo].sample_size.sum()
)
weighted_poll_of_polls = (
final_polls[geo][parties[geo]]
.multiply(sample_size_weights, axis=0)
.sum()
.reindex(all_parties, fill_value=0.0)
)
poll_of_polls[geo] = weighted_poll_of_polls
# Estimate polling for England excluding London
# survation_wts from http://survation.com/wp-content/uploads/2017/06/Final-MoS-Post-BBC-Event-Poll-020617SWCH-1c0d4h9.pdf
survation_wts = pd.Series({"scotland": 85, "england": 881, "wales": 67, "ni": 16})
survation_wts["uk"] = survation_wts.sum()
survation_wts["london"] = 137
survation_wts["england_not_london"] = survation_wts.england - survation_wts.london
england_not_london = poll_of_polls["uk"] * survation_wts["uk"]
for geo in ["scotland", "wales", "ni", "london"]:
england_not_london = england_not_london.sub(
poll_of_polls[geo] * survation_wts[geo], fill_value=0.0
)
england_not_london /= survation_wts["england_not_london"]
england_not_london.loc[["pc", "snp"]] = 0.0
poll_of_polls["england_not_london"] = england_not_london
# Fix PC (Plaid Cymru) for UK
poll_of_polls["uk"]["pc"] = (
poll_of_polls["wales"]["pc"] * survation_wts["wales"] / survation_wts["uk"]
)
# Add Other & normalise
for geo in self.geos + ["england_not_london"]:
poll_of_polls[geo]["other"] = max(
1 - poll_of_polls[geo].sum(), 0
) # weighted means can sum > 1
poll_of_polls[geo] = poll_of_polls[geo] / poll_of_polls[geo].sum()
# Export
polls_df_list = []
for geo in poll_of_polls:
polls_df_list.append(
pd.DataFrame(
{
"geo": geo,
"party": poll_of_polls[geo].index,
"voteshare": poll_of_polls[geo],
}
).reset_index(drop=True)
)
polls_df = pd.concat(polls_df_list, axis=0)
return polls_df
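# --- Illustrative sketch (not part of the original method) ---------------------
# For a single region, the weighted poll-of-polls step above reduces to a
# sample-size-weighted mean of each pollster's final poll. The dataframe below is
# hypothetical: the column names ("sample_size" plus one column per party) mirror
# what the method assumes, and the numbers are invented purely for illustration.
import pandas as pd

final_polls_uk = pd.DataFrame(
    {
        "pollster": ["A", "B", "C"],
        "sample_size": [1000, 2000, 1500],
        "con": [0.42, 0.40, 0.43],
        "lab": [0.38, 0.39, 0.36],
        "ld": [0.10, 0.11, 0.12],
    }
)
parties = ["con", "lab", "ld"]

# Weight each pollster's final poll by its share of the total sample size,
# then sum across pollsters to get one voteshare per party.
weights = final_polls_uk["sample_size"] / final_polls_uk["sample_size"].sum()
poll_of_polls = final_polls_uk[parties].multiply(weights, axis=0).sum()

# Bucket the remainder as "other" and renormalise so voteshares sum to 1,
# as the method does for each geo.
poll_of_polls["other"] = max(1 - poll_of_polls.sum(), 0)
poll_of_polls = poll_of_polls / poll_of_polls.sum()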
|
[
"def average_quarterly(table):\n quarterly_pollutant = {'1Q 2013': [], '2Q 2013': [], '3Q 2013': [], '4Q 2013': [],\n '1Q 2014': [], '2Q 2014': [], '3Q 2014': [], '4Q 2014': [],\n '1Q 2015': [], '2Q 2015': [], '3Q 2015': [], '4Q 2015': [],\n '1Q 2016': [], '2Q 2016': [], '3Q 2016': [], '4Q 2016': [],\n '1Q 2017': []}\n temp20131 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20132 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20133 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20134 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20141 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20142 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20143 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20144 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20151 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20152 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20153 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20154 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20161 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20162 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20163 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20164 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n temp20171 = {'PM2.5': [], 'PM10': [], 'SO2': [], 'NO2': [], 'CO': [], 'O3': []}\n\n for i in range(len(table)):\n if table.iloc[i, 1] == 2013 and 1 <= table.iloc[i, 2] <= 3:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20131[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2013 and 4 <= table.iloc[i, 2] <= 6:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20132[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2013 and 7 <= table.iloc[i, 2] <= 9:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20133[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2013 and 10 <= table.iloc[i, 2] <= 12:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20134[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2014 and 1 <= table.iloc[i, 2] <= 3:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20141[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2014 and 4 <= table.iloc[i, 2] <= 6:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20142[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2014 and 7 <= table.iloc[i, 2] <= 9:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20143[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2014 and 10 <= table.iloc[i, 2] <= 12:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20144[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2015 and 1 <= table.iloc[i, 2] <= 3:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20151[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2015 and 4 <= table.iloc[i, 2] <= 6:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n 
temp20152[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2015 and 7 <= table.iloc[i, 2] <= 9:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20153[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2015 and 10 <= table.iloc[i, 2] <= 12:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20154[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2016 and 1 <= table.iloc[i, 2] <= 3:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20161[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2016 and 4 <= table.iloc[i, 2] <= 6:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20162[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2016 and 7 <= table.iloc[i, 2] <= 9:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20163[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2016 and 10 <= table.iloc[i, 2] <= 12:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20164[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n elif table.iloc[i, 1] == 2017 and 1 <= table.iloc[i, 2] <= 3:\n for x in range(5, 11):\n if not pd.isnull(table.iloc[i, x]):\n temp20171[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n\n # for i in range(len(table)):\n # for x in range(5, 11): # pollutant index from original table (PM2.5~O3: 5~10)\n # if table.iloc[i, 1] == 2013 and 1 <= table.iloc[i, 2] <= 3 and not pd.isnull(table.iloc[i, x]):\n # temp20131[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2013 and 4 <= table.iloc[i, 2] <= 6 and not pd.isnull(table.iloc[i, x]):\n # temp20132[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2013 and 7 <= table.iloc[i, 2] <= 9 and not pd.isnull(table.iloc[i, x]):\n # temp20133[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2013 and 10 <= table.iloc[i, 2] <= 12 and not pd.isnull(table.iloc[i, x]):\n # temp20134[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2014 and 1 <= table.iloc[i, 2] <= 3 and not pd.isnull(table.iloc[i, x]):\n # temp20141[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2014 and 4 <= table.iloc[i, 2] <= 6 and not pd.isnull(table.iloc[i, x]):\n # temp20142[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2014 and 7 <= table.iloc[i, 2] <= 9 and not pd.isnull(table.iloc[i, x]):\n # temp20143[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2014 and 10 <= table.iloc[i, 2] <= 12 and not pd.isnull(table.iloc[i, x]):\n # temp20144[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2015 and 1 <= table.iloc[i, 2] <= 3 and not pd.isnull(table.iloc[i, x]):\n # temp20151[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2015 and 4 <= table.iloc[i, 2] <= 6 and not pd.isnull(table.iloc[i, x]):\n # temp20152[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2015 and 7 <= table.iloc[i, 2] <= 9 and not pd.isnull(table.iloc[i, x]):\n # temp20153[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2015 and 10 <= table.iloc[i, 2] <= 12 and not pd.isnull(table.iloc[i, x]):\n # temp20154[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2016 and 1 <= table.iloc[i, 2] <= 3 and not pd.isnull(table.iloc[i, x]):\n # 
temp20161[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2016 and 4 <= table.iloc[i, 2] <= 6 and not pd.isnull(table.iloc[i, x]):\n # temp20162[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2016 and 7 <= table.iloc[i, 2] <= 9 and not pd.isnull(table.iloc[i, x]):\n # temp20163[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2016 and 10 <= table.iloc[i, 2] <= 12 and not pd.isnull(table.iloc[i, x]):\n # temp20164[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n # elif table.iloc[i, 1] == 2017 and 1 <= table.iloc[i, 2] <= 3 and not pd.isnull(table.iloc[i, x]):\n # temp20171[INDEX_POLLUTANT[x]].append(table.iloc[i, x])\n for value in temp20131.values():\n quarterly_pollutant['1Q 2013'].append(sum(value)/len(value))\n for value in temp20132.values():\n quarterly_pollutant['2Q 2013'].append(sum(value)/len(value))\n for value in temp20133.values():\n quarterly_pollutant['3Q 2013'].append(sum(value)/len(value))\n for value in temp20134.values():\n quarterly_pollutant['4Q 2013'].append(sum(value)/len(value))\n for value in temp20141.values():\n quarterly_pollutant['1Q 2014'].append(sum(value)/len(value))\n for value in temp20142.values():\n quarterly_pollutant['2Q 2014'].append(sum(value)/len(value))\n for value in temp20143.values():\n quarterly_pollutant['3Q 2014'].append(sum(value)/len(value))\n for value in temp20144.values():\n quarterly_pollutant['4Q 2014'].append(sum(value)/len(value))\n for value in temp20151.values():\n quarterly_pollutant['1Q 2015'].append(sum(value)/len(value))\n for value in temp20152.values():\n quarterly_pollutant['2Q 2015'].append(sum(value)/len(value))\n for value in temp20153.values():\n quarterly_pollutant['3Q 2015'].append(sum(value)/len(value))\n for value in temp20154.values():\n quarterly_pollutant['4Q 2015'].append(sum(value)/len(value))\n for value in temp20161.values():\n quarterly_pollutant['1Q 2016'].append(sum(value)/len(value))\n for value in temp20162.values():\n quarterly_pollutant['2Q 2016'].append(sum(value)/len(value))\n for value in temp20163.values():\n quarterly_pollutant['3Q 2016'].append(sum(value)/len(value))\n for value in temp20164.values():\n quarterly_pollutant['4Q 2016'].append(sum(value)/len(value))\n for value in temp20171.values():\n quarterly_pollutant['1Q 2017'].append(sum(value)/len(value))\n # quarterly_pollutant['1Q 2013'] = [sum(temp20131) / len(temp20131)]\n # quarterly_pollutant['2Q 2013'] = [sum(temp20132) / len(temp20132)]\n # quarterly_pollutant['3Q 2013'] = [sum(temp20133) / len(temp20133)]\n # quarterly_pollutant['4Q 2013'] = [sum(temp20134) / len(temp20134)]\n # quarterly_pollutant['1Q 2014'] = [sum(temp20141) / len(temp20141)]\n # quarterly_pollutant['2Q 2014'] = [sum(temp20142) / len(temp20142)]\n # quarterly_pollutant['3Q 2014'] = [sum(temp20143) / len(temp20143)]\n # quarterly_pollutant['4Q 2014'] = [sum(temp20144) / len(temp20144)]\n # quarterly_pollutant['1Q 2015'] = [sum(temp20151) / len(temp20151)]\n # quarterly_pollutant['2Q 2015'] = [sum(temp20152) / len(temp20152)]\n # quarterly_pollutant['3Q 2015'] = [sum(temp20153) / len(temp20153)]\n # quarterly_pollutant['4Q 2015'] = [sum(temp20154) / len(temp20154)]\n # quarterly_pollutant['1Q 2016'] = [sum(temp20161) / len(temp20161)]\n # quarterly_pollutant['2Q 2016'] = [sum(temp20162) / len(temp20162)]\n # quarterly_pollutant['3Q 2016'] = [sum(temp20163) / len(temp20163)]\n # quarterly_pollutant['4Q 2016'] = [sum(temp20164) / len(temp20164)]\n # quarterly_pollutant['1Q 2017'] = 
[sum(temp20171) / len(temp20171)]\n return quarterly_pollutant",
"def avg_per_day_of_week(df):\n\n # return the avg usage per day of week per station on each day\n total_daily_per_station = df.groupby(['STATION', 'DATE','DAY_INT', 'DAY_STR',])[\"ENTRIES_DIFF\", \"EXIT_DIFF\"].sum()\n\n # average out the traffic at each station grouped by day of the week\n avg_daily_per_station = total_daily_per_station.groupby([\"STATION\",\"DAY_INT\", \"DAY_STR\"])[\"ENTRIES_DIFF\", \"EXIT_DIFF\"].mean()\n\n # cobine the entries and exits and sort to get the most popuklar days at what stations\n avg_daily_per_station[\"COMBINED\"] = avg_daily_per_station[\"ENTRIES_DIFF\"] + avg_daily_per_station[\"EXIT_DIFF\"]\n avg_daily_per_station.sort_values(by=[\"COMBINED\"], ascending=False)\n\n return avg_daily_per_station",
"def compute_average(self):\n self.report_dict = OrderedDict(sorted(self.report_dict.items(), key=lambda x: x[0], reverse=True))\n date_list = list(self.report_dict.keys())[::-1]\n\n print('Calculating the running total Initiated')\n running_total_dict = dict()\n for date in date_list:\n for border_measure, attributes in self.report_dict[date].items():\n if border_measure not in running_total_dict:\n running_total_dict[border_measure] = [attributes['sum'], 1]\n self.report_dict[date][border_measure]['running_total'] = 0\n else:\n calcul = running_total_dict[border_measure][0] / running_total_dict[border_measure][1]\n self.report_dict[date][border_measure]['running_total'] = math.ceil(calcul) if (float(\n calcul) % 1) >= 0.5 else round(calcul)\n running_total_dict[border_measure][0] += attributes['sum']\n running_total_dict[border_measure][1] += 1",
"def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)",
"def average_edge(pollster_edges, pollster_errors):\n #TODO: Implement this function\n s = []\n ae = 0\n for key in pollster_edges.keys() and pollster_edges.keys():\n s.append(pollster_to_weight(key,pollster_errors)) \n ae = weighted_average(pollster_edges.values(),s)\n return ae",
"def avg_per_day_of_week_and_time(df):\n\n # get the total traffic for each station at each hour of each day; recasting time to standard bins\n total_hourly_per_station = df.groupby(['STATION', 'DATE', 'DAY_INT', 'DAY_STR' , 'TIME'])[\"ENTRIES_DIFF\", \"EXIT_DIFF\"].sum()\n\n # average out the traffic at each station grouped by day of the week and time slot\n avg_hourly_per_station = total_hourly_per_station.groupby([\"STATION\", 'DAY_INT', 'DAY_STR' ,\"TIME\"])[\"ENTRIES_DIFF\", \"EXIT_DIFF\"].mean()\n\n # cobine the entries and exits and sort to get the most popular days and times at each stations\n avg_hourly_per_station[\"COMBINED\"] = avg_hourly_per_station[\"ENTRIES_DIFF\"] + avg_hourly_per_station[\"EXIT_DIFF\"]\n avg_hourly_per_station.sort_values(by=[\"COMBINED\"], ascending=False).head(50)\n\n return avg_hourly_per_station",
"def weighted_average(data_dict=data_dict, n_last=5):\n prediction = np.zeros(len(parties))\n for df in data_dict.values():\n for i in range(min(n_last, len(df))):\n results = df[parties].iloc[i]\n num_people = df['Befragte'].iloc[i]#np.nan_to_num(float(df['Befragte'].iloc[i].replace('.', '').replace('T • ', '').replace('O • ', '')))\n # TODO: Polls from different polling firms have different time spacing. Take this into account.\n prediction += results * num_people\n return _prediction_to_dataframe(_normalize_to_hundred(prediction))",
"def calculate_est_total_crawls_per_week(self):\n est_total_crawls_per_day = self.calculate_est_total_crawls_per_day()\n est_total_crawls_per_week = est_total_crawls_per_day * 7\n \n return float(est_total_crawls_per_week)",
"def compute_average_df(self, df):\n avg = df[['variable', 'week', 'value']].groupby(\n by=['variable', 'week']).mean().reset_index()\n return avg",
"def compute_hw_average(grades):\n return -1",
"def findMovingAverage(date,window,data):\n day = date\n count = 0\n try:\n while count < window: # Going back finding the start date excluding weekends\n try:\n data[day]\n count+=1\n except KeyError:\n pass\n day -= timedelta(days=1)\n maList = []\n count1 = 0\n day += timedelta(days=1)\n while count1 < count:\n try:\n maList.append(data[day])\n count1 += 1\n except KeyError:\n pass\n day += timedelta(days=1)\n\n movingAve = round((sum(maList)/len(maList)),2)\n\n except OverflowError:\n raise OverflowError\n print(\"\\nNot enough previous data to calculate the desired moving average.\")\n print(\"Either change the simulation period or increase the period of the data\")\n print(\"Program terminated\\n\")\n sys.exit(1)\n raise\n\n return movingAve",
"def calc_primary_emissions(nei_data):\n\n def get_primary_poll_for_industry(nei_data, yr):\n \"\"\"Function to get 'primary pollutants' for each industry.\n 'primary pollutants' are defined as the three pollutants that are highest, relative to the \n corss-industry emission values. \n \"\"\"\n # Get mean emissions totals for each pollutant, for each industry.\n needed_cols = ['FAC_INDUSTRY'] + \\\n [col for col in nei_data.columns if '2014' in col]\n mean_emiss = nei_data[needed_cols].groupby('FAC_INDUSTRY').mean()\n\n # Norm. emissions of each pollutant by dividing by the mean across all industries. Primary pollutants\n # for an industry are the those that have the largest emissoins relative to cross-industry means.\n primary_poll = {}\n mean_emiss_quant = mean_emiss.copy()\n for i, row in mean_emiss_quant.iterrows():\n mean_emiss_quant.loc[i,\n :] = mean_emiss_quant.loc[i, :]/mean_emiss.mean()\n primary_poll[i] = {'poll'+str(i+1): name.split(':')[1] for\n i, name in enumerate(list(row.nlargest(3).index))}\n return primary_poll\n\n def calc_mean_emiss_by_industry(nei_data, years=['2008', '2011', '2014']):\n \"\"\"Function for calculating mean emissions of each pollutant, for each industry\"\"\"\n mean_emiss_by_year = {}\n for year in years:\n needed_cols = ['FAC_INDUSTRY'] + \\\n [col for col in nei_data.columns if year in col]\n mean_emiss = nei_data[needed_cols].groupby('FAC_INDUSTRY').mean()\n mean_emiss_by_year[year] = mean_emiss.rename(columns={col: col.split(':')[1] for col\n in mean_emiss.columns})\n return mean_emiss_by_year\n\n def add_primary_poll_cols(row, poll_num, year, primary_poll, mean_emiss):\n \"\"\"Function for calculating emissions for the primary pollutants for a SINGLE facility, normalized by \n the emissions for all facilities in the industry. \n \"\"\"\n poll_name = primary_poll[row['FAC_INDUSTRY']]['poll'+str(poll_num)]\n poll_val = row[':'.join(['total_emissions', poll_name, year])] / \\\n mean_emiss[year].loc[row['FAC_INDUSTRY'], poll_name]\n return poll_val\n\n primary_poll = get_primary_poll_for_industry(nei_data, '2014')\n mean_emiss = calc_mean_emiss_by_industry(\n nei_data, years=['2008', '2011', '2014'])\n for year in ['2008', '2011', '2014']:\n for poll_num in range(1, 4):\n new_col = []\n for _, row in nei_data.iterrows():\n new_col.append(add_primary_poll_cols(\n row, poll_num, year, primary_poll, mean_emiss))\n nei_data['poll'+str(poll_num)+'_'+year] = new_col\n\n return nei_data, primary_poll",
"def evaluate_avg_qvals(self, states, minmax=np.max):\n print 'Evaluating average Q value for held out states...'\n avg_qval = np.mean(minmax(self.qnn.predict(states), axis=1))\n self.evaluation_metric['avg_qvals_per_epoch'].append(avg_qval)\n print avg_qval\n plt.figure(2)\n plt.title('Average Q on ' + self.game_params[\"name\"])\n plt.xlabel('Training Epochs')\n plt.ylabel('Average Action Value (Q)')\n plt.axis([0, self.agent_params['no_epochs'], 0, 6.0])\n print len(self.evaluation_metric['epoch'])\n print len(self.evaluation_metric['avg_qvals_per_epoch'])\n plt.plot(self.evaluation_metric['epoch'], self.evaluation_metric['avg_qvals_per_epoch'], 'k')\n plt.draw()",
"def compute_ghzdays_average(data_set):\n\n daily_average = 0\n count = 0\n for row in data_set:\n # Manual submissions show up with 0 days compute. Can't divide by zero!\n if float(row[4]) > 0:\n daily_average += (float(row[6]) / float(row[4]))\n count += 1\n\n # Average GHz-days per day for all entries.\n daily_average = daily_average / count\n\n return daily_average",
"def _average_sums(self, sums):\n\n # g averaged in each field\n g = sums['g'].copy()\n gpsf = sums['gpsf'].copy()\n\n gsq = sums['gsq'].copy()\n wsq = sums['wsq'].copy()\n\n\n winv = 1.0/sums['wsum']\n wainv = winv[:,newaxis]\n\n g[:,0] *= winv\n g[:,1] *= winv\n gpsf[:,0] *= winv\n gpsf[:,1] *= winv\n\n if 'gpsf_orig' in sums.dtype.names:\n gpsf_orig = sums['gpsf_orig'].copy()\n gpsf_orig[:,0] *= winv\n gpsf_orig[:,1] *= winv\n else:\n gpsf_orig=None\n\n # sum(w*2g*2\n gerrsq_sum = gsq - g**2*wsq\n gerr = sqrt(gerrsq_sum)*wainv\n\n # responses averaged over all fields\n R = zeros(2)\n Rpsf = zeros(2)\n Rsel = zeros(2)\n Rsel_psf = zeros(2)\n\n factor = 1.0/(2.0*self.step)\n\n wsum=sums['wsum'].sum()\n\n g1p = sums['g_1p'][:,0].sum()/wsum\n g1m = sums['g_1m'][:,0].sum()/wsum\n g2p = sums['g_2p'][:,1].sum()/wsum\n g2m = sums['g_2m'][:,1].sum()/wsum\n\n g1p_psf = sums['g_1p_psf'][:,0].sum()/wsum\n g1m_psf = sums['g_1m_psf'][:,0].sum()/wsum\n g2p_psf = sums['g_2p_psf'][:,1].sum()/wsum\n g2m_psf = sums['g_2m_psf'][:,1].sum()/wsum\n\n R[0] = (g1p - g1m)*factor\n R[1] = (g2p - g2m)*factor\n Rpsf[0] = (g1p_psf - g1m_psf)*factor\n Rpsf[1] = (g2p_psf - g2m_psf)*factor\n\n #Rmean = R.mean()\n #R[:] = Rmean\n #R[0] = R[1]\n #R[:] = R[1]\n #R *= (-1)\n #R[:]=1\n\n if self.args.etype:\n Rpsf *= 0.5\n #Rpsf *= (0.01/0.019998000199980003)\n print(\"R:\",R)\n print(\"Rpsf:\",Rpsf)\n\n # selection terms\n if self.do_selection:\n s_g1p = sums['s_g_1p'][:,0].sum()/sums['s_wsum_1p'].sum()\n s_g1m = sums['s_g_1m'][:,0].sum()/sums['s_wsum_1m'].sum()\n s_g2p = sums['s_g_2p'][:,1].sum()/sums['s_wsum_2p'].sum()\n s_g2m = sums['s_g_2m'][:,1].sum()/sums['s_wsum_2m'].sum()\n\n Rsel[0] = (s_g1p - s_g1m)*factor\n Rsel[1] = (s_g2p - s_g2m)*factor\n\n # can be zero if we aren't calculating psf terms (roundified psf)\n tsum=sums['s_wsum_1p_psf'].sum()\n if tsum != 0.0:\n s_g1p_psf = sums['s_g_1p_psf'][:,0].sum()/sums['s_wsum_1p_psf'].sum()\n s_g1m_psf = sums['s_g_1m_psf'][:,0].sum()/sums['s_wsum_1m_psf'].sum()\n s_g2p_psf = sums['s_g_2p_psf'][:,1].sum()/sums['s_wsum_2p_psf'].sum()\n s_g2m_psf = sums['s_g_2m_psf'][:,1].sum()/sums['s_wsum_2m_psf'].sum()\n\n Rsel_psf[0] = (s_g1p_psf - s_g1m_psf)*factor\n Rsel_psf[1] = (s_g2p_psf - s_g2m_psf)*factor\n\n if self.args.etype:\n Rsel_psf *= 0.5\n #Rsel_psf *= (0.01/0.019998000199980003)\n\n print()\n print(\"Rsel:\",Rsel)\n print(\"Rpsf_sel:\",Rsel_psf)\n\n if self.args.R is not None:\n print(\"ignoring calculated R, using input\")\n R = self.Rinput\n Rsel = self.Rselect_input\n print(\"R:\",R)\n print(\"Rsel:\",Rsel)\n\n return g, gerr, gpsf, gpsf_orig, R, Rpsf, Rsel, Rsel_psf",
"def National_Average_Baseline(Data, counties):\n\n National_Average = np.zeros((6, 3))\n Total_Num_Voters = Data.shape[0]\n\n National_Average[0,0] = Data.loc[(Data['Other'] ==1) & (Data['SR.WHI']==1)].shape[0]\n National_Average[0,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.WHI']==1)].shape[0]\n National_Average[0,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.WHI']==1)].shape[0]\n\n National_Average[1,0] = Data.loc[(Data['Other'] ==1) & (Data['SR.BLA']==1)].shape[0]\n National_Average[1,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.BLA']==1)].shape[0]\n National_Average[1,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.BLA']==1)].shape[0]\n\n National_Average[2,0] = Data.loc[(Data['Other'] ==1) & (Data['SR.HIS']==1)].shape[0]\n National_Average[2,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.HIS']==1)].shape[0]\n National_Average[2,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.HIS']==1)].shape[0]\n\n National_Average[3,0] = Data.loc[(Data['Other'] ==1) & (Data['SR.ASI']==1)].shape[0]\n National_Average[3,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.ASI']==1)].shape[0]\n National_Average[3,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.ASI']==1)].shape[0]\n\n National_Average[4,0] = Data.loc[(Data['Other'] ==1) &(Data['SR.NAT']==1)].shape[0]\n National_Average[4,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.NAT']==1)].shape[0]\n National_Average[4,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.NAT']==1)].shape[0]\n\n National_Average[5,0] = Data.loc[(Data['Other'] ==1) & (Data['SR.OTH']==1)].shape[0]\n National_Average[5,1] = Data.loc[(Data['Democrat'] ==1) & (Data['SR.OTH']==1)].shape[0]\n National_Average[5,2] = Data.loc[(Data['Republican'] ==1) & (Data['SR.OTH']==1)].shape[0]\n\n National_Average = National_Average / Total_Num_Voters\n\n # replicate by CV_counties\n replica = {}\n for c in counties:\n replica[c] = National_Average\n\n return replica",
"def averageGHCNDStations( station_list, \\\n variable = 'PRCP', \\\n timeseries_start = dt.date(1850,1,1), \\\n timeseries_end = dt.date.today(), \\\n fixed_station_count = None, \\\n use_low_elevations = False, \\\n ghcnd_base = \"/N/project/obrienta_startup/datasets/ghcn-daily/\", \\\n elevations = None, \\\n adjusted_elevation = None,\n lats = None, \\\n lons = None):\n #********************************\n # create the master set of dates\n #********************************\n # set the time units\n time_units = 'days since {:04}-{:02}-{:02} 00:00:00'.format(timeseries_start.year,timeseries_start.month,timeseries_start.day)\n # get the number of days\n num_days = (timeseries_end - timeseries_start).days\n # create the time vector\n times = arange(num_days)\n # create the dates\n dates = nc.num2date(times,time_units)\n \n # set the dry adiabatic lapse rate\n g_over_cp = -9.8076 / 1003.5 # [K/m]\n \n # check whether we need to + can do elevation adjustment\n if adjusted_elevation is not None and elevations is None:\n raise ValueError(\"`adjusted_elevation` is not None, but `elevations` is; elevations are required to do elevation adjustment.\")\n \n \n # create a master station data array\n num_stations = len(station_list) # number of stations\n data_array = ma.masked_equal(ma.zeros([num_stations,num_days]),0)\n \n # Loop over all stations and read data\n for i in range(num_stations):\n # get the station ID\n station_id = station_list[i]\n \n # set the station path\n station_path = ghcnd_base + 'all/{}.dly'.format(station_id)\n\n # read the station's data\n try:\n data, datetmp = read_ghcnd_variable(station_path,variable,omit_missing_values=True)\n except:\n raise RuntimeError(print(station_path))\n \n # do the adiabatic adjustment if necessary\n if adjusted_elevation is not None:\n lapse_rate_adjustment = g_over_cp * (adjusted_elevation - elevations[i])\n # do the adjustment assume that we need to autoscale the data before adjusting\n data = data/10 + lapse_rate_adjustment\n \n if len(data) != 0:\n # convert date objects to dates\n datetmp = [dt.datetime(d.year,d.month,d.day) for d in datetmp]\n\n # get the indices for the dates in the master array\n inds = array(nc.date2num(datetmp,time_units)).astype(int)\n\n # deal with out-of-bounds indices\n masked_inds = ma.masked_outside(inds, times[0], times[-1])\n\n # remove data outside the requested time\n data = ma.masked_where(ma.getmask(masked_inds),data).compressed()\n inds = masked_inds.compressed()\n\n # insert the data into the array\n data_array[i,inds] = data\n\n # deal with a fixed station count if needed\n if fixed_station_count is not None:\n # initialize the indices that will be unmasked in the final array\n unmasked_i = []\n unmasked_j = []\n # loop over all days\n for j in range(num_days):\n # get the station indices for which there are data on this day\n valid_station_inds = nonzero(logical_not(ma.getmask(data_array)[:,j]))[0]\n\n # if we have enough data for the day\n if len(valid_station_inds) >= fixed_station_count :\n\n if elevations is not None and use_low_elevations:\n # choose from the lowest stations\n isort = argsort(elevations[valid_station_inds])\n for i in valid_station_inds[isort[:fixed_station_count]]:\n unmasked_i.append(i)\n unmasked_j.append(j)\n else:\n # randomly choose a fixed number of stations and append the (station,day) index\n # to the unmasking arrays\n for i in random.choice(valid_station_inds,size=fixed_station_count,replace=False):\n unmasked_i.append(i)\n unmasked_j.append(j)\n\n\n # set the new data 
mask\n fixed_station_mask = ones(data_array.shape,dtype=bool)\n # unmask the chosen points above\n fixed_station_mask[unmasked_i,unmasked_j] = False\n\n # mask the data (only keeping the chosen points above)\n data_array = ma.masked_where(fixed_station_mask,data_array)\n\n # calculate the average and standard deviation\n data_average = ma.average(data_array,axis=0)\n data_std = ma.std(data_array,axis=0)\n\n # calculate the average elevation, latitude and/or longitude\n if elevations is not None:\n # create a version of elevations that is broadcast to the shape of data array\n elevationtmp = elevations[:,newaxis]*ones(shape(data_array))\n # mask the elevation array in the same places where data array is maksed\n elevationtmp = ma.masked_where(ma.getmask(data_array),elevationtmp)\n # get the average elevation at the unmasked locations\n elevation_average = ma.average(elevationtmp,axis=0)\n if lats is not None:\n # create a version of lats that is broadcast to the shape of data array\n lattmp = lats[:,newaxis]*ones(shape(data_array))\n # mask the lat array in the same places where data array is maksed\n lattmp = ma.masked_where(ma.getmask(data_array),lattmp)\n # get the average lat at the unmasked locations\n lat_average = ma.average(lattmp,axis=0)\n if lons is not None:\n # create a version of lons that is broadcast to the shape of data array\n lontmp = lons[:,newaxis]*ones(shape(data_array))\n # mask the lon array in the same places where data array is maksed\n lontmp = ma.masked_where(ma.getmask(data_array),lontmp)\n # get the average lon at the unmasked locations\n lon_average = ma.average(lontmp,axis=0)\n\n \n # calculate the number of stations at each time\n data_count = ma.masked_equal(sum(logical_not(ma.getmask(data_array)),axis=0),0)\n \n data_average = ma.masked_where(ma.getmask(data_count),data_average)\n data_std = ma.masked_where(logical_or(ma.getmask(data_count), data_count < 2),data_std)\n\n return_list = [dates, data_average, data_std, data_count]\n\n # append elevation/lat/lon to the list of returned variables as needed\n if elevations is not None:\n return_list.append(elevation_average)\n if lats is not None:\n return_list.append(lat_average)\n if lons is not None:\n return_list.append(lon_average)\n \n return tuple(return_list)",
"def global_average(fld, gw):\n if \"time\" in fld.dims:\n return wgt_areaave_xr(fld, gw).mean(dim=\"time\")\n else:\n return wgt_areaave_xr(fld, gw)",
"def average_percentage_weeks_saved_weekly_target_met():\n total_weeks_saved = 0\n total_weeks = 0\n\n users = User.objects.filter(is_staff=False, user__is_active=True)\n\n for user in users:\n goals = Goal.objects.filter(user__is_staff=False, user__is_active=True, user=user)\n\n for goal in goals:\n weekly_aggregates = goal.get_weekly_aggregates_to_date()\n\n for week in weekly_aggregates:\n if week >= goal.weekly_target:\n total_weeks_saved += 1\n\n total_weeks += 1\n\n if total_weeks_saved is not 0 and total_weeks is not 0:\n return (total_weeks_saved / total_weeks) * 100\n else:\n return 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Merge national polling, and geo-level polling if available, into the results dataframe.
|
def combine_results_and_polls(results, polls):
# Merge into previous election's results to calculate swing
results = (
results.merge(
right=polls.query('geo == "uk"')[["party", "voteshare"]].rename(
columns={"voteshare": "national_polls"}
),
on="party",
how="outer",
)
.sort_values(["ons_id", "party"])
.reset_index(drop=True)
)
# If we have geo-polls, add those too
if set(polls.geo.unique()) != {"uk"}:
results = (
results.merge(
right=polls.query('geo != "uk"')[["geo", "party", "voteshare"]].rename(
columns={"voteshare": "geo_polls"}
),
on=["geo", "party"],
how="outer",
)
.sort_values(["ons_id", "party"])
.reset_index(drop=True)
)
return results
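# --- Illustrative usage sketch (not part of the original function) -------------
# Calling the function as defined above on two small hypothetical dataframes shows
# what the merges produce: every seat/party row gains a "national_polls" column,
# and rows whose geo has polling also gain a "geo_polls" column. The column names
# follow the function's assumptions; the values are invented for illustration.
import pandas as pd

results = pd.DataFrame(
    {
        "ons_id": ["E001", "E001", "S001", "S001"],
        "geo": ["london", "london", "scotland", "scotland"],
        "party": ["con", "lab", "con", "snp"],
        "voteshare_last": [0.45, 0.40, 0.30, 0.45],
    }
)
polls = pd.DataFrame(
    {
        "geo": ["uk", "uk", "uk", "scotland", "scotland"],
        "party": ["con", "lab", "snp", "con", "snp"],
        "voteshare": [0.42, 0.38, 0.04, 0.25, 0.45],
    }
)

merged = combine_results_and_polls(results, polls)
# London rows keep NaN in "geo_polls" because no London-level polling was supplied.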
|
[
"def load_polling_data(self):\n polls = {}\n for geo in self.geos:\n poll_df = pd.read_csv(\n self.directory / \"raw\" / f\"general_election-{geo}-polls.csv\", parse_dates=[\"to\"]\n ).sort_values(\"to\")\n poll_df.columns = utils.sanitise(\n poll_df.columns,\n replace={\"ulster_unionist_party\": \"uup\", \"sinn_fein\": \"sf\", \"alliance\": \"apni\"},\n )\n polls[geo] = poll_df\n\n return polls",
"def get_regional_and_national_poll_of_polls(self, polls):\n election_day = self.now_date\n one_week_before = election_day - pd.Timedelta(days=7)\n one_month_before = election_day - pd.Timedelta(days=30)\n\n # Use single last poll from each pollster in final week of polling then average out\n final_polls = {}\n for geo in self.geos:\n period_before = one_week_before if geo == \"uk\" else one_month_before\n final_polls[geo] = self.calculate_poll_of_polls(\n polls=polls[geo], from_date=period_before, to_date=election_day\n )\n # Consider MRPs equivalent to a large poll\n final_polls[geo].loc[final_polls[geo].method == \"MRP\", \"sample_size\"] = (\n final_polls[geo].query('method != \"MRP\"').sample_size.max()\n )\n # Handle missing sample sizes\n mean_sample_size = final_polls[geo].query('method != \"MRP\"').sample_size.mean()\n if pd.isnull(mean_sample_size):\n mean_sample_size = 1\n final_polls[geo][\"sample_size\"] = final_polls[geo].sample_size.fillna(mean_sample_size)\n\n # Calculate regional polling\n regional_polling_missing = any(final_polls[geo].empty for geo in self.geos)\n\n # Regional polling is missing, just calculate UK-level polling only.\n if regional_polling_missing:\n # TODO: Check how this affects 2015/2017 models\n parties = [\"con\", \"lab\", \"ld\", \"ukip\", \"grn\", \"chuk\", \"bxp\", \"snp\"]\n # Create new polls dictionary by geo containing simple average across all pollsters\n national_polling = final_polls[\"uk\"].mean().loc[parties]\n # We don't yet have regional polling in 2015 for Scotland, Wales, NI, London - add as other.\n national_polling[\"other\"] = 1 - national_polling.sum()\n poll_of_polls = {\"uk\": national_polling}\n # Turn into dataframe\n polls_df_list = []\n for geo in poll_of_polls:\n polls_df_list.append(\n pd.DataFrame(\n {\n \"geo\": geo,\n \"party\": poll_of_polls[geo].index,\n \"voteshare\": poll_of_polls[geo],\n }\n ).reset_index(drop=True)\n )\n polls_df = pd.concat(polls_df_list, axis=0)\n\n # We have polling for all regions.\n else:\n parties = {\n # TODO: Add [\"chuk\", \"bxp\", \"ukip\"] to uk, scotland, wales, london\n \"uk\": [\"con\", \"lab\", \"ld\", \"grn\", \"snp\"],\n \"scotland\": [\"con\", \"lab\", \"ld\", \"snp\", \"grn\"],\n \"wales\": [\"con\", \"lab\", \"ld\", \"pc\", \"grn\"],\n \"ni\": [\"dup\", \"uup\", \"sf\", \"sdlp\", \"apni\", \"grn\", \"con\"],\n \"london\": [\"con\", \"lab\", \"ld\", \"grn\"],\n \"england_not_london\": [\"con\", \"lab\", \"ld\", \"grn\"],\n }\n all_parties = set(x for y in parties.values() for x in y)\n poll_of_polls = {}\n for geo in self.geos:\n sample_size_weights = (\n final_polls[geo].sample_size / final_polls[geo].sample_size.sum()\n )\n weighted_poll_of_polls = (\n final_polls[geo][parties[geo]]\n .multiply(sample_size_weights, axis=0)\n .sum()\n .reindex(all_parties, fill_value=0.0)\n )\n poll_of_polls[geo] = weighted_poll_of_polls\n\n # Estimate polling for England excluding London\n # survation_wts from http://survation.com/wp-content/uploads/2017/06/Final-MoS-Post-BBC-Event-Poll-020617SWCH-1c0d4h9.pdf\n survation_wts = pd.Series({\"scotland\": 85, \"england\": 881, \"wales\": 67, \"ni\": 16})\n survation_wts[\"uk\"] = survation_wts.sum()\n survation_wts[\"london\"] = 137\n survation_wts[\"england_not_london\"] = survation_wts.england - survation_wts.london\n\n england_not_london = poll_of_polls[\"uk\"] * survation_wts[\"uk\"]\n for geo in [\"scotland\", \"wales\", \"ni\", \"london\"]:\n england_not_london = england_not_london.sub(\n poll_of_polls[geo] * survation_wts[geo], 
fill_value=0.0\n )\n england_not_london /= survation_wts[\"england_not_london\"]\n england_not_london.loc[[\"pc\", \"snp\"]] = 0.0\n poll_of_polls[\"england_not_london\"] = england_not_london\n\n # Fix PC (Plaid Cymru) for UK\n poll_of_polls[\"uk\"][\"pc\"] = (\n poll_of_polls[\"wales\"][\"pc\"] * survation_wts[\"wales\"] / survation_wts[\"uk\"]\n )\n\n # Add Other & normalise\n for geo in self.geos + [\"england_not_london\"]:\n poll_of_polls[geo][\"other\"] = max(\n 1 - poll_of_polls[geo].sum(), 0\n ) # weighted means can sum > 1\n poll_of_polls[geo] = poll_of_polls[geo] / poll_of_polls[geo].sum()\n\n # Export\n polls_df_list = []\n for geo in poll_of_polls:\n polls_df_list.append(\n pd.DataFrame(\n {\n \"geo\": geo,\n \"party\": poll_of_polls[geo].index,\n \"voteshare\": poll_of_polls[geo],\n }\n ).reset_index(drop=True)\n )\n polls_df = pd.concat(polls_df_list, axis=0)\n\n return polls_df",
"def prepare_polls(polls, t_last):\n polls = polls.copy()\n rows = polls.poll_date <= t_last\n polls = polls.loc[rows]\n\n polls.loc[:, 'week_index'] = polls.week - polls.week.min()\n days = pd.date_range(polls.poll_date.min(),\n polls.poll_date.max())\n days = pd.Series(range(len(days)), index=days)\n\n # Integer to represent day for each poll in polls\n poll_2_dayID = days.loc[polls.poll_date]\n\n # Assign an ID to each pollster for each poll in polls\n pollsters = polls.pollster.unique()\n enumerated_pollsters = pd.Series(range(len(pollsters)), index=pollsters)\n poll_2_pollsterID = enumerated_pollsters.loc[polls.pollster]\n\n polls.loc[:, 'date_index'] = poll_2_dayID.values\n polls.loc[:, 'pollster_index'] = poll_2_pollsterID.values\n\n national_poll_inds = polls.state == 'general'\n national_polls = polls.loc[national_poll_inds]\n state_polls = polls.loc[~national_poll_inds]\n\n # Assign an ID to each state\n states = state_polls.state.unique()\n enumerated_states = pd.Series(range(len(states)), index=states)\n poll_2_stateID = enumerated_states.loc[state_polls.state]\n state_polls.loc[:, 'state_index'] = poll_2_stateID.values\n\n return state_polls, national_polls",
"def convert_json_to_dataframes(self):\r\n print(\"Converting Data into Dataframe.\")\r\n json_data = self.response.json()\r\n complete_df = pandas.DataFrame()\r\n for data in json_data['value']['timeSeries']:\r\n location = data['sourceInfo']['siteCode'][0]['value']\r\n metric = data['variable']['variableName'].split(',')[0].lower()\r\n units= data['variable']['unit']['unitCode'].lower()\r\n statistic = data['variable']['options']['option'][0]['value'].lower()\r\n\r\n temp_data_load=pandas.DataFrame(data['values'][0]['value'])\r\n try:\r\n temp_data_load.drop(columns='qualifiers', inplace=True)\r\n except:\r\n pass\r\n column_name='{stat} {metric} ({units})'.format(stat=statistic\r\n ,metric=metric\r\n ,units=units)\r\n if 'value' not in temp_data_load.columns:\r\n pass\r\n else:\r\n temp_data_load.rename(columns={'dateTime':'date'\r\n ,'value': column_name}\r\n ,inplace=True)\r\n temp_data_load['date'] = pandas.to_datetime(temp_data_load['date'])\r\n temp_data_load['location'] = location\r\n\r\n #Check if location already exists in df\r\n if complete_df.empty:\r\n complete_df = temp_data_load\r\n elif ((complete_df['location'].isin([location]).any()) &\r\n (column_name in complete_df.columns)):\r\n complete_df.update(temp_data_load)\r\n elif (complete_df['location'].isin([location]).any()):\r\n complete_df=complete_df.merge(temp_data_load, how='outer', on=['location','date'])\r\n else:\r\n complete_df=complete_df.append(temp_data_load,sort=True)\r\n\r\n self.water_dataframe = complete_df",
"def _merge_results(self):\n\t\tnew_dict = {}\n\t\tfor it in self.data:\n\t\t\tnew_dict.update(it)\n\n\t\tfor k,v in new_dict.items():\n\t\t\tfor kk, vv in v.time_stamps.items():\n\t\t\t\tfor kkk,vvv in vv.items():\n\t\t\t\t\tnew_dict[k].time_stamps[kk][kkk] = vvv - self.HB_config['time_ref']\n\n\t\tself.data = new_dict",
"def get_apple_mobility_data(update=True):\n\n # Data source\n url = 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2024HotfixDev12/v3/en-us/applemobilitytrends-2021-01-10.csv'\n abs_dir = os.path.dirname(__file__)\n\n if update:\n # download raw data\n df_raw = pd.read_csv(url)\n # save a copy in the raw folder\n rel_dir = os.path.join(abs_dir, '../../../data/covid19_DTM/raw/mobility/apple/apple_mobility_trends.csv')\n df_raw.to_csv(rel_dir, index=False)\n else:\n df_raw = pd.read_csv(os.path.join(abs_dir,\n '../../../data/covid19_DTM/raw/mobility/apple/apple_mobility_trends.csv'))\n\n # Start by extracting the overall data for Belgium\n df = df_raw[df_raw['region']=='Belgium']\n columns = pd.to_datetime(df_raw.columns[6:].values)\n data = df.values[:,6:]\n arrays = [\n np.array([\"Belgium\",\"Belgium\",\"Belgium\"]),\n np.array([\"driving\", \"transit\", \"walking\"]),\n ]\n df_apple = pd.DataFrame(data,index=arrays,columns=columns)\n\n # Loop over all available provinces and transit types\n df = df_raw[((df_raw['country']=='Belgium')&(df_raw['geo_type']=='sub-region'))]\n for province in set(df['region']):\n for mobility_type in df['transportation_type'][df['region'] == province]:\n data = df[((df['transportation_type']==mobility_type)&(df['region']==province))].values[:,6:]\n df_entry = pd.DataFrame(data,index=[[province],[mobility_type]],columns=columns)\n df_apple=df_apple.append(df_entry)\n\n # Re-define baseline and cut away data before February 15th 2020:\n for spatial_unit,mobility_type in df_apple.index:\n base=np.mean(df_apple.loc[(spatial_unit,mobility_type)][pd.to_datetime('2020-02-15'):pd.to_datetime('2020-03-10')])\n df_apple.loc[(spatial_unit,mobility_type)] = -(100-df_apple.loc[(spatial_unit,mobility_type)]/base*100)\n df_apple.loc[(spatial_unit,mobility_type)] = df_apple.loc[(spatial_unit,mobility_type)][pd.to_datetime('2020-02-15'):]\n\n return df_apple",
"async def fetch_full_data(self, bases: List[str], quotes: List[str]):\n df = pd.DataFrame(\n columns = [\n 'timestamp',\n 'symbol',\n 'toSymbol',\n 'price',\n 'lastVolume',\n 'lastVolumeTo',\n 'volumeDay',\n 'volumeDayTo',\n 'volume24Hour',\n 'volume24HourTo',\n 'openDay',\n 'highDay',\n 'lowDay',\n 'open24Hour',\n 'high24Hour',\n 'change24Hour',\n 'changePct24Hour',\n 'changeDay',\n 'changePctDay',\n 'supply',\n 'mktCap',\n 'totalVolume24Hr',\n 'totalVolume24HrTo',\n ])\n self.update()\n await self.throttle()\n response = await aget(self.urlBase + self.endpoints['fullData'], headers = self.authHeader,\n params = {\n 'fsyms': ','.join(bases)[:-1],\n 'tosyms': ','.join(quotes)[:-1],\n })\n self.lastCall = self.milliseconds()\n response.raise_for_status()\n js = response.json()\n\n for base in js['RAW']:\n for quote in js['RAW'][base]:\n d = js['RAW'][base][quote]\n suffix = d['FROMSYMBOL']\n df = df.append(\n {\n 'timestamp': self.lastCall,\n 'symbol': d['FROMSYMBOL'],\n 'toSymbol': d['TOSYMBOL'],\n 'price': d['PRICE'],\n 'lastVolume': d['LASTVOLUME'],\n 'lastVolumeTo': d['LASTVOLUMETO'],\n 'volumeDay': d['VOLUMEDAY'],\n 'volumeDayTo': d['VOLUMEDAYTO'],\n 'volume24Hour': d['VOLUME24HOUR'],\n 'volume24HourTo': d['VOLUME24HOURTO'],\n 'openDay': d['OPENDAY'],\n 'highDay': d['HIGHDAY'],\n 'lowDay': d['LOWDAY'],\n 'open24Hour': d['OPEN24HOUR'],\n 'high24Hour': d['HIGH24HOUR'],\n 'change24Hour': d['CHANGE24HOUR'],\n 'changePct24Hour': d['CHANGEPCT24HOUR'],\n 'changeDay': d['CHANGEDAY'],\n 'changePctDay': d['CHANGEPCTDAY'],\n 'supply': d['SUPPLY'],\n 'mktCap': d['MKTCAP'],\n 'totalVolume24Hr': d['TOTALVOLUME24H'],\n 'totalVolume24HrTo': d['TOTALVOLUME24HTO'],\n }, ignore_index = True)\n return df.set_index('timestamp')",
"def get_results(poll):\n\n assert poll is not None, \"Invalid poll: None\"\n\n if not poll['closed']:\n return None\n\n results = {}\n\n # Get cached results\n results_db = get_entries('results', 'poll', poll['uid'])\n\n # If no cache, compute the results and store them\n if len(results_db) == 0:\n ballots = get_entries('ballots', 'poll', poll['uid'])\n\n # If no ballots provide, no results\n if len(ballots) == 0:\n return None\n\n # Number of ballots cast\n ballots_count = len(ballots) / len(poll['choices'])\n\n # Build data structures\n choices = {}\n results = {}\n for choice in poll['choices']:\n choices[choice['id']] = {'votes': [0] * 7}\n results[choice['id']] = {'ballots': ballots_count}\n\n # Count the number of vote for each grade for each choice\n for ballot in ballots:\n choices[ballot['choice']]['votes'][ballot['grade']] += 1\n\n # Store the count in percentage for display purposes\n for choice in choices:\n results[choice]['percentages'] = []\n for vote in choices[choice]['votes']:\n results[choice]['percentages'].append(100 * vote / ballots_count)\n\n # Transfrom the number of vote to a list of votes\n for _, choice in choices.items():\n votes = []\n for i in range(len(choice['votes'])):\n votes.extend([i] * choice['votes'][i])\n choice['votes'] = votes\n\n # Compute the median, the number of better and worse vote.\n for _, choice in choices.items():\n choice_compute(choice)\n\n # Apply the grade for each choice\n for choice in choices:\n if choices[choice]['median'] == 0:\n results[choice]['grade'] = \"To reject\"\n elif choices[choice]['median'] == 1:\n results[choice]['grade'] = \"Poor\"\n elif choices[choice]['median'] == 2:\n results[choice]['grade'] = \"Acceptable\"\n elif choices[choice]['median'] == 3:\n results[choice]['grade'] = \"Fair\"\n elif choices[choice]['median'] == 4:\n results[choice]['grade'] = \"Good\"\n elif choices[choice]['median'] == 5:\n results[choice]['grade'] = \"Very Good\"\n elif choices[choice]['median'] == 6:\n results[choice]['grade'] = \"Excellent\"\n\n if choices[choice]['better'] > choices[choice]['worse']:\n results[choice]['grade'] += \"+\"\n else:\n results[choice]['grade'] += \"-\"\n\n # Sort the vote to etablish the ranks\n ranks = rank_choices(choices, ballots_count)\n for choice in results:\n results[choice]['rank'] = ranks[choice]\n\n\n # Store the results\n results_db = []\n for choice, result in results.items():\n results_db.append((poll['uid'], choice, \";\".join([str(rank) for rank in result['rank']]) if isinstance(result['rank'], list) else str(result['rank']), result['grade'], \";\".join([str(percentage) for percentage in result['percentages']]), result['ballots']))\n\n get_db().executemany(\"INSERT INTO results (poll, choice, rank, grade, percentages, ballots) VALUES (?, ?, ?, ?, ?, ?)\", results_db)\n\n # Destroy the ballots\n get_db().execute('DELETE FROM ballots WHERE poll = ?', [poll['uid']])\n\n else:\n for result in results_db:\n results[result['choice']] = {'rank' : int(result['rank']) if ';' not in result['rank'] else [int(vote) for vote in result['rank'].split(';')], 'grade': result['grade'], 'percentages': [int(percentage) for percentage in result['percentages'].split(';')], 'ballots': result['ballots']}\n\n return results",
"def extResults(self, level):\n self.extData = pd.merge(left=self.results(), right=self._metaData(level), on=level)\n cols = [x for x in self.extData.columns if x != self.column]\n cols = cols + [self.column]\n self.extData = self.extData[cols]\n pd.set_option('display.max_colwidth', -1)\n return self.extData",
"def join_cdr_grid(cdr, grid):\n \n\n cdr.columns = [\"cellId\", \"time\", \"countryCode\", \"smsIn\", \"smsOut\",\n \"callIn\", \"callOut\", \"internet\"]\n norm_grid = json_normalize(grid['features'])\n\n agg_df = cdr[cdr['countryCode'] != 0].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n\n\n joined_df = pd.merge(left=norm_grid, right=agg_df, how='left',\n left_on='properties.cellId', right_on='cellId')\n \n return joined_df",
"def combine_time_and_html_log_data(real_time_drilling_data, daily_log_data):\r\n\r\n #Parse datetimes of TIME column into two seperate columns\r\n time = 'TIME'\r\n # if time not in real_time_drilling_data:\r\n # time = \"Time\"\r\n daily_log_data = daily_log_data.rename(columns={\"Start time\": \"Start_time\", \"End time\": \"End_time\"})\r\n\r\n real_time_drilling_data['original_time'] = real_time_drilling_data['TIME']\r\n real_time_drilling_data.astype({'TIME':'datetime64[ns]'})\r\n real_time_drilling_data['TIME'] = pd.to_datetime(real_time_drilling_data['TIME']) + timedelta(hours=8)\r\n\r\n real_time_drilling_data['log_date'] = real_time_drilling_data['TIME'].map(lambda x: x.date())\r\n real_time_drilling_data['current_time'] = real_time_drilling_data['TIME'].map(lambda x: x.strftime(\"%H:%M:%S\"))\r\n real_time_drilling_data['wellbore_name'] = real_time_drilling_data['nameWellbore'].map(lambda x: (x[:8]).replace(\"/\", \"_\"))\r\n\r\n q = \"\"\"\r\n SELECT *\r\n FROM real_time_drilling_data rtdd\r\n INNER JOIN daily_log_data dld \r\n ON (rtdd.current_time BETWEEN\r\n dld.Start_time AND dld.End_time)\r\n AND (rtdd.log_date == dld.log_date)\r\n AND (rtdd.wellbore_name == dld.wellbore_name)\r\n \"\"\"\r\n\r\n print(\"Processing SQL querry\")\r\n joined_df = sqldf(q)\r\n print(\"Finished join\")\r\n return joined_df",
"def process(self):\n processed_directory = self.directory / \"processed\"\n os.makedirs(processed_directory, exist_ok=True) # create directory if it doesn't exist\n\n # Import general election results & polling data\n results_dict = self.load_results_data()\n polls_full = self.load_polling_data()\n\n # Calculate poll of polls\n polls = self.get_regional_and_national_poll_of_polls(polls=polls_full)\n\n # Merge polls into previous election results dataframe\n results_dict[self.last] = self.combine_results_and_polls(\n results=results_dict[self.last], polls=polls\n )\n\n # Add into previous election results: national voteshare, national swing (vs current polling),\n # national swing forecast (per party per seat) and national swing forecast winner (per seat).\n results_dict[self.last] = self.calculate_national_swing(results_dict[self.last])\n\n # If we have geo-polling for previous election, also calculate a geo-level swing forecast.\n if \"geo_polls\" in results_dict[self.last].columns:\n results_dict[self.last] = self.calculate_geo_swing(results_dict[self.last])\n\n # Create ML-ready dataframe and export\n model_df = self.export_model_ready_dataframe(results_dict=results_dict)\n\n print(f\"Exporting {self.last}->{self.now} model dataset to {processed_directory.resolve()}\")\n model_df.to_csv(\n processed_directory / f\"general_election-uk-{self.now}-model.csv\", index=False\n )",
"def fetch(api, site, start, end, *, nrel_pvdaq_api_key):\n try:\n site_extra_params = common.decode_extra_parameters(site)\n except ValueError:\n return pd.DataFrame()\n try:\n years = list(range(start.year, end.year + 1))\n obs_df = pvdaq.get_pvdaq_data(\n site_extra_params['network_api_id'], years,\n api_key=nrel_pvdaq_api_key)\n except Exception:\n # Not yet sure what kind of errors we might hit in production\n logger.warning(f'Could not retrieve data for site {site.name}'\n f' between {start} and {end}.')\n return pd.DataFrame()\n obs_df = _watts_to_mw(obs_df)\n try:\n obs_df = obs_df.tz_localize(site.timezone)\n except NonExistentTimeError as e:\n logger.warning(f'Could not localize data for site {site.name} '\n f'due to DST issue: {e}')\n return pd.DataFrame()\n return obs_df",
"def join_cdr_grid_by_time(cdr, grid):\n cdrr = cdr.copy()\n cdrr.columns = [\"cellId\", \"time\", \"countryCode\", \"smsIn\", \"smsOut\",\n \"callIn\", \"callOut\", \"internet\"]\n norm_grid = json_normalize(grid['features'])\n\n # change miliseconds to datetime\n cdrr.index = pd.to_datetime(cdrr['time'],unit='ms',utc=True)\n cdrr.index = cdrr.index.tz_localize('UTC').tz_convert('Europe/Rome')\n cdrr['date'] = cdrr.index\n cdrr['time_hour'] = cdrr.index.hour\n cdrr['weekday'] = cdrr.index.weekday\n\n # returning Booleans\n cdrr['morning_weekday'] = (cdrr['time_hour'] >= 0) & (cdrr['time_hour'] < 8) & (cdrr['weekday'] != 6) & (cdrr['weekday'] != 5)\n cdrr['day_weekday'] = (cdrr['time_hour'] >= 8) & (cdrr['time_hour'] < 16) & (cdrr['weekday'] != 6) & (cdrr['weekday'] != 5)\n cdrr['evening_weekday'] = (cdrr['time_hour'] >= 16) & (cdrr['time_hour'] < 24) & (cdrr['weekday'] != 6) & (cdrr['weekday'] != 5)\n cdrr['morning_weekend'] = (cdrr['time_hour'] >= 0) & (cdrr['time_hour'] < 8) & ((cdrr['weekday'] == 6) | (cdrr['weekday'] == 5))\n cdrr['day_weekend'] = (cdrr['time_hour'] >= 8) & (cdrr['time_hour'] < 16) & ((cdrr['weekday'] == 6) | (cdrr['weekday'] == 5))\n cdrr['evening_weekend'] = (cdrr['time_hour'] >= 16) & (cdrr['time_hour'] < 24) & ((cdrr['weekday'] == 6) | (cdrr['weekday'] == 5))\n\n #aggregations for each time/day slots\n morning_weekday = cdrr[(cdrr['countryCode'] != 0) & (cdrr['morning_weekday'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n day_weekday = cdrr[(cdrr['countryCode'] != 0) & (cdrr['day_weekday'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n evening_weekday = cdrr[(cdrr['countryCode'] != 0) & (cdrr['evening_weekday'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n morning_weekend = cdrr[(cdrr['countryCode'] != 0) & (cdrr['morning_weekend'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n day_weekend = cdrr[(cdrr['countryCode'] != 0) & (cdrr['day_weekend'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n evening_weekend = cdrr[(cdrr['countryCode'] != 0) & (cdrr['evening_weekend'] == True)].groupby('cellId').agg({\n 'cellId': 'first',\n 'time': 'first',\n 'smsIn': 'sum',\n 'smsOut': 'sum',\n 'callIn': 'sum',\n 'callOut': 'sum',\n 'internet': 'sum'\n })\n\n # merge with grid\n m_weekday = pd.merge(left=norm_grid, right=morning_weekday, how='left', left_on='properties.cellId', right_on='cellId')\n d_weekday = pd.merge(left=norm_grid, right=day_weekday, how='left', left_on='properties.cellId', right_on='cellId')\n e_weekday = pd.merge(left=norm_grid, right=evening_weekday, how='left', left_on='properties.cellId', right_on='cellId')\n m_weekend = pd.merge(left=norm_grid, right=morning_weekend, how='left', left_on='properties.cellId', right_on='cellId')\n d_weekend = pd.merge(left=norm_grid, right=day_weekend, how='left', left_on='properties.cellId', right_on='cellId')\n e_weekend = pd.merge(left=norm_grid, right=evening_weekend, how='left', 
left_on='properties.cellId', right_on='cellId')\n\n #filling NaN values with 0's\n m_weekday.fillna(0, inplace=True)\n d_weekday.fillna(0, inplace=True)\n e_weekday.fillna(0, inplace=True)\n m_weekend.fillna(0, inplace=True)\n d_weekend.fillna(0, inplace=True)\n e_weekend.fillna(0, inplace=True)\n\n return m_weekday, d_weekday, e_weekday, m_weekend, d_weekend, e_weekend",
"def rail_data(station_code):\n current_date = datetime.now().strftime('%d/%m/%Y ')\n station_url = \"http://api.irishrail.ie/realtime/realtime.asmx/getStationDataByCodeXML?StationCode=\" + station_code\n response = requests.get(station_url)\n getStationDataXML = json.dumps(xmltodict.parse(response.text))\n getStationDataXML_json = json.loads(getStationDataXML)\n if len(getStationDataXML_json[\"ArrayOfObjStationData\"]) < 4:\n # no information returned from rail api\n # return placeholder object to avoid error\n return placeholder\n else:\n station_data_json = getStationDataXML_json[\"ArrayOfObjStationData\"][\"objStationData\"]\n if type(station_data_json) is dict:\n station_data_json = [\n getStationDataXML_json[\"ArrayOfObjStationData\"][\"objStationData\"]]\n station_data_df = pd.DataFrame(station_data_json)\n final_df = station_data_df[['Origintime', 'Destination']].copy()\n final_df[\"Line\"] = station_data_df.Origin + \\\n \" \" + station_data_df.Direction\n final_df.rename(columns={'Origintime': 'idA',\n 'Line': 'idB',\n 'Destination': 'targetA'},\n inplace=True)\n final_df.idA = current_date + final_df.idA\n arrival_time = station_data_df[\"Duein\"]\n final_df = final_df.join(arrival_time)\n final_df.rename(columns={'Duein': 'targetB'},\n inplace=True)\n final_df.targetB = final_df[\"targetB\"].astype(int)\n final_df = final_df.sort_values(by='targetB')\n final_df.targetB = final_df[\"targetB\"].astype(str)\n df = final_df\n result = df.to_json(orient=\"records\")\n parsed = json.loads(result)\n return {'results': parsed}",
"def get_hdobs_summary(self):\n\n data = {}\n for mission_id in self.missions.keys():\n sub_df = self.missions[mission_id]['hdobs'].tail(20)\n if pd.to_datetime(sub_df['time'].values[-1]) < dt.utcnow() - timedelta(hours=1):\n continue\n\n # Parse data\n array = [val for i, val in enumerate(\n sub_df['sfmr']) if 'sfmr' not in sub_df['flag'].values[i]]\n max_sfmr = np.nanmax(array) if not all_nan(array) else np.nan\n\n array = [val for i, val in enumerate(\n sub_df['p_sfc']) if 'p_sfc' not in sub_df['flag'].values[i]]\n min_p_sfc = np.nanmin(array) if not all_nan(array) else np.nan\n\n array = [val for i, val in enumerate(\n sub_df['wspd']) if 'wspd' not in sub_df['flag'].values[i]]\n max_wspd = np.nanmax(array) if not all_nan(array) else np.nan\n\n array = [val for i, val in enumerate(\n sub_df['temp']) if 'temp' not in sub_df['flag'].values[i]]\n max_temp = np.nanmax(array) if not all_nan(array) else np.nan\n\n array = [val for i, val in enumerate(\n sub_df['dwpt']) if 'dwpt' not in sub_df['flag'].values[i]]\n max_dwpt = np.nanmax(array) if not all_nan(array) else np.nan\n\n data[mission_id] = {\n 'min_mslp': min_p_sfc,\n 'max_sfmr': max_sfmr,\n 'max_wspd': max_wspd,\n 'max_temp': max_temp,\n 'max_dwpt': max_dwpt,\n 'start_time': pd.to_datetime(sub_df['time'].values[0]),\n 'end_time': pd.to_datetime(sub_df['time'].values[-1]),\n }\n\n return data",
"def load_prices_and_postcode(since=0):\n pp = load_pricepaid(since)\n pc = load_postcode()\n complete = pd.merge(pc,pp,on=\"postcode\",how=\"inner\")\n return complete",
"def create_data():\n\n filtered_uk = __filter_uk_data()\n filtered_il = __filter_il_data()\n merged_df = __merge_df(df_uk=filtered_uk, df_il=filtered_il)\n\n return merged_df",
"def combine_and_merge_preprocessed_results(self):\n master_pp_temp = []\n for result in self.preprocess_results.values():\n for row in result:\n master_pp_temp.append(row)\n\n master_pp_df = pd.DataFrame(master_pp_temp)\n\n # Memory Optimization: Reduce memory by specifying column types\n # this will downcast the columns automatically to the smallest possible datatype\n # without losing any information.\n floats = master_pp_df.select_dtypes(\n include=[\"float64\"]).columns.tolist()\n master_pp_df[floats] = master_pp_df[floats].apply(\n pd.to_numeric, downcast=\"float\"\n )\n ints = master_pp_df.select_dtypes(include=[\"int64\"]).columns.tolist()\n master_pp_df[ints] = master_pp_df[ints].apply(\n pd.to_numeric, downcast=\"integer\")\n\n # Removing timestamp as it's not required for model training\n master_pp_df.drop([\"timestamp\"], axis=1, inplace=True)\n master_pp_df.reset_index(drop=True, inplace=True)\n self.master_preprocessed_result = master_pp_df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assumes df has `ons_id` and `party` columns.
|
def calculate_winners(df, voteshare_col):
return (
df.sort_values(voteshare_col, ascending=False)
.groupby("ons_id")
.head(1)[["ons_id", "party"]]
.set_index("ons_id")
.party
)
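# A minimal usage sketch, assuming a long-format results dataframe with one row per
# (ons_id, party) pair and a numeric voteshare column; the dataframe, column name and
# values below are hypothetical, chosen only to illustrate the call.
import pandas as pd

example_results = pd.DataFrame({
    "ons_id": ["E001", "E001", "E002", "E002"],
    "party": ["con", "lab", "con", "lab"],
    "voteshare": [0.45, 0.40, 0.30, 0.55],
})
winners = calculate_winners(example_results, "voteshare")
# winners is a Series indexed by ons_id: E001 -> "con", E002 -> "lab"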
|
[
"def add_ons_column(df,dataset):\n x = df['postcode'].values.tolist()\n ons_results = get_data(x,dataset) \n ons_df = pd.DataFrame(ons_results).drop_duplicates()\n return pd.merge(df,ons_df,on=\"postcode\",how=\"inner\")",
"def get_full_vote_info(votes_df):\n vote_counts = votes_df.groupby(['voteId', 'party', 'leg_vote']).apply(len).reset_index()\n vote_counts.rename(columns={0: 'count'}, inplace=True)\n\n\n out = votes_df[['voteId']].drop_duplicates()\n cols = ['voteId']\n\n # Rep, Aye\n out = join_new_data(out, vote_counts, 'Republican', 'AYE', 'rep_aye_count', cols)\n # Rep, Noe\n out = join_new_data(out, vote_counts, 'Republican', 'NOE', 'rep_noe_count', cols)\n # Rep, Abs\n out = join_new_data(out, vote_counts, 'Republican', 'ABS', 'rep_abs_count', cols)\n\n # Dem, Aye\n out = join_new_data(out, vote_counts, 'Democrat', 'AYE', 'dem_aye_count', cols)\n # Dem, Noe\n out = join_new_data(out, vote_counts, 'Democrat', 'NOE', 'dem_noe_count', cols)\n # Dem, Abs\n out = join_new_data(out, vote_counts, 'Democrat', 'ABS', 'dem_abs_count', cols)\n\n out = out.fillna(0)\n\n out['d_alignment'] = out.apply(lambda x: 'For'\n if x.dem_aye_count > x.dem_noe_count\n else 'Against',\n axis=1)\n out['r_alignment'] = out.apply(lambda x: 'For'\n if x.rep_aye_count > x.rep_noe_count\n else 'Against',\n axis=1)\n\n out['d_min'] = out.apply(lambda x: min(x.dem_aye_count, x.dem_noe_count), axis=1)\n out['r_min'] = out.apply(lambda x: min(x.rep_aye_count, x.rep_noe_count), axis=1)\n\n out['d_maj'] = out.apply(lambda x: max(x.dem_aye_count, x.dem_noe_count), axis=1)\n out['r_maj'] = out.apply(lambda x: max(x.rep_aye_count, x.rep_noe_count), axis=1)\n\n out['d_total'] = out['dem_aye_count'] + out['dem_noe_count']\n out['r_total'] = out['rep_aye_count'] + out['rep_noe_count']\n\n out = votes_df.merge(out, on='voteId')\n\n return out",
"def only_one_party(self):\n self.df = self.df[(self.df['party']=='ALL_DEM') | (self.df['party']=='ALL_REP')]\n self.df['party'] = self.df['party'].apply(lambda p: 0 if p=='ALL_DEM' else 1)",
"def generateLineageDf(df: pd.DataFrame):\n ids = df['NCBI_Taxon_ID']\n return lineage_df[lineage_df.tax_id.isin(ids)]",
"def plot_sentiment_for_party(df, party, output):\n df = df.filter(df['party']==party)\n df = df.groupBy(df['date'], df['sentiment_category']).count().orderBy(\"date\")\n df.coalesce(1).write.option(\"header\", \"true\").mode(\"overwrite\").csv(output+'/'+party+'-for-dash')",
"def oasis_occurence(df_events, fp_occurrence):\n occurence = df_events.copy()\n occurence = occurence.drop(['ID_event'], axis=1) \n df_occurence = pd.DataFrame(occurence)\n #column names as in oeasis\n df_occurence['period_no'] = df_occurence.Year\n df_occurence['occ_year'] = df_occurence.Year\n df_occurence['occ_month'] = df_occurence.Month\n df_occurence['occ_day'] = df_occurence.Month\n #select columns\n columns_selected = ['event_id', 'period_no', 'occ_year', 'occ_month', 'occ_day' ]\n df_osasis_occurence = df_occurence[columns_selected]\n #save as csv\n df_osasis_occurence.to_csv(fp_occurrence, index=False)\n return df_osasis_occurence",
"def iob_relation_sub_df(token_df, tuple_var):\r\n id_var = [(\"id_features\", \"token_par_id\")]\r\n relation_df = token_df[id_var + [tuple_var[0], tuple_var[1]]]\r\n relation_df = relation_df[relation_df[tuple_var[1]]!=\"_\"]\r\n relation_df[[tuple_var[0], tuple_var[1]]] = relation_df[[tuple_var[0], tuple_var[1]]].applymap(lambda x: x.split(\"|\"))\r\n \r\n def get_tuple(row, list_var): \r\n return list(zip(*[row[x] for x in list_var]))#tuple([row[x] for x in list_var])\r\n \r\n relation_df[(\"id_features\", \"list_tuple\")] = relation_df.apply(lambda x : get_tuple(x, [tuple_var[0], tuple_var[1]]), axis = 1) \r\n relation_df_long = unlist_df(relation_df, (\"id_features\", \"list_tuple\"), id_var)\r\n \r\n relation_df_long = relation_df_long.rename(columns = {\"token_par_id\": \"token_par_id_target\"})\r\n relation_df_long[(\"id_features\",\"id_relation\")] = relation_df_long[(\"id_features\", \"list_tuple\")].map(lambda x: x[1])\r\n relation_df_long[(\"id_features\",\"id_relation\")] = relation_df_long[(\"id_features\", \"id_relation\")].map(lambda x: x if \"[\" in x else x + '[0_0]')\r\n relation_df_long[(\"id_features\",\"token_par_id_origin\")] = relation_df_long[(\"id_features\", \"list_tuple\")].map(lambda x: x[1].split(\"[\")[0])\r\n relation_df_long[(\"id_features\",\"span_id_origin\")] = relation_df_long[(\"id_features\", \"id_relation\")].map(lambda x: x.split(\"[\")[1].split(\"_\")[0])\r\n relation_df_long[(\"id_features\",\"span_id_target\")] = relation_df_long[(\"id_features\", \"id_relation\")].map(lambda x: x.split(\"[\")[1].split(\"_\")[1][:-1])\r\n relation_df_long[(\"id_features\",\"type_relation\")] = relation_df_long[(\"id_features\", \"list_tuple\")].map(lambda x: x[0])\r\n \r\n relation_df_long_grouped = relation_df_long\\\r\n .groupby([(\"id_features\",\"token_par_id_origin\"), (\"id_features\", \"span_id_origin\")])\\\r\n .agg({(\"id_features\", \"type_relation\"): list, \r\n (\"id_features\",\"token_par_id_target\"): list, \r\n (\"id_features\",\"span_id_target\"): list}).reset_index()\r\n \r\n def concat_span_id_relation(row): \r\n return [row[(\"id_features\",\"span_id_origin\")] + \"_\" + x for x in row[(\"id_features\",\"span_id_target\")]]\r\n \r\n relation_df_long_grouped[(\"id_features\", \"span_id_relation\")] = \\\r\n relation_df_long_grouped.apply(concat_span_id_relation, axis = 1)\r\n \r\n \r\n relation_df_long_grouped = relation_df_long_grouped\\\r\n .rename(columns = {\"type_relation\": tuple_var[0][1], \r\n \"token_par_id_target\": tuple_var[1][1], \r\n \"span_id_relation\": tuple_var[0][1] + \"span_id\", \r\n \"token_par_id_origin\": \"token_par_id\"})\r\n \r\n relation_df_long_grouped = relation_df_long_grouped\\\r\n .drop([(\"id_features\", \"span_id_origin\"), (\"id_features\",\"span_id_target\")], axis = 1) \r\n return relation_df_long_grouped",
"def get_full_votes(elec_df, turnout_df, extras_df, candidate):\n party = \"Dem\" if candidate.split(\"_\")[0][-1] == \"D\" else \"Rep\"\n # tracking = {county: 0 for county in set(elec_df.County)}\n if candidate in extras_df.columns:\n elec_df[f\"{candidate}_full\"] = 0\n for i in range(len(elec_df)):\n county = elec_df.County.iloc[i]\n precinct = elec_df.MATCH.iloc[i]\n precincts_in_county = set(elec_df[elec_df.County == county].MATCH)\n\n if county == \"St. Mary's\":\n county = county.replace(\"St. Mary's\", \"Saint Mary’s\")\n precinct = precinct.replace(\"St. Mary's\", \"Saint Mary's\")\n precincts_in_county = set([s.replace(\"St. Mary's\", \"Saint Mary's\") for s in precincts_in_county])\n elif county == \"Queen Anne's\":\n county = county.replace(county, \"Queen Anne’s\")\n precinct = precinct.replace(county, \"Queen Anne’s\")\n precincts_in_county = set([s.replace(county, \"Queen Anne’s\") for s in precincts_in_county])\n elif county == \"Prince George's\":\n county = county.replace(county, \"Prince George’s\")\n precinct = precinct.replace(county, \"Prince George’s\")\n precincts_in_county = set([s.replace(county, \"Prince George’s\") for s in precincts_in_county])\n\n county_votes = extras_df[candidate].loc[county] \n county_turnout = turnout_df[turnout_df.MATCH.isin(precincts_in_county)][f\"Total Voted {party}\"].sum()\n try:\n precinct_turnout_df = turnout_df[turnout_df.MATCH == precinct]\n assert len(precinct_turnout_df) == 1\n except:\n # assert county == \"Frederick\"\n if candidate == \"PRES12D\":\n print(f\"No additional votes added in {precinct}\")\n elec_df[f\"{candidate}_full\"].iloc[i] = elec_df[candidate].iloc[i]\n continue\n precinct_turnout = precinct_turnout_df[f\"Total Voted {party}\"].iloc[0]\n extra_votes = (precinct_turnout / county_turnout) * county_votes\n elec_df[f\"{candidate}_full\"].iloc[i] = elec_df[candidate].iloc[i] + extra_votes\n else:\n print(f\"No extra vote data for {candidate}\")\n\n return",
"def create_final(facilities_df):\n ids = 'ID_NUMBER'\n zips = 'ZIP_CODE'\n states = 'STATE_CODE'\n #al = 'ACTIVITY_LOCATION'\n gb = facilities_df.groupby([ids, zips, states])\\\n .size().reset_index()\n d = {ids: gb[ids], zips: gb[zips], states: gb[states]}\n #gb = facilities_df.groupby([ids, al])\\\n #.size().reset_index()\n #d = {ids: gb[ids], al: gb[al]}\n facilities_with_features_df = pd.DataFrame(data=d)\n return facilities_with_features_df",
"def plot_count_for_party(df, output):\n df = df.groupBy(df['date'], df['party']).count().orderBy(\"date\")\n df.coalesce(1).write.option(\"header\", \"true\").mode(\"overwrite\").csv(output+'/Tweet-Count-for-dash')",
"def check_inventory_on_pred_df(self, pid_df):\n\n pid_df = self.user_sizes(pid_df)\n pid_string = self.make_query_string_from_df(pid_df, 'activity') #get distinct pids\n sku_size_df = self.dbHelpers.query_postgres(self.sku_size_query.format(pid_string))\n\n prods = pid_df[['activity','identifier']].drop_duplicates()\n prods = prods.merge(sku_size_df, on='activity')\n prods = prods[pd.isnull(prods.identifier_x)|(prods.identifier_x==prods.identifier_y)]\n\n skus_to_check = list(prods.upc.unique())\n skus_w_inventory = self.sku_inventory_get(skus_to_check)\n prods = self.product_inventory_sizes_threshold(prods, skus_w_inventory)\n\n\n # row_color_check = lambda row: ((row.color_code=={''})|(row.sksz_color_code in row.color_code))\n buyer_df = pid_df[~pd.isnull(pid_df.identifier)].merge(sku_size_df, on=['activity','identifier'])\n if not buyer_df.empty:\n buyer_df = buyer_df[buyer_df.apply(self.row_color_check, axis=1)]\n buyer_df = buyer_df[buyer_df.upc.isin(skus_w_inventory)]\n\n lead_df = pid_df[pd.isnull(pid_df.identifier)].merge(sku_size_df, on = 'activity')\n if not lead_df.empty:\n lead_df = lead_df[lead_df.apply(self.row_color_check, axis=1)]\n # lead_prods = self.product_inventory_sizes_threshold(prods, skus_w_inventory)\n lead_df = lead_df.merge(prods, on=['activity', 'sksz_color_code'],\n how='inner') #this is wrong right_on = ['pid','sksz_color_code'],\n # buyer_df = self.buyer_inv_check(pid_df,sku_size_df)\n # lead_df = self.lead_inv_check(pid_df, sku_size_df)\n out_df = pd.concat([buyer_df,lead_df])\n # out_df.rename(columns={'sksz_color_code':'color_code'}, inplace=True)\n return out_df",
"def join_new_data(left_df, vote_counts, party, vote, field_name, cols):\n idx = (vote_counts.party == party) & (vote_counts.leg_vote == vote)\n out = left_df.merge(vote_counts[idx], on='voteId', how='left', suffixes=('', '_new'))\n\n cols.append(field_name)\n return out.rename(columns={'count': field_name})[cols]",
"def format_svDF(svDF, svtype=\"unknown\", interesting_chromosomes=\"all\", sampleName=\"sampleX\"):\n\n\n # return the same if empty\n if len(svDF)==0: return svDF\n\n if set(svDF.ID)=={\"\"}: svDF[\"ID\"] = svDF.Name\n\n # add a uniqueID\n svDF[\"uniqueID\"] = [\"%s_%s_%i\"%(sampleName, svtype, I+1) for I in range(len(svDF))]\n\n # filter and get only chromosomes that are in interesting_chromosomes\n if interesting_chromosomes!=\"all\":\n\n # define the chromosomal fields\n chromosome_fields = {\"Chr\", \"ChrA\", \"ChrB\", \"#CHROM\", \"CHR2\"}.intersection(set(svDF.keys()))\n\n # get the df where the interesting_chromosomes are involved\n svDF = svDF[svDF.apply(lambda r: all([r[c] in interesting_chromosomes for c in chromosome_fields]), axis=1)]\n\n def get_estimate_AF_for_breakends(bends_metadata_dict, AF_field=\"real_AF\", estimate_fn=min):\n\n \"\"\"Takes a dict that maps each breakpoints to a list of metadata of each breakend, and returns the estimate_fn AF observed in any breakpoint\"\"\"\n\n estimate_AF = estimate_fn([estimate_fn([bend_info[AF_field] for bend_info in list_breakend_info]) for list_breakend_info in bends_metadata_dict.values()])\n\n return estimate_AF\n\n # get the estimated minimum and maxium allele freq\n for estimate_fn_name, estimate_fn in [(\"min\", min), (\"max\", max), (\"mean\", np.mean)]:\n svDF[\"real_AF_%s\"%estimate_fn_name] = svDF.bends_metadata_dict.apply(get_estimate_AF_for_breakends, estimate_fn=estimate_fn)\n\n # get the implicated breakpoint IDs\n svDF[\"bpIDs\"] = svDF.bends_metadata_dict.apply(lambda x: \",\".join(sorted(x)))\n\n # get the quality\n for estimate_fn_name, estimate_fn in [(\"min\", min), (\"max\", max), (\"mean\", np.mean)]:\n svDF[\"QUAL_%s\"%estimate_fn_name] = svDF.bends_metadata_dict.apply(get_estimate_AF_for_breakends, AF_field=\"QUAL\", estimate_fn=estimate_fn)\n\n\n # get the worst filter tag\n svDF[\"all_FILTERs\"] = svDF.bends_metadata_dict.apply(lambda x: \",\".join(set.union(*[set.union(*[set(bend_info[\"FILTER\"].split(\";\")) for bend_info in list_breakend_info]) for list_breakend_info in x.values()])))\n\n # get the breakpoint IDs\n svDF[\"BREAKPOINTIDs\"] = svDF.bends_metadata_dict.apply(lambda x: \",\".join(sorted(x.keys())))\n\n # add ID \n svDF[\"IDstring\"] = svDF.apply(lambda r: get_IDstring_for_svDF_r(r, svtype), axis=1)\n\n return svDF",
"def select_autapses(partner_df):\n return partner_df.query('body_pre == body_post')",
"def add_participant_id_to_df(request_id_df):\n # Use pandas .apply() method to call get_participant_id function for every row in the dataframe (axis=1 specifies to use rows rather than columns). \n # The result is stored in a new field 'Participant Id'.\n request_id_df['Participant Id'] = request_id_df.apply(get_participant_id, axis=1)",
"def get_swings_heatmap_data(joined):\n changes = joined[joined.winner42 != joined.winner43]\n\n df = pd.DataFrame()\n\n for p1 in MAJOR_PARTIES:\n z = {x: 0 for x in parties}\n\n for p2 in MAJOR_PARTIES:\n if p1 == p2:\n z[p2] == 0\n else:\n wins = changes[changes.winner42 == p1][changes.winner43 == p2]\n z[p2] = wins.shape[0]\n\n df[p1] = pd.Series(z)\n\n return df",
"def get_deltas(df: pd.DataFrame, side: str) -> pd.DataFrame:\n\n result = df.groupby(['co_occurring_joint', 'reference_type'])[[\n 'co_occurring_side', 'reference_side', 'conditional_probability'\n ]].apply(get_delta_co_occurring_joints)\n\n result.name = 'delta'\n\n return result.reset_index()",
"def consolidate_country_col(df, country_col, country_id_col, covid_df): \n \n covid_countries = covid_df[['country_id', 'country']].drop_duplicates()\n covid_countries['country_lower'] = covid_countries['country'].str.lower()\n covid_countries['country_id_lower'] = covid_countries['country_id'].str.lower()\n \n df = df.rename(columns={\n country_col: 'country_other',\n country_id_col: 'country_id_other',\n })\n df['country_other_lower'] = df['country_other'].str.lower()\n df['country_id_other_lower'] = df['country_id_other'].str.lower()\n \n \n def _take_first_non_null_col(_df, _cols):\n return _df[_cols].fillna(method='bfill', axis=1).iloc[:, 0]\n \n def _consolidate_on(_df, col):\n _join_df = covid_countries.set_index(f'{col}_lower')\n _df = _df.join(_join_df, on=f'{col}_other_lower')\n _df['country_other'] = _take_first_non_null_col(_df, ['country', 'country_other'])\n for c in _join_df.columns:\n del _df[c]\n \n return _df\n \n df = _consolidate_on(df, 'country_id')\n df = _consolidate_on(df, 'country')\n \n df = df[df['country_other'].isin(covid_countries['country'])]\n \n del df['country_id_other']\n del df['country_other_lower']\n del df['country_id_other_lower']\n df = df.rename(columns={\n 'country_other': 'country'\n })\n \n return df",
"def plot_avg_sentiment_for_party(df, output):\n df = df.groupBy(df['date'], df['party']).agg(avg(df['sentiment']).alias('average')).orderBy(\"date\")\n df.coalesce(1).write.option(\"header\", \"true\").mode(\"overwrite\").csv(output+'/Avg-Senti-for-dash')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process results data from consecutive UK General Elections (e.g. 2010 and 2015) into a single model-ready dataset for predicting the later (e.g. 2015) election.
|
def process(self):
processed_directory = self.directory / "processed"
os.makedirs(processed_directory, exist_ok=True) # create directory if it doesn't exist
# Import general election results & polling data
results_dict = self.load_results_data()
polls_full = self.load_polling_data()
# Calculate poll of polls
polls = self.get_regional_and_national_poll_of_polls(polls=polls_full)
# Merge polls into previous election results dataframe
results_dict[self.last] = self.combine_results_and_polls(
results=results_dict[self.last], polls=polls
)
# Add into previous election results: national voteshare, national swing (vs current polling),
# national swing forecast (per party per seat) and national swing forecast winner (per seat).
results_dict[self.last] = self.calculate_national_swing(results_dict[self.last])
# If we have geo-polling for previous election, also calculate a geo-level swing forecast.
if "geo_polls" in results_dict[self.last].columns:
results_dict[self.last] = self.calculate_geo_swing(results_dict[self.last])
# Create ML-ready dataframe and export
model_df = self.export_model_ready_dataframe(results_dict=results_dict)
print(f"Exporting {self.last}->{self.now} model dataset to {processed_directory.resolve()}")
model_df.to_csv(
processed_directory / f"general_election-uk-{self.now}-model.csv", index=False
)
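# A minimal driving sketch, kept as comments because the surrounding pipeline class is
# not shown here: the class name, constructor arguments and paths below are illustrative
# assumptions, not part of the original code.
#
# from pathlib import Path
# pipeline = UKElectionModelDataset(directory=Path("data/general-election"),
#                                   last="2010", now="2015")
# pipeline.process()
# # -> writes data/general-election/processed/general_election-uk-2015-model.csv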
|
[
"def preprocess_by_country_all_years(training_set, submit_rows_index, startyear=1972):\n\n # Rename columns to make indexing easier\n info_cols = training_set.iloc[:, -3:]\n training_set = training_set.iloc[:, :-3]\n training_set = training_set.rename(lambda x: int(x.split(' ')[0]), axis=1)\n training_set = pd.concat([training_set, info_cols], axis=1)\n\n # Mark all columns that need to be predicted in 2007\n training_set['to_predict'] = training_set.index.isin(submit_rows_index)\n\n # Get a list of all series codes, and all series codes that we predict\n # Not all countries will have values for all codes\n # require at least 'percent' of countries should have this series.\n percent = 0.9\n all_series = training_set['Series Code'].value_counts()\n all_series = all_series.where(all_series>all_series[0]*percent).dropna()\n \n pred_series = training_set.loc[submit_rows_index, 'Series Code'].unique()\n\n\n # Group by country\n gb = training_set.groupby('Country Name')\n\n # Construct dataframe row by row\n # X: Nations * Year * Series\n # Y: Nations * 2007 * Series \n Xrows, Yrows = {}, {}\n for g, group in gb:\n years = [int(i) for i in range(startyear,2007)]\n xarray = group[ years ]\n y = group[2007]\n code = group['Series Code']\n pred = group['to_predict']\n\n Xrow = {}\n Yrow = {}\n for xind, yind, series, to_pred in zip(xarray.index, y.index, code, pred):\n if series in all_series:\n Xrow[series] = xarray.loc[xind].T\n if to_pred:\n Yrow[series] = y.loc[yind].T\n\n Xrow = pd.DataFrame(Xrow)\n if not Xrow.empty: ## NAN is not empty\n Xrows[g] = Xrow\n Yrow = pd.DataFrame(Yrow, index=[2007])\n if not Yrow.empty: \n Yrows[g] = Yrow\n\n X = Xrows\n Y = Yrows\n\n # linear interpolation for missing values in X\n for nation in X:\n X[nation].interpolate(method = 'linear', axis = 0, limit_direction = 'both', inplace = True)\n\n return X, Y",
"def engineer_features(df,training=True):\n\n ## extract dates\n dates = df['date'].values.copy()\n dates = dates.astype('datetime64[D]')\n\n ## engineer some features\n eng_features = defaultdict(list)\n previous =[7, 14, 21, 28, 35, 42, 49, 56, 63, 70] # [7, 14, 28, 70]\n y = np.zeros(dates.size)\n for d,day in enumerate(dates):\n\n ## use windows in time back from a specific date\n for num in previous:\n ## get current day\n current = np.datetime64(day, 'D') \n ## get first day of N previous days\n prev = current - np.timedelta64(num, 'D')\n ## filer days to only keep desired ones\n mask = np.in1d(dates, np.arange(prev,current,dtype='datetime64[D]'))\n ## sum revenue over selected period\n eng_features[\"previous_{}\".format(num)].append(df[mask]['revenue'].sum())\n\n ## get get the target revenue\n ## target = revenue over next 30 days\n plus_30 = current + np.timedelta64(30,'D')\n mask = np.in1d(dates, np.arange(current,plus_30,dtype='datetime64[D]'))\n y[d] = df[mask]['revenue'].sum()\n\n ## attempt to capture monthly trend with previous years data (if present)\n start_date = current - np.timedelta64(365,'D')\n stop_date = plus_30 - np.timedelta64(365,'D')\n mask = np.in1d(dates, np.arange(start_date,stop_date,dtype='datetime64[D]'))\n eng_features['previous_year'].append(df[mask]['revenue'].sum())\n\n ## add some non-revenue features\n minus_30 = current - np.timedelta64(30,'D')\n mask = np.in1d(dates, np.arange(minus_30,current,dtype='datetime64[D]'))\n eng_features['recent_invoices'].append(df[mask]['unique_invoices'].mean())\n eng_features['recent_views'].append(df[mask]['total_views'].mean())\n\n X = pd.DataFrame(eng_features)\n ## combine features in to df and remove rows with all zeros\n X.fillna(0,inplace=True)\n mask = X.sum(axis=1)>0\n X = X[mask]\n y = y[mask]\n dates = dates[mask]\n X.reset_index(drop=True, inplace=True)\n\n if training == True:\n ## remove the last 30 days (because the target is not reliable)\n mask = np.arange(X.shape[0]) < np.arange(X.shape[0])[-30]\n X = X[mask]\n y = y[mask]\n dates = dates[mask]\n X.reset_index(drop=True, inplace=True)\n \n return(X,y,dates)",
"def lacer(df, df1, train_start_date, train_end_date, test_start_date, test_end_date, request_type, CD, predictor_num): #Once model is ready, replace df with csv\n\n #Create Training and Testing Sets\n dftrain = preprocessing(df , train_start_date, train_end_date)\n dftrain = dftrain.reset_index(drop = True)\n dftest = preprocessing(df1, test_start_date, test_end_date)\n dftest = dftest.reset_index(drop = True)\n\n #Reserve test set for training on all 3 models. \n y_train, y_test = lc.CreateTestSet(dftest, predictor_num)\n y_test = y_test.reshape((-1, 1))\n\n\n## 2 Models\n #Model1: CD\n modelCD = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainCD = dftrain[dftrain['CD'] == CD].reset_index(drop = True)\n\n X_trainCD, X_testCD = lc.CreateTrainSet(dftrainCD, predictor_num)\n X_testCD = X_testCD.reshape((-1, 1))\n modelCD.fit(X_trainCD, X_testCD)\n\n y_predCD = modelCD.predict(y_train)\n\n #Model2: Request_type\n modelRT = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainRT = dftrain[dftrain['RequestType'] == request_type].reset_index(drop = True)\n\n X_trainRT, X_testRT = lc.CreateTrainSet(dftrainRT, predictor_num)\n X_testRT = X_testRT.reshape((-1, 1))\n\n modelRT.fit(X_trainRT, X_testRT)\n\n y_predRT = modelRT.predict(y_train)\n\n\n #Average out all predictions\n y_predFinal = (y_predCD + y_predRT )/2\n\n #Return metrics \n return lc.metrics(y_predFinal, y_test)",
"def _get_data(self, start, end):\n for iterator, year in enumerate(range(self.start, self.end + 1)):\n this_year_mean = []\n retired, retired_total, participants = 0, 0, 0\n year2scrape = StandingsScraper(year)\n year2scrape.load()\n print(f\"Scraping year {year}\", year2scrape.url)\n\n for race in year2scrape.races:\n for result in race:\n if \"RET\" in str(result) or \"Ret\" in str(result):\n retired += 1\n if str(result) != \"nan\": #Nan means driver didn't participate\n participants += 1\n if participants != 0: #In 2020 there are about 15 empty rows\n this_race_percent = (retired/participants) * 100\n this_year_mean.append(this_race_percent)\n retired_total += retired\n retired, participants = 0, 0\n\n self.ret_mean[iterator] = sum(this_year_mean)/len(this_year_mean)\n self.retired[iterator] = retired_total",
"def build_solution():\r\n\r\n \"\"\"Reads CSV file and splits in into four datasets: Train, Kaggle public\r\n eaderboard, Kaggle private leaderboard (i.e the test set), and unused.\"\"\"\r\n df = pd.read_csv(\"data/atlas-higgs-challenge-2014-v2.csv\", sep=',')\r\n df_test = df[df['KaggleSet'].isin(('b', 'v'))]\r\n assert(len(df_test) == 550_000)\r\n df_test = df_test.drop('Weight', axis='columns')\r\n df_test = df_test.rename(columns={'KaggleWeight': 'Weight', 'Label': 'Class'})\r\n df_test.to_csv('data/solution_from_cern.csv', columns=['EventId', 'Class', 'Weight'], index=False)",
"def compute_common_data(self):\n self.filter_expression_and_priors()\n print('Creating design and response matrix ... ')\n self.design_response_driver.delTmin = self.delTmin\n self.design_response_driver.delTmax = self.delTmax\n self.design_response_driver.tau = self.tau\n (self.design, self.response) = self.design_response_driver.run(self.expression_matrix, self.meta_data)\n\n # compute half_tau_response\n print('Setting up TFA specific response matrix ... ')\n self.design_response_driver.tau = self.tau / 2\n (self.design, self.half_tau_response) = self.design_response_driver.run(self.expression_matrix, self.meta_data)",
"def engineer_features(df,training=True):\r\n\r\n ## extract dates\r\n logging.info('start feature')\r\n dates = df['date'].values.copy()\r\n dates = dates.astype('datetime64[D]')\r\n\r\n ## engineer some features\r\n eng_features = defaultdict(list)\r\n previous =[7, 14, 28, 70] #[7, 14, 21, 28, 35, 42, 49, 56, 63, 70]\r\n y = np.zeros(dates.size)\r\n for d,day in enumerate(dates):\r\n\r\n ## use windows in time back from a specific date\r\n for num in previous:\r\n current = np.datetime64(day, 'D') \r\n prev = current - np.timedelta64(num, 'D')\r\n mask = np.in1d(dates, np.arange(prev,current,dtype='datetime64[D]'))\r\n eng_features[\"previous_{}\".format(num)].append(df[mask]['revenue'].sum())\r\n\r\n ## get get the target revenue \r\n plus_30 = current + np.timedelta64(30,'D')\r\n mask = np.in1d(dates, np.arange(current,plus_30,dtype='datetime64[D]'))\r\n y[d] = df[mask]['revenue'].sum()\r\n\r\n ## attempt to capture monthly trend with previous years data (if present)\r\n start_date = current - np.timedelta64(365,'D')\r\n stop_date = plus_30 - np.timedelta64(365,'D')\r\n mask = np.in1d(dates, np.arange(start_date,stop_date,dtype='datetime64[D]'))\r\n eng_features['previous_year'].append(df[mask]['revenue'].sum())\r\n\r\n ## add some non-revenue features\r\n minus_30 = current - np.timedelta64(30,'D')\r\n mask = np.in1d(dates, np.arange(minus_30,current,dtype='datetime64[D]'))\r\n eng_features['recent_invoices'].append(df[mask]['unique_invoices'].mean())\r\n eng_features['recent_views'].append(df[mask]['total_views'].mean())\r\n\r\n X = pd.DataFrame(eng_features)\r\n ## combine features in to df and remove rows with all zeros\r\n# X.fillna(0,inplace=True)\r\n# mask = X.sum(axis=1)>0\r\n X = X[mask]\r\n y = y[mask]\r\n dates = dates[mask]\r\n X.reset_index(drop=True, inplace=True)\r\n\r\n# if training == True:\r\n# ## remove the last 30 days (because the target is not reliable)\r\n# mask = np.arange(X.shape[0]) < np.arange(X.shape[0])[-30]\r\n# X = X[mask]\r\n# y = y[mask]\r\n# dates = dates[mask]\r\n# X.reset_index(drop=True, inplace=True)\r\n \r\n return(X,y,dates)",
"def run(self):\n\n country_data = self.country_fetch()\n province_data=self.province_fetch()\n\n ### Get dates & cumulative recorded lists\n time_list = list(country_data.Datum)\n lists = np.asarray(country_data.Aantal, dtype='int')\n\n for k in range(len(time_list)):\n\n ### Translating data format from DD/MM/YYYY to YYYY-MM-DD\n if k % 3 == 0:\n confirmed = lists[k]\n if confirmed < 0:\n confirmed = None\n else:\n confirmed = int(confirmed)\n elif k % 3 == 1:\n HOSPITALISED = lists[k]\n if HOSPITALISED < 0:\n HOSPITALISED = None\n else:\n HOSPITALISED = int(HOSPITALISED)\n else:\n date = time_list[k]\n dead = lists[k]\n if dead < 0:\n dead = None\n else:\n dead = int(dead)\n\n upsert_obj = {\n # source is mandatory and is a code that identifies the source\n 'source': self.SOURCE,\n # date is also mandatory, the format must be YYYY-MM-DD\n 'date': date,\n # country is mandatory and should be in English\n # the exception is \"Ships\"\n 'country': \"Netherlands\",\n # countrycode is mandatory and it's the ISO Alpha-3 code of the country\n # an exception is ships, which has \"---\" as country code\n 'countrycode': 'NLD',\n 'gid': ['NLD'],\n # adm_area_1, when available, is a wide-area administrative region, like a\n # Canadian province in this case. There are also subareas adm_area_2 and\n # adm_area_3\n 'adm_area_1': None,\n 'adm_area_2': None,\n 'adm_area_3': None,\n 'confirmed': confirmed,\n # dead is the number of people who have died because of covid19, this is cumulative\n 'dead': dead,\n 'hospitalised': HOSPITALISED\n }\n\n self.upsert_data(**upsert_obj)\n \n \n valid_provinces=list(set(province_data.Provincienaam))\n \n for province in valid_provinces[1:]:\n \n \n current_data=province_data[province_data.Provincienaam==province]\n \n date_list=list(province_data[province_data.Provincienaam==province].Datum)\n \n confirmed_list=np.array(province_data[province_data.Provincienaam==province].Aantal,dtype='int')\n \n \n for i in range(len(date_list)):\n \n date=date_list[i]\n confirmed=confirmed_list[i]\n \n adm_area_1, adm_area_2, adm_area_3, gid = province, None, None, None\n \n upsert_obj = {\n # source is mandatory and is a code that identifies the source\n 'source': self.SOURCE,\n # date is also mandatory, the format must be YYYY-MM-DD\n 'date': date,\n # country is mandatory and should be in English\n # the exception is \"Ships\"\n 'country': \"Netherlands\",\n # countrycode is mandatory and it's the ISO Alpha-3 code of the country\n # an exception is ships, which has \"---\" as country code\n 'countrycode': 'NLD',\n 'gid': ['NLD'],\n # adm_area_1, when available, is a wide-area administrative region, like a\n # Canadian province in this case. There are also subareas adm_area_2 and\n # adm_area_3\n 'adm_area_1': adm_area_1,\n 'adm_area_2': None,\n 'adm_area_3': None,\n 'confirmed': int(confirmed)\n }\n\n self.upsert_data(**upsert_obj)",
"def calculate_group_predictions(self, individual_predictions):",
"def avg_predictions(df_grp_2, cal_df, fisc_calender, pred_start, fpg, res_all_sp):\r\n pivot_var = \"MM\"\r\n agg_col = \"LINE_ORDERS\"\r\n var_list = ['prev_5wk_avg_4wk_hld']\r\n level_list = list(df_grp_2[pivot_var].unique())\r\n \r\n tt = df_grp_2.copy()\r\n level = \"wk\"\r\n # Rearrange date by fiscal weeks\r\n tt = cu.merge_cal_tt(tt, cal_df.drop('FISC_EOW_DT', axis = 1), level)\r\n tt[agg_col+\"_ACT\"] = tt[agg_col]\r\n tt[agg_col].replace(np.nan, 0, inplace=True)\r\n \r\n model_data_pivot = pd.pivot_table(tt,\r\n index=['FISC_YR_NBR', 'FISC_MTH_NBR','FISC_WK_OF_MTH_ID', 'FISC_WK_OF_YR_NBR', 'FISC_QTR_NBR'],\r\n columns=pivot_var,\r\n values=agg_col).reset_index()\r\n model_data_pivot = cu.merge_cal_tt(model_data_pivot, cal_df, level)\r\n\r\n model_data_pivot = cu.fillna_values(model_data_pivot, pred_start)\r\n for i in level_list:\r\n if str(i) not in model_data_pivot.columns:\r\n model_data_pivot[str(i)] = 0\r\n tt = model_data_pivot.copy()\r\n train = tt[(tt.FISC_WK_OF_MTH_ID < pred_start) ]\r\n test = tt[(tt.FISC_WK_OF_MTH_ID >= pred_start) ]\r\n for i in level_list:\r\n if str(i) not in train.columns:\r\n train[str(i)] = 0\r\n if str(i) not in test.columns:\r\n test[str(i)] = 0\r\n \r\n model_data_orders_df = prev_avg_sparsity_scoring(level_list, train, test, \"_salesMAPP_prev_5wk_avg_4wk_hld\")\r\n model_data_orders_melt_df = cu.lag_variable_melt(agg_col, model_data_orders_df, var_list)\r\n model_data_orders_melt_df.columns = model_data_orders_melt_df.columns.str.replace(\"variable\", pivot_var)\r\n model_data_orders_melt_df[\"RSS\"] = fpg\r\n model_data_orders_melt_df[\"RSS_MM\"] = model_data_orders_melt_df[\"RSS\"]+\"_\"+model_data_orders_melt_df[pivot_var]\r\n model_data_orders_melt_df = pd.merge(model_data_orders_melt_df, \r\n df_grp_2[[\"FISC_WK_OF_MTH_ID\", pivot_var, agg_col, \"SPARSITY\"]], how =\"left\")\r\n \r\n model_data_orders_melt_df.columns = model_data_orders_melt_df.columns.str.replace(\"LINE_ORDERS_VALUE\", \"Prediction_Trf\")\r\n model_data_orders_melt_df = model_data_orders_melt_df[['FISC_YR_NBR', 'FISC_MTH_NBR', 'FISC_WK_OF_MTH_ID',\r\n pivot_var, 'Prediction_Trf', 'RSS', 'RSS_MM']]\r\n res_all_sp.append(model_data_orders_melt_df)\r\n \r\n return res_all_sp",
"def evaluate_predictive_model(request=None, debug_out=None, course_group_name=None):\n from course.models import CourseGroup, Student\n from stats.models import ValueHistory\n from datetime import datetime, timedelta\n from django.utils import timezone\n from collections import Counter\n\n # Returns a range of dates between two provided dates\n def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\n # Debugging logs\n debug_out = open(\"../../../home/pepijn/report.log\", \"a\")\n def debug(msg):\n if debug_out is not None:\n debug_out.write(\"[%s] %s \\n\" % (datetime.now().isoformat(), str(msg)))\n print(\"[%s] %s\" % (datetime.now().isoformat(), str(msg)))\n\n group = CourseGroup.objects.get(name=course_group_name)\n picked_vars = []\n if group == None:\n debug(\"No course group found with label: %s\" % course_group_name)\n return\n else:\n debug(\"Evaluating predictive model on: %s\" % group.label)\n\n result_matrix = [['course date time', 'sample size train', 'sample size test',\n 'mean absolute error','root mean square error', 'fail/pass accuracy',\n 'at-risk recall', 'baseline']]\n\n correlation_matrix = [['course date time',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value',\n # \"name\",'Correlation', 'p-value',\n # \"name\",'Correlation', 'p-value',\n \"name\",'Correlation', 'p-value']]\n # Loop over all course days \n\n for course_day in daterange(group.start_date+timedelta(days=55), group.end_date):\n result_record = []\n result_record.append(course_day)\n correlation_record = []\n correlation_record.append(course_day)\n kfold_scores = {'mae':[], 'rmse':[], 'fpa': [], 'fsr':[]}\n debug('Datetime %s' % course_day)\n variables = group.course.variable_set.exclude(type='OUT').order_by('order').exclude(name=\"access_application_count\").exclude(name=\"interact_application_count\").exclude(name=\"scored_assessment_count\").exclude(name=\"attempted_assessment_count\").exclude(name=\"launched_assessment_count\")\n # variables = group.course.variable_set.exclude(type='OUT').order_by('order')\n var_names = variables.values_list(\"name\", flat=True)\n\n course_datetime_now = group.calculate_course_datetime(timezone.make_aware(datetime.combine(course_day, datetime.min.time())))\n\n # Collect the statistics of the students\n group_statistics = {}\n for variable in variables:\n value_history_comparison = ValueHistory.objects.filter(variable=variable, course_datetime__gte=timedelta(days=course_datetime_now.days), course_datetime__lt=timedelta(days=course_datetime_now.days+1), group=group)\n group_statistics[variable.name] = variable.calculate_statistics_from_values(value_history_comparison)\n\n # Collect relevant value statistics for model building. Y 2015-2016 (pk=2) was selected for this purpose\n pred_statistics = {}\n for variable in variables:\n value_history_1516 = ValueHistory.objects.filter(variable=variable.pk, group=2, course_datetime__gte=timedelta(days=course_datetime_now.days), course_datetime__lt=timedelta(days=course_datetime_now.days+1))\n if len(value_history_1516) > 0:\n pred_statistics[variable.name] = variable.calculate_statistics_from_values(value_history_1516)\n\n # Get the y values i.e. 
grades needed for machine learning (train group)\n try:\n with open(\"/home/pepijn/data_2015.csv\", mode='r') as infile:\n reader = csv.reader(infile)\n iki_grades = {rows[0]:rows[1] for rows in reader}\n except IOError, e:\n debug('%s' % e)\n iki_grades = {}\n\n # Variable value dicts is a dictionary holding another dictionary for each individual variable. \n # Eacht variable dictionary holds for each student as key his statistics as value.\n variable_value_dicts_pred = {}\n variable_value_dicts_group = {}\n group_student_ids = []\n # Loop over all variables\n for var_name in var_names:\n variable_value_dicts_pred[var_name] = {}\n variable_value_dicts_group[var_name] = {}\n\n # Loop over individual student's statistics in the variable statistics\n pred_statistic = pred_statistics[var_name]\n for indv_student_statistic in pred_statistic:\n # Add the student statistics to the dict\n student_id = encrypt_value(indv_student_statistic['student'])\n variable_value_dicts_pred[var_name][student_id] = indv_student_statistic\n \n # Loop over individual student's statistics in the comparison statistics \n group_statistic = group_statistics[var_name]\n for indv_student_statistic in group_statistic:\n # Add the student statistics to the dict\n # student_id = encrypt_value(indv_student_statistic['student'])\n student_id = indv_student_statistic['student']\n student = Student.objects.filter(identification=student_id)\n if len(student) > 0 and student[0].final_grade is not None: \n group_student_ids.append(student_id)\n variable_value_dicts_group[var_name][student_id] = indv_student_statistic\n\n group_student_ids = list(set(group_student_ids))\n\n # extract data in matrix form from the value dicts\n X, Y = extract_xy_values(iki_grades.keys(), var_names, variable_value_dicts_pred, iki_grades)\n X = normalize(X, axis=0, norm='max')\n\n\n # Get grades from course group (test group)\n group_grades = {}\n # group_passed = {}\n for group_student_id in group_student_ids:\n try:\n student = Student.objects.get(identification=group_student_id)\n # print group_student_id, student.final_grade, student.passed_course\n group_grades[group_student_id] = student.final_grade\n # group_passed[group_student_id] = student.passed_course\n except:\n print group_student_id\n\n # for student_id in group_grades.keys():\n # if group_passed[student_id] == False and group_grades[student_id] > 5:\n # group_grades[student_id] = 5\n # print group_grades[student_id], group_passed[student_id]\n\n # Extract data in matrix form from dicts\n X_group, Y_group, ids = extract_xy_values(group_grades.keys(), var_names, variable_value_dicts_group, group_grades, return_ids=True)\n X_group = normalize(X_group, axis=0, norm='max')\n result_record.append(len(Y))\n result_record.append(len(Y_group))\n\n # Correlation analysis\n for i in xrange(len(var_names)):\n var_values = [row[i] for row in X_group]\n correlation_record.append(var_names[i])\n correlation_record.append(pearsonr(Y_group, var_values)[0])\n correlation_record.append(pearsonr(Y_group, var_values)[1])\n\n # print pearsonr(Y_group, var_values)[0], pearsonr(Y_group, var_values)[1]\n # print len(Y), len(Y_group)\n\n # Use machine learning to create a predictive model\n X = np.array(X).astype(dtype='float')\n Y = np.array(Y).astype(dtype='float')\n \n from sklearn.neighbors import KNeighborsRegressor\n from sklearn.tree import DecisionTreeRegressor\n from sklearn.feature_selection import RFE\n # Deal with low amounts of data\n if len(Y) < 5: \n regr = KNeighborsRegressor(n_neighbors=1)\n 
else:\n regr = KNeighborsRegressor(n_neighbors=3)\n\n\n # rfe = RFE(estimator=DecisionTreeRegressor())\n # rfe.fit(X, Y)\n # for x in xrange(len(var_names)):\n # print var_names[x], rfe.ranking_[x], rfe.support_[x]\n # if rfe.ranking_[x] == 1:\n # picked_vars.append(var_names[x])\n # print Counter(picked_vars)\n # print(\"Optimal number of features : %d\" % rfe.n_features_)\n # Now train again using all instances, this model is used for final prediction\n regr.fit(X, Y)\n y_pred_plus = []\n y_pred = []\n y_test = []\n student_ids = []\n import sys\n for i in xrange(len(X_group)):\n sys.stdout.write(\"\\r%d%%\" % i)\n sys.stdout.flush()\n # student = Student.objects.get(identification=ids[i])\n # print ids[i], student.final_grade, Y_group[i], student.passed_course, regr.predict([X_group[i]])\n if Y_group[i] is not None:\n # Get the already obtained grades from the activity db\n total_weight, actual_grade = get_grade_so_far(ids[i], debug_out=None, until=course_day)\n if actual_grade is not None:\n grade_so_far = actual_grade\n else:\n grade_so_far = 0\n # print actual_grade, grade_so_far, total_weight\n y_pred_plus.append(np.array([grade_so_far + (regr.predict([X_group[i]])[0]*(float(1)-total_weight))]))\n y_pred.append(regr.predict([X_group[i]]))\n y_test.append(Y_group[i])\n student_ids.append(ids[i])\n\n print y_pred\n print y_pred_plus\n\n for x in xrange(len(y_pred)):\n print y_pred[x][0], y_pred_plus[x][0], y_test[x], student_ids[x]\n\n y_pred = np.array(y_pred_plus)\n\n\n baseline_accuracy = (float)(len([x for x in Y_group if x > 5.4]))/len(Y_group)\n kfold_scores['mae'].append(mean_absolute_error(y_test, y_pred))\n kfold_scores['rmse'].append(sqrt(mean_squared_error(y_test,y_pred)))\n kfold_scores['fpa'].append(fail_pass_error(y_test, y_pred))\n fsr_val, fsr_count = fail_recall(y_test, y_pred)\n \n\n # This if statement was necessary due to the small amount of data\n if fsr_count > 0:\n kfold_scores['fsr'].append(fsr_val)\n else:\n kfold_scores['fsr'].append(0)\n\n result_record.append(kfold_scores['mae'][0])\n result_record.append(kfold_scores['rmse'][0])\n result_record.append(kfold_scores['fpa'][0])\n result_record.append(kfold_scores['fsr'][0])\n result_record.append(baseline_accuracy)\n\n # print kfold_scores, baseline_accuracy\n result_matrix.append(map(str, result_record))\n correlation_matrix.append(map(str, correlation_record))\n result_matrix = np.array(result_matrix)\n csv_name = '../../2016_analysis/optimized_vars/KNN_60_plus.csv'\n np.savetxt(csv_name, result_matrix, delimiter=',', fmt='%s')\n\n correlation_matrix = np.array(correlation_matrix)\n csv_name = '../../2016_analysis/optimized_vars/correlation_plus.csv'\n np.savetxt(csv_name, correlation_matrix, delimiter=',', fmt='%s')\n # print result_matrix",
"def callGAModel(region):\n year = 2000\n # while(year <= 2006):\n execGaModel(year, region)\n year += 1",
"def predict_year_budget(year, product):\n\n # Step 1 - Call the API get history sales data\n res2018 = Product.search_by_year_and_product(2018, product)\n x_train = []\n y_train = []\n for key in res2018:\n x_train.append(float(key))\n y_train.append(float(res2018[key]))\n\n # Step 2 - Train data\n x_train = np.array(x_train)\n y_train = np.array(y_train)\n\n X_train = x_train.reshape(-1, 1)\n X2_train = np.hstack([X_train, X_train**2])\n print('>>>X2_train', X2_train.shape)\n lin_reg = LinearRegression()\n\n lin_reg.fit(X2_train, y_train)\n print('coef', lin_reg.coef_)\n print('intercept', lin_reg.intercept_)\n\n predict_result = lin_reg.predict(X2_train)\n print('>>>>predict_result', predict_result)\n\n # Step 3 - Struct Data\n result = {\n 'predict': {\n 'year': year,\n 'product': product,\n 'data': predict_result.tolist()\n }\n }\n return result",
"def fetch_results(dataset, model_folder='results/models/model_85/'):\n prediction_path = os.path.join(model_folder, 'predictions/')\n # load target dataframe\n raw_target_df = pd.read_csv(os.path.join(prediction_path, dataset, 'raw_target_df.csv'))\n # format targets\n raw_target_df.diagnosis = (raw_target_df.diagnosis == 'AD').astype(int)\n raw_target_df.sex = (raw_target_df.sex == 'F').astype(int)\n # load predictions\n ID = np.load(os.path.join(prediction_path, dataset, 'id.npy'))\n disease_preds = np.load(os.path.join(prediction_path, dataset, 'disease.npy'))\n age_preds = np.load(os.path.join(prediction_path, dataset, 'age.npy'))\n sex_preds = np.load(os.path.join(prediction_path, dataset, 'sex.npy'))\n # store predictions in dataframe\n df_preds = pd.DataFrame({'participant_id': ID[:, 0],\n 'session_id': ID[:, 1],\n 'diagnosis': disease_preds.flatten(),\n 'age': age_preds.flatten(),\n 'sex': sex_preds.flatten()})\n # merge dataframes\n cols = ['participant_id', 'session_id', 'diagnosis', 'age', 'sex']\n data = raw_target_df[cols].merge(df_preds, on=['participant_id', 'session_id'], suffixes=('_true', '_pred'))\n\n return data",
"def createCandidateTables(data, model, features, races, dollarlist=[1000, 10000, 100000]):\n race_key = ['CONTEST_NAME', 'ELECTION_DATE']\n cand_key = [*race_key, 'CANDIDATE_NAME']\n # from all the races, select only those who have more than 2 ppl running;\n # i.e. select those with the same index as the races table we created (races doesn't have cand data)\n myRaceIndices = races.reset_index().set_index(race_key).index\n myRaces = data.reset_index(drop=True).set_index(race_key).loc[myRaceIndices]\n grouping = myRaces.groupby(race_key, as_index=False)\n raceList = []\n for name, group in grouping:\n # Looping through races\n # these_cands = group.reset_index().set_index(cand_key).copy()\n candList = []\n for row in group.itertuples():\n # Loop through ALL candidates in each race\n # Row is a named tuple, so we can access the names, and also the Index, like below\n #Pandas(Index=('State Assembly Member District 1', Timestamp('2018-06-05 00:00:00')),\n # CANDIDATE_NAME='JENNY O CONNELL-NOWAIN', PARTY_NAME='No Party Preference', ...\n candName = row.CANDIDATE_NAME\n dollarList = []\n for donation in dollarlist:\n # these_cands['FAVORITE'] = these_cands.loc[row.Index, 'CANDIDATE_NAME']\n newFacts = addMoney(group, row.Index, candName, donation).reset_index()\n newResults = raceModel(newFacts, model, features)\n newResults = findRanking(newResults)\n newcols = ['PARTY_NAME', 'RANK', 'WINS', 'FAVORITE','PRED_VOTE_PCT']\n newResults = newResults[list(set(cand_key + features + newcols))]\n newResults = newResults.set_index([*race_key, 'FAVORITE'])\n dollarList.append(newResults.copy())\n candResults = pd.concat(dollarList, keys=dollarlist, names=['DONATION'])\n candResults['MIN_DONATION'] = findMinWinDonation(candResults, candName)\n candList.append(candResults.copy())\n raceResults = pd.concat(candList)\n raceList.append(raceResults.copy())\n allResults = pd.concat(raceList)\n return allResults",
"def run_rb_system(self):\r\n \r\n forecast_case_no = pd.Series(0, index=self.forecast.index)\r\n gb_forecast = self.forecast.groupby([\"lu\", \"ld\", \"sp\", \"sn\", \"ud\", \"aud\", \"rsi1\", \"rsi2\", \"rsi3\", \"rsi4\"])\r\n for i, key in enumerate(gb_forecast.indices.keys()):\r\n forecast_case_no.loc[gb_forecast.groups[key]] = i\r\n forecast_ncase = gb_forecast.ngroups\r\n \r\n forecast_case_result = pd.Series(\"\", index=self.forecast.index)\r\n for i in range(forecast_ncase):\r\n case1 = self.forecast[forecast_case_no == i]\r\n case2 = self.train[(self.train.lu == case1.lu[0]) &\r\n (self.train.ld == case1.ld[0]) &\r\n (self.train.rsi1 == case1.rsi1[0]) &\r\n (self.train.rsi2 == case1.rsi2[0]) &\r\n (self.train.sp == case1.sp[0]) &\r\n (self.train.sn == case1.sn[0]) &\r\n (self.train.ud == case1.ud[0]) &\r\n (self.train.aud == case1.aud[0]) &\r\n (self.train.rsi3 == case1.rsi3[0]) &\r\n (self.train.rsi4 == case1.rsi4[0])] # exact same case\r\n if case2.shape[0] != 0:\r\n forecast_case_result[case1.index] = self.trained_case.ix[case2.index, \"case_result\"][0]\r\n else:\r\n forecast_case_result[case1.index] = \"Unobserved\"\r\n\r\n self.forecasted_case = pd.concat([forecast_case_no, forecast_case_result], axis=1)\r\n self.forecasted_case.columns = [\"case_no\", \"case_result\"]\r\n\r\n return self.forecasted_case",
"def country_predictions(country, data):\n\n i = 4\n try:\n while( data[data[\"Country/Region\"] == country].groupby([\"Country/Region\"])[data.columns[i:i+1]].apply(sum).values[0]<=0):\n i+=1\n except: i = 4\n if(country == \"US\"):\n data = data[data[\"Country/Region\"] == country].groupby([\"Country/Region\"])[data.columns[45:]].apply(sum)\n\n elif( i>4):\n data = data[data[\"Country/Region\"] == country].groupby([\"Country/Region\"])[data.columns[i-1:]].apply(sum)\n else:\n data = data[data[\"Country/Region\"] == country].groupby(['Country/Region'])[data.columns[i:]].apply(sum)\n x = data.columns\n\n y = data.values\n\n x_stmps= pd.Series(x).apply(swap)\n poly = PolynomialFeatures(degree = 4)\n X_Poly = poly.fit_transform(np.array(x_stmps).reshape(len(x_stmps), 1))\n poly.fit(X_Poly, y.reshape(len(x), 1))\n #Fitting data:\n model_linear = LinearRegression()\n model_linear.fit(X_Poly, y.reshape(len(x), 1))\n predictions = model_linear.predict(poly.fit_transform(np.array(date_list).reshape(len(date_list), 1)))\n\n return generatePoints(predictions)",
"def prepdatasets():\n\n Squad().get_train_data()\n NQ().get_train_data()\n TriviaQA().get_train_data()\n\n return 0",
"def process_model_se_data(overwrite=True):\n print(\"\\nProcessing parcel data to MAZ/TAZ...\")\n # Summarize parcel data to MAZ\n for year in YEARS:\n print(f\"{str(year)}:\")\n # Set output\n out_gdb = validate_geodatabase(make_path(CLEANED, f\"PMT_{year}.gdb\"))\n out_fds = validate_feature_dataset(make_path(out_gdb, \"Polygons\"), sr=SR_FL_SPF)\n out_fc = make_path(out_fds, \"MAZ\")\n maz_se_data = make_path(\n RAW, \"SERPM\", \"maz_data_2015.csv\"\n ) # TODO: standardize SERPM pathing\n # Summarize parcels to MAZ\n print(\"--- summarizing MAZ activities from parcels\")\n par_fc = make_path(out_gdb, \"Polygons\", \"Parcels\")\n se_data = make_path(out_gdb, \"EconDemog_parcels\")\n par_data = p_help.estimate_maz_from_parcels(\n par_fc=par_fc,\n par_id_field=prep_conf.PARCEL_COMMON_KEY,\n maz_fc=out_fc,\n maz_id_field=prep_conf.MAZ_COMMON_KEY,\n taz_id_field=prep_conf.TAZ_COMMON_KEY,\n se_data=se_data,\n se_id_field=prep_conf.PARCEL_COMMON_KEY,\n agg_cols=prep_conf.MAZ_AGG_COLS,\n consolidations=prep_conf.MAZ_PAR_CONS,\n )\n # Fetch MAZ data (enrollments, etc.)\n print(\"--- fetching other base-year MAZ data\")\n maz_data = pd.read_csv(maz_se_data)\n maz_data.rename(columns=prep_conf.SERPM_RENAMES, inplace=True)\n # Consolidate\n maz_data = p_help.consolidate_cols(\n df=maz_data,\n base_fields=[prep_conf.MAZ_COMMON_KEY, prep_conf.TAZ_COMMON_KEY],\n consolidations=prep_conf.MAZ_SE_CONS,\n )\n # Patch for full regional MAZ data\n print(\"--- combining parcel-based and non-parcel-based MAZ data\")\n maz_data = p_help.patch_local_regional_maz(\n maz_par_df=par_data,\n maz_par_key=prep_conf.MAZ_COMMON_KEY,\n maz_df=maz_data,\n maz_key=prep_conf.MAZ_COMMON_KEY,\n )\n # Export MAZ table\n print(\"--- exporting MAZ socioeconomic/demographic data\")\n maz_table = make_path(out_gdb, \"EconDemog_MAZ\")\n\n check_overwrite_output(output=maz_table, overwrite=overwrite)\n df_to_table(maz_data, maz_table)\n\n # Summarize to TAZ scale\n print(\"--- summarizing MAZ data to TAZ scale\")\n maz_data.drop(columns=[prep_conf.MAZ_COMMON_KEY], inplace=True)\n taz_data = maz_data.groupby(prep_conf.TAZ_COMMON_KEY).sum().reset_index()\n # Export TAZ table\n print(\"--- exporting TAZ socioeconomic/demographic data\")\n taz_table = make_path(out_gdb, \"EconDemog_TAZ\")\n\n check_overwrite_output(output=taz_table, overwrite=overwrite)\n df_to_table(taz_data, taz_table)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set dialog proxies from proxies dict
|
def set_proxies(self, proxies):
if proxies:
protocols = ["http", "https", "ftp", "socks"]
for protocol in protocols:
entry_id = protocol + "_proxy_entry"
entry_widget = self.ui.get_object(entry_id)
port_id = protocol + "_proxy_port"
port_widget = self.ui.get_object(port_id)
try:
proxy = proxies[protocol]
proxy = proxy.replace('https://', '')
proxy = proxy.replace('http://', '')
host = proxy.split(':')[0]
port = proxy.split(':')[1]
entry_widget.set_text(host)
port_widget.set_text(port)
            except (IndexError, KeyError):
                # Proxy entry missing for this protocol, or the value is not in
                # "host:port" form; leave this protocol's widgets unchanged.
                pass
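# A minimal usage sketch; the proxies dict below mirrors the "host:port" values (with an
# optional http(s):// prefix) that this method expects, but the concrete addresses and the
# dialog instance are assumed for illustration only.
#
# proxies = {
#     "http": "http://10.0.0.1:8080",
#     "https": "10.0.0.1:8443",
#     "ftp": "10.0.0.1:2121",
#     "socks": "10.0.0.1:1080",
# }
# dialog.set_proxies(proxies)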
|
[
"def set_proxy(self, host, port):\n self.proxy = {\n 'host': host,\n 'port': port\n }",
"def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)",
"def update_all_proxy_attrs(self):\n for proxy in self.proxies:\n self.set_proxy_attrs(proxy)",
"def setdefaultproxy(proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype,addr,port,rdns,username,password)",
"def test_proxy_default_configs(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n self.assert_util(\n proxy.proxy_enable.is_checked,\n False\n )\n self.assert_util(\n proxy.dns_enable.is_checked,\n False\n )\n self.assert_util(\n proxy.type.get_value,\n \"http\"\n )\n self.assert_util(\n proxy.host.get_value,\n \"\"\n )\n self.assert_util(\n proxy.port.get_value,\n \"\"\n )\n self.assert_util(\n proxy.username.get_value,\n \"\"\n )\n self.assert_util(\n proxy.password.get_value,\n \"\"\n )",
"def __change_proxy_selenium(self):\n PROXY = \"35.162.25.177:3128\" # IP:PORT or HOST:PORT\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--proxy-server=http://%s' % PROXY)\n self.__driver = webdriver.Chrome(chrome_options=chrome_options)",
"def set_proxy(proxy: str) -> bool:\n resp = get_config()\n if not resp:\n return False\n data = resp[\"result\"]\n path = resp[\"path\"]\n data[\"proxy\"] = proxy\n with open(path, \"w\") as file:\n json.dump(data, file, sort_keys=True, indent=\"\")\n return True",
"def __init__(self, proxies=None):\n self._address_pool = set() if proxies is None else set(proxies)\n self._address_pool_cycle = cycle(self._address_pool)\n self.__load_proxies_from_file()",
"def proxies(self) -> dict:\n proxies = {}\n if (\n self.default_args.tc_proxy_host is not None\n and self.default_args.tc_proxy_port is not None\n ):\n\n if (\n self.default_args.tc_proxy_username is not None\n and self.default_args.tc_proxy_password is not None\n ):\n tc_proxy_username = quote(self.default_args.tc_proxy_username, safe='~')\n tc_proxy_password = quote(self.default_args.tc_proxy_password, safe='~')\n\n # proxy url with auth\n proxy_url = (\n f'{tc_proxy_username}:{tc_proxy_password}'\n f'@{self.default_args.tc_proxy_host}:{self.default_args.tc_proxy_port}'\n )\n else:\n # proxy url without auth\n proxy_url = f'{self.default_args.tc_proxy_host}:{self.default_args.tc_proxy_port}'\n proxies = {'http': f'http://{proxy_url}', 'https': f'http://{proxy_url}'}\n return proxies",
"def _delete_proxy(self, proxy):\n print \"except, remove proxy: \", proxy \n new_set = set(self.proxy_list)\n new_set.remove(proxy)\n self.proxy_list = list(new_set)",
"def set_proxy_bases(self, bases):\n self._proxy_bases = bases",
"def _delete_proxy(self, proxy_id:str) -> dict:\r\n params = {'f': 'json',\r\n 'proxies': proxy_id}\r\n url = \"%s/sharing/rest/content/users/%s/items/%s/deleteProxies\" % (self._portal.url,\r\n self._user_id,\r\n self.id)\r\n return self._portal.con.post(url, params)",
"def __configure_proxy_settings(self, proxy_config: collections.OrderedDict):\n\n if self._is_not_empty_string(\"address\", proxy_config):\n self.check_zap_result(\n result=self.get_zap.core.set_option_proxy_chain_name(\n string=str(proxy_config[\"address\"])\n ),\n method_name=\"set_option_proxy_chain_name\",\n )\n if self._is_not_empty_integer(\"port\", proxy_config):\n self.check_zap_result(\n result=self.get_zap.core.set_option_proxy_chain_port(\n integer=str(proxy_config[\"port\"])\n ),\n method_name=\"set_option_proxy_chain_port\",\n )\n if \"skipProxyAddresses\" in proxy_config and (\n proxy_config[\"skipProxyAddresses\"] is not None\n ):\n logging.debug(\n \"Disabling all possible pre existing proxy excluded domains before adding new ones.\"\n )\n self.check_zap_result(\n result=self.get_zap.core.disable_all_proxy_chain_excluded_domains(),\n method_name=\"add_proxy_chain_excluded_domain\",\n )\n for address in proxy_config[\"skipProxyAddresses\"]:\n logging.debug(\n \"Excluding (skip) address '%s' from global proxy setting\", address\n )\n self.check_zap_result(\n result=self.get_zap.core.add_proxy_chain_excluded_domain(\n value=address, isregex=True, isenabled=True\n ),\n method_name=\"add_proxy_chain_excluded_domain\",\n )",
"def update_proxy_pool(self):\n proxy_list = []\n try:\n resp = requests.get(self.url)\n except ConnectionError as ce:\n print(ce)\n return(1)\n soup = bs(resp.text, \"html.parser\")\n proxy_table = soup.find_all(id='proxylisttable')\n for tr in proxy_table[0].find_all('tbody')[0].find_all('tr'):\n td = tr.find_all('td')\n proxy_list.append({\n 'ip': td[0].text,\n 'port': td[1].text,\n 'anonymity': td[4].text.upper(),\n 'https': td[6].text\n })\n self._data_frame = pd.DataFrame(proxy_list)",
"def set_proxy_credentials(self, username, password):\n self._set_proxy_credentials(username.encode(), password.encode())",
"def set_proxy_host(self, proxy_host):\n CheckValue.check_str(proxy_host, 'proxy_host')\n self._proxy_host = proxy_host\n return self",
"def set_http_proxy(self, proxy_url):\r\n result = self._parse_proxy_url(proxy_url=proxy_url)\r\n scheme = result[0]\r\n host = result[1]\r\n port = result[2]\r\n username = result[3]\r\n password = result[4]\r\n\r\n self.proxy_scheme = scheme\r\n self.proxy_host = host\r\n self.proxy_port = port\r\n self.proxy_username = username\r\n self.proxy_password = password\r\n self.http_proxy_used = True\r\n\r\n self._setup_http_proxy()",
"def _set_networkProxySetting(self, *args) -> \"bool\" :\n return _core.NetworkPreferences__set_networkProxySetting(self, *args)",
"def alter_proxy(proxy):\n # Default case where 'proxy' key is not set -- do nothing\n proxy_value = proxy.lower()\n # python-swift client takes into account both\n # upper and lower case proxies so clear them all\n os.environ.pop(\"http_proxy\", None)\n os.environ.pop(\"https_proxy\", None)\n os.environ.pop(\"HTTP_PROXY\", None)\n os.environ.pop(\"HTTPS_PROXY\", None)\n if proxy_value.startswith('http://') or \\\n proxy_value.startswith('https://'):\n LOG.info('Using proxy {0}'.format(proxy_value))\n os.environ['HTTP_PROXY'] = str(proxy_value)\n os.environ['HTTPS_PROXY'] = str(proxy_value)\n else:\n raise Exception('Proxy has unknown scheme')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create weighted box for set of boxes
|
def get_weighted_box(boxes, conf_type='avg'):
box = np.zeros(6, dtype=np.float32)
conf = 0
conf_list = []
for b in boxes:
box[2:] += (b[1] * b[2:])
conf += b[1]
conf_list.append(b[1])
box[0] = boxes[0][0]
if conf_type == 'avg':
box[1] = conf / len(boxes)
elif conf_type == 'max':
box[1] = np.array(conf_list).max()
box[2:] /= conf
return box
|
[
"def scale_bbox(self, boxes, old_width, new_width):\n boxes = copy.deepcopy(boxes)\n scale_percent = new_width / old_width\n for b in boxes:\n b.xmin = int(b.xmin * scale_percent)\n b.ymin = int(b.ymin * scale_percent)\n b.xmax = int(b.xmax * scale_percent)\n b.ymax = int(b.ymax * scale_percent)\n return boxes",
"def generate_collection_boxes():\n \n collection_boxes = dict()\n # Define rivers (automatically defines masks and areas)\n # Start with defining the new regions to put the water\n \n # USA East Coast\n collection_boxes[\"USECoast1\"] = Box(37, 46, -70, -52)\n collection_boxes[\"USECoast2\"] = Box(32, 37, -70, -65)\n collection_boxes[\"USECoast3\"] = Box(28.75, 40, -81, -70)\n collection_boxes[\"USECoast4\"] = Box(40, 46, -52, -48)\n collection_boxes[\"USECoast5\"] = Box(46, 50, -66, -58)\n collection_boxes[\"USECoast6\"] = Box(40, 46, -48, -46) # New One, only for catching\n \n # Greenland Arctic\n collection_boxes[\"GrArc1\"] = Box(81, 88, 279.5, 346)\n # North American Arctic\n collection_boxes[\"NAMArc1\"] = Box(78, 86, 271, 279.5)\n collection_boxes[\"NAMArc2\"] = Box(68.75, 86, 246, 271)\n collection_boxes[\"NAMArc3\"] = Box(60, 82, 233, 246)\n collection_boxes[\"NAMArc4\"] = Box(60, 80, 191, 233)\n collection_boxes[\"NAMArc5\"] = Box(55, 68.75, 250, 264.375) # only for catching the water, not for spreading it\n collection_boxes[\"NWTerr1\"] = Box(55, 60, 235, 246) # only for catching the water\n collection_boxes[\"NWTerr2\"] = Box(55, 66, 246, 250) # not for spreading it\n # Great Lakes # Can decide which spreading box to add this to\n collection_boxes[\"GrLakes1\"] = Box(43, 48.75, -90, -72) # only for catching the water, not for spreading it\n # Gulf of Mexico\n collection_boxes[\"GoM1\"] = Box(17.7, 28.75, -96.3, -80)\n # East Pacific\n collection_boxes[\"EPac1\"] = Box(50, 60, 191, 215.5)\n collection_boxes[\"EPac2\"] = Box(50, 60, 215.5, 225.5)\n collection_boxes[\"EPac3\"] = Box(38.5, 60, 225.5, 234.5)\n collection_boxes[\"EPac4\"] = Box(33.75, 38.5, 230, 260)\n collection_boxes[\"EPac5\"] = Box(28.5, 33.75, 234.5, 260)\n # Russia Pacific\n collection_boxes[\"RussPac1\"] = Box(58, 68, 178, 191)\n # Labrador Sea & Baffin Bay\n collection_boxes[\"BafLab1\"] = Box(68.75, 80, 275, 317)\n collection_boxes[\"BafLab2\"] = Box(50, 68.75, 294.25, 317)\n collection_boxes[\"BafLab3\"] = Box(46, 50, 305.75, 317)\n collection_boxes[\"HudBay1\"] = Box(48.75, 68.75, 264.375, 294.375) # only for catching the water\n collection_boxes[\"HudBay2\"] = Box(51, 54, 260, 264.375) # not for spreading it\n # Atlantic Greenland Iceland\n collection_boxes[\"AtlGr1\"] = Box(58, 71.25, 317, 337.25)\n collection_boxes[\"AtlGr2\"] = Box(62.5, 63.75, 337.25, 339.5)\n # E Greenland & Iceland\n collection_boxes[\"EGrIce1\"] = Box(63.75, 81, 337.25, 346)\n collection_boxes[\"EGrIce2\"] = Box(68.75, 83, 346, 357)\n # E Iceland\n collection_boxes[\"EIceland1\"] = Box(63.75, 68.75, 346, 351)\n # UK Atlantic\n collection_boxes[\"UKAtl1\"] = Box(46, 62.5, 346.75, 360)\n # Eurasian GIN Seas\n collection_boxes[\"EurGIN1\"] = Box(60, 80, 3, 9.5)\n collection_boxes[\"EurGIN2\"] = Box(68, 78, 9.5, 24.375)\n collection_boxes[\"EurGIN3\"] = Box(60, 68, 0, 16)\n collection_boxes[\"EurGIN4\"] = Box(50, 60, 0, 13)\n collection_boxes[\"EurGIN5\"] = Box(66.25, 68, 16, 24.375)\n collection_boxes[\"EurGIN6\"] = Box(60, 80, 0., 3) # New one, only for catching\n collection_boxes[\"Baltic1\"] = Box(50, 60.0, 13, 30) # only for catching the water\n collection_boxes[\"Baltic2\"] = Box(60, 66.25, 16, 38) # not for spreading\n # South Iceland\n collection_boxes[\"SIceland1\"] = Box(60, 63.75, 339.5, 346.75)\n # Siberian Arctic\n collection_boxes[\"SibArc1\"] = Box(68, 82, 173, 191)\n collection_boxes[\"SibArc2\"] = Box(68, 82, 114.5, 173) # New One\n # Eurasian Arctic\n collection_boxes[\"EurArc1\"] = Box(78, 86, 9.5, 114.5)\n 
collection_boxes[\"EurArc2\"] = Box(66.25, 78, 24.375, 114.5)\n collection_boxes[\"EurArc3\"] = Box(80, 86, 0, 9) # New One - only for catching\n # Mediterranean\n collection_boxes[\"Med1\"] = Box(29, 40, 0, 41.5)\n collection_boxes[\"Med2\"] = Box(40, 45, 0, 24)\n collection_boxes[\"BlckSea1\"] = Box(40, 50, 26, 42) # only for catching the water, not for spreading it\n collection_boxes[\"CaspSea1\"] = Box(35, 50, 46, 55) # NEW ONE , only for catching\n # Patagonia Atlantic\n collection_boxes[\"PatAtl1\"] = Box(-56.25, -40.0, 290.5, 305)\n # Patagonia Pacific\n collection_boxes[\"PatPac1\"] = Box(-57.5, -36, 282, 290.5)\n collection_boxes[\"PatPac2\"] = Box(-57.5, -56.25, 282, 294.5)\n # New Zealand (South)\n collection_boxes[\"SNZPac1\"] = Box(-47.5, -43.75, 167, 176)\n # New Zealand (North)\n collection_boxes[\"NNZPac1\"] = Box(-43.75, -39, 165, 174.25)\n # Antarctic Ross Sea\n collection_boxes[\"AARos1\"] = Box(-90.0, -68.0, 167.0, 239.0)\n # Antarctic Amundsen Sea\n collection_boxes[\"AAAmund\"] = Box(-90.0, -60.0, 239.0, 297.0)\n # Antarctic Weddell Sea\n collection_boxes[\"AAWeddell\"] = Box(-90.0, -60.0, 297.0, 360.0)\n # Antarctic Riiser-Larson Sea\n collection_boxes[\"AARiiLar\"] = Box(-90.0, -60.0, 0.0, 59)\n # Antarctic Davis Sea\n collection_boxes[\"AADavis\"] = Box(-90.0, -60.0, 59.0, 167.0)\n \n return collection_boxes",
"def box(filter_size=3, show_verbose=False):\n\n return Bx, x",
"def paper_needed(box):\n side_areas = [x*y for (x,y) in sides(parse_dimensions(box))]\n extra = min(side_areas)\n return sum(side_areas) + extra",
"def build_box(atoms, timestep, radius, center, use_atomic_volume, average_on_atom, bx, by, bz):\n box = Box(timestep, radius=radius, center=center, use_atomic_volume=use_atomic_volume, average_on_atom=average_on_atom)\n for atom in atoms:\n box.add_atom(atom)\n box.set_boundary(bx=bx, by=by, bz=bz)\n box.measure()\n return box",
"def shelf():\r\n\r\n return box(pos=vector(0, 0.1/2, 0), size=vector(2, 0.1, 2), color=color.white)",
"def box_size(boxes: Tensor) -> Tensor:\n dists = []\n dists.append(boxes[:, 2] - boxes[:, 0])\n dists.append(boxes[:, 3] - boxes[:, 1])\n if boxes.shape[1] // 2 == 3:\n dists.append(boxes[:, 5] - boxes[:, 4])\n return torch.stack(dists, axis=1)",
"def addBox(self, indx, box):\n (x, y, width) = box\n data = self.shapesTable.get(indx, None)\n if data is not None:\n data.boxes.append((x, y, width))\n data.area += width\n else:\n lb = [(x, y, width)]\n self.shapesTable[indx] = Polygonize.Data(lb, width)",
"def box_create(cls, name: Expr, size: Expr) -> Expr:\n return BoxCreate(name, size)",
"def scale_boxes(boxes, image_shape):\n\n #nprint(\"image_shape = {}\".format(image_shape))\n height = float(image_shape[0])\n width = float(image_shape[1])\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes",
"def box_type_1(self, X, Y, name, ident, box_width, box_height):\r\n boxW2 = box_width / 2\r\n boxH2 = box_height / 2\r\n x0, y0 = X - boxW2, Y - boxH2 # Top_left of box\r\n x1, y1 = X + boxW2, Y + boxH2 # Bottom_right of box\r\n width = x1 - x0\r\n height = y1 - y0\r\n \r\n box = gui.SvgRectangle(x0, y0, width, height)\r\n box.set_stroke(width=2, color='black')\r\n box.set_fill(color='yellow')\r\n box_name = gui.SvgText(X, Y, name)\r\n box_name.attributes['text-anchor'] = 'middle'\r\n box_id = gui.SvgText(X, Y + 15, str(ident))\r\n box_id.attributes['text-anchor'] = 'middle'\r\n self.sheet.append([box, box_name, box_id])\r\n\r\n mid_north = [X, Y - boxH2]\r\n mid_south = [X, Y + boxH2]\r\n mid_east = [X + boxW2, Y]\r\n mid_west = [X - boxW2, Y]\r\n\r\n return mid_north, mid_south, mid_east, mid_west",
"def get_boxes(trees_inside: list, width: int, height: int):\n \n width_factor = 2\n height_factor = 5.6\n \n\n bboxes = []\n for tree in trees_inside:\n print(\"tree value\", tree)\n depth = tree[1]\n treeoffset = tree[0]\n \n x0 = width * treeoffset\n x1 = x0 - width/(width_factor * depth)\n if x1 > width:\n x1 = width\n if x1 < 0:\n x1 = 0\n\n y0 = (depth / height) * height * height_factor\n y1 = y0 + height*height_factor/depth\n if y1 > height:\n y1 = height\n\n bboxes.append([int(x0),int(y0),int(x1),int(y1)]) #x0,x1,y0,y1\n\n return bboxes",
"def make_box(world,width,depth,height,wall_thickness=0.005,mass=float('inf')):\n left = primitives.box(wall_thickness,depth,height,[-width*0.5-wall_thickness*0.5,0,height*0.5])\n right = primitives.box(wall_thickness,depth,height,[width*0.5+wall_thickness*0.5,0,height*0.5])\n front = primitives.box(width,wall_thickness,height,[0,-depth*0.5-wall_thickness*0.5,height*0.5])\n back = primitives.box(width,wall_thickness,height,[0,depth*0.5+wall_thickness*0.5,height*0.5])\n bottom = primitives.box(width,depth,wall_thickness,[0,0,wall_thickness*0.5])\n boxgeom = Geometry3D()\n boxgeom.setGroup()\n for i,elem in enumerate([left,right,front,back,bottom]):\n boxgeom.setElement(i,elem)\n if mass != float('inf'):\n print(\"Making open-top box a rigid object\")\n bmass = Mass()\n bmass.setMass(mass)\n bmass.setCom([0,0,height*0.3])\n bmass.setInertia([width/12,depth/12,height/12])\n box = world.makeRigidObject(\"box\")\n box.geometry().set(boxgeom)\n box.appearance().setColor(0.6,0.3,0.2,1.0)\n box.setMass(bmass)\n return box\n else:\n print(\"Making open-top box a terrain\")\n box = world.makeTerrain(\"box\")\n box.geometry().set(boxgeom)\n box.appearance().setColor(0.6,0.3,0.2,1.0)\n return box",
"def boxes_cxcywh(self):\n boxes = self.boxes\n if self.is_boxes:\n boxes = (boxes[:, 0::2].mean(1), boxes[:, 1::2].mean(1),\n boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1])\n boxes = np.vstack(boxes).T\n return boxes",
"def __create_random_box(training_geometry, geotransform, box_dim, num_trials):\n mbr = training_geometry.GetEnvelope()\n minx = mbr[0]\n maxx = mbr[1]\n miny = mbr[2]\n maxy = mbr[3]\n \n cell_width = geotransform[1]\n cell_height = geotransform[5]\n\n trial_num = 0\n while trial_num < num_trials: \n rand_lx = random.uniform(minx, maxx) # left x\n rand_uy = random.uniform(miny, maxy) # upper y\n rx = rand_lx + (box_dim * cell_width) # right x\n ly = rand_uy + (box_dim * cell_height) # lower y (remember that cell height is negative)\n wkt_box = \"POLYGON ((%f %f, %f %f, %f %f, %f %f, %f %f))\" % (rand_lx, rand_uy, rand_lx, ly, rx, ly, rx, rand_uy, rand_lx, rand_uy)\n training_box_geom = ogr.CreateGeometryFromWkt(wkt_box)\n if training_geometry.Contains(training_box_geom):\n box_info = [[rand_lx, rand_uy], box_dim]\n return box_info, training_box_geom\n trial_num += 1\n return None, None",
"def correct_boxes(height, width, boxes, aug_type='rotate', **kwargs):\n result = []\n boxes = np.asarray(boxes)\n\n w0 = (width - 0.5) / 2.0\n h0 = (height - 0.5) / 2.0\n for box in boxes:\n x1, y1, x2, y2, class_id = box\n rela_x0 = (x1 + x2) / float(width) / 2\n rela_y0 = (y1 + y2) / float(height) / 2\n rela_w0 = np.abs(x1 - x2) / float(width)\n rela_h0 = np.abs(y1 - y2) / float(height)\n\n if aug_type == 'rotate':\n '''\n as normal, formula for Coordinate point rotation is :\n x_new = (x - w0) * np.cos(angel) - (y - h0) * np.sin(angel) + w0\n y_new = (x - w0) * np.sin(angel) + (y - h0) * np.cos(angel) + h0\n but in our case, the first quadrant should be changed into the forth quadrant in morphology fields.\n '''\n\n angel = kwargs.get('angel', 0)\n angel = angel * 2 * np.pi / 360\n\n fxy = lambda x, y: [(x - w0) * np.cos(angel) - (-y - -h0) * np.sin(angel) + w0,\n -((x - w0) * np.sin(angel) + (-y - -h0) * np.cos(angel) + -h0)]\n\n x11, y11 = fxy(x1, y1)\n x22, y22 = fxy(x2, y2)\n x33, y33 = fxy(x2, y1)\n x44, y44 = fxy(x1, y2)\n\n new_x1 = np.round(np.min([x11, x22, x33, x44])).astype(int)\n new_x2 = np.round(np.max([x11, x22, x33, x44])).astype(int)\n new_y1 = np.round(np.min([y11, y22, y33, y44])).astype(int)\n new_y2 = np.round(np.max([y11, y22, y33, y44])).astype(int)\n\n new_x1 = np.max([0, new_x1])\n new_x2 = np.min([width, new_x2])\n new_y1 = np.max([0, new_y1])\n new_y2 = np.min([height, new_y2])\n\n result.append([new_x1, new_y1, new_x2, new_y2, class_id])\n\n elif aug_type == 'flip':\n if kwargs.get('flip_code', 1) == 1:\n new_x1 = width - x2\n new_x2 = width - x1\n new_y1 = y1\n new_y2 = y2\n elif kwargs.get('flip_code', 0) == 0:\n new_y1 = height - y2\n new_y2 = height - y1\n new_x1 = x1\n new_x2 = x2\n elif kwargs.get('flip_code', -1) == -1:\n new_x1 = width - x2\n new_x2 = width - x1\n new_y1 = height - y2\n new_y2 = height - y1\n result.append([new_x1, new_y1, new_x2, new_y2, class_id])\n\n elif aug_type == 'resize':\n new_h, new_w = kwargs.get('new_h'), kwargs.get('new_w')\n bg_h, bg_w = kwargs.get('bg_h'), kwargs.get('bg_w')\n\n dh = (bg_h - new_h) / 2.0\n dw = (bg_w - new_w) / 2.0\n\n abs_new_x0 = new_w * rela_x0\n abs_new_y0 = new_h * rela_y0\n abs_new_w0 = new_w * rela_w0\n abs_new_h0 = new_h * rela_h0\n\n if dh >= 0 and dw >= 0:\n new_x1 = abs_new_x0 - abs_new_w0 / 2.0 + dw\n new_x2 = abs_new_x0 + abs_new_w0 / 2.0 + dw\n new_y1 = abs_new_y0 - abs_new_h0 / 2.0 + dh\n new_y2 = abs_new_y0 + abs_new_h0 / 2.0 + dh\n new_x1 = np.max([dw, new_x1])\n new_x2 = np.min([dw + new_w, new_x2])\n new_y1 = np.max([dh, new_y1])\n new_y2 = np.min([dh + new_h, new_y2])\n\n elif dh < 0 and dw >= 0:\n new_x1 = abs_new_x0 - abs_new_w0 / 2.0 + dw\n new_x2 = abs_new_x0 + abs_new_w0 / 2.0 + dw\n new_y1 = abs_new_y0 + dh - abs_new_h0 / 2.0\n new_y2 = new_y1 + abs_new_h0\n new_y1 = np.max([dh, new_y1])\n new_y2 = np.min([dh + new_h, new_y2])\n\n elif dh >= 0 and dw < 0:\n new_x1 = abs_new_x0 + dw - abs_new_w0 / 2.0\n new_x2 = new_x1 + abs_new_w0\n new_y1 = abs_new_y0 - abs_new_h0 / 2.0 + dh\n new_y2 = abs_new_y0 + abs_new_h0 / 2.0 + dh\n new_x1 = np.max([dw, new_x1])\n new_x2 = np.min([dw + new_w, new_x2])\n\n else:\n new_x1 = abs_new_x0 + dw - abs_new_w0 / 2.0\n new_x2 = new_x1 + abs_new_w0\n new_y1 = abs_new_y0 + dh - abs_new_h0 / 2.0\n new_y2 = new_y1 + abs_new_h0\n\n new_x1 = np.max([0, new_x1])\n new_x2 = np.min([new_x2, bg_w - 1])\n new_y1 = np.max([0, new_y1])\n new_y2 = np.min([new_y2, bg_h - 1])\n if new_x1 >= bg_w or new_y1 >= bg_h:\n continue\n result.append([new_x1, new_y1, 
new_x2, new_y2, class_id])\n\n return np.asarray(result, dtype=int)",
"def calc_receptive_boxes(height, width):\n\n rf, stride, padding = [196.0, 16.0, 90.0] # hardcoded for vgg-16 conv5_3\n\n x, y = torch.meshgrid(torch.arange(0, height), torch.arange(0, width))\n coordinates = torch.reshape(torch.stack([y, x], dim=2), [-1, 2])\n # [y,x,y,x]\n point_boxes = torch.cat([coordinates, coordinates], 1)\n bias = [-padding, -padding, -padding + rf - 1, -padding + rf - 1]\n rf_boxes = stride * point_boxes + torch.FloatTensor(bias)\n return rf_boxes",
"def count_boxes(packages: List[dict]) -> int:\n\n volume = sum([p[\"width\"]*p[\"length\"]*p[\"height\"] for p in packages])\n weight = sum([p[\"weight\"] for p in packages])\n\n return max(math.ceil(volume/BOX_VOLUME), math.ceil(weight/BOX_WEIGHT))",
"def Box(self, *args):\n return _Select3D.Select3D_SensitiveBox_Box(self, *args)",
"def split_boxes_rimwise(boxes, weights, nsplit):\n\tif len(boxes) < nsplit:\n\t\t# If we have fewer tods than processes, just assign one to each, and give empty\n\t\t# ones to the remainder\n\t\treturn [[[i]] for i in range(len(boxes))] + [[[]] for i in range(len(boxes),nsplit)]\n\tweights = np.asarray(weights)\n\t# Divide boxes into N groups with as equal weight as possible,\n\t# and as small bbox as possible\n\tn = len(boxes)\n\tgroups = []\n\t# Compute distance of every point from center. We will\n\t# start consuming points from edges\n\tcenters = np.mean(boxes,1)\n\tcenter_tot = np.mean(centers,0)\n\tcdist = calc_dist2(centers, center_tot[None])\n\ttotweight = np.sum(weights)\n\t# We keep track of which boxes have already been\n\t# processed via a mask.\n\tmask = np.full(n, True, dtype=np.bool)\n\tcumweight = 0\n\tfor gi in xrange(nsplit):\n\t\t# Compute the target weight for this group.\n\t\t# On average this should simply be totweight/nsplit,\n\t\t# but we adjust it on the fly to compensate for any\n\t\t# groups that end up deviating from this.\n\t\ttargweight = (totweight-cumweight)/(nsplit-gi)\n\t\tp = unmask(np.argmax(cdist[mask]),mask)\n\t\tmask[p] = False\n\t\t# Find distance of every point to this point. Ouch, this\n\t\t# makes the algorithm O(N^2) if one doesn't introduce gridding\n\t\tpdist = calc_dist2(centers[mask], centers[p,None])\n\t\tdinds = unmask(np.argsort(pdist),mask)\n\t\tcumw = np.cumsum(weights[dinds])\n\t\t# We will use as many of the closest points as\n\t\t# needed to reach the target weight, but not\n\t\t# so many that there aren't enough points left\n\t\t# for at least one per remaining mpi task.\n\t\tif gi == nsplit-1:\n\t\t\tnsel = None\n\t\telse:\n\t\t\tnsel = len(np.where(cumw < targweight)[0])\n\t\t\tnsel = max(0,min(nsel, np.sum(mask)-(nsplit-gi)))\n\t\tgroup = np.concatenate([[p],dinds[:nsel]])\n\t\tgroups.append([group])\n\t\tmask[group] = False\n\t\tcumweight += np.sum(weights[group])\n\treturn groups"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|