query (stringlengths 9 to 9.05k) | document (stringlengths 10 to 222k) | negatives (sequencelengths 19 to 20) | metadata (dict)
---|---|---|---|
Audit names in commit metadata. Names which do not have a first name and a surname are extremely uncommon and when present are therefore generally invalid. As we want people to use their actual name when committing we do some checks to make sure that what looks like an actual name is present. | def audit_names_in_metadata(self):
# Iterate over commits....
for commit in self.repository.commits.values():
for name in [ commit.committer_name, commit.author_name ]:
# Is the name whitelisted?
if name in self.FullNameWhitelist:
continue
# As a special case, allow the name 'GitHub' for certain repositories
if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:
self.__log_warning(commit.sha1, "Commit has username 'GitHub' (web merge of PR); allowing anyway")
continue
# Check to see if the name contains spaces - if not - it is probably misconfigured....
if " " not in name.strip():
self.__log_failure(commit.sha1, "Non-full name: " + name)
continue | [
"def sanitize_names(self):\n self.first_name = self._sanitize_name(self.first_name)\n self.last_name = self._sanitize_name(self.last_name)",
"def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)",
"def validate_name(self):\n if self.first_name == \"\":\n self.reason += \" First name is empty \"\n if not bool(re.match('[a-zA-Z\\s]+$', self.first_name)):\n self.reason += \" FirstName should have Alphabets and Spaces \"\n if self.last_name == \"\":\n self.reason += \" Last name is empty \"\n if not bool(re.match('[a-zA-Z\\s]+$', self.last_name)):\n self.reason += \" LastName should have Alphabets and Spaces \"",
"def _maybe_set_name(self) -> None:\n if not self.name:\n if isinstance(self.github, dict):\n if self.github.get(\"commit\"):\n self.name = f\"{self.reason}: {self.github['commit']}\"",
"def test_first_last_name(self):\n\n formatted_name = name_function1.get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')",
"def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')",
"def convert_name(self, human_name):\n\n human_name = HumanName(human_name)\n if human_name.suffix:\n self.metadata[\"gutenberg_name_suffix\"] = human_name.suffix\n human_name.suffix = \"\"\n if human_name.nickname:\n # LOGGER.debug(\"%s nickname: %s\", str(human_name), human_name.nickname)\n no_nickname = copy.copy(human_name)\n no_nickname.nickname = \"\"\n first_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.first, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.first,\n # re.UNICODE\n # ),\n # human_name.nickname\n # )\n if first_name_match and len(first_name_match.group(0)) >= len(human_name.first):\n human_name.first = first_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.first):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"] = set([str(no_nickname)])\n middle_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.middle, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.middle, re.UNICODE\n # ),\n # human_name.nickname\n # )\n if middle_name_match and len(middle_name_match.group(0)) >= len(human_name.middle):\n human_name.middle = middle_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.middle):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"].add(str(no_nickname))\n return human_name",
"def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name",
"def verify_name(data):\n assert data['Name'] == \"Carbon credits\", \"Name is Incorrect\"",
"def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name(\"Janis\", \"joplin\")\n\t\t#Test if the result of formatted_name is the same as 'Janis Joplin'\n\t\tself.assertEqual(formatted_name, 'Janis Joplin')",
"def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')",
"def audit_city_name(city_names, city_name):",
"def test_name_validate_last_name(self):\n pass",
"def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last",
"def series_statement_added_entry_personal_name(self, key, value):\n indicator_map1 = {\"0\": \"Forename\", \"1\": \"Surname\", \"3\": \"Family name\"}\n indicator_map2 = {\n \"0\": \"Main entry not represented by pronoun\",\n \"1\": \"Main entry represented by pronoun\"}\n field_map = {\n 'p': 'name_of_part_section_of_a_work',\n '6': 'linkage',\n 'u': 'affiliation',\n 'b': 'numeration',\n '4': 'relator_code',\n 'x': 'international_standard_serial_number',\n 'n': 'number_of_part_section_of_a_work',\n 'a': 'personal_name',\n '8': 'field_link_and_sequence_number',\n 'k': 'form_subheading',\n 't': 'title_of_a_work',\n 'e': 'relator_term',\n 'l': 'language_of_a_work',\n 'c': 'titles_and_other_words_associated_with_a_name',\n 'g': 'miscellaneous_information',\n 'f': 'date_of_a_work',\n 'd': 'dates_associated_with_a_name',\n 'v': 'volume_sequential_designation',\n }\n\n order = utils.map_order(field_map, value)\n\n if key[3] in indicator_map1:\n order.append('type_of_personal_name_entry_element')\n\n if key[4] in indicator_map2:\n order.append('pronoun_represents_main_entry')\n\n return {\n '__order__': tuple(order) if len(order) else None,\n 'name_of_part_section_of_a_work': utils.force_list(\n value.get('p')\n ),\n 'linkage': value.get('6'),\n 'affiliation': value.get('u'),\n 'numeration': value.get('b'),\n 'relator_code': utils.force_list(\n value.get('4')\n ),\n 'international_standard_serial_number': value.get('x'),\n 'number_of_part_section_of_a_work': utils.force_list(\n value.get('n')\n ),\n 'personal_name': value.get('a'),\n 'field_link_and_sequence_number': utils.force_list(\n value.get('8')\n ),\n 'form_subheading': utils.force_list(\n value.get('k')\n ),\n 'title_of_a_work': value.get('t'),\n 'relator_term': utils.force_list(\n value.get('e')\n ),\n 'language_of_a_work': value.get('l'),\n 'titles_and_other_words_associated_with_a_name': utils.force_list(\n value.get('c')\n ),\n 'miscellaneous_information': value.get('g'),\n 'date_of_a_work': value.get('f'),\n 'dates_associated_with_a_name': value.get('d'),\n 'volume_sequential_designation': value.get('v'),\n 'type_of_personal_name_entry_element': indicator_map1.get(key[3]),\n 'pronoun_represents_main_entry': indicator_map2.get(key[4]),\n }",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def test_first_last_name(self):\n formated_name = name_function.get_formated('janis', 'joplin', )\n self.assertEqual(formated_name, 'Janis Joplin')",
"def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'",
"def test_check_metadata_nameid_full_name():\n check = CheckTester(googlefonts_profile,\n \"com.google.fonts/check/metadata/nameid/full_name\")\n\n font = TEST_FILE(\"merriweather/Merriweather-Regular.ttf\")\n\n assert_PASS(check(font),\n 'with a good font...')\n\n # here we change the font.fullname on the METADATA.pb\n # to introduce a \"mismatch\" error condition:\n font_metadata = check['font_metadata']\n good = font_metadata.full_name\n font_metadata.full_name = good + \"bad-suffix\"\n\n assert_results_contain(check(font, {\"font_metadata\": font_metadata}),\n FAIL, 'mismatch',\n 'with mismatching fullname values...')\n\n # and restore the good value prior to the next test case:\n font_metadata.full_name = good\n\n # And here we remove all FULL_FONT_NAME entries\n # in order to get a \"lacks-entry\" error condition:\n ttFont = check['ttFont']\n for i, name in enumerate(ttFont[\"name\"].names):\n if name.nameID == NameID.FULL_FONT_NAME:\n del ttFont[\"name\"].names[i]\n assert_results_contain(check(ttFont),\n FAIL, 'lacks-entry',\n 'when a font lacks FULL_FONT_NAME entries in its name table...')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
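A minimal, standalone sketch of the full-name heuristic used in the row above: a name passes if it is whitelisted or contains at least one space (first name plus surname). The function name and whitelist entries here are illustrative, not part of the dataset, whose document implements this as a method on a repository auditor class.

```python
# Illustrative sketch only; the row's document does this inside an auditor
# class with its own FullNameWhitelist and per-commit logging.
FULL_NAME_WHITELIST = {"GitHub"}  # hypothetical entries

def looks_like_full_name(name: str) -> bool:
    """Return True if the name is whitelisted or looks like 'First Last'."""
    name = name.strip()
    return name in FULL_NAME_WHITELIST or " " in name

if __name__ == "__main__":
    for candidate in ("Jane Doe", "jdoe", "GitHub"):
        print(candidate, "->", looks_like_full_name(candidate))
```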
Audit commit metadata. Invalid hostnames such as localhost or (none) will be caught by this auditor. This will ensure that invalid email addresses or users will not show up in commits. | def audit_emails_in_metadata(self):
# Iterate over commits....
disallowed_domains = ["localhost", "localhost.localdomain", "(none)", "bombardier.com", "rail.bombardier.com"]
for commit in self.repository.commits.values():
for email_address in [ commit.committer_email, commit.author_email ]:
# Extract the email address, and reject them if extraction fails....
extraction = re.match("^(\S+)@(\S+)$", email_address)
if not extraction:
self.__log_failure(commit.sha1, "Seemingly invalid email address: " + email_address)
continue
# Don't allow domains which are disallowed...
domain = extraction.group(2)
if domain in disallowed_domains:
self.__log_failure(commit.sha1, "Email address using a blocked domain: " + email_address)
continue
# Ensure they have a valid MX/A entry in DNS....
try:
dns.resolver.query(domain, "MX")
except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):
try:
dns.resolver.query(domain, "A")
except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):
self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address) | [
"def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue",
"def format_commit(cls, commit):\n return dict(\n author=commit.get('author') or '<no author>',\n committed=commit.get('committed'),\n message=remove_tags(commit.get('message') or ''),\n )",
"def prepare_commit(self, commit):\n header = yaml.dump(commit.meta, default_flow_style=False)\n header += \"---\\n\"\n if commit.value is None:\n return bytes(header)\n else:\n return bytes(header) + bytes(commit.value)",
"def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''):\n if commit_url == '' and (commit_sha == '' and full_name == ''):\n raise BaseException('commit url could not be generated. Commit url, commit sha and full name not set')\n return None\n url = commit_url\n if url == '':\n url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name)\n url = self.get_full_url(url)\n\n json_data = loads(self.get_from_net(url))\n stats = {'additions': 0, 'deletions': 0}\n if 'stats' in json_data:\n stats['additions'] = json_data['stats']['additions']\n stats['deletions'] = json_data['stats']['deletions']\n\n return stats",
"def find_author_info(commit):\n committer = None\n if commit.committer is not None:\n committer = commit.committer.name or commit.committer.login\n git_author = commit.raw_data['commit']['author']['name']\n if commit.author is not None:\n author = commit.author.name or commit.author.login + f' ({git_author})'\n else:\n # Users that deleted their accounts will appear as None\n author = git_author\n return committer, author",
"async def audit(self, ctx):",
"def collect_maintainability(rows, commit, dataset):\n for i, row in rows[::-1].iterrows(): \n user = row['owner']\n project = row['project']\n \n if commit == 'regular': \n commit_sha = row['sha-reg']\n parent_commit_sha = row['sha-reg-p']\n else:\n commit_sha = row['sha']\n parent_commit_sha = row['sha-p']\n \n try:\n bch.robust_analyze_commit(user, project, commit_sha)\n bch.robust_analyze_commit(user, project, parent_commit_sha)\n except Exception as error:\n log.error(error)\n rows.at[i, 'ERROR'] = 'YES'\n log.error(\"Exception not expected\")\n if not isinstance(error, bch.BetterCodeHubException):\n import pdb; pdb.set_trace()\n log.error(f\"Skipping {user}/{project}.\")\n \n rows.to_csv(dataset, index=False)",
"def commit_names(self, commit):\n return []",
"def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)",
"def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )",
"def test_ignored_metadata_git(self):\n helpers.execute_python([self.script, self.input_file,\n self.output_file])\n log, _ = helpers.get_log(recipyenv.get_recipydb())\n keys = [\"gitrepo\", \"gitorigin\", \"gitcommit\"]\n for key in keys:\n assert key in log, (\"Expected \" + key + \" in log\")\n recipyrc = recipyenv.get_recipyrc()\n helpers.update_recipyrc(recipyrc, \"ignored metadata\", \"git\")\n helpers.execute_python([self.script, self.input_file,\n self.output_file])\n log, _ = helpers.get_log(recipyenv.get_recipydb())\n for key in keys:\n assert key not in log, (\"Unexpected \" + key + \" in log\")",
"def make_log_entries(commits, git_repo):\n entries = []\n # Add header\n author = git_repo.get_author_info()\n entries.append(\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email, get_version(git_repo,\n commits[0])))\n for commit in commits:\n commit_info = git_repo.get_commit_info(commit)\n entries.append(\"- %s\" % commit_info[\"subject\"])\n return entries",
"def git_show_commit(self, commit):\n details = self.repo.git.show(commit)\n return str(details)",
"def get_commit_info(commits):\n args = [\n 'git',\n 'log',\n '--topo-order',\n '--format=%H\\t%p\\t%s',\n '--no-walk',\n ]\n args.extend(commits)\n\n err, output = get_output(args)\n if err:\n raise AbortError('error querying for commits to review:\\n%s' % output)\n\n commits = []\n for i, line in enumerate(output.splitlines()):\n fields = line.split('\\t', 3)\n commit, parents, summary = fields\n parents = parents.split()\n\n if len(parents) > 1:\n raise AbortError('merge commits are not reviewable: %s' %\n commit[0:7])\n\n commits.append((commit, summary))\n\n return reversed(commits)",
"def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors",
"def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)",
"def FakeCommitAsDict(commit_self):\n git_hash = commit_self.git_hash\n n = git_hash[len('git_hash_'):]\n return {\n 'repository': 'chromium',\n 'git_hash': git_hash,\n 'url': 'https://example.com/repository/+/' + git_hash,\n 'author': 'author%s@chromium.org' % (n,),\n 'subject': 'Subject.',\n 'message': 'Subject.\\n\\nCommit message.',\n }",
"def blame(self, rev, file):\r\n data = self.git.blame(rev, '--', file, p=True)\r\n commits = dict()\r\n blames = list()\r\n info = None\r\n\r\n for line in data.splitlines(False):\r\n parts = self.re_whitespace.split(line, 1)\r\n firstpart = parts[0]\r\n if self.re_hexsha_only.search(firstpart):\r\n # handles \r\n # 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start\r\n # 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates another line of blame with the same data\r\n digits = parts[-1].split(\" \")\r\n if len(digits) == 3:\r\n info = {'id': firstpart}\r\n blames.append([None, []])\r\n elif info['id'] != firstpart:\r\n info = {'id': firstpart}\r\n blames.append([commits.get(firstpart), []])\r\n # END blame data initialization\r\n else:\r\n m = self.re_author_committer_start.search(firstpart)\r\n if m:\r\n # handles: \r\n # author Tom Preston-Werner\r\n # author-mail <tom@mojombo.com>\r\n # author-time 1192271832\r\n # author-tz -0700\r\n # committer Tom Preston-Werner\r\n # committer-mail <tom@mojombo.com>\r\n # committer-time 1192271832\r\n # committer-tz -0700 - IGNORED BY US\r\n role = m.group(0)\r\n if firstpart.endswith('-mail'):\r\n info[\"%s_email\" % role] = parts[-1]\r\n elif firstpart.endswith('-time'):\r\n info[\"%s_date\" % role] = int(parts[-1])\r\n elif role == firstpart:\r\n info[role] = parts[-1]\r\n # END distinguish mail,time,name\r\n else:\r\n # handle\r\n # filename lib/grit.rb\r\n # summary add Blob\r\n # <and rest>\r\n if firstpart.startswith('filename'):\r\n info['filename'] = parts[-1]\r\n elif firstpart.startswith('summary'):\r\n info['summary'] = parts[-1]\r\n elif firstpart == '':\r\n if info:\r\n sha = info['id']\r\n c = commits.get(sha)\r\n if c is None:\r\n c = Commit( self, hex_to_bin(sha),\r\n author=Actor._from_string(info['author'] + ' ' + info['author_email']),\r\n authored_date=info['author_date'],\r\n committer=Actor._from_string(info['committer'] + ' ' + info['committer_email']),\r\n committed_date=info['committer_date'],\r\n message=info['summary'])\r\n commits[sha] = c\r\n # END if commit objects needs initial creation\r\n m = self.re_tab_full_line.search(line)\r\n text, = m.groups()\r\n blames[-1][0] = c\r\n blames[-1][1].append( text )\r\n info = {'id': sha}\r\n # END if we collected commit info\r\n # END distinguish filename,summary,rest\r\n # END distinguish author|committer vs filename,summary,rest\r\n # END distinguish hexsha vs other information\r\n return blames",
"def committer_email(self, committer_email):\n self._committer_email = committer_email"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
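The row above validates the domain of each commit email address by trying an MX lookup and falling back to an A lookup. Below is a simplified, standalone sketch of that fallback; it assumes dnspython 2.x is installed (the original, older code calls `dns.resolver.query()` rather than `resolve()`), and unlike the original it does not fail fast when the MX step hits NXDOMAIN.

```python
# Simplified sketch of the MX-then-A check; assumes `pip install dnspython`.
import dns.exception
import dns.resolver

def domain_accepts_mail(domain: str) -> bool:
    """Return True if the domain has an MX record, or failing that an A record."""
    for rdtype in ("MX", "A"):
        try:
            dns.resolver.resolve(domain, rdtype)
            return True
        except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
                dns.resolver.NoNameservers, dns.exception.Timeout):
            continue
    return False

if __name__ == "__main__":
    # Network-dependent: expected to print True for a real mail domain.
    print(domain_accepts_mail("kde.org"))
```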
Helper function to construct an address header for emails as Python stuffs it up | def address_header(self, name, email):
fixed_name = Header( name ).encode()
return unicode("{0} <{1}>").format(fixed_name, email) | [
"def construct_from_header():\n\n # The tenant properties will not be set if the call to this method\n # does not come via a django request => we need to setup the tenant\n # properties first.\n # properties.tenant_properties will be an empty dict if the tenant\n # properties has not be initialised yet.\n mail_address = properties.TENANT_MAIL_PROPERTIES.get('address')\n mail_name = properties.TENANT_MAIL_PROPERTIES.get('sender')\n if not mail_name:\n mail_name = properties.TENANT_MAIL_PROPERTIES.get('name')\n if not mail_name:\n mail_name = mail_address\n\n if not mail_address:\n return None\n\n return \"{0} <{1}>\".format(mail_name, mail_address)",
"def encode_rfc2822_address_header(header_text):\n def encode_addr(addr):\n name, email = addr\n # If s is a <text string>, then charset is a hint specifying the\n # character set of the characters in the string. The Unicode string\n # will be encoded using the following charsets in order: us-ascii,\n # the charset hint, utf-8. The first character set to not provoke a\n # UnicodeError is used.\n # -> always pass a text string to Header\n\n # also Header.__str__ in Python 3 \"Returns an approximation of the\n # Header as a string, using an unlimited line length.\", the old one\n # was \"A synonym for Header.encode().\" so call encode() directly?\n name = Header(pycompat.to_text(name)).encode()\n # if the from does not follow the (name <addr>),* convention, we might\n # try to encode meaningless strings as address, as getaddresses is naive\n # note it would also fail on real addresses with non-ascii characters\n try:\n return formataddr((name, email))\n except UnicodeEncodeError:\n _logger.warning(_('Failed to encode the address %s\\n'\n 'from mail header:\\n%s') % (addr, header_text))\n return \"\"\n\n addresses = getaddresses([pycompat.to_text(ustr(header_text))])\n return COMMASPACE.join(a for a in (encode_addr(addr) for addr in addresses) if a)",
"def generate_address_header(\n addr: LcnAddr, local_seg_id: int, wants_ack: bool\n ) -> str:\n return (\n \">\"\n f\"{'G' if addr.is_group else 'M'}\"\n f\"{addr.get_physical_seg_id(local_seg_id):03d}\"\n f\"{addr.addr_id:03d}\"\n f\"{'!' if wants_ack else '.'}\"\n )",
"def add_header(self, header, value):\n if not (header and value):\n raise ValueError('Header not provided!')\n if header.lower() == 'date':\n return False\n recipients_headers = ['to', 'cc', 'bcc']\n if header.lower() in recipients_headers or header.lower() == 'from':\n if not isinstance(value, list):\n value = [value]\n header_value = []\n for addr in value:\n # For each address in the recipients headers\n # Do the Header Object\n # PY3 works fine with Header(values, charset='utf-8')\n # PY2:\n # - Does not escape correctly the unicode values\n # - Must encode the display name as a HEADER\n # so the item is encoded properly\n # - The encoded display name and the address are joined\n # into the Header of the email\n mail_addr = address.parse(addr)\n display_name = Header(\n mail_addr.display_name, charset='utf-8').encode()\n if display_name:\n # decode_header method in PY2 does not look for closed items\n # so a ' ' separator is required between items of a Header\n if PY2:\n base_addr = '{} <{}>'\n else:\n base_addr = '{}<{}>'\n header_value.append(\n base_addr.format(\n display_name,\n mail_addr.address\n ).strip()\n )\n else:\n header_value.append(mail_addr.address)\n header_value = ','.join(header_value)\n else:\n header_value = Header(value, charset='utf-8').encode()\n # Get correct header name or add the one provided if custom header key\n header = Email.fix_header_name(header) or header\n if header.lower() == 'bcc':\n result = []\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n self.bccs = header_value\n else:\n self.email[header] = header_value\n return header_value",
"def header_email(strg):\n\taddr = email.utils.parseaddr(strg)\n\tif not addr[1]:\n\t\traise EmailMissed(strg)\n\treturn addr[1]",
"def to_address(self):\n return u'\"{0}\" <{1}>'.format(\n unicode(Header(unicode(self), 'utf-8')).replace(u'\"', u\"'\"),\n self.email)",
"def x12_270_with_address_message() -> str:\n return \"\"\"ISA*00* *00* *ZZ*890069730 *ZZ*154663145 *200929*1705*|*00501*000000001*0*T*:~\nGS*HS*890069730*154663145*20200929*1705*0001*X*005010X279A1~\nST*270*0001*005010X279A1~\nBHT*0022*13*10001234*20200929*1319~\nHL*1**20*1~\nNM1*PR*2*UNIFIED INSURANCE CO*****PI*842610001~\nHL*2*1*21*1~\nNM1*1P*2*DOWNTOWN MEDICAL CENTER*****XX*2868383243~\nHL*3*2*22*0~\nTRN*1*1*1453915417~\nNM1*IL*1*DOE*JOHN****MI*11122333301~\nREF*IL*500700~\nN3*5150 ANYWHERE STREET*APT 1B~\nN4*SOME CITY*SC*90210~\nDMG*D8*19800519*M~\nDTP*291*D8*20200101~\nEQ*30~\nSE*13*0001~\nGE*1*0001~\nIEA*1*000010216~\"\"\".replace('\\n', '')",
"def convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()",
"def make_header(text):\n text_break = '=' * 16\n return f'{text_break} {text} {text_break}'",
"def format_address(**args):\n #Begin with the organisation and PO Box number, if applicable.\n address = ''.join([args[entry] + '\\n' \n for entry in ['organisation', 'PO box']\n if args.get(entry)])\n #Format building name/number components.\n address += format_building_components(*[args.get(x) for x in \n ['sub-building name', \n 'building name', \n 'building number',\n 'concatenation indicator']])\n #Add thoroughfare (if present), locality/town and postcode.\n address += ''.join([args[entry] + '\\n' \n for entry in ['dependent thoroughfare', \n 'thoroughfare',\n 'double dependent locality',\n 'dependent locality',\n 'town',\n 'postcode']\n if args.get(entry)])\n return address.strip()",
"def GetInfoAddress(self):\r\n return '%s@%s' % (options.options.info, options.options.mailer_domain)",
"def header_format(header, value, form = DEFAULT_FORMAT):\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn header_email(value)\n\telif header == \"Date\":\n\t\tparsed = email.utils.parsedate(value)\n\t\tif parsed:\n\t\t\treturn time.strftime(form, parsed)\n\t\treturn \"\"\n\tif header == \"Message-ID\":\n\t\treturn email.utils.unquote(value)\n\treturn value[:DEFAULT_MAXLEN]",
"def headers_add_host(headers, address):\n\n headers.setdefault('Host', address)\n\n return headers",
"def get_email_details(header: str) -> dict:\n # this is one way to solve the exercise\n # result_keys = [\"from\", \"to\", \"subject\", \"date\"]\n # search_strings = [\n # r\"From\\:\\s(.*)\",\n # r\"To\\:\\s(.*)\",\n # r\"Subject\\:\\s(.*)\",\n # r\"Date\\:\\s(.*)\\s[+-]\",\n # ]\n # result_values = [re.search(s, EMAIL_HEADER).group(1) for s in search_strings]\n # print(dict(zip(result_keys, result_values)))\n\n # or we could use groupdict as suggested\n m = re.search(\n r\"From\\:\\s(?P<from>.*)\\n.*To\\:\\s(?P<to>.*)\\n.*Subject\\:\\s(?P<subject>.+?)\\n.*Date\\:\\s(?P<date>.*)\\s[+-]\",\n header,\n re.MULTILINE | re.DOTALL,\n )\n return m.groupdict() if m else None",
"def config_header(self):\r\n self.msg['subject'] = self.subject\r\n self.msg['from'] = sender\r\n self.msg['to'] = \";\".join(self.receiver)",
"def other_mail_address(self):\n return (self.mail_address_2 + ' ' + \n self.mail_address_3 + ' ' +\n self.mail_address_4)",
"def format_addressproto(obj, autoescape=None):\n # it also supports some legacy variants of the address-protocol\n \n ret = []\n if autoescape:\n esc = conditional_escape\n else:\n esc = lambda x: x\n \n kdnstr = '<span class=\"org name1\">%s</span>' % esc(_get_attr(obj, 'name1'))\n if _get_attr(obj, 'kundennr'):\n kdnstr += ' (<span class=\"customerid\">%s</span>)' % esc(_get_attr(obj, 'kundennr'))\n elif _get_attr(obj, 'softmid'):\n kdnstr += ' (<span class=\"customerid\">SC%s</span>)' % esc(_get_attr(obj, 'softmid'))\n elif _get_attr(obj, 'curmid'):\n kdnstr += ' (<span class=\"customerid\">%s</span>)' % esc(_get_attr(obj, 'curmid'))\n ret.append(kdnstr)\n\n for dataname in ['name2', 'name3']:\n data = _get_attr(obj, dataname)\n if data:\n ret.append('<span class=\"%s\">%s</span>' % (dataname, esc(data)))\n \n if _get_attr(obj, 'iln'):\n kdnstr += 'ILN <span class=\"iln\">%s</span>' % esc(_get_attr(obj, 'iln'))\n \n addr = []\n for dataname in ['adresse', 'address', 'street', 'strasse']:\n data = _get_attr(obj, dataname)\n if data:\n addr.append('<span class=\"%s\">%s</span>' % (dataname, esc(data)))\n if _get_attr(obj, 'plz'):\n ortstr = ('<span class=\"zip postal-code\">%s</span>'\n ' <span class=\"city locality\">%s</span>' % (esc(_get_attr(obj, 'plz')),\n esc(_get_attr(obj, 'ort'))))\n else:\n ortstr = ('<span class=\"zip postal-code\">%s</span>'\n ' <span class=\"city locality\">%s</span>' % (esc(_get_attr(obj, 'postleitzahl')),\n esc(_get_attr(obj, 'ort'))))\n land = _get_attr(obj, 'land')\n if not land:\n land = _get_attr(obj, 'laenderkennzeichen')\n if not land:\n land = 'DE'\n if land != 'DE':\n ortstr = ('<span class=\"country-name land\">%s</span>-' % esc(land)) + ortstr\n addr.append(ortstr)\n ret.append('<span class=\"adr\">%s</span>' % '<br/>'.join(addr))\n \n if _get_attr(obj, 'tel'):\n ret.append('<span class=\"tel\"><span class=\"type\">main</span>:<span class=\"value\">%s</span></span>'\n % esc(_get_attr(obj, 'tel')))\n if _get_attr(obj, 'mobile'):\n ret.append('<span class=\"tel\"><span class=\"type\">mobile</span>:<span class=\"value\">%s</span></span>'\n % esc(_get_attr(obj, 'mobile')))\n if _get_attr(obj, 'fax'):\n ret.append('<span class=\"tel\"><span class=\"type\">fax</span>:<span class=\"value\">%s</span></span>'\n % esc(_get_attr(obj, 'fax')))\n if _get_attr(obj, 'email'):\n ret.append('<span class=\"email\">%s</span>' % esc(_get_attr(obj, 'email')))\n \n # the whole thing is enclesed in a vcard class tag\n return mark_safe('<div class=\"address vcard\">%s</div>' % '<br/>'.join(ret))",
"def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to",
"def create_github_header_anchor(header_title):\n return '[{}](#{})'.format(header_title, header_title.strip().replace(' ', '-'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
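The address-header helper in the row above works around Python 2's `Header` handling when building a `Name <email>` string. On Python 3 the standard library covers this directly; a minimal sketch using `email.utils.formataddr`, which RFC 2047-encodes a non-ASCII display name, is shown below.

```python
# Python 3 sketch of the same "Name <email>" formatting; formataddr handles
# the RFC 2047 encoding that the original did via Header().encode().
from email.utils import formataddr

def address_header(name: str, email_address: str) -> str:
    """Format a display name and address for use in an email header."""
    return formataddr((name, email_address))

if __name__ == "__main__":
    print(address_header("Jürgen Müller", "jm@example.org"))
    # Prints an encoded form such as '=?utf-8?...?= <jm@example.org>'
```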
Parse special keywords in commits to determine further postcommit actions. | def determine_keywords(self):
split = dict()
split['email_cc'] = re.compile("^\s*CC[-_]?MAIL[:=]\s*(.*)")
split['email_cc2'] = re.compile("^\s*C[Cc][:=]\s*(.*)")
split['fixed_in'] = re.compile("^\s*FIXED[-_]?IN[:=]\s*(.*)")
numeric = dict()
numeric['bug_fixed'] = re.compile("^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)")
numeric['bug_cc'] = re.compile("^\s*CCBUGS?[:=]\s*(.+)")
presence = dict()
presence['email_gui'] = re.compile("^\s*GUI:")
presence['silent'] = re.compile("(?:CVS|SVN|GIT|SCM).?SILENT")
presence['notes'] = re.compile("(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')")
results = defaultdict(list)
for line in self.commit.message.split("\n"):
# If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off
# This allows for people to fill keywords in the Differential Summary and have this work smoothly for them
line = re.sub("^Summary: (.+)", "\g<1>", line)
# Start processing our keywords...
for (name, regex) in split.iteritems():
match = re.match( regex, line )
if match:
results[name] += [result.strip() for result in match.group(1).split(",")]
for (name, regex) in numeric.iteritems():
match = re.match( regex, line )
if match:
results[name] += re.findall("(\d{1,10})", match.group(1))
for (name, regex) in presence.iteritems():
if re.match( regex, line ):
results[name] = True
self.keywords = results | [
"def is_commit(tokens):\n return tokens[0].lower() == COMMIT",
"def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n 
commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n 
title = None\n separator = None\n body = []\n\n return commit_info",
"def commits_parsing(query):\n logging.info(\"GET request commit parsing is working\")\n results = {}\n list_of_commits = []\n clear_list_message = []\n clear_list_committer = []\n json_commits = {}\n json_all = {}\n for single_query in query:\n list_of_commits += {single_query[:-6]}\n\n try:\n results = requests.get(single_query[:-6])\n except requests.ConnectionError as exception:\n return f'{exception}'\n\n json_all = results.json()[0]\n\n json_commits = json_all['commit']\n clear_list_message += {json_commits['message']}\n clear_list_committer += {json_commits['committer']['name']}\n\n return clear_list_message, clear_list_committer",
"def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()",
"def commit_names(self, commit):\n return []",
"def process_keywords(keywords, ops, arch_status=None):\n\tnew_keywords = set(keywords).copy()\n\n\t# Process each op one at a time.\n\tfor op, oarch, refarch in ops:\n\t\t# Figure out which keywords we need to modify.\n\t\tif oarch == 'all':\n\t\t\tif not arch_status:\n\t\t\t\traise ValueError('unable to process \"all\" w/out profiles.desc')\n\t\t\told_arches = set([keyword_to_arch(a) for a in new_keywords])\n\t\t\tif op is None:\n\t\t\t\t# Process just stable keywords.\n\t\t\t\tarches = [k for k, v in arch_status.items()\n\t\t\t\t if v == 'stable' and k in old_arches]\n\t\t\telse:\n\t\t\t\t# Process all possible keywords. We use the arch_status as a\n\t\t\t\t# master list. If it lacks some keywords, then we might miss\n\t\t\t\t# somethings here, but not much we can do.\n\t\t\t\tarches = old_arches\n\t\t\t# We ignore the glob arch as we never want to tweak it.\n\t\t\tif '*' in arches:\n\t\t\t\tarches.remove('*')\n\t\telse:\n\t\t\tarches = (oarch,)\n\n\t\tif refarch:\n\t\t\t# Figure out the state for this arch based on the reference arch.\n\t\t\t# TODO: Add support for \"all\" keywords.\n\t\t\t# XXX: Should this ignore the '-' state ? Does it make sense to\n\t\t\t# sync e.g. \"s390\" to \"-ppc\" ?\n\t\t\trefkeyword = [x for x in new_keywords if refarch == keyword_to_arch(x)]\n\t\t\tif not refkeyword:\n\t\t\t\top = '^'\n\t\t\telif refkeyword[0].startswith('~'):\n\t\t\t\top = '~'\n\t\t\telif refkeyword[0].startswith('-'):\n\t\t\t\top = '-'\n\t\t\telse:\n\t\t\t\top = None\n\n\t\t# Finally do the actual update of the keywords list.\n\t\tfor arch in arches:\n\t\t\tnew_keywords -= set(['%s%s' % (x, arch) for x in ('', '~', '-')])\n\n\t\t\tif op is None:\n\t\t\t\tnew_keywords.add(arch)\n\t\t\telif op in ('~', '-'):\n\t\t\t\tnew_keywords.add('%s%s' % (op, arch))\n\t\t\telif op == '^':\n\t\t\t\t# Already deleted. Whee.\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\traise ValueError('unknown operation %s' % op)\n\n\treturn new_keywords",
"def _post_parse(self):\n pass",
"def test_commit__parse(repo, commit_message):\n\n commit = objects.Commit(repo, commit_message)\n\n assert commit.type_ == \"commit\"\n assert commit.message == \"Add attribute to model.\\n\"\n assert commit.tree_sha == \"29ff16c9c14e2652b22f8b78bb08a5a07930c147\"\n assert commit.parent_sha == \"206941306e8a8af65b66eaaaea388a7ae24d49a0\"\n\n assert commit.author.name == \"Carlton Duffett\"\n assert commit.author.email == \"carlton.duffett@example.com\"\n assert commit.author.authored_at == 1527025023\n assert commit.author.timezone == \"-0700\"\n\n assert commit.committer.name == \"Carlton Duffett\"\n assert commit.committer.email == \"cduffett@example.tech\"\n assert commit.committer.authored_at == 1527025044\n assert commit.committer.timezone == \"-0700\"",
"def applyPostbase(word, postbase):\n\t#TODO would be cool if you could pass a list of postbases in here and have it do the \"right thing\"\n\texp = Base.explode(word)\n\tkeepStrongCfinal = False\n\t# @ symbol?\n\tdropVCfinal = False\n\tattachIrregular = False\n\n\t#keep the final consonant\n\tplus = string.find(postbase, '+')\n\tif plus > -1:\n\t\tpostbase = postbase[:plus] + postbase[plus + 1:]\n\n\t# FIXME need to check against words that contain '-' as a part of the word\n\t# FIXME this might cause trouble with enclitics\n\t# remove the last consonant\n\tminus = string.find(postbase, '-')\n\tif minus > -1:\n\t\tpostbase = postbase[:minus] + postbase[minus + 1:]\n\t\tif not Word.isVowel(exp[-1]):\n\t\t\texp.pop(-1)\n\n\t# remove final 'e'\n\ttilde = string.find(postbase, '~')\n\tif tilde > -1:\n\t\tpostbase = postbase[:tilde] + postbase[tilde + 1:]\n\t\tif exp[-1] == 'e':\n\t\t\texp.pop(-1)\n\n\t# choose between letters in parenthesis\n\tparen = string.find(postbase, '(')\n\tif paren > -1:\n\t\tpl = parenLetter(word, postbase)\n\t\t#FIXME, what if multiple parens\n\t\tparenOpen = string.find(postbase, '(')\n\t\tparenClose = string.find(postbase, ')') + 1\n\n\t\tpostbase = postbase[:parenOpen] + pl + postbase[parenClose:]\n\n\t# add gemination if needed\n\t#FIXME not tested on words that contain 2 \\' ...does such a word exist?\n\tapos = string.find(postbase, '\\'')\n\tif apos > -1:\n\t\tpostbase = postbase[:apos] + postbase[apos + 1:]\n\n\t\t# FIXME this may indicate that there's something that needs tweaked about the syllablematches\n\t\t# function. A short base is defined as [C]VCe, currently this only tests the end of the word.\n\t\t# this should match VCe and CVCe only\n\t\tshortA = len(exp) == 3 and Syllables.syllableMatches(exp, 'VCe')\n\t\tshortB = len(exp) == 4 and Syllables.syllableMatches(exp, 'CVCe')\n\t\tif shortA or shortB:\n\t\t\texp.pop(-1)\n\t\t\tif Syllables.syllableCount(exp) == 1:\n\t\t\t\texp.append('\\'')\n\t\telif exp[-1] == 'e':\n\t\t\texp.pop(-1)\n\n\t# velar dropping suffixes\n\tcolon = string.find(postbase, ':')\n\tif colon > -1:\n\t\ttestsuf = exp[-1] + postbase\n\t\ttestExp = Base.explode(testsuf)\n\t\tcolon = testExp.index(':')\n\t\tvelar = testExp[colon + 1]\n\t\ttestExp = testExp[:colon] + testExp[colon + 1:]\n\n\t\tif Syllables.syllableMatches(testExp, 'CV' + velar + 'V'): #FIXME might crash if word isn't long enough\n\t\t\ttestExp = Base.explode(postbase)\n\t\t\tcolon = testExp.index(':')\n\t\t\ttestExp.pop(colon)\n\t\t\ttestExp.pop(colon)\n\t\telse:\n\t\t\ttestExp = Base.explode(postbase)\n\t\t\tcolon = testExp.index(':')\n\t\t\ttestExp.pop(colon)\n\n\t\tpostbase = ''.join(testExp)\n\n\tif postbase[0] == '÷':\n\t\tkeepStrongCfinal = True\n\n\tif string.find(postbase, ':') > -1:\n\t\tdropVelar = True\n\n\tif postbase[0] == '- -':\n\t\tdropVCfinal = True\n\n\tif postbase[0] == '%':\n\t\tattachIrregular = True\n\n\tword = ''.join(exp)\n\tword = word + postbase\n\n\t#cleanup for words that wind up not needing the \\' for gemination because they are followed by 2 vowels\n\t#FIXME not tested on words that contain 2 \\' ...does such a word exist\n\texp = Base.explode(word)\n\ttry:\n\t\tgemmarker = exp.index('\\'')\n\texcept ValueError:\n\t\tgemmarker = -1\n\tif gemmarker > -1 and len(exp) >= gemmarker + 3:\n\t\tsyl = exp[gemmarker + 1:gemmarker + 3]\n\t\tif Syllables.syllableMatches(syl, 'VV'):\n\t\t\texp.pop(gemmarker)\n\n\tword = ''.join(exp)\n\n\treturn word",
"def test_get_commit_dict_02(self):\n sha = 'shashashashasha'\n author = u'authorauthorauthorauthor'\n email = u'emailemailemail'\n date = 'datedatedate'\n subject = u' Merge branch \\'hotfix/branch_figure\\' '\n\n txt = self.get_git_log_commit_line(\n sha,\n author,\n email, date, subject,\n )\n\n result = self.e.get_commit_dict(txt)\n expected = {\n 'sha': sha,\n 'author': author,\n 'email': email,\n 'date': date,\n 'subject': subject,\n }\n\n self.assertEqual(expected['sha'], result['sha'])\n self.assertEqual(expected['date'], result['date'])\n self.assertEqual(expected['subject'], result['subject'])",
"def parse_commit_message(message: str) -> Tuple[int, str, str, Tuple[str, str, str]]:\n parsed = re_parser.match(message)\n if not parsed:\n raise UnknownCommitMessageStyleError(\n 'Unable to parse the given commit message: {}'.format(message)\n )\n\n level_bump = 0\n\n if parsed.group('type').lower() in MAJOR_TYPES:\n level_bump = max([level_bump, 3])\n\n if parsed.group('type').lower() in MINOR_TYPES:\n level_bump = max([level_bump, 2])\n\n if parsed.group('type').lower() in PATCH_TYPES:\n level_bump = max([level_bump, 1])\n\n body, footer = parse_text_block(parsed.group('text'))\n if debug.enabled:\n debug('parse_commit_message -> ({}, {}, {}, {})'.format(\n level_bump,\n TYPES[parsed.group('type').lower()],\n '',\n (parsed.group('subject'), body, footer)\n ))\n return (\n level_bump,\n TYPES[parsed.group('type').lower()],\n '',\n (parsed.group('subject'), body, footer)\n )",
"def parse_command(self, dct):\n pass",
"def parse_commitrefs(*commitrefs):\n try:\n return map(binascii.unhexlify, hashes(*commitrefs))\n except subprocess2.CalledProcessError:\n raise BadCommitRefException(commitrefs)",
"def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors",
"def parse(self):\r\n header, *self.body = self.raw_text.splitlines()\r\n self.command, *self.args = map(str.strip, header.split(';'))\r\n if self.command.startswith('!'):\r\n self.command = self.command[1:] # remove the initial \"!\"\r",
"def match_keyword_commit(\n keywords: list,\n commit_msg: str,\n case=True\n ) -> bool:\n if case is True:\n for keyword in keywords:\n if keyword in commit_msg:\n return True\n return False\n\n # Case insensitive\n for keyword in keywords:\n if keyword.lower() in commit_msg.lower():\n return True\n return False",
"def format_commit(cls, commit):\n return dict(\n author=commit.get('author') or '<no author>',\n committed=commit.get('committed'),\n message=remove_tags(commit.get('message') or ''),\n )",
"def format_commit_hook(hook):\n return dict(\n id=str(hook._id),\n shortname=hook.shortname,\n name=hook.name,\n description=hook.description,\n removable=hook.removable\n )",
"def handle_commits_published(extension=None, **kwargs):\n review_request = kwargs.get('review_request')\n\n if review_request is None:\n return\n\n commit_data = fetch_commit_data(review_request)\n\n if (not is_pushed(review_request, commit_data) or\n not is_parent(review_request, commit_data)):\n return\n\n # Check the change description and only continue if it contains a change\n # to the commit information. Currently change descriptions won't include\n # information about our extra data field, so we'll look for a change to\n # the diff which is mandatory if the commits changed. TODO: Properly use\n # the commit information once we start populating the change description\n # with it.\n #\n # A change description will not exist if this is the first publish of the\n # review request. In that case we know there must be commits since this\n # is a pushed request.\n cd = kwargs.get('changedesc')\n if (cd is not None and ('diff' not in cd.fields_changed or\n 'added' not in cd.fields_changed['diff'])):\n return\n\n # We publish both the review repository url as well as the landing\n # (\"inbound\") repository url. This gives consumers which perform hg\n # operations the option to avoid cloning the review repository, which may\n # be large.\n repo = review_request.repository\n repo_url = repo.path\n landing_repo_url = repo.extra_data.get('landing_repository_url')\n\n child_rrids = []\n commits = []\n ext_commits = json.loads(commit_data.extra_data.get(COMMITS_KEY, '[]'))\n\n for rev, rrid in ext_commits:\n child_rrids.append(int(rrid))\n commits.append({\n 'rev': rev,\n 'review_request_id': int(rrid),\n 'diffset_revision': None\n })\n\n # In order to retrieve the diff revision for each commit we need to fetch\n # their correpsonding child review request.\n review_requests = dict(\n (obj.id, obj) for obj in\n ReviewRequest.objects.filter(pk__in=child_rrids))\n\n for commit_info in commits:\n # TODO: Every call to get_latest_diffset() makes its own query to the\n # database. It is probably possible to retrieve the diffsets we care\n # about using a single query through Django's ORM, but it's not trivial.\n commit_info['diffset_revision'] = review_requests[\n commit_info['review_request_id']\n ].get_latest_diffset().revision\n\n msg = base.GenericMessage()\n msg.routing_parts.append('mozreview.commits.published')\n msg.data['parent_review_request_id'] = review_request.id\n msg.data['parent_diffset_revision'] = review_request.get_latest_diffset().revision\n msg.data['commits'] = commits\n msg.data['repository_url'] = repo_url\n msg.data['landing_repository_url'] = landing_repo_url\n\n # TODO: Make work with RB localsites.\n msg.data['review_board_url'] = get_server_url()\n\n publish_message(extension, msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send the commit notification to CIA. The message is created incrementally using lxml's "E" builder. | def notify(self, builder):
# Build the <files> section for the template...
commit = builder.commit
files = E.files()
commit_msg = commit.message.strip()
commit_msg = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', commit_msg)
for filename in commit.files_changed:
safe_filename = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', filename)
file_element = E.file(safe_filename)
files.append(file_element)
# Build the message
cia_message = self.MESSAGE()
cia_message.append(self._generator)
source = self.SOURCE(E.project("KDE"))
source.append(E.module(self.repository.path))
source.append(E.branch(self.repository.ref_name))
cia_message.append(source)
cia_message.append(self.TIMESTAMP(commit.date))
body = self.BODY()
commit_data = self.COMMIT()
commit_data.append(E.author(commit.author_name))
commit_data.append(E.revision(commit.description))
commit_data.append(files)
commit_data.append(E.log(commit_msg))
commit_data.append(E.url(commit.url))
body.append(commit_data)
cia_message.append(body)
# Convert to a string
commit_xml = etree.tostring(cia_message)
# Craft the email....
message = MIMEText( commit_xml, 'xml', 'utf-8' )
message['Subject'] = "DeliverXML"
message['From'] = "sysadmin@kde.org"
message['To'] = "commits@platna.kde.org"
# Send email...
self.smtp.sendmail("sysadmin@kde.org", ["commits@platna.kde.org"],
message.as_string()) | [
"def notify(self, id, command, data = None):\n print \"sending:\", id, command, data\n if command == Code.START: data = [id]\n try:\n msg = Message(command = command, data = data)\n self.contacts[id].send(msg.encode())\n except:\n print \"msg failed\"",
"def write(self, notification):",
"def notify(self, message):\n pass",
"def action_invoice_dian_resend(self):\n self.ensure_one()\n template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)\n compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)\n ctx = dict(\n default_model='account.invoice',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n )\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }",
"async def send_cemi(self, cemi: CEMIFrame) -> None:",
"def sendMessage(self,number,message):\n self.ser.write('AT+CMGS=\"'+number+'\"\\r\\n')\n self.ser.write(message)\n self.ser.write(chr(26))\n print \"[+] Message Sent\"",
"def send_quote():\n quote = get_quote() \n notification.notify(title = quote[1] , message = quote[0], app_name = \"Quote of the Day\" )",
"def tester():\n\n msg = notification.Notification('test sub', 'test body')\n msg.send()\n return '<html>test complete</html>'",
"def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )",
"def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n f\"💿 New {self.args[CONF_APP_NAME]} Version: {new_version}\",\n )",
"def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)",
"def sendNotifyToAgent(self, data):\n self.parent.sendNotifyToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)",
"def send_message(self, arbitration_id, data, extended, timeout=0.2):\n message = can.Message(arbitration_id=arbitration_id, data = data, extended_id=extended)\n\n \n self.can_bus.send(message, timeout)",
"def notification_email():\r\n send_email(\"Notifing You to FEED YOUR VIRTUAL CAT!!!!\", \"FEED THE CAT!!!!!\")",
"def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )",
"def test_notification_cp_email(self):\n # publish the item\n api.content.transition(obj=self.event, transition='publish')\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 2)\n msg = message_from_string(mailhost.messages[1])\n\n self.assertEqual(msg['To'], CP_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <noreply@eestec.net>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BCP=5D_=5BEVENTS=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been published', msg.get_payload())\n self.assertIn('http://nohost/plone/lc/test-event', msg.get_payload())",
"def send_notification():\n with open(config.OUTPUT_PATH, 'r', encoding='utf-8') as outFile:\n out_file = outFile.readlines()\n if len(out_file) != 0:\n for line in out_file:\n course, task, date, time = line.split(',', 3)\n subject = f'{course} {task}'\n body = f' פורסמה {task} בקורס {course} להגשה בתאריך {date} בשעה {time}'\n send_mail(subject, body) # call the mail sender func\n create_event(date, f'{task}-{course}') # call the google calendar func to make event",
"async def send_cemi(self, cemi: CEMIFrame) -> None:\n # send L_DATA_IND to network, create L_DATA_CON locally for routing\n cemi.code = CEMIMessageCode.L_DATA_IND\n routing_indication = RoutingIndication(raw_cemi=cemi.to_knx())\n\n async with self._flow_control.throttle():\n self._send_knxipframe(KNXIPFrame.init_from_body(routing_indication))\n\n cemi.code = CEMIMessageCode.L_DATA_CON\n self.cemi_received_callback(cemi.to_knx())",
"def send_mail(self):\n context2 = self.env.context.copy()\n if self.model and self.id_active and self.env.context.get('send_mail_wkf_signal'):\n obj = self.env[self.model].browse(self.id_active)\n obj.signal_workflow(self.env.context['send_mail_wkf_signal'])\n context2['thread_model'] = self.model\n if self.model and self.id_active and self.env.context.get('send_mail_method_next'):\n obj = self.env[self.model].browse(self.id_active)\n getattr(obj, self.env.context['send_mail_method_next'])()\n \n return super(mail_compose_message, self.with_context(context2)).send_mail()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for potential problems in a commit. | def check_commit_problems(self, commit, diff):
# Initialise
self._license_problem = False
self._commit_problem = False
self._commit_notes = defaultdict(list)
# Unsafe regex checks...
unsafe_matches = list()
unsafe_matches.append( r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]" )
unsafe_matches.append( r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]" )
unsafe_matches.append( r"(scanf)\b\s*[\(\r\n]" )
valid_filename_regex = r"\.(cpp|cc|cxx|C|c\+\+|c|l|y||h|H|hh|hxx|hpp|h\+\+|qml)$"
# Retrieve the diff and do the problem checks...
filename = unicode("")
filediff = list()
for line in diff:
file_change = re.match( "^diff --(cc |git a\/.+ b\/)(.+)$", line )
if file_change:
# Are we changing file? If so, we have the full diff, so do a license check....
if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
self.check_commit_license(filename, ''.join(filediff))
filediff = list()
filename = file_change.group(2)
continue
# Diff headers are bogus
if re.match("@@ -\d+,\d+ \+\d+ @@", line):
filediff = list()
continue
# Do an incremental check for *.desktop syntax errors....
if re.search("\.desktop$", filename) and re.search("[^=]+=.*[ \t]$", line) and line.startswith("+") and not re.match("^\+#", line):
self._commit_notes[filename].append( "[TRAILING SPACE] **" )
self._commit_problem = True
# Check for things which are unsafe...
for safety_match in unsafe_matches:
match = re.match(safety_match, line)
if match:
note = "[POSSIBLY UNSAFE: {0}] **".format( match.group(1) )
self._commit_notes[filename].append(note)
self._commit_problem = True
# Store the diff....
filediff.append(line)
if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
self.check_commit_license(filename, ''.join(filediff)) | [
"def validate_commit(commit, branch_version):\n\n # this returns headers, followed by a empty line, then the message, so\n # we strip the headers\n message = subprocess.check_output([\n 'git', 'cat-file', 'commit', commit]).split('\\n\\n', 1)[1]\n\n match = RESOLVES_RE.search(message)\n if not match:\n print('Commit %s does not include the required \"Resolves:\" '\n 'footer. Modify your commit to add this.' % commit[:8])\n sys.exit(1)\n\n bug_id = int(match.group(1))\n\n req = requests.get(BUG_URL % bug_id)\n\n if req.status_code == 401:\n print('Bug %d, referenced by commit %s, is private. You will '\n 'have to match the bug to the release manually.' % (\n bug_id, commit))\n return\n\n if req.status_code == 404:\n print('Bug %d, referenced by commit %s, does not exist. Typo?' % (\n bug_id, commit))\n sys.exit(1)\n\n if req.status_code != 200 or not req.json().get('bugs'):\n print('Got unexpected response (%d).\\n\\n%s' % (\n req.status_code, json.dumps(req.json(), indent=4)))\n sys.exit(1)\n\n bug_versions = []\n\n target_releases = req.json()['bugs'][0]['target_release']\n if not target_releases or target_releases[0] == '---':\n print('Bug %d, referenced by commit %s, does not have a target '\n 'release set. This must be set first.' % (\n bug_id, commit))\n sys.exit(1)\n\n for target_release in target_releases:\n bug_version = VERSION_RE.search(target_release).group(0)\n bug_versions.append(bug_version)\n if bug_version == branch_version:\n break\n else:\n print('This patch is for OSP %s yet bug %d is for version(s) %s. '\n 'Update the bug target release then try again.' % (\n branch_version, bug_id, ', '.join(bug_versions)))\n sys.exit(1)",
"def git_check():\n\n # check that changes staged for commit are pushed to origin\n output = local(\n 'git diff --name-only | egrep -v \"^({}/version.py)|(version.py)$\" | tr \"\\\\n\" \" \"'.format(project_name),\n capture=True).strip()\n if output:\n fatal('Stage for commit and commit all changes first: {}'.format(output))\n\n output = local(\n 'git diff --cached --name-only | egrep -v \"^({}/version.py)|(version.py)$\" | tr \"\\\\n\" \" \"'.format(project_name),\n capture=True).strip()\n if output:\n fatal('Commit all changes first: {}'.format(output))",
"def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")",
"def test_violations(self):\n git(\"checkout\", \"-b\", \"test-branch-commits-violations-base\", _cwd=self.tmp_git_repo)\n self.create_simple_commit(\"Sïmple title.\\n\")\n git(\"checkout\", \"-b\", \"test-branch-commits-violations\", _cwd=self.tmp_git_repo)\n\n self.create_simple_commit(\"Sïmple title2.\\n\")\n commit_sha1 = self.get_last_commit_hash()[:10]\n self.create_simple_commit(\"Sïmple title3.\\n\")\n commit_sha2 = self.get_last_commit_hash()[:10]\n output = gitlint(\n \"--commits\",\n \"test-branch-commits-violations-base...test-branch-commits-violations\",\n _cwd=self.tmp_git_repo,\n _tty_in=True,\n _ok_code=[4],\n )\n\n self.assertEqual(output.exit_code, 4)\n expected_kwargs = {\"commit_sha1\": commit_sha1, \"commit_sha2\": commit_sha2}\n self.assertEqualStdout(output, self.get_expected(\"test_commits/test_violations_1\", expected_kwargs))",
"def test_check_msg_fail(self, test_checkMsg):\n test_checkMsg.return_value = False\n res = self.commit('PRJ:', 'PRJ')\n self.assertEqual(res, self.BAD_COMMIT)",
"def check():\n print(\"\\n=== Commits\")\n\n for reference in Common.references:\n print(\" Browsing {}:\".format(reference))\n o_commit = Common.original.lookup_reference(reference).peel()\n n_commit = Common.new.lookup_reference(reference).peel()\n __browse_commits(o_commit, n_commit)\n\n print(\" OK\")",
"def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors",
"def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True",
"def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')",
"def test_lint_single_commit(self):\n self.create_simple_commit(\"Sïmple title.\\n\")\n first_commit_sha = self.get_last_commit_hash()\n self.create_simple_commit(\"Sïmple title2.\\n\")\n commit_sha = self.get_last_commit_hash()\n refspec = f\"{commit_sha}^...{commit_sha}\"\n self.create_simple_commit(\"Sïmple title3.\\n\")\n\n expected = '1: T3 Title has trailing punctuation (.): \"Sïmple title2.\"\\n' + \"3: B6 Body message is missing\\n\"\n\n # Lint using --commit <commit sha>\n output = gitlint(\"--commit\", commit_sha, _cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[2])\n self.assertEqual(output.exit_code, 2)\n self.assertEqualStdout(output, expected)\n\n # Lint using --commits <commit sha>,\n output = gitlint(\"--commits\", f\"{commit_sha},\", _cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[2])\n self.assertEqual(output.exit_code, 2)\n self.assertEqualStdout(output, expected)\n\n # Lint a single commit using --commits <refspec> pointing to the single commit\n output = gitlint(\"--commits\", refspec, _cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[2])\n self.assertEqual(output.exit_code, 2)\n self.assertEqualStdout(output, expected)\n\n # Lint the first commit in the repository. This is a use-case that is not supported by --commits\n # As <sha>^...<sha> is not correct refspec in case <sha> points to the initial commit (which has no parents)\n expected = '1: T3 Title has trailing punctuation (.): \"Sïmple title.\"\\n' + \"3: B6 Body message is missing\\n\"\n output = gitlint(\"--commit\", first_commit_sha, _cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[2])\n self.assertEqual(output.exit_code, 2)\n self.assertEqualStdout(output, expected)\n\n # Assert that indeed --commits <refspec> is not supported when <refspec> points the the first commit\n refspec = f\"{first_commit_sha}^...{first_commit_sha}\"\n output = gitlint(\"--commits\", refspec, _cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[254])\n self.assertEqual(output.exit_code, 254)",
"def check_files_committed(self):\n # staged but uncommitted\n uncommitted = self.repo.index.diff(self.current_hexsha)\n # unstaged changes\n unstaged = self.repo.index.diff(None)\n if uncommitted or unstaged:\n raise BuildError(\n 'There are uncommitted changes in the repo. Please stash or '\n 'commit before starting a new build.'\n )",
"def test_commit_guessing_fail(self):\n repo = self.init_test_repo('gbp-test-native')\n\n # Add \"very old\" header to changelog\n with open('packaging/gbp-test-native.changes', 'w') as ch_fp:\n ch_fp.write('* Sat Jan 01 2000 User <user@host.com> 123\\n- foo\\n')\n # rpm-ch should fail by not being able to find any commits before the\n # last changelog section\n eq_(mock_ch([]), 1)\n self._check_log(-1, \"gbp:error: Couldn't determine starting point\")",
"def resolve_conflicts(self, commit=True):\n pass # pragma: no cover",
"def test_lint_empty_commit_range(self):\n self.create_simple_commit(\"Sïmple title.\\n\")\n self.create_simple_commit(\"Sïmple title2.\\n\")\n commit_sha = self.get_last_commit_hash()\n # git revspec -> 2 dots: <exclusive sha>..<inclusive sha> -> empty range when using same start and end sha\n refspec = f\"{commit_sha}..{commit_sha}\"\n\n # Regular gitlint invocation should run without issues\n output = gitlint(\"--commits\", refspec, _cwd=self.tmp_git_repo, _tty_in=True)\n self.assertEqual(output.exit_code, 0)\n self.assertEqualStdout(output, \"\")\n\n # Gitlint should fail when --fail-without-commits is used\n output = gitlint(\n \"--commits\",\n refspec,\n \"--fail-without-commits\",\n _cwd=self.tmp_git_repo,\n _tty_in=True,\n _ok_code=[self.GITLINT_USAGE_ERROR],\n )\n self.assertEqual(output.exit_code, self.GITLINT_USAGE_ERROR)\n self.assertEqualStdout(output, f'Error: No commits in range \"{refspec}\"\\n')",
"def test_reports_one_commit(self):\n with self.assertRaises(SystemExit) as ec:\n self.run_command(\n '--rev-range HEAD^1..HEAD {0}'.format(self.gitrepodir)\n )\n\n self.assertSystemExitCode(ec.exception, 0)\n\n self.assertResults(u\"\"\"\n ▾ plugin01\n\n ⚠ line 1: c.txt\n c is +\n\n {0} Jig ran 1 plugin\n Info 0 Warn 1 Stop 0\n \"\"\".format(ATTENTION), self.output)",
"def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result",
"def lint(self, commit):\n LOG.debug(\"Linting commit %s\", commit.sha or \"[SHA UNKNOWN]\")\n LOG.debug(\"Commit Object\\n\" + str(commit))\n\n # Ensure the Deprecation class has a reference to the config currently being used\n Deprecation.config = self.config\n\n # Apply config rules\n for rule in self.configuration_rules:\n rule.apply(self.config, commit)\n\n # Skip linting if this is a special commit type that is configured to be ignored\n ignore_commit_types = [\"merge\", \"squash\", \"fixup\", \"fixup_amend\", \"revert\"]\n for commit_type in ignore_commit_types:\n if getattr(commit, f\"is_{commit_type}_commit\") and getattr(self.config, f\"ignore_{commit_type}_commits\"):\n return []\n\n violations = []\n # determine violations by applying all rules\n violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1))\n violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2))\n violations.extend(self._apply_commit_rules(self.commit_rules, commit))\n\n # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),\n # we replace None with -1 so that it always get's placed first. Note that we need this to do this to support\n # python 3, as None is not allowed in a list that is being sorted.\n violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id))\n return violations",
"def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )",
"def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the index of the resource to use for making requests to get data; if none of the resources are available, returns the number of seconds until a resource becomes available (as a negative value) | def get_resource_index(self):
result = -1
max_sleep_time = self.time_window
with self._lock:
while result == -1:
for i in range(0, self.num_keys):
curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)
max_sleep_time = min(max_sleep_time, curr_sleep_time)
if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():
self.timers[i][0] = 0
self.timers[i][1] = 0
if self.timers[i][1] < self.window_limit:
result = i
break
if result == -1: # case when all streams are rate limited
# logging.warning('sleeping for %d seconds.' % max_sleep_time)
# time.sleep(max_sleep_time)
return -1 * max_sleep_time
if self.timers[result][0] == 0:
self.timers[result][0] = time.time()
self.timers[result][1] += 1
return result | [
"def perform_get_start(self):\n\t\treturn 0",
"def _get_task_index(self):\n\n if self._is_chief:\n self._server_socket = self._start_socket_server()\n self._server_socket.settimeout(5)\n users = []\n t_end = time.time() + self._wait_time\n\n while time.time() < t_end:\n try:\n sock, _ = self._server_socket.accept()\n connection_socket = ssl.wrap_socket(\n sock,\n server_side=True,\n certfile=SC.cert_path,\n keyfile=SC.key_path,\n ssl_version=ssl.PROTOCOL_TLSv1)\n if connection_socket not in users:\n users.append(connection_socket)\n except socket.timeout:\n pass\n\n num_workers = len(users) + 1\n _ = [us.send((str(i + 1) + ':' + str(num_workers)).encode('utf-8')) \\\n for i, us in enumerate(users)]\n self._nex_task_index = len(users) + 1\n _ = [us.close() for us in users]\n\n self._server_socket.settimeout(120)\n return 0, num_workers\n\n client_socket = self._start_socket_worker()\n message = client_socket.recv(1024).decode('utf-8').split(':')\n client_socket.close()\n return int(message[0]), int(message[1])",
"def perform_get_start(self) -> int:\n\t\treturn 0",
"def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text",
"def getWaitingTaskCount():",
"def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")",
"def try_query(pid):\n retries = 1\n while True:\n try:\n query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n return query\n except HTTPError as e:\n if retries > 10:\n raise e\n print(e)\n wait = retries * 15\n time.sleep(wait)\n retries += 1",
"def get_num_pages(self) -> Optional[int]:\n timeout: float = 5\n num_attempts = 0\n while num_attempts < 10:\n r = hit_api(self.key_manager, self.url, self.logger, timeout=timeout, method=\"HEAD\")\n\n if r:\n break\n\n timeout = timeout * 1.2\n else:\n raise RuntimeError(\"Unable to get the number of pages of data in 10 attempts\")\n\n if 'last' not in r.links.keys():\n return 1\n \n # get the last url from header\n last_page_url = r.links['last']['url']\n\n parsed_url = urlparse(last_page_url)\n try:\n num_pages = int(parse_qs(parsed_url.query)['page'][0])\n except (KeyError, ValueError):\n return None\n\n return num_pages",
"def get_readiness():\n return {}, 200",
"async def async_get_stage(self, attempts=50):\n\n # Query the API until a sensible (> 0) value is received, or the number of attempts is exceeded\n for attempt in range(attempts):\n res = await self.async_query_api(\"/GetStatus\")\n\n # Return the current loadshedding stage by subtracting 1 from the query result\n # Occasionally the Eskom API will return a negative stage, so simply retry if this occurs\n if res and int(res) > 0:\n return int(res) - 1\n\n # If the query does not succeed after the number of attempts has been exceeded, raise an exception\n raise Exception(\n f\"Error, invalid loadshedding stage received from API after {attempts} attempts\"\n )",
"async def _generic_request(self, cls: Union[DhtGetterResource, DhtPutterResouce], \n key: str, *args, timeout=120):\n result_key = self._generic_request_sync(cls, key, *args)\n total = 0\n while result_key not in self.results and total < timeout:\n log.debug(f\"Waiting for {result_key} to become available after {total}/{timeout} seconds.\")\n await asyncio.sleep(TOCK)\n total += TOCK\n\n return self.results.pop(result_key, None)",
"def winhttp_WinHttpQueryDataAvailable(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpdwNumberOfBytesAvailable\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def read_blinds_status_from_thingspeak():\n results = 1\n URL='https://api.thingspeak.com/channels/1152832/feeds.json?api_key='\n KEY='4DDGV289MS3GJCBY'\n prev_len_data = 0 #the length of the list of data points collected on the previous loop search\n \n while (1):\n HEADER='&results=%d' % (2**results)\n NEW_URL=URL+KEY+HEADER\n \n try: \n get_data=requests.get(NEW_URL).json()\n \n data = []\n for x in get_data['feeds']:\n print(x['field3'])\n data.append(x['field3']) #get lightstatus\n #End for\n \n index = search_for_nums(data) #searching for most recent lightstatus input\n \n if index != None: #found most recent data\n print(\"data point found...blindsstatus: %s \" % (data[index]))\n return int(data[index])\n else:\n print(\"missing data point\")\n results += 1\n \n if prev_len_data == len(data): #if the list of data previously collected is the same as the current\n print (\"No data points currently exist\") #all current available data has been exhausted. Move on\n return\n else: \n prev_len_data = len(data) #there are more points available. try again.\n #END if\n #END if\n except:\n print (\"Error reading blinds_status from ThingSpeak\")\n #END try-except\n #END WHILE",
"def getRetryCount():\n return int(webapp2.get_request().headers.get('X-Appengine-TaskRetryCount', 0))",
"def get_next_client_index(self, write=True):\r\n if write or len(self._server) == 1:\r\n return 0\r\n\r\n return random.randint(1, len(self._server) - 1)",
"def get_num_cache_requests():\n def get_num_cache_requests_util():\n rc, stdout, stderr = exec_process(\"hdfs cacheadmin -listDirectives -stats\")\n assert rc == 0, 'Error executing hdfs cacheadmin: %s %s' % (stdout, stderr)\n # remove blank new lines from output count\n lines = [line for line in stdout.split('\\n') if line.strip()]\n count = None\n for line in lines:\n if line.startswith(\"Found \"):\n # the line should say \"Found <int> entries\"\n # if we find this line we parse the number of entries\n # from this line.\n count = int(re.search(r'\\d+', line).group())\n break\n # if count is available we return it else we just\n # return the total number of lines\n if count is not None:\n return count\n else:\n return len(stdout.split('\\n'))\n\n # IMPALA-3040: This can take time, especially under slow builds like ASAN.\n wait_time_in_sec = build_flavor_timeout(5, slow_build_timeout=20)\n num_stabilization_attempts = 0\n max_num_stabilization_attempts = 10\n num_requests = None\n LOG.info(\"{0} Entered get_num_cache_requests()\".format(time.time()))\n while num_stabilization_attempts < max_num_stabilization_attempts:\n new_requests = get_num_cache_requests_util()\n if new_requests == num_requests: break\n LOG.info(\"{0} Waiting to stabilise: num_requests={1} new_requests={2}\".format(\n time.time(), num_requests, new_requests))\n num_requests = new_requests\n num_stabilization_attempts = num_stabilization_attempts + 1\n time.sleep(wait_time_in_sec)\n LOG.info(\"{0} Final num requests: {1}\".format(time.time(), num_requests))\n return num_requests",
"async def get_available_worker(self) -> int:\n while True:\n available_id = -1\n for worker_id in range(self._num_workers):\n try:\n worker_available = self._backend_manager.is_worker_available(worker_id)\n except IsWorkerAvailableError as e:\n worker_available = False\n if self._verbose > 1:\n print(\"IsWorkerAvailableError raised for worker {}: {}\".format(\n worker_id, e.message,\n ))\n if worker_available:\n available_id = worker_id\n break\n if available_id == -1:\n await asyncio.sleep(self._sleep_time)\n else:\n return available_id",
"def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)",
"def read_times(self, times):\n errcount = 0\n while(True):\n try:\n return self.read()\n except IOError:\n errcount += 1\n if errcount >= times:\n raise \n time.sleep(1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Chronos GR Config plugin writes new config when config has changed | def test_chronos_gr_config_changed(self, mock_run_command, mock_safely_write):
# Create the plugin
plugin = ChronosGRConfigPlugin({})
# Set up the config strings to be tested
old_config_string = "Old Chronos GR config"
new_config_string = "New Chronos GR config"
# Call 'on_config_changed' with file.open mocked out
with mock.patch('clearwater_etcd_plugins.chronos.chronos_gr_config_plugin.open', \
mock.mock_open(read_data=old_config_string), create=True) as mock_open:
plugin.on_config_changed(new_config_string, None)
# Test assertions
mock_open.assert_called_once_with(plugin.file(), "r")
mock_safely_write.assert_called_once_with(plugin.file(), new_config_string)
mock_run_command.assert_called_once_with("/usr/share/clearwater/clearwater-queue-manager/scripts/modify_nodes_in_queue add apply_chronos_gr_config") | [
"def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"",
"def test_write_config(self):\n config = Config()\n config.config = test_config\n config.config_file = \"./config\"\n config.write_config()\n with open(config.config_file) as config_file:\n data = config_file.read()\n self.assertTrue(data)\n os.remove(config.config_file)",
"def test_write_config(self, mock_get_alarm, mock_safely_write, mock_run_command):\n\n # Create a plugin with dummy parameters\n plugin = ChronosPlugin(PluginParams(ip='10.0.0.1',\n mgmt_ip='10.0.1.1',\n local_site='local_site',\n remote_site='remote_site',\n remote_cassandra_seeds='',\n signaling_namespace='',\n uuid=uuid.UUID('92a674aa-a64b-4549-b150-596fd466923f'),\n etcd_key='etcd_key',\n etcd_cluster_key='etcd_cluster_key'))\n\n # We expect this alarm to be called on creation of the plugin\n mock_get_alarm.assert_called_once_with('cluster-manager',\n alarm_constants.CHRONOS_NOT_YET_CLUSTERED)\n\n # Build a cluster_view that includes all possible node states\n cluster_view = {\"10.0.0.1\": \"waiting to join\",\n \"10.0.0.2\": \"joining\",\n \"10.0.0.3\": \"joining, acknowledged change\",\n \"10.0.0.4\": \"joining, config changed\",\n \"10.0.0.5\": \"normal\",\n \"10.0.0.6\": \"normal, acknowledged change\",\n \"10.0.0.7\": \"normal, config changed\",\n \"10.0.0.8\": \"waiting to leave\",\n \"10.0.0.9\": \"leaving\",\n \"10.0.0.10\": \"leaving, acknowledged change\",\n \"10.0.0.11\": \"leaving, config changed\",\n \"10.0.0.12\": \"finished\",\n \"10.0.0.13\": \"error\"}\n\n # Call the plugin to write the settings itself\n plugin.write_cluster_settings(cluster_view)\n mock_safely_write.assert_called_once()\n # Save off the arguments the plugin called our mock with\n args = mock_safely_write.call_args\n\n # Catch the call to reload chronos\n mock_run_command.assert_called_once_with('service chronos reload')\n\n # Check the plugin is attempting to write to the correct location\n self.assertEqual(\"/etc/chronos/chronos_cluster.conf\", args[0][0])\n\n # ConfigParser can't parse plain strings in python 2.7\n # Load the config into a buffer and pass it in as a string like object\n buf = StringIO(args[0][1])\n config = RawConfigParser(dict_type=MultiOrderedDict)\n config.readfp(buf)\n\n # Check identity section\n self.assertEqual(config.get('identity', 'instance_id'), '18')\n self.assertEqual(config.get('identity', 'deployment_id'), '6')\n # Check cluster section\n self.assertEqual(config.get('cluster', 'localhost'), '10.0.0.1')\n self.assertTrue(all(ip in config.get('cluster', 'joining')\n for ip in (\"10.0.0.3\", \"10.0.0.4\")))\n self.assertTrue(all(ip in config.get('cluster', 'node')\n for ip in (\"10.0.0.5\", \"10.0.0.6\", \"10.0.0.7\")))\n self.assertTrue(all(ip in config.get('cluster', 'leaving')\n for ip in (\"10.0.0.10\", \"10.0.0.11\")))",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_update_reg_ex_config(self):\n pass",
"def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref",
"def test_config_reload(self):\n server = self.start_server(\"hello world\", 200)\n try:\n self.setup_dynamic()\n\n cfg_file = \"test.yml\"\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.output_has(lines=1))\n\n self.assert_last_status(\"up\")\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://203.0.113.1:8186\"))\n\n self.wait_until(lambda: self.last_output_line()[\n \"url.full\"] == \"http://203.0.113.1:8186\")\n\n self.assert_last_status(\"down\")\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()",
"def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig",
"def test_set_config__twice__with_same_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'), \\\n mock.patch('optimizely.optimizely_config.OptimizelyConfigService.get_config') as mock_opt_service:\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual(1, mock_opt_service.call_count)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n mock_opt_service.reset_mock()\n\n # Call set config again and confirm that no new log message denoting config update is there\n project_config_manager._set_config(test_datafile)\n self.assertEqual(0, mock_logger.debug.call_count)\n self.assertEqual(0, mock_notification_center.call_count)\n # Assert that mock_opt_service is not called again.\n self.assertEqual(0, mock_opt_service.call_count)",
"def test_update_canary_config_using_put(self):\n pass",
"def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")",
"def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()",
"def test_config_add(self):\n self.setup_dynamic()\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 0\"))\n\n server = self.start_server(\"hello world\", 200)\n try:\n self.write_dyn_config(\n \"test.yml\", self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 1\"))\n\n self.wait_until(lambda: self.output_has(lines=1))\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()",
"def test_change_configuration_property(self) -> None:\n\n self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)\n csrf_token = self.get_new_csrf_token()\n new_config_value = False\n\n response_dict = self.get_json('/adminhandler')\n response_config_properties = response_dict['config_properties']\n self.assertDictContainsSubset({\n 'value': False,\n }, response_config_properties[\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name])\n\n payload = {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name: (\n new_config_value),\n }\n }\n self.post_json('/adminhandler', payload, csrf_token=csrf_token)\n\n response_dict = self.get_json('/adminhandler')\n response_config_properties = response_dict['config_properties']\n self.assertDictContainsSubset({\n 'value': new_config_value,\n }, response_config_properties[\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name])\n\n self.logout()",
"def test_modify_server_config(self):\n shutil.copy(SERVER_CONFIG_ORIGINAL, SERVER_CONFIG)\n writer.sinkhole_file = 'sinkhole.conf'\n\n with open(writer.sinkhole_file, \"w\") as sfh:\n sfh.write('# dummy sinkhole file')\n\n # check that the test server config is ok\n try:\n unbound.test_server_config(SERVER_CONFIG)\n except CalledProcessError:\n self.fail()\n\n # enable\n writer.update_server_config(SERVER_CONFIG, enable=True)\n self.assertTrue(self.check_config(matches_original=False))\n # enable twice\n writer.update_server_config(SERVER_CONFIG, enable=True)\n self.assertTrue(self.check_config(matches_original=False))\n # disable\n writer.update_server_config(SERVER_CONFIG, enable=False)\n self.assertFalse(self.check_config(matches_original=True))\n # disable twice\n writer.update_server_config(SERVER_CONFIG, enable=False)\n self.assertFalse(self.check_config(matches_original=True))",
"def test_set_config__twice__with_diff_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('1', project_config_manager.optimizely_config.revision)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n\n # Call set config again\n other_datafile = json.dumps(self.config_dict_with_multiple_experiments)\n project_config_manager._set_config(other_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: 1. New revision number: 42.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('42', project_config_manager.optimizely_config.revision)",
"def conf_update(self):\n pass",
"def test_update_room_configuration(self):\n pass",
"def test_yaml_write(self):\n test_name = \"test_configuration\"\n my_configurator = setup_configurator(test_name)\n\n config = my_configurator._read_yaml()\n\n config[\"new_field\"] = 123\n config[\"paths\"][\"new_path\"] = \"/test/path/\"\n\n my_configurator._write_yaml(config)\n\n new_config = my_configurator._read_yaml()\n\n self.assertEqual(new_config[\"other\"][\"characters_per_line\"], 85)\n self.assertEqual(new_config[\"new_field\"], 123)\n self.assertEqual(new_config[\"paths\"][\"new_path\"], \"/test/path/\")\n\n # remove the testing configuration file\n setup_expected_file(test_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the draft results. | def test_load_draft(league):
draft = league.draft_results()
assert(len(draft) == 144)
#mcdavid 1st
assert(draft[0]['player_key'] == '396.p.6743')
# carter hart 67th
assert(draft[66]['player_key'] == '396.p.7156')
# zadorov last
assert(draft[-1]['player_key'] == '396.p.5995') | [
"def load(self):\n _path = glob(join_path(self.results_dir,'%s.results'%self.name)).pop()\n with open(_path,'r') as _f:\n self._loaded_data = load(_f)",
"def load(self):\n self.results = pickle_load('results', self.main_dir)",
"def load_draft_records(self):\n\n src_path = self.config['DRAFTS_METADATA_FOLDER']\n\n for f in glob.glob(os.path.join(src_path, \"*.json\")):\n draft = self._read_draft_from_file(f)\n yield draft",
"def fetch_results(self):\n self._get_flat_results()",
"def load(self):\n self.load_pages()\n self.load_posts()",
"def run(self):\n results = self.fetch()\n return results",
"def load_results_internal(self):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"rb\") as f:\r\n return dill.load(f)",
"def test_loadrevisions_querycontinue(self):\n self.mysite.loadrevisions(self.mainpage, step=5, total=12)\n self.assertLength(self.mainpage._revisions, 12)",
"async def showdraft(self, ctx, draftid=\"\"):\n\t\tif Path(\"drafts/\"+draftid+\".p\").is_file():\n\t\t\twith open(\"drafts/\"+draftid+\".p\", \"rb\") as f:\n\t\t\t\tdraftobj = pickle.load(f)\n\t\t\temb = discord.Embed(title=\"Draft information\", description=\"Draft ID: \"+draftid, color=0x3498db)\n\t\t\temb.set_thumbnail(url=ctx.message.server.icon_url)\n\t\t\temb.add_field(name=\"Name\", value=draftobj.name)\n\t\t\temb.add_field(name=\"Size\", value=str(draftobj.size))\n\t\t\temb.add_field(name=\"Date\", value=draftobj.date)\n\t\t\thost = ctx.message.server.get_member(draftobj.host)\n\t\t\tif host != None:\n\t\t\t\temb.add_field(name=\"Host\", value=host.mention)\n\t\t\telse:\n\t\t\t\temb.add_field(name=\"Host\", value=\"Unknown\")\n\t\t\tif draftobj.draftpage != \"\":\n\t\t\t\temb.add_field(name=\"Draftpage\", value=draftobj.draftpage)\n\t\t\temb.add_field(name=\"Status\", value=draftobj.status)\n\t\t\teligibleRole = discord.utils.get(ctx.message.server.roles, id=draftobj.eligible)\n\t\t\tif eligibleRole != None:\n\t\t\t\temb.add_field(name=\"Eligible\", value=eligibleRole.name)\n\t\t\telse:\n\t\t\t\temb.add_field(name=\"Eligible\", value=\"Unknown\")\n\n\t\t\tif draftobj.results == None:\n\t\t\t\temb.add_field(name=\"Results\", value=\"Nothing entered yet\")\t\n\t\t\telse:\n\t\t\t\tresults = \"*__Player__ __W/L__*\\n\"\n\t\t\t\tfor key in draftobj.results:\n\t\t\t\t\tname = key\n\t\t\t\t\tif name.startswith(\"_\"):\n\t\t\t\t\t\tname = name[1:]\n\t\t\t\t\tresults = results + name +\"\\n \"+ str(draftobj.results[key][0]) +\"/\"+ str(draftobj.results[key][1])+\"\\n\"\n\t\t\t\temb.add_field(name=\"Results\", value=results, inline=False)\n\t\t\tawait self.bot.say(embed=emb)\n\n\t\t\tif draftobj.decks != None:\n\t\t\t\tdeckEmb = discord.Embed(title=\"Decks\", description=\"Decks of '\"+draftobj.name+\"'\", color=0x3498db)\n\t\t\t\tfor d in draftobj.decks:\n\t\t\t\t\tdeckEmb.add_field(name=d[\"name\"], value=d[\"value\"], inline=False)\n\t\t\t\tawait self.bot.say(embed=deckEmb)\n\t\t\t\n\t\t\tprint(\"## showed draftinfos of \"+ draftid)\n\t\t\t\n\t\telse:\n\t\t\tawait self.bot.say(\"That draft does not exists\", delete_after=autodeletetime)",
"def load_messages():\n rows = db(db.board).select()\n # d = {}\n # for r in rows:\n # d[r.message_id] = {'message_content': r.message_content}\n d = {r.message_id: {'message_content': r.message_content,\n 'is_draft': r.is_draft}\n for r in rows}\n return response.json(dict(msg_dict=d))",
"def _load_results(self):\n self.inputs = list(iofuncs.read_scores(self.outdir/rc.report_input_scores))\n self.results = list(iofuncs.read_scores(self.outdir/rc.report_result_scores))\n logging.info(\"Loaded saved results for %s\", self.dataset)",
"def load(self):\n results_fn = os.path.join(self.full_path, self.output_filename)\n self.results = rlpy.tools.results.load_single(results_fn)\n return self.results",
"def __loadResults(self):\r\n if self.flattenResults:\r\n for resultFile in self.resultFiles:\r\n self.__results.extend(self.readAnalysisResult(resultFile))\r\n else:\r\n for resultFile in self.resultFiles:\r\n self.__results.append(self.readAnalysisResult(resultFile))",
"def load(self):\n if self[\"id\"] is not None:\n loadAction = self.daofactory(classname=\"Jobs.LoadFromID\")\n results = loadAction.execute(self[\"id\"], conn=self.getDBConn(),\n transaction=self.existingTransaction())\n else:\n loadAction = self.daofactory(classname=\"Jobs.LoadFromName\")\n results = loadAction.execute(self[\"name\"], conn=self.getDBConn(),\n transaction=self.existingTransaction())\n\n self.update(results)\n if self['mask']['FirstRun'] is not None and self['mask']['FirstRun'] == self['mask']['LastRun']:\n self['mask'].load(jobID=self['id'])\n return",
"def loading(self):\n pass",
"def load_raw_results(self):\n if not self.setup.complete():\n raise AttributeError(\"Import setup is not complete\")\n access = DataImport(self.setup)\n if access.data_loaded:\n self.raw_results = access.results\n self.set_start_stop_time()\n return True\n return False",
"async def draftlist(self, ctx):\n\t\tif not Path(\"data/drafting/draftlist.p\").is_file():\n\t\t\twith open(\"data/drafting/draftlist.p\", \"wb\") as f:\n\t\t\t\tl = [[\"listbeginning\", \"nothing to see here\", \"nope nothin\"]]\n\t\t\t\tpickle.dump(l, f)\n\t\twith open(\"data/drafting/draftlist.p\", \"rb\") as f:\n\t\t\tl = pickle.load(f)\n\t\temb = discord.Embed(title=\"Draft list\", description=\"The 10 most recent drafts\", color=0x3498db)\n\t\temb.set_thumbnail(url=ctx.message.server.icon_url)\n\t\temb.set_footer(text=\"Use !showdraft <id> for more information on a single draft\")\n\t\tif len(l) >= 10:\n\t\t\tr = 10\n\t\telse:\n\t\t\tr = len(l)\n\t\tfor n in range(r, 1, -1):\n\t\t\temb.add_field(name=str(r-n+1)+\"- \"+l[n-1][0]+\" | \"+l[n-1][2], value=\"ID: \"+l[n-1][1], inline=False)\n\t\tawait self.bot.say(embed=emb)\n\t\tprint(\"## showed draftlist\")",
"def load_rentedout():",
"def drafts():\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the team roster at a given date. | def get_team_roster(league):
pass | [
"async def roster(\n self, ctx: commands.Context, season: Optional[YearFinder] = None, *, search: HockeyTeams\n ) -> None:\n season_str = None\n season_url = \"\"\n if season:\n if season.group(3):\n if (int(season.group(3)) - int(season.group(1))) > 1:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if (int(season.group(3)) - int(season.group(1))) <= 0:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n season_str = f\"{season.group(1)}{season.group(3)}\"\n else:\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n year = int(season.group(1)) + 1\n season_str = f\"{season.group(1)}{year}\"\n if season:\n season_url = f\"?season={season_str}\"\n if search is None:\n return await ctx.send(_(\"You must provide a valid current team.\"))\n rosters = {}\n players = []\n teams = [team for team in TEAMS if search.lower() in team.lower()]\n if teams != []:\n for team in teams:\n url = f\"{BASE_URL}/api/v1/teams/{TEAMS[team]['id']}/roster{season_url}\"\n async with self.session.get(url) as resp:\n data = await resp.json()\n if \"roster\" in data:\n for player in data[\"roster\"]:\n players.append(player[\"person\"][\"id\"])\n else:\n return await ctx.send(_(\"No team name was provided.\"))\n\n if players:\n await BaseMenu(\n source=PlayerPages(pages=players, season=season_str),\n cog=self,\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)\n else:\n if season:\n year = _(\" in the {season} season\").format(\n season=f\"{season.group(1)}-{season.group(3)}\"\n )\n else:\n year = \"\"\n await ctx.send(\n _(\"I could not find a roster for the {team}{year}.\").format(team=team, year=year)\n )",
"def __call__(self, date):\n for game in self._games:\n if game.datetime.year == date.year and \\\n game.datetime.month == date.month and \\\n game.datetime.day == date.day:\n return game\n raise ValueError('No games found for requested date')",
"def dateToSeason(self, match_date : str) -> str:\r\n season_names : List = ['2012/2013', '2013/2014', '2014/2015', '2015/2016', '2016/2017', '2017/2018', '2018/2019', '2019/2020']\r\n \"\"\"Creates a list of Tuples (season name, datetime object of last day of season)\"\"\"\r\n season_boundaries : List[Tuple] = [(date_object(year, 5, 30), season_years) for year, season_years in zip(range(2013, 2021), season_names)]\r\n date_formatted = datetime.strptime(match_date, \"%d %B %Y\").date() #Converts the method parameter from string to date object\r\n for season in season_boundaries:\r\n date, years = season #Unpacks each tuple\r\n if date_formatted < date: #If the date from the parameter is less than the current iteration of the season\r\n return years",
"def get_team(roster, team_id):\n # Requires: roster is a list of player-info tuples.\n \n pass # delete and replace with code ",
"def get_roster(self) -> Snapshot:\n response = self._browse('Roster/List?pageNo=1&row=1', 'roster')\n if response.id == 'account-login':\n raise exceptions.NotLoggedInError\n list_view = response.content.find(attrs={'data-role': 'listview'})\n shifts: List[Item] = []\n date: Optional[date_type] = None\n title: Optional[str] = None\n for li in list_view.find_all('li', recursive=False):\n if 'data-role' in li.attrs and li['data-role'] == 'list-divider':\n raw_date, _, title = li.string.partition(' - ')\n date = datetime.strptime(raw_date, '%a %d/%m/%Y').date()\n else:\n detail = tuple([p.string for p in li.find('table').find_all('p')])\n shifts.append(Item(date, title, detail))\n return Snapshot(response.time, shifts)",
"def _games_on_date(date):\n games = {}\n endpoint = f\"schedule?date={date}\"\n data = _api_request(endpoint)\n if data:\n if data['totalGames'] == 0:\n return\n games['date'] = data['dates'][0]['date']\n games_list = data['dates'][0]['games']\n games['games'] = games_list\n return games",
"def _get_team_roster(team_id=None, team_name=None):\n if not team_id:\n team_id = _team_id(team_name)\n endpoint = f\"teams/{team_id}/roster\"\n data = _api_request(endpoint)\n player_list = data['roster']\n return player_list",
"def get_games_by_date(self, date):\n return self._db.Games.find({'date' : date})",
"def get_team_game_preview(self, team, date):\n abbr = convert_name(team, how='abbr')\n return self._db.Games.find({'date' : date,\n '$or' : [{'home' : abbr},\n {'away' : abbr}]})",
"def get_next_game(today_game_date: datetime, team_id: int) -> dict:\n\n game_date = today_game_date.strftime(\"%Y-%m-%d\")\n tomorrow = (today_game_date + timedelta(days=1)).strftime(\"%Y-%m-%d\")\n end_date = (today_game_date + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n\n logging.info(\"Checking the schedule API endpoint for the next game.\")\n url = f\"schedule?teamId={team_id}&startDate={game_date}&endDate={end_date}\"\n\n response = api.nhl_api(url)\n if not response:\n return None\n\n next_game_json = response.json()\n next_game = next_game_json.get(\"dates\")[1].get(\"games\")[0]\n\n return next_game",
"def starting_date(cls, player):\r\n\r\n\t\treturn cls.RESULTDAILY[player][0]",
"def fetch_team_roster(team_id, team_document):\n team_document['players'] = []\n team_document['coaches'] = []\n\n common_team_roster_params = {\n 'TeamID': team_id,\n 'Season': constants.CURRENT_SEASON\n }\n common_team_roster_request = NbaRequest(constants.COMMON_TEAM_ROSTER_ENDPOINT, common_team_roster_params)\n common_team_roster_meta = common_team_roster_request.send()\n\n if common_team_roster_meta:\n team_players_row_set = common_team_roster_meta['resultSets'][0]['rowSet']\n for player in team_players_row_set:\n player_info = {}\n player_info['nbaId'] = player[12]\n player_info['fullName'] = player[3]\n player_info['jersey'] = player[4]\n player_info['position'] = player[5]\n team_document['players'].append(player_info)\n\n team_coaches_row_set = common_team_roster_meta['resultSets'][1]['rowSet']\n for coach in team_coaches_row_set:\n coach_info = {}\n coach_info['nbaId'] = coach[2]\n coach_info['fullName'] = coach[5]\n coach_info['type'] = coach[8]\n coach_info['school'] = coach[9]\n team_document['coaches'].append(coach_info)",
"async def reschedule(self, ctx, match_id: str, *, date: str):\n tournament = self.get_tournament(ctx.guild.id)\n try:\n new_date = tournament.parse_date(date, prefer_dates_from=\"future\")\n except ValueError:\n raise commands.UserInputError()\n if not new_date:\n raise commands.UserInputError()\n\n for bracket in tournament.brackets:\n if await self.reschedule_for_bracket(\n ctx,\n tournament,\n bracket,\n match_id,\n new_date,\n ):\n return\n raise tosurnament.InvalidMatchId()",
"def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)",
"def get_semester_of_date(date):\n details = get_semester_details_from_date(date)\n return Semester.objects.get_or_create(\n year=details[\"year\"], semester=details[\"semester\"]\n )[0]",
"def scrape_roster(game_id):\n roster = get_roster(game_id)\n\n if not roster:\n print(\"Roster for game {} is either not there or can't be obtained\".format(game_id))\n return None\n\n try:\n players, head_coaches = get_content(roster)\n except Exception as e:\n print('Error parsing Roster for game {}'.format(game_id), e)\n return None\n\n return {'players': players, 'head_coaches': head_coaches}",
"def player_rank(cls, player, date):\r\n\r\n\t\ttry:\r\n\t\t\tP_RANKS = cls.RANKS[player]\r\n\t\texcept KeyError:\t# If player does not exist\r\n\t\t\treturn False\r\n\r\n\t\tinit_date = P_RANKS[0]\r\n\r\n\t\t# If player hadn't played yet by the date specified\r\n\t\tif date < init_date:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tdate_ind = DATES.day_diff(date, init_date)\r\n\r\n\t\trank = P_RANKS[date_ind + 1]\r\n\t\t\r\n\t\treturn rank",
"def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = 
scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)",
"def roster(self):\r\n with self.lock:\r\n if self.roster_client is not None:\r\n return self.roster_client.roster\r\n else:\r\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
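The positive document for the roster query above is only a stub (pass). The following is a hypothetical sketch of an implementation consistent with that query, assuming a league object whose teams expose dated roster entries; all attribute names here are illustrative and none come from the dataset.

    from datetime import date

    def get_team_roster(league, on_date=None):
        # Return {team_name: [player_name, ...]} for rosters active on the given date.
        on_date = on_date or date.today()
        rosters = {}
        for team in league.teams:
            rosters[team.name] = [
                entry.player_name
                for entry in team.roster_entries
                if entry.start_date <= on_date
                and (entry.end_date is None or on_date <= entry.end_date)
            ]
        return rosters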
Calibrate the chemical shifts of each spin in the peak list. | def calibrate_peaklist(peaklist, calibration, attr='shift'):
if len(calibration) != peaklist.dims:
raise ValueError('incorrect calibration list length')
for peak in peaklist:
for spin, cal in zip(peak, calibration):
shift = getattr(spin, attr)
shift -= cal
setattr(spin, attr, shift)
return peaklist | [
"def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)",
"def calibrate(self, data):\n self.shift = data[7]\n lower_count_to_current = (data[0] / 8.0) / (data[5] - data[7])\n upper_count_to_current = (data[4] / 8.0) / (data[7] - data[9])\n self.counts_to_current = (float(lower_count_to_current) + float(\n upper_count_to_current)) / 2.0\n logging.info('adc calibrate, counts to current: {0}'.format(self.counts_to_current))",
"def apply_calibration(self, cal):\n\n n_edges = len(self.channels) + 1\n channel_edges = np.linspace(-0.5, self.channels[-1] + 0.5, num=n_edges)\n self.bin_edges_kev = cal.ch2kev(channel_edges)",
"def calibration_wheel(self):\n self.spectrum = self.spectrum",
"def calibrate(self, count=256, delay=0.200):\n print(\"Starting Calibration.\")\n print(\"The magnetometer needs to be turned in all possible directions \\\n during the callibration process. Ideally each axis would once \\\n line up with the magnetic field.\")\n \n self._offset = (0, 0, 0)\n self._scale = (1, 1, 1)\n\n raw_data = self._raw_magnet_data\n raw_x = raw_data[0][0]\n raw_y = raw_data[1][0]\n raw_z = raw_data[2][0]\n self._status # Enable updating readings again\n \n minx = maxx = raw_x\n miny = maxy = raw_y\n minz = maxz = raw_z\n\n while count:\n sleep(delay)\n\n raw_data = self._raw_magnet_data\n print(raw_x, raw_y, raw_z)\n raw_x = raw_data[0][0]\n raw_y = raw_data[1][0]\n raw_z = raw_data[2][0]\n self._status # Enable updating readings again\n \n minx = min(minx, raw_x)\n maxx = max(maxx, raw_x)\n miny = min(miny, raw_y)\n maxy = max(maxy, raw_y)\n minz = min(minz, raw_z)\n maxz = max(maxz, raw_z)\n\n count -= 1\n\n # Hard iron correction\n offset_x = (maxx + minx) / 2\n offset_y = (maxy + miny) / 2\n offset_z = (maxz + minz) / 2\n\n self._offset = (offset_x, offset_y, offset_z)\n\n print(\"+++++++++++\")\n print(\"Hard Iron Offset Values:\")\n print(self._offset)\n\n # Soft iron correction\n avg_delta_x = (maxx - minx) / 2\n avg_delta_y = (maxy - miny) / 2\n avg_delta_z = (maxz - minz) / 2\n\n avg_delta = (avg_delta_x + avg_delta_y + avg_delta_z) / 3\n\n scale_x = avg_delta / avg_delta_x\n scale_y = avg_delta / avg_delta_y\n scale_z = avg_delta / avg_delta_z\n\n self._scale = (scale_x, scale_y, scale_z)\n\n print(\"Soft iron values\")\n print(self._scale)",
"def calc_axialshift(self):\n for c in self.chains:\n c.calc_axialshift()",
"def calibrate(scan, elastics=None, energy_per_pixel=1, I0s=None):\n if elastics is None:\n elastics = np.zeros(scan.shape[0:2])\n if I0s is None:\n I0s = np.ones(scan.shape[0:2])\n\n scan_out = scan - elastics[:, :, np.newaxis, np.newaxis]\n scan_out[:, :, :, 0:1] *= energy_per_pixel\n scan_out[:, :, :, 1:2] /= I0s[:, :, np.newaxis, np.newaxis]\n return scan_out",
"def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)",
"def calibrate(self):\n\t\twl = BLi.getWavelength()\n\t\tif abs(self.stokes()) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset2(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset4(yyy)\n\t\t\tself.offset9(self.dettrans())\n\t\telif abs(self.stokes()-90.) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset3(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset8(yyy)\n\t\t\tself.offset10(self.dettrans())\n\t\telse:\n\t\t\tprint \"Can't calibrate at stokes=\",self.stokes()\n\t\treturn [self.sign(),self.offset2(), self.offset3(),self.offset4(),self.offset5(),self.offset8(),self.offset9(),self.offset10()]",
"def calibrateData(self):\n if self.calibrationFile == \"\":\n tkMessageBox.showinfo(\"File Error\", \"No calibration file selected\")\n return\n maximaMZ = []\n #window = self.calibrationMenu()\n potentialCalibrants = self.readCalibrants()\n if self.exclusionFile != \"\":\n included = self.readInclusionRange()\n else:\n included = self.createInclusionList(potentialCalibrants)\n data = self.readData(self.inputFile)\n maxima = self.getLocalMaxima(data, included)\n actualCalibrants = self.getObservedCalibrants(maxima, potentialCalibrants)\n if self.checkMaximaSpacing(actualCalibrants, data) is False:\n if self.log is True:\n with open('MassyTools.log', 'a') as fw:\n fw.write(str(datetime.now())+\"\\tNot enough datapoints for calibration\\n\")\n self.writeUncalibratedFile()\n return\n # Strip the m/z values from the maxima\n for i in maxima:\n if i[1] == 0:\n if self.log is True:\n with open('MassyTools.log', 'a') as fw:\n fw.write(str(datetime.now())+\"\\tUnexpected error!\\n\")\n else:\n maximaMZ.append(i[0])\n # Perform 2d degree polynomial fit\n z = numpy.polyfit(maximaMZ, actualCalibrants, 2)\n f = numpy.poly1d(z)\n y = f(maximaMZ)\n # Display the calibrated plot on the main screen\n if self.batchProcessing == 0:\n self.plotChange(data, f)\n self.writeCalibration(y, actualCalibrants)\n # Call function to write away calibrated file\n # Ideally this would throw a pop up with calibration\n # parameters and display the calibration 'curve'\n self.transformFile(f)",
"def calibrate(self, calibration):\n self.raw -= calibration.raw\n # Everything but raw data is affected by calibrating, reset them\n self.reset(raw=False)",
"def calibrate_alms(alms, cal = 1.0, pol_eff = 1.0):\n alms[0] *= cal\n alms[1] *= cal / pol_eff\n alms[2] *= cal / pol_eff\n\n return alms",
"def test_cys_calibration(self):\n self.calibrate(\"cys\")",
"def calibrate():\n F0ST = 3.6307805477010028e-09\n F0AB = 3.6307805477010029e-20\n #-- get calibrator\n wave,flux = get_calibrator(name='alpha_lyr')\n zp = filters.get_info()\n \n #-- calculate synthetic fluxes\n syn_flux = synthetic_flux(wave,flux,zp['photband'])\n syn_flux_fnu = synthetic_flux(wave,flux,zp['photband'],units='Fnu')\n Flam0_lit = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/AA',zp['Flam0'],photband=zp['photband'])\n Fnu0_lit = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/Hz',zp['Fnu0'],photband=zp['photband'])\n \n #-- we have Flam0 but not Fnu0: compute Fnu0\n keep = (zp['Flam0_lit']==1) & (zp['Fnu0_lit']==0)\n Fnu0 = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/Hz',zp['Flam0'],photband=zp['photband'])\n zp['Fnu0'][keep] = Fnu0[keep]\n zp['Fnu0_units'][keep] = 'erg/s/cm2/Hz'\n \n #-- we have Fnu0 but not Flam0: compute Flam0\n keep = (zp['Flam0_lit']==0) & (zp['Fnu0_lit']==1)\n Flam0 = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/AA',zp['Fnu0'],photband=zp['photband'])\n \n # set everything in correct units for convenience:\n Flam0 = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/AA',zp['Flam0'])\n Fnu0 = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/Hz',zp['Fnu0'])\n \n #-- as a matter of fact, set Flam0 and Fnu for all the stuff for which we\n # have no literature values\n keep = (zp['Flam0_lit']==0) & (zp['Fnu0_lit']==0)\n zp['Flam0'][keep] = syn_flux[keep]\n zp['Flam0_units'][keep] = 'erg/s/cm2/AA'\n zp['Fnu0'][keep] = syn_flux_fnu[keep]\n zp['Fnu0_units'][keep] = 'erg/s/cm2/Hz'\n \n keep = np.array(['DENIS' in photb and True or False for photb in zp['photband']])\n \n #-- we have no Flam0, only ZP vegamags\n keep = (zp['vegamag_lit']==1) & (zp['Flam0_lit']==0)\n zp['Flam0'][keep] = syn_flux[keep]\n zp['Flam0_units'][keep] = 'erg/s/cm2/AA'\n \n #-- we have no Flam0, no ZP vegamas but STmags\n keep = (zp['STmag_lit']==1) & (zp['Flam0_lit']==0)\n m_vega = 2.5*np.log10(F0ST/syn_flux) + zp['STmag']\n zp['vegamag'][keep] = m_vega[keep]\n \n #-- we have no Fnu0, no ZP vegamas but ABmags\n keep = (zp['ABmag_lit']==1) & (zp['Flam0_lit']==0)\n F0AB_lam = conversions.convert('erg/s/cm2/Hz','erg/s/cm2/AA',F0AB,photband=zp['photband'])\n m_vega = 2.5*np.log10(F0AB_lam/syn_flux) + zp['ABmag']\n zp['vegamag'][keep] = m_vega[keep]\n \n #-- set the central wavelengths of the bands\n set_wave = np.isnan(zp['eff_wave'])\n zp['eff_wave'][set_wave] = filters.eff_wave(zp['photband'][set_wave])\n \n return zp",
"def calibrateAccn(accn):\n\n print(\"Calibrating Accelerometer\")\n\n # Capture Data in each position\n positions = ['+ve x up', '-ve x up', '+ve y up', '-ve y up', '+ve z up', '-ve z up']\n pos_counter = 0\n w = []\n y =[]\n for pos in positions:\n input(\"Position IMU %s and press [ENTER]\" % pos)\n # take mean of 2 seconds\n nsamp = 0\n axtmp = []\n aytmp = []\n aztmp = []\n while nsamp <600:\n ax, ay, az = accn.read()\n axtmp.append(ax)\n aytmp.append(ay)\n aztmp.append(az)\n time.sleep(0.005)\n nsamp +=1\n ax = np.mean(ax)\n ay = np.mean(ay)\n az = np.mean(az)\n\n # Convert to LS Form\n w.append([ax,ay,az,1])\n if '+ve' in pos:\n temp = [0,0,0]\n temp[pos_counter] = -1\n y.append(temp)\n elif '-ve' in pos:\n temp = [0,0,0]\n temp[pos_counter] = 1\n y.append(temp)\n pos_counter +=1\n print(temp)\n\n # Generate LS Soln\n w = np.array(w)\n y = np.array(y)\n x = np.dot(np.dot(np.linalg.inv(np.dot(np.transpose(w), w)), np.transpose(w)),y)\n\n bias = x[3,:]\n sensitivity = x[0:3,:]\n\n print(\"Bias = \")\n print(bias)\n print(\"Sensitivity = \")\n print(sensitivity)\n return bias, sensitivity",
"def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT",
"def updateAllShifts(shiftList):\n \n for shift in shiftList.measurements:\n averageShiftValue(shift)",
"def expandcal(self):\n ind=np.zeros(self.spec.shape[0]).astype(int)\n for k in range(self.nscan):\n ind[self.getscanind(k)]=k\n ind[self.getcalind(k)]=k\n return ind",
"def calibrate(): \n \n # Calibrate of the run using beam data. Creates a folder cal-files/caltag \n # containing all calibration data. \n CalObj = Calibration(steerfiles=steerfiles, name=caltag + '-cal') \n\n # Set Beam energy\n CalObj.set_beam_momentum(beamenergy)\n\n # Get gearfile and set air as DUT material\n localgearfile = CalObj.get_filename('gear.xml')\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='radLength', value=304000.0)\n \n # Create list of calibration steps \n calpath = create_calibration_path(CalObj)\n \n # Run the calibration steps \n CalObj.calibrate(path=calpath,ifile=rawfile_air,caltag=caltag)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
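A minimal usage sketch for the calibrate_peaklist document above, using stand-in Spin and PeakList classes; the real classes in the source library are richer, and only the .shift attribute, the iteration behaviour and the .dims attribute that the function relies on are reproduced here.

    class Spin:
        def __init__(self, shift):
            self.shift = shift

    class PeakList(list):
        # A list of peaks, where each peak is a sequence of Spin objects.
        def __init__(self, peaks, dims):
            super().__init__(peaks)
            self.dims = dims

    peaks = PeakList([[Spin(8.32), Spin(120.5)],
                      [Spin(7.91), Spin(118.2)]], dims=2)
    calibrate_peaklist(peaks, calibration=[0.02, 0.40])  # subtract 0.02 from dim 1, 0.40 from dim 2
    print(peaks[0][0].shift)  # ~8.30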
Map each unique spin link to all of its corresponding peaks. NOESY peak lists represent spin links between Hydrogen atoms. Whether 2D, 3D or 4D, each peak in a NOESY peak list has exactly two Hydrogen spins. Here, a spin link is represented by a frozenset containing the spin.assignment tuples for each Hydrogen atom. This function returns a dictionary mapping each unique spin link to a list of the Peaks in the PeakList that contain those two Hydrogen atoms. Examples >>> spin_link_dict = peaklist.spin_link_dict() >>> spin_link, peaks = spin_link_dict.popitem() >>> spin_link frozenset([Assignment(res_type='Q', res_num=21, atom='HN'), Assignment( res_type='G', res_num=17, atom='HN')]) >>> print(peaks[0]) Peak(spins=[ Spin(res_type=G, res_num=17, atom=HN), Spin(res_type=G, res_num=17, atom=N), Spin(res_type=Q, res_num=21, atom=HN)]) >>> print(peaks[1]) Peak(spins=[ Spin(res_type=Q, res_num=21, atom=HN), Spin(res_type=Q, res_num=21, atom=N), Spin(res_type=G, res_num=17, atom=HN)]) Returns | def get_spin_link_dict(peaklist):
spin_link_dict = {}
for peak in peaklist:
spins = [spin for spin in peak
if spin.atom is not None and spin.atom[0] == 'H']
if len(spins) != 2:
err = ('expected 2 Hydrogens in each peak, '
'found %d' % len(spins))
raise ValueError(err)
link = frozenset(spin.assignment for spin in spins)
spin_link_dict.setdefault(link, []).append(peak)
return spin_link_dict | [
"def spinnaker_links(self):\n return iter(self._spinnaker_links.items())",
"def list_to_dict(links):\n dic = defaultdict(list)\n for link in links:\n dic[int(link[0][1:])].append(int(link[1][1:]))\n if int(link[1][1:]) not in dic:\n dic[int(link[1][1:])] = []\n return dic",
"def peak_list_to_dict(peak_list):\n peak_dict = {}\n\n for peak in peak_list:\n peak_dict[peak.peak_id] = peak\n\n return peak_dict",
"def peakAssignmentSetData(self):\n\n #\n # Once the linking is known, check whether peak number matches.\n # First make temporary dict with original peak number (if any - ignore if not available?!?)\n # Then get the right peak.\n #\n \n peakNumDict = {}\n \n for peak in self.peakList.sortedPeaks():\n \n applData = peak.findFirstApplicationData(application = self.format, keyword = peakNum_kw)\n \n if applData:\n peakNumDict[applData.value] = peak\n \n \n for self.peakAssignment in self.peakAssignmentsFile.peakAssignments:\n \n peakNum = self.peakAssignment.peakNum\n \n #\n # If there is a match, then see if there are already assignments attached\n # Only take the self.assignDims into account\n #\n\n if peakNumDict.has_key(peakNum):\n\n self.convertCount[self.mainCode][0] += 1\n\n self.peak = peakNumDict[peakNum]\n self.peakContribResonances = [] # TODO: could set these up from the start...\n self.resonancesPeakContrib = []\n \n #\n # Check if both relevant dims are empty: if not, then addMode has to be ON\n # before anything is added\n #\n \n origAssignments = 0\n numPeakAssignDim = len(self.assignDim)\n \n if self.overwriteMode == 0:\n \n for self.peakAssignDim in range(numPeakAssignDim):\n \n if self.assignDim[self.peakAssignDim] != None:\n\n self.peakDim = self.peak.sortedPeakDims()[self.assignDim[self.peakAssignDim]]\n \n if self.peakDim.peakDimContribs:\n \n origAssignments = 1\n \n #\n # Loop over relevant peakDims\n #\n \n for self.peakAssignDim in range(numPeakAssignDim):\n \n self.peakContribResonances.append({})\n self.resonancesPeakContrib.append({})\n\n if self.assignDim[self.peakAssignDim] != None:\n\n self.peakDim = self.peak.sortedPeakDims()[self.assignDim[self.peakAssignDim]]\n \n #\n # If there are assignments present, delete them if overwrite mode is on\n #\n \n if self.peakDim.peakDimContribs:\n \n if self.overwriteMode == 1:\n \n #\n # Delete all exisiting ones\n #\n \n for peakDimContrib in self.peakDim.peakDimContribs:\n peakDimContrib.delete()\n \n for peakContrib in self.peak.sortedPeakContribs():\n peakContrib.delete()\n \n #\n # If no assignment present (or just deleted), or if assignments need to be added,\n # then add the new ones, create resonances if necessary\n #\n \n if (not self.peakDim.peakDimContribs and origAssignments == 0) or self.addMode == 1:\n \n self.createPeakAssignmentResonances()\n \n #\n # If addMode is on, or if there were no original peakdimcontribs, set the peakContribs\n #\n \n if origAssignments == 0 or self.addMode == 1:\n \n for self.assignCombination in self.peakAssignment.assignCombinations:\n\n self.setPeakDimContribs(0,peakDims = [], resonances = [])",
"def newResonanceLinkMapping(self, **attrlinks):\n return ResonanceLinkMapping(self, **attrlinks)",
"def make_links_dict(pairs_dict):\n links_dict = {}\n for end1 in pairs_dict:\n \n if (end1 in pairs_dict) and (len(pairs_dict[end1])) > 0:\n best_pair = max(pairs_dict[end1], key = pairs_dict[end1].get)\n \n if best_pair in pairs_dict and len(pairs_dict[best_pair]) > 0:\n \n if max(pairs_dict[best_pair], key = pairs_dict[best_pair].get) == end1:\n links_dict[end1] = best_pair\n links_dict[best_pair] = end1\n return links_dict",
"def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict",
"def build_links_dicts(links):\n merge_soft_links(links)\n # print \"Before pruning:\"\n # show_stats(links) \n prune_hard_links(links)\n # print \"After pruning:\"\n # show_stats(links)\n merge_soft_and_hard_links(links)\n make_path2lg(links)\n # return links",
"def getSeqSpinSystemLinks(spinSystem, delta=None):\n\n seqLinks = {}\n for link in spinSystem.findAllResonanceGroupProbs(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == delta:\n seqLinks[link] = None\n\n for link in spinSystem.findAllFromResonanceGroups(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == -delta:\n seqLinks[link] = None\n\n return seqLinks.keys()",
"def parse_linkmap(fh):\n linkmap = {}\n for line in fh:\n if line[0] == '~':\n continue\n sline = line.split('\\t')\n if len(sline) != 2:\n sys.stderr.write('ERROR: bad line \"%s\"\\n' % (line))\n return None\n newlink = eval(sline[0])\n oldlink_list = [eval(t) for t in sline[1].split(';')]\n if linkmap.has_key(newlink):\n sys.stderr.write(\"ERROR duplicate link key %d\\n\" % (newlink))\n return None\n linkmap[newlink] = oldlink_list\n return linkmap",
"def link_dict(self):\n return self._all_link_dict",
"def parse_map(plinkmap):\n plink = {}\n with open(plinkmap, 'r') as f:\n for line in f:\n tmp = line.strip().split()\n chrom = tmp[0]\n if chrom not in plink:\n plink[chrom] = []\n plink[chrom].append(tmp)\n # Then sort on physical position\n for c in plink:\n plink[c] = sorted(plink[c], key=lambda x: int(x[3]))\n return plink",
"def get_all_backup_links(\n network_name: str,\n node_mac_map: DefaultDict,\n link_name_map: Dict[str, Dict],\n conn_list: List,\n) -> DefaultDict:\n backup_links: DefaultDict = defaultdict(dict)\n for conn_list_item in conn_list:\n tx_node_mac = conn_list_item[\"tx_node\"]\n rx_node_mac = conn_list_item[\"rx_node\"]\n backup_link_candidate = {\n \"link_type\": 1,\n \"linkup_attempts\": 0,\n \"is_alive\": False,\n \"name\": \"\",\n \"is_backup_cn_link\": True,\n }\n\n if tx_node_mac not in node_mac_map or rx_node_mac not in node_mac_map:\n logging.debug(f\"One of the mac addresses is not in {network_name}.\")\n continue\n\n # TODO: This part will be used in the later version.\n # No CNs can be tested at this point in the live network.\n # Will come back to complete the logic later on.\n tx_node_type = node_mac_map[tx_node_mac][\"type\"]\n rx_node_type = node_mac_map[rx_node_mac][\"type\"]\n if tx_node_type == NodeType.CN or rx_node_type == NodeType.CN:\n backup_link_candidate[\"is_backup_cn_link\"] = True\n\n if node_mac_map[tx_node_mac][\"name\"] < node_mac_map[rx_node_mac][\"name\"]:\n backup_link_candidate[\"a_node_mac\"] = tx_node_mac\n backup_link_candidate[\"z_node_mac\"] = rx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n else:\n backup_link_candidate[\"a_node_mac\"] = rx_node_mac\n backup_link_candidate[\"z_node_mac\"] = tx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n\n backup_link_candidate_name = (\n f\"link-{backup_link_candidate['a_node_name']}\"\n f\"-{backup_link_candidate['z_node_name']}\"\n )\n backup_link_candidate[\"name\"] = backup_link_candidate_name\n # Do not process any active links in the topology file\n # TODO: check whether this part is necessary.\n # If it is the case, we need to check node macs instead of link name only.\n if backup_link_candidate_name not in link_name_map:\n backup_links[backup_link_candidate_name][\"link\"] = backup_link_candidate\n if len(conn_list_item[\"routes\"]) != 0:\n (_tx_beam_idx, _rx_beam_idx, snr) = conn_list_item[\"routes\"][0]\n backup_links[backup_link_candidate_name][\"snr\"] = snr\n\n return backup_links",
"def get_endpoints_from_link(link_map):\n from_sw = link_map[\"port_map\"][\"dp_a\"]\n from_port = int(link_map[\"port_map\"][\"port_a\"][5:])\n to_sw = link_map[\"port_map\"][\"dp_z\"]\n to_port = int(link_map[\"port_map\"][\"port_z\"][5:])\n\n return from_sw, from_port, to_sw, to_port",
"def correct_strongs(strong_dict: \"dictionary, result of section_peaks function\"):\n def correct_for_inbed_strongs(strong_dict):\n \"\"\"Tests whether we have consecutive sections with weak peaks, separated by \n strong peaks which should be weak. Returns corrected strong_dict\"\"\"\n ordered_start_times = sorted(list(strong_dict.keys()))\n for i, time in enumerate(ordered_start_times):\n if i == len(ordered_start_times)-1:\n continue\n current_weaks = strong_dict[time]\n next_weaks = strong_dict[ordered_start_times[i+1]]\n\n if current_weaks != [] and next_weaks != []:\n strong_dict[time].append(ordered_start_times[i+1])\n del strong_dict[ordered_start_times[i+1]]\n return strong_dict\n return strong_dict\n \n def correct_for_empty_bed_strongs(strong_dict):\n \"\"\"Test for consecutive sections without peaks, separated by \n strong peaks.\"\"\"\n ordered_start_times = sorted(list(strong_dict.keys()))\n for i, time in enumerate(ordered_start_times):\n pass\n if i == len(ordered_start_times)-1:\n continue\n if i == len(ordered_start_times)-2:\n #Special treatment on account of not being able to look two sections into the future. Also, merging is different.\n next_time = ordered_start_times[i+1]\n current_weaks = strong_dict[time]\n next_weaks = strong_dict[next_time]\n if current_weaks == next_weaks == []:\n #Strong peak that defines start of 'next' section will be added to 'current' section as a weak peak, but the section itself will remain\n strong_dict[time].append(next_time)\n else:\n next_time = ordered_start_times[i+1]\n next_next_time = ordered_start_times[i+2]\n current_weaks = strong_dict[time]\n next_weaks = strong_dict[next_time]\n next_next_weaks = strong_dict[next_next_time]\n if current_weaks == next_weaks == []:\n dt1 = abs(time-next_time)\n dt2 = abs(next_next_time - next_time)\n if dt2 <= dt1:\n next_weaks.extend(next_next_weaks)\n next_weaks.append(next_next_time)\n strong_dict[next_time].extend(next_weaks)\n del strong_dict[next_next_time]\n return strong_dict\n else:\n current_weaks.extend(next_weaks)\n current_weaks.append(next_time)\n strong_dict[time].extend(current_weaks)\n del strong_dict[time]\n return strong_dict\n return strong_dict\n for i in range(len(strong_dict)):\n \"\"\"Since both helper functions only do one pass over the sections, multiple passes have to be made.\"\"\"\n strong_dict = correct_for_empty_bed_strongs(strong_dict)\n strong_dict = correct_for_inbed_strongs(strong_dict) \n return strong_dict",
"def start_end_nodes(cls, link: Element) -> dict:\n start_node = cls.get_text(\n link, f\"fromPoint/ns:fromReferent/ns:referentIdentifier\"\n )\n end_node = cls.get_text(link, f\"toPoint/ns:fromReferent/ns:referentIdentifier\")\n return {\"start_node\": start_node, \"end_node\": end_node}",
"def link_to_dict(L):\n D ={}\n while L is not Link.empty:\n key, value = L.first, L.rest.first\n if key not in D:\n D[key] = [value]\n else:\n D[key].append(value)\n L.rest,L = L.rest.rest, L.rest.rest\n # L.rest = L.rest.rest remove all the values\n return D",
"def backlinks(self) -> Dict[str, List[str]]:\n bk_links: Dict[str, List[str]] = {}\n for note in filter(lambda n: n.links_to is not None, self.by_id.values()):\n for fwd in note.links_to:\n if fwd not in bk_links:\n bk_links[fwd] = [note.id]\n else:\n bk_links[fwd].append(note.id)\n\n return bk_links",
"def find_candidate_points(link_data, probe_dict):\n link_dict = {}\n slope = []\n ground_truth = []\n germany = Metadata(n)\n equ = lambda x, y, xp, yp, m: (yp - y) - m*(xp - x)\n pbar = tqdm(total=len(link_data.index))\n for index, row in link_data.iterrows():\n if row.slopeInfo != row.slopeInfo:\n pbar.update(1)\n continue\n link_slope = 0\n denominator = 0\n link_dict[index] = {} # {toRefSpeedLimit, fromRefSpeedLimit....., subLinks}\n link_dict[index]['toRefSpeedLimit'] = row.toRefSpeedLimit\n link_dict[index]['fromRefSpeedLimit'] = row.fromRefSpeedLimit\n link_dict[index]['toRefNumLanes'] = row.toRefNumLanes\n link_dict[index]['fromRefNumLanes'] = row.fromRefNumLanes\n link_dict[index]['subLinks'] = {}\n points = [(x.split('/')) for x in row.shapeInfo.split('|')]\n for point_idx in range(len(points)-1): # for each sub-link\n sub_link_dict = {} # {co-ordinates, theta, candidates}\n s = gps_to_ecef_pyproj(list(map(float, points[point_idx][:2])))\n e = gps_to_ecef_pyproj(list(map(float, points[point_idx + 1][:2])))\n\n if e[1] > s[1]:\n s, e = e, s\n m = (s[1] - e[1]) / (s[0] - e[0])\n theta = math.atan(m) # in radians\n\n (d1, d2) = (int(link_dict[index]['toRefNumLanes'] + 20)*lane_width,\n int(link_dict[index]['fromRefSpeedLimit'] + 20)*lane_width)\n\n (x1, y1) = (s[0] + d1*math.sin(theta)*cov_constant + (err*math.sin(theta)/abs(math.sin(theta))),\n s[1] - d1*math.cos(theta)*cov_constant - (err*math.cos(theta)/abs(math.cos(theta))))\n (x2, y2) = (e[0] - d2*math.sin(theta)*cov_constant - (err*math.sin(theta)/abs(math.sin(theta))),\n e[1] + d2*math.cos(theta)*cov_constant + (err*math.cos(theta)/abs(math.cos(theta))))\n\n i = (s[0] - germany.x1) // germany.d_x # row number for nxn grid\n j = (s[1] - germany.y2) // germany.d_y # col number for nxn grid\n zone = (n * j) + i\n sub_link_dict['zone'] = zone\n\n sub_link_dict['co-ordinates'] = [s, e]\n sub_link_dict['theta'] = theta\n sub_link_dict['candidates'] = []\n\n for index1, probe in probe_dict[zone].items():\n\n x, y = probe['co-ordinates']\n\n if equ(x1, y1, x, y, math.tan(theta)) >= 0 \\\n and equ(x1, y1, x, y, math.tan(math.atan(-1/m))) <= 0 \\\n and equ(x2, y2, x, y, math.tan(theta)) <= 0 \\\n and equ(x2, y2, x, y, math.tan(math.atan(-1/m))) >= 0:\n\n sub_link_dict['candidates'].append(index1)\n\n if sub_link_dict['candidates']:\n sub_link_dict['candidates'] = refine_points(sub_link_dict, probe_dict[zone], link_dict[index])\n link_dict[index]['subLinks'][point_idx] = sub_link_dict\n\n slopes = []\n if len(sub_link_dict['candidates']) not in [0, 1]:\n for _ in range(5):\n a, b = random.sample(list(sub_link_dict['candidates']), 2)\n try:\n slopes.append(slope_using_points_and_altitude(probe_dict[zone][a]['altitude'],\n probe_dict[zone][b]['altitude'],\n probe_dict[zone][a]['co-ordinates'][0],\n probe_dict[zone][a]['co-ordinates'][1],\n probe_dict[zone][b]['co-ordinates'][0],\n probe_dict[zone][b]['co-ordinates'][1]))\n except ValueError:\n pass\n [(x1, y1), (x2, y2)] = sub_link_dict['co-ordinates']\n if slopes:\n link_slope += ((x1 - x2)**2 + (y1 - y2)**2)**(1/2) * sum(slopes) / len(slopes)\n denominator += ((x1 - x2)**2 + (y1 - y2)**2)**(1/2)\n\n probe_dict[zone] = delete_keys_dict(probe_dict[zone], sub_link_dict['candidates'])\n if denominator != 0:\n link_dict[index]['slope'] = link_slope / denominator\n slope.append(link_dict[index]['slope'])\n ground_truth_link = [float(x.split('/')[1]) for x in row.slopeInfo.split('|')]\n ground_truth.append(sum(ground_truth_link) / len(ground_truth_link))\n pbar.update(1)\n pbar.close()\n 
print(r2_score(ground_truth, slope))\n return link_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
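A minimal usage sketch for the get_spin_link_dict document above, with stand-in spins; the real Spin.assignment values are Assignment namedtuples with res_type, res_num and atom, so the bare (res_num, atom) pairs used here are a simplification.

    from collections import namedtuple

    Spin = namedtuple("Spin", ["atom", "assignment"])

    # Two NOESY peaks reporting the same H-H contact between residues 17 and 21.
    peak1 = [Spin("HN", (17, "HN")), Spin("N", (17, "N")), Spin("HN", (21, "HN"))]
    peak2 = [Spin("HN", (21, "HN")), Spin("N", (21, "N")), Spin("HN", (17, "HN"))]

    links = get_spin_link_dict([peak1, peak2])
    # Both peaks report the same H-H contact, so they share one frozenset key.
    assert links[frozenset({(17, "HN"), (21, "HN")})] == [peak1, peak2]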
Sort peaks by the assignments of their constituent spins. Sort the peaks by the assignments of spins in particular dimensions. The default order sorts the peaks by the dimensions associated with spin anchors first then by the remaining dimensions in the order they appear in each peak. Optionally place all commented peaks at the end of the peak list. | def sort_by_assignments(peaklist, order=None, commented_at_end=False):
anchors = peaklist.anchors
anchored = tuple(i for anchor in anchors for i in anchor)
unanchored = set(range(peaklist.dims)) - set(anchored)
default_order = anchored + tuple(sorted(unanchored))
order = order if order is not None else default_order
peaklist.sort(key=lambda peak: tuple(peak[i] for i in order))
if commented_at_end:
peaklist.sort(key=lambda peak: peak.commented)
return peaklist | [
"def sort_merge_peak(peaks, min_gap=None, no_smt=False):\n ch2peaks = {}\n for peak in peaks:\n ch = peak[0]\n if ch in ch2peaks:\n ch2peaks[ch].append(peak[1:5])\n else:\n ch2peaks[ch] = [peak[1:5]]\n\n for ch in ch2peaks:\n ch2peaks[ch].sort(key=lambda x: x[:2])\n if min_gap is None:\n return ch2peaks\n\n for ch in ch2peaks:\n ps = ch2peaks[ch]\n s, rightmost, summit, id = ps[0]\n summits = [summit]\n mps = []\n for peak in ps[1:]:\n start, end, summit, ID = peak\n if start - rightmost >= min_gap:\n mps.append((s, rightmost, None if no_smt else modified_median(summits), id))\n s, rightmost, summits, id = start, end, [summit], ID\n else:\n if end > rightmost:\n rightmost = end\n summits.append(summit)\n mps.append((s, rightmost, None if no_smt else modified_median(summits), id))\n ch2peaks[ch] = mps\n return ch2peaks",
"def sortPeaksByLength(peaks):\n #sort peaks according to size, from small to big\n peaks = sorted( peaks, key=lambda peak: peak.length )\n return peaks",
"def removepeaks(self, pks, segments):\n peaks = []\n tol = self.getkey(\"minchan\")\n for pk in pks:\n for seg in segments:\n # only keep peaks that are inside of a segment\n # also remove any peaks that are in the first or last channel\n # of the spectra, as they are likely false peaks\n if (seg[0] <= pk <= seg[1]) and pk >= 1 and pk < len(self.freq) - 1:\n peaks.append(pk)\n break\n npeaks = set()\n peaks.sort()\n if len(peaks) == 1:\n return peaks\n # now go through the remaining peaks and combine any that are within tol of each other\n for i in range(len(peaks)):\n for j in range(i + 1, len(peaks)):\n if j < len(peaks) - 1:\n if abs(peaks[i] - peaks[j]) < tol \\\n and abs(peaks[j] - peaks[j + 1]) < tol:\n npeaks.add(peaks[j])\n break\n else:\n npeaks.add(peaks[i])\n break\n else:\n if abs(peaks[i] - peaks[j]) < tol:\n npeaks.add(peaks[j])\n break\n else:\n npeaks.add(peaks[i])\n break\n if i == len(peaks) - 1:\n if abs(peaks[i] - peaks[i - 1]) > tol:\n npeaks.add(peaks[i])\n return list(npeaks)",
"def sort_edgeSources(self):\n if self.groupEdges.size > 0:\n self.groupEdges=self.groupEdges[:,numpy.lexsort((self.groupEdges[2,:],self.groupEdges[1,:]))] \n if self.groupIntervals.size > 0:\n self.groupIntervals=self.groupIntervals[:,numpy.lexsort((self.groupIntervals[2,:],self.groupIntervals[1,:]))]\n if self.groupSamples.size > 0:\n self.groupSamples=self.groupSamples[:,numpy.lexsort((self.groupSamples[2,:],self.groupSamples[1,:]))]",
"def sortPool(self):\n\t\tif not self.sorted:\n\t\t\tself.schedules.sort(key=lambda schedule: schedule.fitness, reverse=True)\n\t\t\tself.sorted = True",
"def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the same number \n # of dimensions to be compared.\n if len(cube.coord(self.time_coord).points) == 1:\n cube = iris.util.new_axis(cube, scalar_coord=self.time_coord)\n \n # Chop cubes into individual realizations for relabelling.\n member_slices = get_coordinate_slice_dimensions(\n cube, [self.realization,self.forecast_ref_time],\n ignore_missing_coords=True)\n for member_slice in cube.slices(member_slices):\n \n if self.realization in [coord.name() \n for coord in member_slice.coords()]:\n member_slice.coord(\n self.realization).points = [realization_num]\n else:\n realization_coord = iris.coords.AuxCoord([realization_num],\n self.realization)\n member_slice.add_aux_coord(realization_coord)\n \n member_slice.cell_methods = None\n sorted_cubelist.append(member_slice)\n realization_num += 1\n \n sorted_cubelist = iris.cube.CubeList(sorted_cubelist)\n # Mask missing time steps so merging can be done.\n sorted_cubelist = pad_coords(sorted_cubelist, self.time_coord)\n cube = sorted_cubelist.merge_cube()\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n if cube.coord_dims(cube.coord(self.realization)) == \\\n cube.coord_dims(cube.coord(self.forecast_ref_time)):\n # Re order realizations in initialisation date order.\n ordered_inits = sorted(cube.coord('forecast_reference_time').points)\n ordered_mems = range(1, len(cube.coord('realization').points)+1)\n ordered_cubes = []\n for member_slice in cube.slices(member_slices):\n mem_index = ordered_inits.index(\n member_slice.coord(self.forecast_ref_time).points[0])\n member_slice.coord('realization').points = ordered_mems[mem_index]\n del ordered_inits[mem_index]\n del ordered_mems[mem_index]\n ordered_cubes.append(member_slice)\n cube = iris.cube.CubeList(ordered_cubes).merge_cube()\n \n return cube",
"def peakAssignmentSetData(self):\n\n #\n # Once the linking is known, check whether peak number matches.\n # First make temporary dict with original peak number (if any - ignore if not available?!?)\n # Then get the right peak.\n #\n \n peakNumDict = {}\n \n for peak in self.peakList.sortedPeaks():\n \n applData = peak.findFirstApplicationData(application = self.format, keyword = peakNum_kw)\n \n if applData:\n peakNumDict[applData.value] = peak\n \n \n for self.peakAssignment in self.peakAssignmentsFile.peakAssignments:\n \n peakNum = self.peakAssignment.peakNum\n \n #\n # If there is a match, then see if there are already assignments attached\n # Only take the self.assignDims into account\n #\n\n if peakNumDict.has_key(peakNum):\n\n self.convertCount[self.mainCode][0] += 1\n\n self.peak = peakNumDict[peakNum]\n self.peakContribResonances = [] # TODO: could set these up from the start...\n self.resonancesPeakContrib = []\n \n #\n # Check if both relevant dims are empty: if not, then addMode has to be ON\n # before anything is added\n #\n \n origAssignments = 0\n numPeakAssignDim = len(self.assignDim)\n \n if self.overwriteMode == 0:\n \n for self.peakAssignDim in range(numPeakAssignDim):\n \n if self.assignDim[self.peakAssignDim] != None:\n\n self.peakDim = self.peak.sortedPeakDims()[self.assignDim[self.peakAssignDim]]\n \n if self.peakDim.peakDimContribs:\n \n origAssignments = 1\n \n #\n # Loop over relevant peakDims\n #\n \n for self.peakAssignDim in range(numPeakAssignDim):\n \n self.peakContribResonances.append({})\n self.resonancesPeakContrib.append({})\n\n if self.assignDim[self.peakAssignDim] != None:\n\n self.peakDim = self.peak.sortedPeakDims()[self.assignDim[self.peakAssignDim]]\n \n #\n # If there are assignments present, delete them if overwrite mode is on\n #\n \n if self.peakDim.peakDimContribs:\n \n if self.overwriteMode == 1:\n \n #\n # Delete all exisiting ones\n #\n \n for peakDimContrib in self.peakDim.peakDimContribs:\n peakDimContrib.delete()\n \n for peakContrib in self.peak.sortedPeakContribs():\n peakContrib.delete()\n \n #\n # If no assignment present (or just deleted), or if assignments need to be added,\n # then add the new ones, create resonances if necessary\n #\n \n if (not self.peakDim.peakDimContribs and origAssignments == 0) or self.addMode == 1:\n \n self.createPeakAssignmentResonances()\n \n #\n # If addMode is on, or if there were no original peakdimcontribs, set the peakContribs\n #\n \n if origAssignments == 0 or self.addMode == 1:\n \n for self.assignCombination in self.peakAssignment.assignCombinations:\n\n self.setPeakDimContribs(0,peakDims = [], resonances = [])",
"def take_peaks(self):\r\n\r\n max_time = self.S.shape[1]\r\n values = np.zeros((6, max_time))\r\n freq = np.zeros((6, max_time))\r\n time = np.arange(max_time)\r\n\r\n for i in range(0, 6):\r\n values[i] = np.amax(self.S[AnalyseSong.borders_[i]:\r\n AnalyseSong.borders_[i+1]], axis=0)\r\n freq[i] = np.argmax(self.S[AnalyseSong.borders_[i]:\r\n AnalyseSong.borders_[i+1]], axis=0)\r\n\r\n for Ssub, pos in matrixslider.MatrixSlide(values, 60, 1):\r\n for i in range(0, 6):\r\n Ssubrow = Ssub[i]\r\n av = np.average(Ssubrow)\r\n std = np.std(Ssubrow)\r\n if values[i][pos] > av + AnalyseSong.coeff_[i]*std:\r\n self.constellation.append((int(time[pos]),\r\n int(freq[i][pos]) + AnalyseSong.borders_[i]))\r\n\r\n self.constellation = sorted(self.constellation,\r\n key=operator.itemgetter(0))",
"def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def fit_peaks_init_params(self,reflection_list, peak_ranges, init_params):\n self.reflection_list=reflection_list\n #zip iterates through each list together\n for reflection,p_range in zip(reflection_list, peak_ranges):\n peak_data=get_peak(self.spectrum, ttheta_lims=p_range)\n self.data_dict[reflection]=peak_data\n #store data in dictionary with peak label as the key \n for reflection,peak_data in self.data_dict.items():\n #why reflection,peak_data?\n # to pick out key and values, to loop through the peak labels and pass on the peak data\n fit_results, fit_line=fit_peak(peak_data,initParams=init_params.fits_dict[reflection].params)\n self.fits_dict[reflection]=fit_results\n self.lines_dict[reflection]=np.array(fit_line).T\n #why transpose the array? - for vertical stack, so x[o] = [1 11] rather than [1,2,3,...]",
"def writePeakAssignments(self, fileName = None, verbose = True, **keywds):\n \n #\n # Initialize the keywords\n #\n \n if not self.setIOkeywords('writePeakAssignments', keywds, verbose = verbose):\n return\n \n #\n # Initialize other variables...\n #\n \n self.fileName = fileName\n \n self.setPeakDimOrder()\n \n self.setPeakAssignmentFileClass()\n \n #\n # Initial check\n #\n \n if isinstance(self.peakList, (list, tuple)):\n self.peakList = self.peakList[0]\n \n if not self.peakList or not isinstance(self.peakList,Nmr.PeakList):\n self.messageReporter.showError(\"Error\",\"No or invalid peak list provided for writing %s: file not written.\" % fileName,parent = self.guiParent)\n return\n\n \n #\n # Additional check - for some formats (e.g. XEasy) a shift list has to be written\n # out before the assignments can be transferred...\n #\n \n if not self.peakListAssignmentCheck([self.peakList]): \n return\n\n #\n # Further initialization\n #\n\n self.dataSource = self.peakList.dataSource\n self.numDim = self.dataSource.numDim\n\n #\n # If dataDimRefs were passed in, check if they are valid\n #\n \n if self.dataDimRefs:\n \n if not isinstance(self.dataDimRefs, (list, tuple)):\n self.messageReporter.showWarning(\"Invalid dataDimRef list\",\"No list of dataDimRefs passed in: are ignored.\",self.guiParent)\n self.dataDimRefs = None\n \n else:\n for dataDimRef in self.dataDimRefs:\n if not isinstance(dataDimRef,Nmr.DataDimRef) or dataDimRef.dataDim.dataSource != self.dataSource:\n self.messageReporter.showWarning(\"Invalid dataDimRef\",\"Invalid dataDimRef object passed in (%s). All ignored.\" % dataDimRef.__class__,self.guiParent)\n self.dataDimRefs = None\n break\n\n #\n # Check whether unlinked resonances, ask to run linkResonances\n #\n \n self.checkUnlinkedResonances()\n\n #\n # Get initial info\n #\n\n self.peakListGetDimMapping()\n \n if not self.dataDimRefs or not self.minimalPrompts:\n \n if self.guiParent:\n self.peakDimSelect()\n \n else:\n self.messageReporter.showError(\"No peakDim mapping\",\"No dataDimRefs passed in: is necessary for writePeakAssignments in non-GUI environment. Aborting.\",self.guiParent)\n return\n\n \n if not self.dimMapping:\n self.dimMapping = range(0,len(self.dataDimRefs))\n \n #\n # Create the format peak assignment file\n #\n \n self.createPeakAssignmentFile()\n\n #\n # Loop over peaks and set assignment information\n #\n\n for self.peak in self.peakList.sortedPeaks():\n \n self.peakAssignmentsSetAssignmentInfo()\n \n self.peakAssignmentsCreate()\n\n \n if self.noWrite == False:\n self.peakAssignmentsFile.write()\n del self.peakAssignmentsFile\n \n return True",
"def preference_ordering(self) -> None:\n for i in self._destinations:\n self._destinations[i] = sorted(self._destinations[i])",
"def recognize_modification_peaks(self):\n self.plot.delete('largepeaks')\n self.plot.delete('smallpeaks')\n svsl_peak_thresh = parms.get('svsl_peak_thresh')\n direction = 0\n last_peak = 0\n last_trough = 0\n self.peaks = []\n self.troughs = []\n self.peak_count = 0\n self.trough_count = 0\n smoothing_gap = self.parent.parent.parent.smoothing_gap\n global_x_scale = self.parent.parent.parent.global_x_scale\n min_weight = self.parent.parent.parent.min_weight\n height_thresh = self.parent.parent.parent.height_thresh\n min_peak_height = self.parent.parent.parent.min_peak_height\n # first recognize peaks in the zero timepoint, to confirm fragment recognition\n self.parent.parent.msp[0].recognize_peaks(0) # dont draw\n confirmation_peaks = self.parent.parent.msp[0].peaks\n # this code performs the recognition of peaks in the sum window\n for i in range(0,(len(self.x)-2*smoothing_gap)):\n if self.x[i] > global_x_scale * min_weight:\n if direction == 0: # if no direction yet\n if self.y[last_peak] >= self.y[i] + height_thresh: # if last peak >> current\n direction = -1 # then decreasing\n elif self.y[i] >= self.y[last_trough] + height_thresh: # else if current >> last trough\n direction = 1 # then increasing\n if self.y[last_peak] < self.y[i]: # if last peak < current \n last_peak = i # last peak = current\n elif self.y[i] < self.y[last_trough]: # else if current < last trough\n last_trough = i # last trough = current\n elif direction == 1: # else if increasing\n if self.y[last_peak] < self.y[i]: # if last peak < current\n last_peak = i # last peak = current\n elif self.y[last_peak] >= self.y[i] + height_thresh: # else if last peak >> current\n direction = -1 # direction decreasing\n last_trough = i # last trough = current\n if self.y[i] > min_peak_height: # if current > min peak height\n print '%s > %s'%(self.y[i], min_peak_height)\n if self.modification_system == None:\n print self.modification_system\n self.peaks.append(last_peak) # record this peak\n self.peak_count = self.peak_count+1 #\n else:\n # for changing the number of missed sites, modify the fill_proteolysis_fragments call, not this\n fraglist = self.modification_system.get_proteolysis_fragments_within(self.x[last_peak], self.x[last_peak]*0.002)\n if len(fraglist) > 0:\n for confirmation_peak in confirmation_peaks: # peaks should be within two ranges of error of each other\n first = self.x[confirmation_peak]\n second = self.x[last_peak]\n if abs(first-second) < (first+second)/10000: # ... 
< 2 * (((first+second)/2) / 500)\n self.peaks.append(last_peak) # record this peak\n self.peak_count = self.peak_count+1 #\n elif direction == -1: # else if decreasing\n if self.y[last_trough] > self.y[i]: # if last trough > current\n last_trough = i # last trough = current\n elif self.y[i] >= self.y[last_trough] + height_thresh: # else if current >> last trough\n direction = 1 # direction increasing\n last_peak = i # last peak = current\n self.troughs.append(last_trough) # record this trough\n # make sure none have been stored in the 'manually created' list, if so, delete them\n # from the new set\n for ii in self.created_peaks:\n count = 0\n for jj in self.peaks:\n if (ii == jj):\n del self.peaks[count]\n break\n count = count+1\n # create lines for the peaks that were recognized in the sum window\n self.lin_list = []\n for i in range(len(self.peaks)):\n if (self.y[self.peaks[i]] < svsl_peak_thresh):\n lin = self.plot.create_line(self.x[self.peaks[i]],0,\n self.x[self.peaks[i]],self.height*0.8,\n fill='cyan', tags='smallpeaks')\n test = 0\n self.lin_list.append(lin)\n else:\n lin = self.plot.create_line(self.x[self.peaks[i]],0,\n self.x[self.peaks[i]],self.height*0.8,\n fill='blue', tags='largepeaks')\n self.lin_list.append(lin)\n \n self.plot.scale(\"largepeaks\", 0, 0, 1, -1.5)\n self.plot.move(\"largepeaks\", 0, (self.height * 0.80))\n self.plot.scale(\"smallpeaks\", 0, 0, 1, -1.5)\n self.plot.move(\"smallpeaks\", 0, (self.height * 0.80))\n for id in self.lin_list:\n self.plot.tag_bind(id, '<ButtonPress-1>', self.changePeakLabel);\n self.plot.bind('<ButtonPress-3>', self.removePeak);\n # now append created peaks to the peak list, if it isn't already there.\n for ii in self.created_peaks:\n for jj in self.peaks:\n if (ii == jj):\n break;\n else:\n self.peaks.append(ii)\n for ii in self.created_lin_list:\n for jj in self.lin_list:\n if (ii == jj):\n break\n else:\n self.lin_list.append(ii)",
"def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)",
"def sort(self):\n self.spikes = np.sort(self.spikes)",
"def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n for cube in year_cubelist.merge():\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, \n self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n sorted_cubelist.append(cube)\n return iris.cube.CubeList(sorted_cubelist)",
"def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sortPoints(self):\r\n self._allPoints.sort(pointSorterOnX)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a shell-escaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the | def DoubleQuote(s):
if not s:
return '""'
elif all(c in _SafeShellChars for c in s):
return s
else:
return '"' + s.replace('"', '\\"') + '"' | [
"def __shellquote(s):\n return \"'\" + s.replace(\"'\", \"'\\\\''\") + \"'\"",
"def shell_quote(s):\n return \"\\\"%s\\\"\" % s.replace('\"', '\\\"')",
"def quoted(s):\n return '\"%s\"' % s",
"def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)",
"def shquote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")",
"def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")",
"def ensure_quotes(s):\n return '\"{}\"'.format(s) if not s.isalnum() else s",
"def quote(string):\n return \"'%s'\" % string",
"def quote_string(s):\n return impl_util.render_json_string(s)",
"def shquote(arg):\n for c in '\"', \"'\", \"\\\\\", \"#\":\n if c in arg:\n return repr(arg)\n if arg.split() != [arg]:\n return repr(arg)\n return arg",
"def single_quoted(string):\n return u\"'\" + single_quote_escape(string) + u\"'\"",
"def safe_stata_string_quote(text: str) -> str:\n if '\"' in text:\n # Compound double quotes\n return f\"\"\"`\"{text}\"'\"\"\"\n return f'\"{text}\"'",
"def str_wrap_double(s):\n s = str(s)\n return '\"' + s + '\"'",
"def sanitize(string) -> str:\n return '\"'+string+'\"'",
"def quote_literal(s):\r\n\r\n if s == None:\r\n return \"null\"\r\n s = str(s).replace(\"'\", \"''\")\r\n s2 = s.replace(\"\\\\\", \"\\\\\\\\\")\r\n if len(s) != len(s2):\r\n return \"E'\" + s2 + \"'\"\r\n return \"'\" + s2 + \"'\"",
"def escape_quote(string):\n return str(string).replace('\"', ' ').replace(\"'\", \" \")",
"def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")",
"def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)",
"def quote(s):\n if isinstance(s, str):\n if \" \" in s or len(s.split()) > 1:\n start, end = s[0], s[-1]\n if start != end or start not in ('\"', \"'\"):\n q1s, q1d, q3s, q3d = \"'\", '\"', 3 * \"'\", 3 * '\"'\n if q1d not in s:\n s = q1d + s + q1d\n elif q1s not in s:\n s = q1s + s + q1s\n elif q3d not in s:\n s = q3d + s + q3d\n elif q3s not in s:\n s = q3s + s + q3s\n return s"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
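A minimal, runnable sketch of how the DoubleQuote document above behaves. The snippet references a _SafeShellChars constant that is not shown in the record, so the character set below is an assumption and may differ from the real module's definition:

    import string

    # Assumed stand-in for the _SafeShellChars constant referenced above.
    _SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')

    def DoubleQuote(s):
        # Same logic as the document snippet above.
        if not s:
            return '""'
        elif all(c in _SafeShellChars for c in s):
            return s
        else:
            return '"' + s.replace('"', '\\"') + '"'

    print(DoubleQuote(''))             # -> ""
    print(DoubleQuote('abc/def.txt'))  # -> abc/def.txt (all safe, left unquoted)
    print(DoubleQuote('hello world'))  # -> "hello world"
    print(DoubleQuote('say "hi"'))     # -> "say \"hi\""
    print(DoubleQuote('$HOME'))        # -> "$HOME" (the shell can still expand it)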
Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen. | def ShrinkToSnippet(cmd_parts, var_name, var_value):
def shrink(value):
parts = (x and SingleQuote(x) for x in value.split(var_value))
with_substitutions = ('"$%s"' % var_name).join(parts)
return with_substitutions or "''"
return ' '.join(shrink(part) for part in cmd_parts) | [
"def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell",
"def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)",
"def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE",
"def remove_variable_from_command(raw_command_string):\n variable, command_string = __split_variable(raw_command_string)\n return command_string",
"def _instantiateSecrets(cmd, secrets, hide):\n if secrets:\n for (i, secret) in enumerate(secrets):\n if hide:\n secret = '<hidden>'\n cmd = cmd.replace(f':{i}:', secret)\n return cmd",
"def _wrap_command(self, command, context, sandbox):\n result = []\n\n # Ensure that the start-path exists (only if one was given. Not for the ~)\n if self.start_path:\n result.append(\"(mkdir -p '%s' 2> /dev/null || true) && \" %\n esc1(self.expand_path(self.start_path, context)))\n\n # Prefix with all cd-statements\n for p in [self._get_start_path()] + context._path:\n # TODO: We can't have double quotes around paths,\n # or shell expansion of '*' does not work.\n # Make this an option for with cd(path):...\n if sandbox:\n # In sandbox mode, it may be possible that this directory\n # is not yet created, only 'cd' to it when the directory\n # really exists.\n result.append('if [ -d %s ]; then cd %s; fi && ' % (p,p))\n else:\n result.append('cd %s && ' % p)\n\n # Prefix with variable assignments\n for var, value in context._env:\n #result.append('%s=%s ' % (var, value))\n result.append(\"export %s=%s && \" % (var, value))\n\n # We use the export-syntax instead of just the key=value prefix\n # for a command. This is necessary, because in the case of pipes,\n # like, e.g.: \" key=value yes | command \"\n # the variable 'key' will not be passed to the second command.\n #\n # Also, note that the value is not escaped, this allow inclusion\n # of other variables.\n\n result.append(command)\n return ''.join(result)",
"def build_sh_cmd(cmd, cwd=None):\n args = cmd.split()\n return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])",
"def _preprocess(command):\n for shell_command in DockerProxy.DockerProxy.shell_commands:\n if shell_command in command:\n replace_string = \"/bin/bash -c \\\"\" + shell_command\n command = command.replace(shell_command, replace_string)\n command += \"\\\"\"\n return command",
"def _get_srandardized_cli_argument(cls, a_tostrip):\r\n the_str = a_tostrip\r\n \r\n while the_str.startswith('-'):\r\n the_str = the_str[1:]\r\n \r\n return '--%s' % (the_str)",
"def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE",
"def wraps(command):\n\n return CommandWrapper(command)",
"def quote(command):\n\n if isinstance(command, (str,)):\n return _quote(command)\n\n if isinstance(command, collections.abc.Iterable):\n return ' '.join([_quote(arg) for arg in _normalize_args(command)])\n\n raise ValueError('Invalid command type: {}'.format(type(command).__name__))",
"def shell_command_strings(self, command):\n return (None, \"$(shell \" + command + \")\", None)",
"def _make_posix_command():\n qsearch = _re.compile(r'[^a-zA-Z\\d_./-]').search\n needq = lambda x: not x or qsearch(x)\n\n def posix_command(command, *args, **kwargs):\n \"\"\"\n Return a POSIX shell suitable commandline\n\n Either args or kwargs or neither of them can be set. There cannot be\n set both of them.\n\n :Parameters:\n `command` : ``str``\n Generic commandline, possibly containing substitutions, filled by\n args or kwargs. See `split_command` for generic commandline\n syntax.\n\n `args` : ``tuple``\n Substitution tuple\n\n `kwargs` : ``dict``\n Substitution dict\n\n :Return: Strictly quoted shell commandline for POSIX shells\n :Rtype: ``str``\n \"\"\"\n # pylint: disable = redefined-outer-name\n return ' '.join([\n \"'%s'\" % (token.replace(\"'\", \"'\\\\''\")) if needq(token) else token\n for token in map(_make_formatter(*args, **kwargs),\n split_command(command))\n ])\n return posix_command",
"def bash(self, command, **kwargs):\n return self.utils.bash(command, **kwargs)",
"def format_with_command(var_xform=None,\n other_xform=None):\n def formatter(func, _, params):\n \"\"\"Return formatted docstring with command setting variable.\"\"\"\n source_line = gen_source_line(params.args[0],\n match_transform=var_xform,\n other_transform=other_xform)\n return func.__doc__.format(source_line)\n\n return formatter",
"def shell_quote(s):\n return \"\\\"%s\\\"\" % s.replace('\"', '\\\"')",
"def get_stdout_as_shell(command: str, **kwargs) -> str:\n return subprocess.run(command,\n shell=True,\n stdout=subprocess.PIPE,\n encoding='utf-8',\n **kwargs).stdout.strip()",
"def powershell_launcher(raw, baseCmd=\"powershell.exe -NoP -sta -NonI -W Hidden -Enc \"):\n\n # encode the data into a form usable by -enc\n encCMD = enc_powershell(raw)\n\n return baseCmd + encCMD"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
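A usage sketch for the ShrinkToSnippet document above. SingleQuote is called but not defined in the record, so shlex.quote is used here as a stand-in; the real helper's quoting rules may differ:

    import shlex

    def SingleQuote(s):
        # Stand-in for the SingleQuote helper not shown above (assumption).
        return shlex.quote(s)

    def ShrinkToSnippet(cmd_parts, var_name, var_value):
        # Same logic as the document snippet above.
        def shrink(value):
            parts = (x and SingleQuote(x) for x in value.split(var_value))
            with_substitutions = ('"$%s"' % var_name).join(parts)
            return with_substitutions or "''"
        return ' '.join(shrink(part) for part in cmd_parts)

    snippet = ShrinkToSnippet(['adb', '-s', 'emulator-5554', 'shell'],
                              'device', 'emulator-5554')
    print(snippet)  # adb -s "$device" shell

Every occurrence of the value is replaced by "$device", so the one-line snippet can be reused for another device simply by exporting the variable before running it.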
An fcntl-based implementation of _IterProcessStdout. | def _IterProcessStdoutFcntl(process,
iter_timeout=None,
timeout=None,
buffer_size=4096,
poll_interval=1):
# pylint: disable=too-many-nested-blocks
import fcntl
try:
# Enable non-blocking reads from the child's stdout.
child_fd = process.stdout.fileno()
fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
end_time = (time.time() + timeout) if timeout else None
iter_end_time = (time.time() + iter_timeout) if iter_timeout else None
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
if iter_end_time and time.time() > iter_end_time:
yield None
iter_end_time = time.time() + iter_timeout
if iter_end_time:
iter_aware_poll_interval = min(poll_interval,
max(0, iter_end_time - time.time()))
else:
iter_aware_poll_interval = poll_interval
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if not data:
break
yield data
if process.poll() is not None:
# If process is closed, keep checking for output data (because of timing
# issues).
while True:
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if data:
yield data
continue
break
break
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait() | [
"def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers",
"def _communicate(proc, stdout, stderr, print_output=True):\n stdouts = []\n stderrs = []\n # Selection will allow us to read from stdout and stderr whenever either is available. This will allow the\n # program to capture both, while still printing as soon as is possible. This will keep the user informed, not\n # merge streams, and capture output.\n #\n # Selection blocks on the read of \"either\" file descriptor, and then passes off the execution to the below code\n # with a key that represents which descriptor was the one available to read without blocking.\n selector = selectors.DefaultSelector()\n selector.register(stdout, selectors.EVENT_READ, data=(stdouts, sys.stdout))\n selector.register(stderr, selectors.EVENT_READ, data=(stderrs, sys.stderr))\n while not stdout.closed or not stderr.closed:\n # This line *BLOCKS* until on of the above registered handles is available to read. Then a set of events is\n # returned signaling that a given object is available for IO.\n events = selector.select()\n for key, _ in events:\n appendable, stream = key.data\n try:\n line = key.fileobj.readline().decode().replace(\"\\r\\n\", \"\\n\")\n # Some systems (like running inside Docker) raise an io error instead of returning \"\" when the device\n # is ended. Not sure why this is, but the effect is the same, on IOError assume end-of-input\n except IOError as ioe:\n line = \"\"\n appendable.append(line)\n # Streams are EOF when the line returned is empty. Once this occurs, we are responsible for closing the\n # stream and thus closing the select loop. Empty strings need not be printed.\n if line == \"\":\n key.fileobj.close()\n continue\n # Forwards output to screen. Assuming a PTY is used, then coloring highlights should be automatically\n # included for output. Raw streams are used to avoid print quirks\n elif print_output:\n stream.write(line)\n stream.flush()\n # Spin waiting for the .poll() method to return a non-None result ensuring that the process has finished.\n while proc.poll() is None:\n time.sleep(0.0001)\n return proc.poll(), stdouts, stderrs",
"def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []",
"def read_stdout(self):\n while True:\n data = os.read(self.proc.stdout.fileno(), 2 ** 15)\n\n if(len(data) > 0):\n if(self.listener):\n self.listener._on_data(data)\n else:\n self.proc.stdout.close()\n if(self.listener):\n time.sleep(0.01)\n self.listener._on_finished(self)\n break",
"def __readStdout(self):\n self.__process.setReadChannel(QProcess.StandardOutput)\n \n while self.__process.canReadLine():\n s = str(self.__process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.__logOutput(s)",
"def tee(process, filter_func):\r\n # We simply use readline here, more fancy IPC is not warranted\r\n # in the context of this package.\r\n lines = []\r\n while True:\r\n line = process.stdout.readline()\r\n if line:\r\n stripped_line = line.rstrip()\r\n if filter_func(stripped_line):\r\n sys.stdout.write(s(line))\r\n lines.append(stripped_line)\r\n elif process.poll() is not None:\r\n break\r\n return lines",
"def __readStdout(self):\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(), self.vcs.getEncoding(),\n 'replace').strip()\n self.__processOutputLine(s)",
"def _readServerStdOutLine(self):\r\n for line in iter(self._server_process.stdout.readline, ''):\r\n yield line",
"def _stdout_to_flag(self):\n self._is_running.wait()\n while self._is_running.is_set():\n msg = self.stdout_queue.get()\n if msg is None or len(msg) < 1: # It's time to stop\n break\n if msg[0] == \"#\": # It's a signal from the kxkmcard program\n self.onEvent(msg[1:].split(' '))\n else:\n self._log(\"warning\", \"unknown stdout line {0}\".format(msg))",
"def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)",
"def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)",
"def _redirect_output(self):\n if self._binary_output:\n while True:\n data = self._process.stdout.read(1024)\n\n if not data:\n return\n else:\n self._on_output_callback(data)\n else:\n while True:\n line = self._process.stdout.readline().decode('utf-8',\n errors='replace')\n\n if not line:\n return\n else:\n # Output the line without trailing \\n and whitespace.\n self._on_output_callback(line.rstrip())",
"def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout",
"def piped(self):\n\t\tpass",
"def pipe_stdout(filepath=None):\n sys.stdout = get_filelike(filepath)\n yield\n sys.stdout = sys.__stdout__",
"def PrefixOutput(process, prefix):\n output_line = process.stdout.readline()\n while output_line:\n log.status.Print('[{0}] {1}'.format(prefix, output_line.rstrip()))\n log.status.flush()\n output_line = process.stdout.readline()",
"def _StreamOutputToFile(fd, file, line_extractor, cmd=None):\n def func(fd, file, line_extractor):\n with open(file, 'ab+') as f:\n if cmd:\n line = cmd + '\\n'\n f.write(line.encode('utf-8'))\n try:\n for line in iter(lambda: fd.readline(2048), ''):\n f.write(line.encode('utf-8', errors='ignore'))\n f.flush()\n if line_extractor:\n line_extractor(line)\n except UnicodeDecodeError as err:\n print('UnicodeDecodeError parsing stdout/stderr, bug in paramiko:{}'\n .format(err))\n t = threading.Thread(target=func, args=(fd, file, line_extractor))\n t.start()\n return t",
"def _stream_output(self):\n if self._final_outfile:\n output_file = self._final_outfile\n else:\n output_file = os.path.join(self._output_dir, 'part-00000')\n log.info('streaming final output from %s' % output_file)\n\n for line in open(output_file):\n yield line",
"def handle_process(self, proc, err):\n poll = select.poll()\n poll.register(proc.stdout)\n poll.register(proc.stderr)\n while proc.poll() is None:\n res = poll.poll(1)\n for fd, evt in res:\n if not (evt & select.POLLIN):\n continue\n if fd == proc.stdout.fileno():\n line = proc.stdout.readline().strip()\n if line:\n print(line)\n elif fd == proc.stderr.fileno():\n line = proc.stderr.readline().strip()\n if line:\n print(line)\n if proc.poll():\n proc.terminate()\n proc.wait()\n raise RuntimeError(\"Process interrupted\")\n if proc.returncode:\n raise err"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
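A driving loop for the _IterProcessStdoutFcntl generator above. The generator calls a _read_and_decode helper that the record does not define, so the version below is an assumed implementation; the generator itself is taken exactly as defined in the document:

    import os
    import subprocess

    def _read_and_decode(fd, buffer_size):
        # Assumed helper: read up to buffer_size bytes and decode them.
        return os.read(fd, buffer_size).decode('utf-8', errors='replace')

    proc = subprocess.Popen(['ping', '-c', '3', 'localhost'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for chunk in _IterProcessStdoutFcntl(proc, iter_timeout=0.5, timeout=30):
        if chunk is None:
            # iter_timeout elapsed with no new output; the caller can do
            # periodic housekeeping here before resuming the read loop.
            continue
        print(chunk, end='')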
Create a SparseAutoEncoder object. | def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
out_transfer='identity', reconstruct_loss='squared',
c_sparsity=1, sparsity_loss='bern_bern_kl',
sparsity_target=0.01,
tied_weights=True, batch_size=None,
optimizer='lbfgs', max_iter=1000, verbose=False):
super(SparseAutoEncoder, self).__init__(
n_inpt, n_hidden, hidden_transfer, out_transfer,
reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target,
tied_weights)
self.batch_size = batch_size
self.optimizer = optimizer
self.f_transform = None
self.f_reconstruct = None
self.parameters.data[:] = np.random.standard_normal(
self.parameters.data.shape).astype(theano.config.floatX)
self.max_iter = max_iter
self.verbose = verbose | [
"def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')",
"def make_sparse(data):\n assert data.train_pos_edge_index is not None\n\n (row, col), N = data.train_pos_edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, col = row[perm], col[perm]\n\n value = [data.edge_id[(row[i] * N + col[i]).item()].item() for i in perm]\n\n data.adj_t = SparseTensor(\n row=col,\n col=row,\n value=torch.tensor(value, dtype=torch.float32),\n sparse_sizes=(N, N),\n is_sorted=True,\n )\n\n # Pre-process some important attributes.\n data.adj_t.storage.rowptr()\n data.adj_t.storage.csr2csc()\n\n return data",
"def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)",
"def from_sparse_storage(cls, sparse, **kwargs):\n matrix, labels = sparse.get_matrix_and_labels()\n return cls.from_matrix(matrix, labels, **kwargs)",
"def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'SparseTensor':\n return SparseTensor(indices, values, shape)",
"def sparse_constructor(value, name=None, strict=False, allow_downcast=None,\r\n borrow=False, format=None):\r\n if not isinstance(value, scipy.sparse.spmatrix):\r\n raise TypeError(\"Expected a sparse matrix in the sparse shared variable constructor. Received: \",\r\n value.__class__)\r\n\r\n if format is None:\r\n format = value.format\r\n type = SparseType(format=format, dtype=value.dtype)\r\n if not borrow:\r\n value = copy.deepcopy(value)\r\n return SparseTensorSharedVariable(type=type, value=value, name=name,\r\n strict=strict, allow_downcast=allow_downcast)",
"def makeSparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)",
"def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)",
"def _from_dict_to_sparse(self, adj_dict):\n indices = list(adj_dict.keys())\n values = [1] * len(indices)\n\n edge_index = torch.LongTensor(indices).T.to(self.device)\n edge_attr = torch.FloatTensor(values).to(self.device)\n\n edge_index, edge_attr = utils.to_symmetric(edge_index, edge_attr, self.n)\n\n return SparseTensor.from_edge_index(edge_index=edge_index,\n edge_attr=edge_attr,\n sparse_sizes=torch.Size([self.n, self.n]))",
"def SparseEmbedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, out=None, name=None, **kwargs):\n return (0,)",
"def _make_train(data, smooth_factor):\n train_matrix = data_to_sparse(data).tolil()\n user_counts = np.array(train_matrix.sum(axis=1))[:, 0]\n train_matrix[np.where(user_counts == 0)] = smooth_factor\n train_matrix = normalize(train_matrix, 'l1', axis=1)\n return train_matrix.tocsr()",
"def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())",
"def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)",
"def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]",
"def from_dense(\n dense: torch.Tensor,\n ) -> 'SparseCOOMatrix':\n # convert to sparse matrix\n indices = dense.nonzero(as_tuple=True)\n values = dense[indices]\n return SparseCOOMatrix.from_indices_values_pair(\n indices=torch.stack(indices, dim=0),\n values=values,\n size=dense.shape,\n )",
"def save_sparse_csr(filename,array, labels, vocab):\n np.savez(filename,data = array.data ,indices=array.indices,\n indptr =array.indptr, shape=array.shape, labels=labels, vocab=vocab)",
"def to_sparse(self) -> Tensor:\n # crow_indices[i] is index where values of row i begin\n return torch.sparse_csr_tensor(\n crow_indices=torch.arange(self.shape[-1] + 1).expand(*self.batch_shape, -1).contiguous(),\n col_indices=self.perm,\n values=torch.ones_like(self.perm),\n )",
"def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> AnySparseNdArray:\n raise NotImplementedError",
"def createOptFlow_SparseToDense(): # real signature unknown; restored from __doc__\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
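The SparseAutoEncoder record above only shows a constructor; the 'bern_bern_kl' sparsity loss it names is, in the usual formulation, a KL divergence between the target activation rate and the mean hidden activation. A conceptual NumPy sketch of that penalty (not the library's code):

    import numpy as np

    def bern_bern_kl(mean_activation, sparsity_target=0.01):
        # KL( Bernoulli(p) || Bernoulli(q) ) summed over hidden units,
        # where p is the target rate and q the observed mean activation.
        p, q = sparsity_target, mean_activation
        return np.sum(p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q)))

    hidden = 1.0 / (1.0 + np.exp(-np.random.randn(100, 32)))  # sigmoid codes
    penalty = bern_bern_kl(hidden.mean(axis=0))
    print(penalty)  # added to the reconstruction loss, weighted by c_sparsity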
Create a DenoisingAutoEncoder object. | def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
out_transfer='identity', reconstruct_loss='squared',
noise_type='gauss', c_noise=.2,
tied_weights=True, batch_size=None,
optimizer='lbfgs', max_iter=1000, verbose=False):
super(DenoisingAutoEncoder, self).__init__(
n_inpt, n_hidden, hidden_transfer, out_transfer,
reconstruct_loss, noise_type, c_noise,
tied_weights)
self.batch_size = batch_size
self.optimizer = optimizer
self.f_transform = None
self.f_reconstruct = None
climin.initialize.randomize_normal(self.parameters.data)
self.max_iter = max_iter
self.verbose = verbose | [
"def _define_encoder(self):\n raise NotImplementedError",
"def get_model(*args, **kwargs):\n return AutoEncoder(*args, **kwargs)",
"def create_autoencoder():\n\n model = create_model()\n model.compile(optimizer=Adam(), loss=binary_crossentropy)\n model.summary()\n model.save('autoencoder.h5')",
"def get_encoder(self):",
"def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder",
"def from_code_id(cls, x: str) -> \"Encoder\":\n return cls(**copy.deepcopy(CODE_METADATA[x]), name=x)",
"def encode(\n cls: Type[\"DataDocument\"], encoding: str, data: D, **kwargs: Any\n ) -> \"DataDocument[D]\":\n # Dispatch encoding\n blob = lookup_serializer(encoding).dumps(data, **kwargs)\n\n inst = cls(blob=blob, encoding=encoding)\n inst._cache_data(data)\n return inst",
"def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)",
"def build_encoder(draw) -> Encoder:\n wheels_list = draw(lists(wheels(), max_size=10))\n return Encoder(wheels_list)",
"def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )",
"def autoencoder(input_dim, encoding_dim):\n from keras.layers import Input, Dense\n from keras.models import Model\n \n input_layer = Input(shape=(input_dim, ))\n\n encoder = Dense(encoding_dim, activation=\"tanh\",\n activity_regularizer=regularizers.l1(10e-5))(input_layer)\n encoder = Dense(int(encoding_dim / 2), activation=\"relu\")(encoder)\n\n decoder = Dense(int(encoding_dim / 2), activation='tanh')(encoder)\n decoder = Dense(input_dim, activation='relu')(decoder)\n\n autoencoder = Model(inputs=input_layer, outputs=decoder)\n \n return autoencoder",
"def __init__(self, params, model, name=\"las_encoder\", mode='train'):\n super(ListenAttendSpellEncoder, self).__init__(params, model, name, mode)",
"def registerEncoder (encoder):\n assert False, \"TODO:\"",
"def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder",
"def index_encoder_from_alphabet(alphabet):\n categories = [[letter for letter in alphabet]]\n fit_list = [[letter] for letter in alphabet]\n return OrdinalEncoder(dtype=float, categories=categories).fit(fit_list)",
"def get_keras_autoencoder(**input_kwargs):\n layers = tf.keras.layers\n regularizers = tf.keras.regularizers\n\n dense_kwargs = {\n 'kernel_initializer': tf.glorot_uniform_initializer(),\n 'bias_initializer': tf.zeros_initializer(),\n 'kernel_regularizer': regularizers.l2(l=FLAGS.l2_reg),\n 'bias_regularizer': regularizers.l2(l=FLAGS.l2_reg),\n }\n\n if FLAGS.use_sequential_for_keras:\n model = tf.keras.Sequential()\n # Create Encoder\n model.add(layers.Input(**input_kwargs))\n for size in _ENCODER_SIZES[:-1]:\n model.add(layers.Dense(\n size, activation=_NONLINEARITY, **dense_kwargs))\n model.add(layers.Dense(_ENCODER_SIZES[-1], **dense_kwargs))\n\n # Create Decoder\n for size in _DECODER_SIZES:\n model.add(layers.Dense(size, activation=_NONLINEARITY, **dense_kwargs))\n model.add(layers.Dense(784, **dense_kwargs))\n\n else:\n # Make sure you always wrap the input in keras\n inputs = layers.Input(**input_kwargs)\n\n x = inputs\n # Create Encoder\n for size in _ENCODER_SIZES[:-1]:\n x = layers.Dense(size, activation=_NONLINEARITY, **dense_kwargs)(x)\n x = layers.Dense(_ENCODER_SIZES[-1], **dense_kwargs)(x)\n\n # Create Decoder\n for size in _DECODER_SIZES:\n x = layers.Dense(size, activation=_NONLINEARITY, **dense_kwargs)(x)\n x = layers.Dense(784, **dense_kwargs)(x)\n\n model = tf.keras.Model(inputs=inputs, outputs=x)\n\n return model",
"def __init__(self, embed_dim, hidden_dim, batch_size, id=\"\", shared=True):\r\n self.embed_dim = embed_dim\r\n self.hidden_dim = hidden_dim\r\n self.batch_size = batch_size\r\n self.shared = shared\r\n self.id = id\r\n # there is no self parameters for BidirectionalEncoder\r\n self.params = dict()\r\n self.param_names = {\"orign\": [], \"cache\": []}\r\n # left encoder scans input from left to right\r\n self.left_encoder = BasicEncoder(\r\n embed_dim=embed_dim,\r\n hidden_dim=hidden_dim,\r\n batch_size=batch_size,\r\n id=id + \"left_\"\r\n )\r\n self.params.update(self.left_encoder.params)\r\n for name in [\"orign\", \"cache\"]:\r\n self.param_names[name] += self.left_encoder.param_names[name]\r\n\r\n # right encoder scans input from right to left\r\n if not shared:\r\n self.right_encoder = BasicEncoder(embed_dim, hidden_dim, batch_size, id + \"right_\")\r\n self.params.update(self.right_encoder.params)\r\n for name in [\"orign\", \"cache\"]:\r\n self.param_names[name] += self.right_encoder.param_names[name]\r\n import json\r\n print \"Bidirectional Encoder Build! Params = %s\" % (\r\n json.dumps(\r\n {\"id\":id,\r\n \"embed_dim\":embed_dim,\r\n \"output_dim\":hidden_dim,\r\n \"hidden_dim\":hidden_dim,\r\n \"batch_size\":batch_size\r\n }, indent=4)\r\n )",
"def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))",
"def __init__(self, encoder, \n input_size, hidden_size,\n output_size, dropout=0.0,\n score_fn_type='MLP'):\n super(SingleEncoder, self).__init__()\n self.encoder = encoder\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout = dropout ### for extension \n self.score_fn_type = score_fn_type\n self.cls_arch = 'SingleEncoder'\n\n # define score function, now only MLP \n # for classification, \n self.score_fn, self.bias = self.build_classifier()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
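The DenoisingAutoEncoder record above differs from the sparse variant mainly in its corruption step: with noise_type='gauss', the input is perturbed by Gaussian noise scaled by c_noise before encoding, while the squared reconstruction loss still targets the clean input. A conceptual NumPy sketch of that corruption (not the library's code):

    import numpy as np

    rng = np.random.default_rng(0)

    def gauss_corrupt(X, c_noise=0.2):
        # Gaussian corruption: the encoder sees X_noisy, the loss targets X.
        return X + c_noise * rng.standard_normal(X.shape)

    X_clean = rng.random((10, 5))
    X_noisy = gauss_corrupt(X_clean)          # fed to the encoder
    print(np.mean((X_noisy - X_clean) ** 2))  # roughly c_noise**2 = 0.04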
Guess a version range from a series of discrete versions. Versions that share the same first group_digits components are grouped into the same segment. Examples: (digits=1) "1.1|1.2|1.3|1.4" > ">=1.1, ">=1.1,=2.1, '>=1.1.1,=1.2,=2.0,<=2.0.2|3.0' | def guess_range(versions, digits=2):
if isinstance(versions, six.string_types):
versions = [Version(x) for x in versions.split('|')]
else:
versions = [Version(x) for x in versions]
versions.sort()
if not versions:
raise ValueError('must given at least one version')
sections = []
group_buff = [versions[0]]
for version in versions[1:]:
if version.version[:digits] == group_buff[0].version[:digits]:
group_buff.append(version)
else:
sections.append(_internal_guess_range(group_buff))
group_buff = [version]
    # last group
sections.append(_internal_guess_range(group_buff))
version_ranges = []
for low, high in sections:
if low == high:
cg = low.vstring
else:
cg = ">={},<={}".format(low, high)
version_ranges.append(cg)
vr = VersionRange(version_ranges)
return vr | [
"def make_version_sortable(groups):\n postfix = groups.get('postfix1') or groups.get('postfix2') or \"\"\n sortable_version = \"0.0.0.0.0\"\n sortable_postfix = None\n if postfix.startswith(\"a\"):\n sortable_postfix = postfix.replace('a', '0.')\n if postfix and postfix.startswith(\"b\"):\n sortable_postfix = postfix.replace('b', '1.')\n if not postfix:\n sortable_postfix = '2.0'\n sortable_version = \"\"\n major = groups.get('major')\n minor = groups.get('minor', '0')\n bugfix = groups.get('bugfix', '0')\n sortable_version += major\n if minor:\n sortable_version += f\".{minor}\"\n if bugfix:\n sortable_version += f\".{bugfix}\"\n if sortable_postfix:\n sortable_version += f\".{sortable_postfix}\"\n return sortable_version",
"def _split_version(self, version):\n major = minor = release = build = '0'\n splitted_version = version.split('.')\n list_size = len(splitted_version)\n if list_size > 0:\n major = splitted_version[0]\n if list_size > 1:\n minor = splitted_version[1]\n if list_size > 2:\n release = splitted_version[2]\n if list_size > 3:\n build = splitted_version[3]\n return major, minor, release, build",
"def solution(l):\n for i in range(len(l)):\n l[i]=l[i].split('.')\n\n #groups all majors\n major_list = {}\n for x in range(len(l)):\n major_number = int(l[x][0])\n if major_number not in major_list:\n major_list[major_number] = [l[x]]\n else:\n major_list[major_number].append(l[x])\n\n #groups all minors and sorts revisions within each minor\n for key,value in major_list.items():\n minor_list = {}\n for y in range(len(value)):\n minor_number = int(value[y][1])+1 if len(value[y]) != 1 else 0\n if minor_number not in minor_list:\n minor_list[minor_number] = [value[y]]\n elif minor_number == 0:\n minor_list[minor_number].append(value[y])\n else:\n try:\n ordered_revision_list = minor_list[minor_number]\n position = len(ordered_revision_list)\n for j in range(len(ordered_revision_list)):\n if len(ordered_revision_list[j]) == 2:\n continue\n elif int(value[y][2]) < int(ordered_revision_list[j][2]):\n position = j\n break\n ordered_revision_list.insert(position,value[y])\n minor_list[minor_number] = ordered_revision_list\n except:\n if len(value[y]) == 2:\n minor_list[minor_number].insert(0,value[y])\n major_list[key] = minor_list\n\n\n answer =[]\n for major in sorted(major_list.keys()):\n if type(major_list[major]) == list:\n answer += (minor_list)\n else:\n for minor in sorted(major_list[major].keys()):\n answer += major_list[major][minor]\n\n for n in range(len(answer)):\n answer[n] = '.'.join(answer[n])\n return answer",
"def test_versions_equal(self):\n self.check_versions_equal('1', '1')\n self.check_versions_equal('1', '1.0')\n self.check_versions_equal('1', '1.0.0')\n self.check_versions_equal('1.0', '1.0.0')\n self.check_versions_equal('1', '1-0')\n self.check_versions_equal('1', '1.0-0')\n self.check_versions_equal('1.0', '1.0-0')\n # no separator between number and character\n self.check_versions_equal('1a', '1-a')\n self.check_versions_equal('1a', '1.0-a')\n self.check_versions_equal('1a', '1.0.0-a')\n self.check_versions_equal('1.0a', '1-a')\n self.check_versions_equal('1.0.0a', '1-a')\n self.check_versions_equal('1x', '1-x')\n self.check_versions_equal('1x', '1.0-x')\n self.check_versions_equal('1x', '1.0.0-x')\n self.check_versions_equal('1.0x', '1-x')\n self.check_versions_equal('1.0.0x', '1-x')\n\n # aliases\n self.check_versions_equal('1ga', '1')\n self.check_versions_equal('1release', '1')\n self.check_versions_equal('1final', '1')\n self.check_versions_equal('1cr', '1rc')\n\n # special 'aliases' a, b and m for alpha, beta and milestone\n self.check_versions_equal('1a1', '1-alpha-1')\n self.check_versions_equal('1b2', '1-beta-2')\n self.check_versions_equal('1m3', '1-milestone-3')\n\n # case insensitive\n self.check_versions_equal('1X', '1x')\n self.check_versions_equal('1A', '1a')\n self.check_versions_equal('1B', '1b')\n self.check_versions_equal('1M', '1m')\n self.check_versions_equal('1Ga', '1')\n self.check_versions_equal('1GA', '1')\n self.check_versions_equal('1RELEASE', '1')\n self.check_versions_equal('1release', '1')\n self.check_versions_equal('1RELeaSE', '1')\n self.check_versions_equal('1Final', '1')\n self.check_versions_equal('1FinaL', '1')\n self.check_versions_equal('1FINAL', '1')\n self.check_versions_equal('1Cr', '1Rc')\n self.check_versions_equal('1cR', '1rC')\n self.check_versions_equal('1m3', '1Milestone3')\n self.check_versions_equal('1m3', '1MileStone3')\n self.check_versions_equal('1m3', '1MILESTONE3')\n\n self.check_versions_equal('1', '01', '001')",
"def _versions(self):\n\n ver = self.options.version.lower()\n if ver != 'head':\n try:\n vLHCb = _versiondecode(ver)\n versiontuple = (vLHCb[0] / 100, vLHCb[0] - (vLHCb[0] / 100) * 100, vLHCb[1])\n pRelease = re.compile(r'Ganga-(\\d+)-(\\d+)-(\\d+)$')\n mRelease = pRelease.match(ver)\n except Exception as err:\n print(\"Error in understanding the version number!\")\n print(\"%s\" % str(err))\n raise InstallLHCbError(\"Could not identify LHCb and Ganga versions for input version %s\" % ver)\n\n\n if ver == 'head' or ver == 'master':\n self.versionLHCb = 'HEAD'\n self.versionGanga = 'master'\n elif versiontuple:\n self.versionLHCb = ver\n self.versionGanga = \"%d.%d.%d\" % versiontuple\n if self.options.prerelease is True:\n self.versionGanga = 'release/' + self.versionGanga\n if ver.find('p') > 0:\n raise InstallLHCbError( \"This script does not Currently support patch version numbers for LHCb!\" )\n else:\n raise InstallLHCbError(\"Could not identify LHCb and Ganga versions for input version %s\" % ver)\n\n print(\"LHCb release will be named: %s\" % self.versionLHCb)",
"def _build_version(self, version, num_of_digits):\n version = \"{}\".format(version).replace(\".\", \"\").replace(\" \", \"\").strip()\n num_of_digits_to_add = (num_of_digits - len(version))\n version += (\"0\" * num_of_digits_to_add)\n version = int(version)\n return version",
"def split_version(verstring):\n vers = []\n for v in verstring.split(\".\"):\n # only care about leading digits\n # Assumes that for the purposes of version checking 4.0rc1 == 4.0.0\n matchint = re.match(r\"(\\d+)\", v)\n if matchint:\n vers.append(int(matchint.group(1)))\n else:\n raise ValueError(f\"Version string ({verstring}) does not have numeric components\")\n return vers",
"def show_group_versions(releaser):\n releaser.show_group_versions()",
"def parse_version_string(version_string):\r\n string_parts = version_string.split(\".\")\r\n version_parts = (\r\n int(re.match(\"([0-9]*)\", string_parts[0]).group(0)), # type: ignore\r\n int(re.match(\"([0-9]*)\", string_parts[1]).group(0)), # type: ignore\r\n int(re.match(\"([0-9]*)\", string_parts[2]).group(0)) # type: ignore\r\n )\r\n return version_parts",
"def find_branches(versions):\n\n versions = map(LooseVersion, versions)\n\n # group versions by (major, minor) parts\n major_minor = lambda item: item.version[:2]\n versions.sort()\n tip = last(versions)\n grouped = groupby(versions, key=major_minor)\n\n chunks = (tuple(value) for key, value in grouped)\n\n # we only take versions which has patches\n chunks = (versions for versions in chunks if len(versions) > 1)\n\n # and we only need latest patch releases\n result = map(last, chunks)\n\n # we also add the last version bacause it is a tip\n if last(result) is not tip:\n result.append(tip)\n\n return [item.vstring for item in result]",
"def test_check_version():\n assert check_version('0.9.4-1', '0.9.4', '>=')\n assert check_version('3.0.0rc1', '3.0.0', '<')\n assert check_version('1.0', '1.0b2', '>')",
"def convert_into_std_range(range_str):\r\n try:\r\n import re\r\n if not range_str:\r\n range_str = ''\r\n # Remove v if followed by a digit\r\n if re.match(r'[\\s\\S]*v\\d+', range_str):\r\n range_str = range_str.replace('v', '')\r\n # Remove leading / trailing spaces\r\n range_str = range_str.strip()\r\n # Remove space between op and ver number\r\n range_str = re.sub(r'(?<=[><=~^])\\s', '', range_str)\r\n # If ' - ' in string, it is a range, change format\r\n range_parts = range_str.split(' - ')\r\n if len(range_parts) == 2:\r\n # Handle cases like - ^3.0.0 - ^4.1.0\r\n if (range_parts[0][0] == \"^\" or range_parts[1][0] == \"^\"):\r\n range_parts[0] = range_parts[0].replace(\"^\", \"\")\r\n range_str = '>=' + range_parts[0]\r\n if (range_parts[1][0] == \"^\"):\r\n range_str += f',<{int(range_parts[1][1]) + 1}.0.0'\r\n else:\r\n range_parts[1] = range_parts[1].replace(\"^\", \"\")\r\n range_str += ',<=' + range_parts[1]\r\n else:\r\n if ('>=' not in range_parts[0]):\r\n range_parts[0] = '>=' + range_parts[0]\r\n if ('<=' not in range_parts[1]):\r\n range_parts[1] = ',<=' + range_parts[1]\r\n\r\n range_str = range_parts[0] + range_parts[1]\r\n\r\n # Replace && string with a ','\r\n range_str = range_str.replace(' && ', ',')\r\n # Replace ~>= string with ~\r\n range_str = range_str.replace('~>=', '~')\r\n # Replace ~> string with ~\r\n range_str = range_str.replace('~>', '~')\r\n # Replace any other space with a ','\r\n range_str = range_str.replace(' ', ',')\r\n # If 2 conditions are specified in the same range without a ','\r\n # add it\r\n pattern = re.compile(r'(?<=[\\w\\d])(?P<op>[><])')\r\n range_str = pattern.sub(r',\\g<op>', range_str)\r\n # Replace => with >= and =< with <=\r\n range_str = range_str.replace('=<', '<=').replace('=>', '>=')\r\n # Remove = in the beginning\r\n range_str = re.sub('^=', '', range_str)\r\n # Remove extra = in the beginning in case there were two ==\r\n range_str = re.sub('^=', '', range_str)\r\n\r\n # Handle x, 0 and missing 0s in each part of the range\r\n range_parts = range_str.split(',')\r\n new_range_parts = []\r\n for part in range_parts:\r\n # Extract operator\r\n res = re.match(r'([><=~^*]*)(\\d+)', part)\r\n value = part\r\n op = ''\r\n if res and res[1]:\r\n op = res[1]\r\n value = value.replace(op, '')\r\n\r\n # Handle x and * in versions\r\n res = re.match(r'(\\d+)\\.[x\\*]', value)\r\n if res:\r\n value = f'{\"\" if op else \"^\"}{res[1]}.0.0'\r\n else:\r\n res = re.match(r'(\\d+)\\.(\\d+)\\.[x\\*]', value)\r\n if res:\r\n value = f'{\"\" if op else \"~\"}{res[1]}.{res[2]}.0'\r\n # all_vers_list = ['*', 'x', 'x.x.x', '^*', '*.*.*', '^x', '', 'x.*', '>=x', '~x.x.x', '^x.x.x', 'x.x', '~*', '*.*', '^x.x']\r\n all_vers = re.fullmatch(r'([^a-zA-Z0-9]*[x.*]*[^a-zA-Z0-9]*)', value)\r\n if all_vers:\r\n value = 'all_versions'\r\n # If only 1 or 2 digits in version, append .0\r\n res = re.match('(\\d+)\\.(\\d+)\\.(\\d+)', value)\r\n if res:\r\n pass\r\n else:\r\n res = re.match('(\\d+)\\.(\\d+)(.*)', value)\r\n if res:\r\n value = f'{res[1]}.{res[2]}.0{res[3] if res[3] else \"\"}'\r\n else:\r\n res = re.match('(\\d+)(.*)', value)\r\n if res:\r\n if (op is \"\" or op is \"^\" or op is \"~\"):\r\n new_range_parts.append(f'{\"^\"}{res[1]}.0.0{res[2] if res[2] else \"\"}')\r\n else:\r\n new_range_parts.append(f'{op}{res[1]}.0.0{res[2] if res[2] else \"\"}')\r\n value = ''\r\n if value:\r\n if (op == \"^=\"):\r\n op = \"^\"\r\n if (op == \"*\"):\r\n new_range_parts.append(f'{value}')\r\n else:\r\n new_range_parts.append(f'{op}{value}')\r\n\r\n # Put 
back the range and replace 2 or more dots with 1\r\n range_str = ','.join(new_range_parts)\r\n range_str = re.sub('\\.{2,}', '.', range_str)\r\n # Convert into lower and/or upper bound if\r\n # ^ or ~ in string\r\n if range_str[0] == '~' or range_str[0] == '^':\r\n op = range_str[0]\r\n res = re.match('[\\^~](\\d+)\\.(\\d+)\\.(\\d+)', range_str)\r\n if res:\r\n pre_rel_tag = range_str.replace(res.group(0), '')\r\n range_str = f'>={res[1]}.{res[2]}.{res[3]}'\r\n range_str += f'{pre_rel_tag}' if pre_rel_tag else ''\r\n if op == '^':\r\n range_str += f',<{int(res[1]) + 1}.0.0'\r\n else:\r\n range_str += f',<{res[1]}.{int(res[2]) + 1}.0'\r\n except Exception as e:\r\n print('Error: ', range_str, str(e))\r\n return range_str",
"def _parse_project_version(version=''):\n \n def mm_version(vers):\n stage = ''\n stage_sep = ''\n finalvers = ''\n if not vers.isdigit():\n for num,char in enumerate(vers):\n if char.isdigit():\n finalvers += str(char)\n elif char.isalpha():\n stage = vers[num:]\n break\n elif char in [' ','-']: #sep\n #We will strip spaces to avoid needing to 'quote' paths\n stage_sep = '-'\n stage = vers[num+1:]\n break\n else:\n finalvers = vers\n #remove any final build numbers\n if ' ' in stage:\n stage = stage.split(' ')[0]\n elif '-' in stage:\n stage = stage.split('-')[0]\n return (finalvers,stage,stage_sep)\n \n v = version.split('.')\n major = v[0]\n minor = v[1]\n maint = ''\n stage = ''\n if len(v)>2 and v[2]<>'0': #(1.0.0 == 1.0)\n maint = v[2]\n if len(v)>3 and v[3][0].isalpha():\n stage = v[3]\n project_version = '.'.join([major,minor,maint,stage])\n else:\n #Detect stage in minor\n minor,stage_minor,stage_minor_sep = mm_version(minor)\n if maint: #may be maint = ''\n maint, stage_maint, stage_maint_sep = mm_version(maint)\n else:\n stage_maint = ''; stage_maint_sep = ''\n if stage_minor:\n stage = stage_minor\n stage_sep = stage_minor_sep\n elif stage_maint:\n stage = stage_maint\n stage_sep = stage_maint_sep\n finalvers = [major,minor]\n if maint: finalvers.append(maint)\n finalvers = '.'.join(finalvers)\n if stage:\n finalvers = stage_sep.join([finalvers,stage])\n project_version = finalvers\n \n return project_version",
"def test_version_range_before():\n\n new_versions = {\"1\": {\"guid\": \"foo\",\n \"versions\": map(str, range(10))}}\n\n eq_(version_range(\"foo\", \"5\", \"6\", app_versions=new_versions), [\"5\"])\n eq_(version_range(\"foo\", \"8\", \"50\", app_versions=new_versions), [\"8\", \"9\"])",
"def test_case03(self):\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)",
"def normalise_version_str(version_num, normal_len):\n version_num_parts = len(version_num.split(\".\"))\n if version_num_parts < normal_len:\n for i in range(version_num_parts, normal_len):\n version_num = version_num + \".0\"\n return version_num",
"def version_components(version):\n match = re.match(r\"([\\d.]*)(-?)(.*)\", str(version))\n if not match:\n return \"\", \"\"\n\n version_number = match.group(1)\n suffix = match.group(3)\n\n return version_number, suffix",
"def compare_version(version_str1, version_str2):\n compare_result = 0\n pattern = '([^\\.]+)\\.?([^\\.]*)\\.?([^\\.]*)'\n match1 = re.match(pattern, version_str1.strip())\n match2 = re.match(pattern, version_str2.strip())\n major2 = match2.group(1)\n major1 = match1.group(1)\n minor2 = match2.group(2) if match2.group(2) else '0'\n minor1 = match1.group(2) if match1.group(2) else '0'\n patch2 = match2.group(3) if match2.group(3) else '0'\n patch1 = match1.group(3) if match1.group(3) else '0'\n\n if int(major2) > int(major1):\n return CompareResult.GREATER\n elif int(major2) < int(major1):\n return CompareResult.LESS\n else: # same major version\n if int(minor2) > int(minor1):\n return CompareResult.GREATER\n elif int(minor2) < int(minor1):\n return CompareResult.LESS\n else:\n if patch2 > patch1:\n return CompareResult.GREATER\n elif patch2 < patch1:\n return CompareResult.LESS\n else:\n return CompareResult.EQUAL",
"def version_compare(compare_ver, min_version, max_version):\n if max_version == \"*\":\n return True\n if max_version == \"-\" or not max_version:\n max_version = \"0\"\n if not min_version or min_version == \"*\" or min_version == \"-\":\n min_version = \"0\"\n if compare_ver == \"-\" or compare_ver == \"*\":\n compare_ver = \"0\"\n if compare_ver == min_version or compare_ver == max_version:\n return True\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n # If all versions follow proper versioning then perform a simple numerical comparison\n if len(compare_ver_parts) == len(min_version_parts) and len(\n compare_ver_parts\n ) == len(max_version_parts):\n compare_ver_num = normalise_num(compare_ver, len(compare_ver_parts))\n min_version_num = normalise_num(min_version, len(compare_ver_parts))\n max_version_num = normalise_num(max_version, len(compare_ver_parts))\n if compare_ver_num >= min_version_num and compare_ver_num <= max_version_num:\n return True\n\n normal_len = len(compare_ver_parts)\n if len(min_version_parts) > normal_len:\n normal_len = len(min_version_parts)\n if len(max_version_parts) > normal_len:\n normal_len = len(max_version_parts)\n\n # Normalise the version numbers to be of same length\n compare_ver = normalise_version_str(compare_ver, normal_len)\n min_version = normalise_version_str(min_version, normal_len)\n max_version = normalise_version_str(max_version, normal_len)\n\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n for i in range(0, normal_len):\n if (\n not compare_ver_parts[i].isdigit()\n or not min_version_parts[i].isdigit()\n or not max_version_parts[i].isdigit()\n ):\n if (\n compare_ver_parts[i] == min_version_parts[i]\n and compare_ver_parts[i] == max_version_parts[i]\n ):\n continue\n else:\n return False\n elif int(compare_ver_parts[i]) >= int(min_version_parts[i]) and int(\n compare_ver_parts[i]\n ) <= int(max_version_parts[i]):\n continue\n else:\n return False\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
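The version comparators in the negatives above normalise segment counts by hand. For contrast, a minimal sketch using the third-party packaging library (an assumption; none of these snippets actually import it) gets the same normalisation and pre-release ordering for free:

from packaging.version import Version

# packaging pads release segments for comparison ("1.2" == "1.2.0") and
# orders pre-release tags before the corresponding final release.
assert Version("1.2") == Version("1.2.0")
assert Version("1.10.0") > Version("1.9.3")
assert Version("2.0.0rc1") < Version("2.0.0")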
Fixture for setting up configuration parser | def setup_config():
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
return config | [
"def test_polarion_config_parser(polarion_config):\n assert polarion_config.test_case_url() == 'https://127.0.0.1/polarion/import/testcase'\n assert polarion_config.test_run_url() == 'https://127.0.0.1/polarion/import/xunit'\n assert polarion_config.username() == 'my_user'\n assert polarion_config.password() == 'my_pass'",
"def configure_parser(parser: ArgumentParser) -> None:",
"def test_config_fixture(self):\n self.conf.set_default(\"length_diff_percent\", 1000.0, group=\"test\")\n self.conf.set_default(\"time_diff_percent\", 1000.0, group=\"test\")\n self.conf.set_default(\"max_time\", 10, group=\"test\")\n self.conf.set_default(\"max_length\", 500, group=\"test\")",
"def test_parse_config_file_standard(self):\n exp = [['QIIME', 'source /bin/setup.sh; cd /bin; ./tests.py'],\n ['PyCogent', '/bin/cogent_tests']]\n obs = _parse_config_file(self.config1)\n self.assertEqual(obs, exp)",
"def test_read_initializer_configuration(self):\n pass",
"def mocked_config():\n mocked_config_path = path.abspath(path.dirname(__file__) + '/unittest_data/config/config.yml')\n cp = dirbs.config.ConfigParser()\n mocked_config = cp.parse_config(ignore_env=True,\n config_paths=[mocked_config_path])\n yield mocked_config",
"def __init__(self):\n self._parser = SafeConfigParser()",
"def setup_parser_config(subparsers):\r\n parser = subparsers.add_parser('config', help='Freeseer configuration functions')\r\n subparsers = parser.add_subparsers(dest=\"config_service\")\r\n setup_parser_config_reset(subparsers)\r\n setup_parser_config_youtube(subparsers)",
"def __init__(self):\n # create config parser\n self.config = RawConfigParser()\n self.config.read(CONFIG_PATH)",
"def setUp(self):\n self.parser = command_line.get_args()",
"def test_fully_default_configuration(self):\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 1)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')",
"def setupFromYml(self, yml):",
"def test_normal_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'url_list_file: ./urls\\n'\n 'output_directory: ./output\\n'\n 'max_depth: 6\\n'\n 'crawl_interval: 1\\n'\n 'crawl_timeout: 5\\n'\n 'target_url: .*\\.(gif|png|jpg|bmp)$\\n'\n 'thread_count: 8\\n'\n )\n\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 6)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 5)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def test_create_initializer_configuration(self):\n pass",
"def test_config_reader_can_read_example_configs(example_config):\n read_config(example_config)",
"def test_partly_default_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'max_depth: 10\\n'\n 'crawl_interval: 2\\n'\n 'crawl_timeout: 10\\n'\n 'target_url: .*\\.(com|cn|net)$\\n'\n )\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 10)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 2)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 10)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(com|cn|net)$')",
"def test_config():\n return {}",
"def test_config_read_sample(self):\n\n try:\n open(self.config.name)\n except FileNotFoundError:\n pytest.skip(\"config file not found\")\n\n config_obj = read_config(self.config)\n self.assertIsInstance(config_obj, Config)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
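In a pytest suite, the setup_config record above would normally be registered as a fixture with the @pytest.fixture decorator; a minimal sketch, assuming a hypothetical CONFIG_PATH constant that the record does not show:

import configparser

import pytest

CONFIG_PATH = "config.ini"  # hypothetical path, not taken from the record


@pytest.fixture
def setup_config():
    # Parse the INI file once for each test that requests this fixture
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    return config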
Fixture for retrieving mock event | def get_mock_event():
event = {
"httpMethod": "GET",
"//body": "{\"name\": \"Sam\"}",
"resource": "/{proxy+}",
"queryStringParameters": {},
"pathParameters": {
"proxy": "users"
},
"requestContext": {
"accountId": "222222222",
"identity": {
"sourceIp": "2a02:a445:6d36:1:1e3:a188:313c:1d31",
"userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36",
},
"resourcePath": "/{proxy+}",
"httpMethod": "GET",
"apiId": "xxxxxxxxxx"
}
}
return event | [
"def test_get_event(self):\n pass",
"def test_describe_event(self):\n pass",
"def test_future_event(self):\n pass",
"def test_track_event(self):\n\n created_at = datetime.now()\n\n mock_payload = {\n 'user_id': 1,\n 'event_name': 'Event',\n 'metadata': {\n 'hello': 'world',\n },\n 'created_at': datetime_to_unix_timestamp(created_at)\n }\n\n @httmock.urlmatch(method='POST', netloc='api.intercom.io', path='/events')\n def mock_track_event(url, request):\n result = json.loads(request.body)\n expected = json.loads(json.dumps(mock_payload, cls=JSONEncoder))\n\n self.assertEqual(result, expected)\n return request.body\n\n with httmock.HTTMock(mock_track_event):\n self.client.track_event(\n user_id=1,\n event_name='Event',\n metadata={\n 'hello': 'world',\n },\n created_at=created_at,\n )",
"def test_update_event(self):\n pass",
"def test_api_predictor_events_get(self):\n pass",
"def test_event_study(self):\n pass",
"def test_update_event_type(self):\n pass",
"def test_create_event(self, cyl_generator):\n event = cyl_generator.create_event()\n assert isinstance(event, Event)",
"def test_new_general_event(client, transactional_db, mocker):\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n settings.SNS_ARN = arn\n mock = mocker.patch('coordinator.api.models.boto3.client')\n assert Event.objects.count() == 0\n\n ev = Event(event_type='error', message='test error event')\n ev.save()\n assert Event.objects.count() == 1\n assert mock().publish.call_count == 1\n message = {\n 'default': json.dumps({\n 'event_type': 'error',\n 'message': 'test error event',\n 'task_service': None,\n 'task': None,\n 'release': None\n })\n }\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n mock().publish.assert_called_with(Message=json.dumps(message),\n MessageStructure='json',\n TopicArn=arn)\n settings.SNS_ARN = None",
"def make_event(entity_id):\n domain = split_entity_id(entity_id)[0]\n state = mock.MagicMock(\n state=\"not blank\",\n domain=domain,\n entity_id=entity_id,\n object_id=\"entity\",\n attributes={},\n )\n return mock.MagicMock(data={\"new_state\": state}, time_fired=12345)",
"def event_request_factory_fixture(event_factory):\n def _factory(device_ids=None, events=None):\n request = Mock()\n request.installed_app_id = uuid4()\n if events is None:\n events = []\n if device_ids:\n events.extend([event_factory(id) for id in device_ids])\n events.append(event_factory(uuid4()))\n events.append(event_factory(device_ids[0], event_type=\"OTHER\"))\n request.events = events\n return request\n return _factory",
"def test_get_events(mocker):\n mocker.patch.object(FireEyeClient, '_get_token', return_value='token')\n client = Client(base_url=\"https://fireeye.cm.com/\", username='user', password='pass', verify=False, proxy=False)\n mocker.patch.object(FireEyeClient, 'get_events_request',\n return_value=util_load_json('test_data/get_events.json'))\n command_results = get_events(client=client, args={'end_time': '2021-05-19T23:00:00.000-00:00',\n 'duration': '48_hours', 'limit': '3'})\n assert command_results.outputs == GET_EVENTS_CONTEXT",
"def test_iam_project_event_get(self):\n pass",
"def test_v2_events_event_idget(self):\n response = self.client.open(\n '/v2/events/{eventID}'.format(event_id=EventID()),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_get_events(self):\n\n # Check event class is properly set\n response = self.api.get_events(limit=1)\n event = response.data[0]\n event_class = type(event.data).__name__\n self.assertTrue(event_class != 'Event')\n self.assertTrue(event_class.endswith('Created') or event_class.endswith('Updated') or event_class.endswith('Deleted'))",
"def test_get_event__valid_key(self):\n\n self.assertEqual(entities.Event('111095', 'test_event', ['111127']),\n self.project_config.get_event('test_event'))",
"def test_past_event(self):\n pass",
"def testEventInit(self):\n e1 = Event(5, 'obj', 'message')\n self.assertEqual(e1.timestamp, 5)\n self.assertEqual(e1.eventObject, 'obj')\n self.assertEqual(e1.logMessage, 'message')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
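The mock event above follows the API Gateway proxy-integration shape, so code under test can read request attributes out of requestContext. A small illustrative helper (not part of the record) shows the access pattern:

def get_source_ip(event):
    # API Gateway proxy events expose the caller's address under requestContext.identity
    return event["requestContext"]["identity"]["sourceIp"]


def get_user_agent(event):
    return event["requestContext"]["identity"]["userAgent"]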
Unit test get_ip_type_by_address method of the Bad Bots class | def test_get_ip_type_by_address(setup_config, get_mock_event):
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
ipv4_address_1 = '1.1.1.1'
ipv4_address_2 = '11.22.33.44'
ipv4_address_3 = '123.123.123.123'
ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31'
ipv6_address_2 = '3731:54:65fe:2::a7'
ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463'
# !ACT!
# Detect the IP type of provided IP addresses
ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1)
ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2)
ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3)
ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1)
ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2)
ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3)
# !ASSERT!
# Assert IP addresses are of type IPv4
assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value
# Assert IP addresses are of type IPv6
assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value | [
"def testIpAddress(self):\n self.assertRaises(ValueError,\n basictypes.build,\n \"SNIMPY-MIB\", \"snimpyIpAddress\", \"999.5.6.4\")\n a = basictypes.build(\"SNIMPY-MIB\", \"snimpyIpAddress\", \"10.0.4.5\")\n self.assert_(isinstance(a, basictypes.IpAddress))\n self.assertEqual(a, \"10.0.4.5\")\n self.assertEqual(a, \"10.00.4.05\")\n self.assertEqual(a, [10,0,4,5])\n self.assertEqual(a[2], 4)\n self.assert_(a < \"10.1.2.4\")\n self.assert_(a > \"10.0.0.1\")\n a = basictypes.build(\"SNIMPY-MIB\", \"snimpyIpAddress\", [1, 2, 3, 5])\n self.assertEqual(a, \"1.2.3.5\")",
"def test_ip_addresses_exists():\n load_ips()\n validate_names()",
"def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')",
"def test_discover_ip_reservation(self):\n pass",
"def test_ipam_ip_addresses_read(self):\n pass",
"def test_get_ip_reservation(self):\n pass",
"def test_get_source_ip(self):\n pass",
"def test_get_node_internal_ip_address(self):\n pass",
"def test_search_address_type(self):\n pass",
"def test_endpoint_ip_address(self):\n\n for i in range(60):\n utils.build_full_ip(address=f\"10.10.10.{i}/24\")\n\n resp = self.client.get(\"/api/plugins/prometheus-sd/ip-addresses/\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.content)\n\n self.assertIsNotNone(data[0][\"targets\"])\n self.assertIsNotNone(data[0][\"labels\"])\n self.assertEqual(len(data), 60)",
"def test__get_address_positive(self, *mocks):\n ctx = ContextMock()\n _try_get_address(ctx, \"type\")",
"def test_ipam_ip_addresses_list(self):\n pass",
"def test_ipam_ip_addresses_create(self):\n pass",
"def test_update_address_type(self):\n pass",
"def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)",
"def test_retrieve_address(self):\n pass",
"def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_get_ip_tags_invalid_ip(client, database):\n\n invalid_ip = \"http://127.0.0.1:5000/ip-tags/10.1.2.3000\"\n response = client.get(invalid_ip)\n response_data = response.get_json()\n\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert (\n response_data[\"error\"]\n == \"400 Bad Request: Address 10.1.2.3000 does not have IPv4 format\"\n )",
"def test_get_ip_ban(self):\n nonexistent_ban = get_ip_ban('123.0.0.1')\n self.assertIsNone(nonexistent_ban)\n\n Ban.objects.create(banned_value='124.0.0.1',\n check_type=BAN_IP,\n expires_on=timezone.now() - timedelta(days=7))\n\n expired_ban = get_ip_ban('124.0.0.1')\n self.assertIsNone(expired_ban)\n\n Ban.objects.create(banned_value='wrongtype',\n check_type=BAN_EMAIL)\n\n wrong_type_ban = get_ip_ban('wrongtype')\n self.assertIsNone(wrong_type_ban)\n\n valid_ban = Ban.objects.create(\n banned_value='125.0.0.*',\n check_type=BAN_IP,\n expires_on=timezone.now() + timedelta(days=7))\n self.assertEqual(get_ip_ban('125.0.0.1').pk, valid_ban.pk)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
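The BadBots implementation itself is not included in this record. One plausible way a get_ip_type_by_address check could work — a sketch only, built on the standard library ipaddress module — is:

import ipaddress
from enum import Enum


class SourceIPType(Enum):
    IPV4 = "IPv4"
    IPV6 = "IPv6"


def get_ip_type_by_address(address):
    # ip_address() parses both address families and raises ValueError otherwise
    parsed = ipaddress.ip_address(address)
    return SourceIPType.IPV4 if parsed.version == 4 else SourceIPType.IPV6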
Unit test check_bot_confidence method of the Bad Bots class | def test_check_bot_confidence(setup_config, get_mock_event):
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
bot_1 = Bot()
bot_1.source_ip = '1.1.1.1'
bot_1.http_query_string_parameters = '<script></script>'
bot_1.http_body = 'EXEC'
bot_1.geolocation = 'United States'
bot_1.source_ip_type = BadBots.SourceIPType.IPV4
bot_1.http_method = "CONNECT"
bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)"
bot_2 = Bot()
bot_2.source_ip = '77.168.51.231'
bot_2.http_query_string_parameters = 'hello'
bot_2.http_body = 'hello!'
bot_2.geolocation = 'Netherlands'
bot_2.source_ip_type = BadBots.SourceIPType.IPV4
bot_2.http_method = "GET"
bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
bot_3 = Bot()
bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33'
bot_3.http_query_string_parameters = 'param=true'
bot_3.http_body = 'username=xxx'
bot_3.geolocation = 'United States'
bot_3.source_ip_type = BadBots.SourceIPType.IPV6
bot_3.http_method = "GET"
bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
# !ACT!
# Do confidence check on potential bots
confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1)
confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2)
confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3)
# !ASSERT!
# Assert confidence scores match the expected values
assert(confidence_score_bot_1 == 25)
assert(confidence_score_bot_2 == 0)
assert(confidence_score_bot_3 == 5) | [
"def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(words), len(confidences))\n mapped_confidences = self._api.MapWordConfidences()\n self.assertEqual([v[0] for v in mapped_confidences], words)\n self.assertEqual([v[1] for v in mapped_confidences], confidences)",
"def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))",
"def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)",
"def test_preflop_betting(self):\n bot, brain = self.brain()\n # 20 to call, 140 in the pot\n self.assertAlmostEqual(brain.pot_odds(), 100 * 2.0/(14 + 2))\n brain.do_turn('bot_0', 1000)\n self.assertTrue(bot.raise_amount > 0) # shouldn't fold with a pair of aces",
"async def test_warnings_with_high_confidence(self):\n self.sources[\"source_id\"][\"parameters\"][\"confidence_levels\"] = [\"high\"]\n response = await self.collect(self.metric, get_request_json_return_value=self.bandit_json)\n self.assert_measurement(response, value=\"0\", entities=[])",
"def test_lob_confidence(self):\n basic_filter = kalman.Kalman((1, 0), (1, 0))\n self.assertEqual([], basic_filter.lob_confidence_intervals(1.96))\n\n # Now give it a transmitter to track.\n basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (5, 0))\n # We should have a non-zero margin of error.\n error = basic_filter.lob_confidence_intervals(1.96)\n self.assertGreater(error, 0)",
"def verifyBuckling(self):\n pass",
"def test_confidences(self):\n\n # Add alignments to pipeline\n for hit, aln in zip(self.pipeline[\"templates\"], self.ALIGNMENTS):\n hit[\"alignment\"] = aln\n\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"][\"confidence\"],\n \"---5-4-----\")\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"][\"confidence\"],\n \"----3-----\")",
"def test_rb_utils(self):\n\n t1 = 100.\n t2 = 100.\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],\n [t2, t2], gate2Q)\n\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],\n [t2], gate1Q)\n\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n \"Error: 1Q Coherence Limit\")\n\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n \"Error: 2Q Coherence Limit\")\n\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],\n [0, 1, -1],\n [0.001, 0.0015, 0.02])\n\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n \"Error: 2Q EPC Calculation\")",
"def test_eat_healthy(self):\n \tself.assertEqual(\n\t\t\teat(\"broccoli\", isHealthy=True),\n\t\t\t\"I'm eating broccoli, because my body is a temple\"\n \t)",
"def test_calculate_confidence_statistics(input_file):\n\n logging.info(\"test_calculate_confidence_statistics\")\n\n # GIVEN a data dict\n # input_file = \"sample_multiple.json\"\n data = tscribe.load_json_as_dict(input_file)\n\n # WHEN calling calculate_confidence_statistics(...)\n stats = tscribe.calculate_confidence_statistics(data)\n\n # THEN return the data model with the right components\n assert isinstance(stats, dict), \"Stats should be of dict type\"\n assert \"timestamps\" in stats, \"Data model should include timestamps\"\n assert \"9.8\" in stats, \"Data model should include 9.8\"\n assert \"9\" in stats, \"Data model should include 9\"\n assert \"8\" in stats, \"Data model should include 8\"\n assert \"7\" in stats, \"Data model should include 7\"\n assert \"6\" in stats, \"Data model should include 6\"\n assert \"5\" in stats, \"Data model should include 5\"\n assert \"4\" in stats, \"Data model should include 4\"\n assert \"3\" in stats, \"Data model should include 3\"\n assert \"2\" in stats, \"Data model should include 2\"\n assert \"1\" in stats, \"Data model should include 1\"\n assert \"0\" in stats, \"Data model should include 0\"",
"def test_is_winner_is_incorrect(self):\n assert not Bet.is_winner(3, 5)",
"def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)",
"def test_dice_coef_loss():\n assert dice_coef_loss() == expected_dice_coef_loss",
"def test_verify_fails_expected_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n with pytest.raises(ValueError) as excinfo:\n pm.verify(\n metric=\"threshold_brier_score\", comparison=\"m2c\", dim=[\"init\", \"member\"]\n )\n assert \"Please provide threshold.\" == str(excinfo.value)",
"def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')",
"def check():\n hokusai.check()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
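The scoring rules behind the expected values 25, 0 and 5 are not part of this record. A simplified confidence scorer over signals like the ones set on the Bot objects above might accumulate points per suspicious attribute; the weights below are illustrative only and do not reproduce the real BadBots logic:

SUSPICIOUS_METHODS = {"CONNECT", "TRACE"}
SUSPICIOUS_PAYLOAD_PATTERNS = ("<script", "exec", "drop table")


def check_bot_confidence(bot):
    # Illustrative scoring; weights are placeholders, not the real ones.
    score = 0
    if bot.http_method in SUSPICIOUS_METHODS:
        score += 10
    if any(p in bot.http_query_string_parameters.lower() for p in SUSPICIOUS_PAYLOAD_PATTERNS):
        score += 10
    if any(p in bot.http_body.lower() for p in SUSPICIOUS_PAYLOAD_PATTERNS):
        score += 5
    return score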
Generates IDL files from a template for user and system marshaling. | def _Main():
cmd_parser = argparse.ArgumentParser(
description='Tool to generate IDL from template.')
cmd_parser.add_argument('--idl_template_file',
dest='idl_template_file',
type=str,
required=True,
help='Input IDL template file.')
cmd_parser.add_argument('--idl_output_file',
type=str,
required=True,
help='Output IDL file.')
flags = cmd_parser.parse_args()
_GenerateIDLFile(flags.idl_template_file, flags.idl_output_file) | [
"def generate_files_for_template(env, template_file, input_files, output_dir):\n # Open template\n with open(template_file, \"r\") as template_contents:\n template_object = env.from_string(template_contents.read())\n _, template_extension = os.path.splitext(template_file)\n\n # Create output dir\n if not os.path.exists(output_dir):\n try:\n os.makedirs(output_dir)\n except OSError:\n print(\"Could not make output directory\", output_dir)\n sys.exit(1)\n\n for peripheral in input_files:\n generate_source_file(\n template_object, peripheral, template_extension, output_dir\n )",
"def emit_scripts(templates):\n\n for platform, ver, apps, deps in load_deps_transformed():\n shell = \"batch\" if platform[\"os\"] in [\"windows\"] else \"shell\"\n suffix = \".bat\" if shell == \"batch\" else \".sh\"\n ident = f\"{platform['name']}-{ver}\"\n script_filename = Path(__file__).parent.with_name(ident + suffix)\n\n lines = templates[\"script\"][shell][\"default\"].render()\n lines += \"\\n\\n\"\n\n for origin, aliases in deps:\n if not aliases:\n continue\n\n if origin == \"sys\":\n lines += (\n templates[\"pkgman\"][platform[\"mng\"]]\n .get(ident, templates[\"pkgman\"][platform[\"mng\"]][\"default\"])\n .render(apps=apps, deps=aliases, platform=platform)\n )\n lines += \"\\n\\n\"\n elif origin == \"src\":\n for alias in aliases:\n lines += (\n templates[\"src\"][alias]\n .get(ident, templates[\"src\"][alias][\"default\"])\n .render(apps=apps, deps=aliases, platform=platform)\n )\n lines += \"\\n\\n\"\n else:\n lines += (\n templates[\"pkgman\"][origin]\n .get(ident, templates[\"pkgman\"][origin][\"default\"])\n .render(apps=apps, deps=aliases, platform=platform)\n )\n lines += \"\\n\\n\"\n\n with script_filename.resolve().open(\"w\") as script:\n script.write(lines)",
"def __emit_package_init_file(self, template, package_imports_fn, package_exports_fn):\n # Open template.\n code = _get_template(template)\n\n # Set helper vars.\n module_imports = package_imports_fn()\n module_exports = package_exports_fn()\n\n # Generate code.\n code = inject_standard_template_params(self.ontology, self.opts, code)\n code = code.replace('{module-imports}', module_imports)\n code = code.replace('{module-exports}', module_exports)\n\n return code",
"def buildAutogenContents(self):\n if len(self.mTemplates) == 0:\n return None\n \n content = \"/** Autogenerated temporary file for template instantiation. */\\n\"\n for t in self.mTemplates:\n template_type = t.mTemplateType\n typedef_name = t.mTypedefName\n content += \"\"\"\n typedef %(template_type)s %(typedef_name)s;\n inline unsigned __instantiate_%(typedef_name)s()\n { return unsigned(sizeof(%(typedef_name)s)); }\n \"\"\" % vars() \n \n return content",
"def write_templates(albumPath):\n for k in templates:\n template = templates[k]\n oPath = os.path.join(albumPath, template['oFile'])\n log('Generating %s' % oPath)\n open(oPath, 'w').write(template['output'])",
"def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])",
"def produce_ppi_template_files (inPath, templateSeqFile, templateStrucResFile):\n load_pdbtools_chain_sequences (templateSeqFile)\n load_pdbtools_chain_strucRes_labels (templateStrucResFile)\n interactome = read_single_interface_annotated_interactome (inPath)\n n = len(interactome)\n \n templateFiles = os.listdir(templateDir)\n for i, row in interactome.iterrows():\n sys.stdout.write(' PPI %d out of %d (%.2f%%) \\r' % (i+1, n, 100*(i+1)/n))\n sys.stdout.flush()\n for chain1, chain2 in row.Chain_pairs:\n pdbid, chainID1 = chain1.split('_')\n _, chainID2 = chain2.split('_')\n selectChains = sorted([chainID1, chainID2])\n templateID = '-'.join([pdbid] + selectChains)\n filename = pdbfile_name (templateID)\n if filename not in templateFiles:\n resIDs = {c:structured_residue_IDs (pdbid, c, pdbDir) for c in selectChains}\n write_partial_structure (pdbid,\n selectChains,\n pdbDir,\n templateDir / filename,\n resIDs = resIDs)\n print()",
"def _gen_init_files(\n output_dir: str,\n output_package: str,\n api_version: int,\n symbols_by_module: Mapping[str, set[_Entrypoint]],\n generated_imports_by_module: Mapping[str, set[str]],\n docs_by_module: Mapping[str, str],\n root_template_path: str,\n file_prefixes_to_strip: Sequence[str],\n use_lazy_loading: bool,\n module_prefix: str,\n subpackage_rewrite: Optional[str] = None,\n):\n modules = set(symbols_by_module.keys())\n modules.update(generated_imports_by_module.keys())\n for module in modules:\n if len(module) < len(output_package):\n continue\n module_relative_to_package = module[len(output_package) + 1 :]\n module_path = os.path.join(\n output_dir, module_relative_to_package.replace('.', '/')\n )\n os.makedirs(module_path, exist_ok=True)\n with open(os.path.join(module_path, '__init__.py'), 'w') as f:\n module_imports = _get_imports_for_module(\n module,\n output_package,\n symbols_by_module,\n generated_imports_by_module,\n file_prefixes_to_strip,\n module_prefix,\n use_lazy_loading,\n subpackage_rewrite,\n )\n if use_lazy_loading:\n module_imports = _LAZY_LOADING_MODULE_TEXT_TEMPLATE % module_imports\n # If this module is the root and there is a root template, use it\n if module == output_package and root_template_path:\n with open(root_template_path, 'r') as template:\n content = template.read()\n content = content.replace('# API IMPORTS PLACEHOLDER', module_imports)\n\n underscore_elements = [\n s.name\n for s in symbols_by_module[module]\n if s.name.startswith('_')\n ]\n for i in generated_imports_by_module[module]:\n module_name = i[i.rfind('.') + 1 :]\n if module_name.startswith('_'):\n underscore_elements.append(module_name)\n\n root_module_footer = f\"\"\"\n_names_with_underscore = [{', '.join(sorted([f\"'{s}'\" for s in underscore_elements]))}]\n__all__ = [_s for _s in dir() if not _s.startswith('_')]\n__all__.extend([_s for _s in _names_with_underscore])\n \"\"\"\n\n content = content.replace('# __all__ PLACEHOLDER', root_module_footer)\n\n content = content.replace(\n '# WRAPPER_PLACEHOLDER',\n _get_module_wrapper(\n module,\n output_dir,\n output_package,\n api_version,\n symbols_by_module,\n use_lazy_loading,\n ),\n )\n\n f.write(content)\n continue\n\n f.write(\n _GENERATED_FILE_HEADER % _get_module_docstring(docs_by_module, module)\n )\n\n f.write(module_imports)\n\n f.write(\n _get_module_wrapper(\n module,\n output_dir,\n output_package,\n api_version,\n symbols_by_module,\n use_lazy_loading,\n )\n )",
"def generate(pluginDatas):\n template=getFileTemplate(pluginDatas)\n file =fillTemplate(template,pluginDatas)\n writeFile(file,pluginDatas)",
"def generate_basic_modules(template_dir=TEMPLATE_DIR, out_dir=PKG_DIR):\n print(80 * \"-\")\n print(\"Package:\", out_dir)\n\n basic_modules = [\"_init.py\",\n \"constants.py\",\n \"base_api.py\",\n \"exception.py\"]\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n installed = []\n for module in basic_modules:\n in_file = os.path.join(template_dir, module)\n\n if module == \"_init.py\":\n module = \"__init__.py\"\n\n out_file = os.path.join(out_dir, module)\n try:\n shutil.copy(in_file, out_file)\n except (FileNotFoundError, shutil.SameFileError) as err:\n print(err)\n installed.append(\"- \" + out_file)\n\n print(\"Basic modules:\")\n print(\"\\n\".join(installed))",
"def gen_implem_file(schema):\n with open(C_FILE_PATH, \"w\") as out:\n out.write(C_FILE_START)\n gen_implem_types(out, schema)\n out.write(C_FILE_END)",
"def do_createTemplate(gDict, args):\n\n (doThis, todo) = splitArgs(args, 1)\n fail = checkPresence(doThis, [\"type\"], [])\n kind = fail[\"type\"] # todo[0]\n if fail[\"truth\"]:\n print kind, \" makes no sense\"\n sys.exit()\n n = 1\n if \"n\" in doThis:\n n = int(doThis[\"n\"])\n if kind != \"sensor\" and kind != \"site\":\n print kind, \" must be either sensor or site\"\n sys.exit()\n\n fields = templateObjs[kind]\n examples = {\n \"operator\" : \"id:xx|contact:xx|tel:xxxyyyyzzz|email:xxx@yyy\",\n \"provider\" : \"id:xx|contact:xx|tel:xxxyyyyzzz|email:xxx@yyy\",\n \"firstdate\": \"DD-MM-YYYY\",\n \"energysupply\": \"solar|mains\",\n }\n hide = [\"listOfDetectors\", \"detector\", \"epoch.f\", \"date.l\", \"epoch.l\", \"history\", \"username\"]\n for i in range(0,n):\n print \"begin.asset\"\n for uf in fields:\n uk = uf.keys()\n for f in uk:\n uff = uf[f]\n if uff in hide:\n continue\n if uff == \"n-detectors\":\n print \"# add more detectors if necessary\"\n for i in range(0,5):\n print \"detector=name:xxx|unit:xx|epsilon:xx\"\n break\n if uff in examples:\n print \"{}={}\".format(uff, examples[uff])\n else:\n print \"{}=\".format(uff)\n print \"end.asset\\n\"\n sys.exit()",
"def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render",
"def generate(env):\r\n if not exists(env):\r\n return 0;\r\n\r\n TLBImpBuilder = env.Builder(\r\n action = SCons.Action.Action(\r\n TLBImpGenerator\r\n , generator = 1\r\n #, cmdstr = \"$TLBIMPCOMSTR\"\r\n )\r\n , src_suffix = '.dll'\r\n , target_suffix = '.dll'\r\n )\r\n\r\n dotNETSDK = _getNETSDKPath()\r\n homedir = env.Dir(dotNETSDK)\r\n bindir = homedir.Dir('bin')\r\n\r\n env['TLBIMP'] = 'tlbimp.exe'\r\n env['TLBIMPFLAGS'] = '/nologo /silent /strictref:nopia'\r\n env['TLBIMPCOMSTR'] = '[.NET] TLBIMP: Generating interop assembly for typelib in: $SOURCE to: $TARGET'\r\n env['BUILDERS']['TLBImp'] = TLBImpBuilder\r\n\r\n # Agrego al PATH el directorio del tlbimp\r\n env.PrependENVPath(\r\n 'PATH',\r\n bindir.abspath\r\n )",
"def _makescript(self, file):\n # check argument\n if sys.version < '3':\n if not hasattr(file, 'read'):\n raise TypeError('%r is not supported type' % file)\n else:\n if not isinstance(file, io.IOBase):\n raise TypeError('%r is not supported type' % file)\n \n # read all\n template_body = file.read()\n\n # detect encoding\n encoding = getattr(file, 'encoding', '')\n if not encoding:\n try:\n encoding = get_encodings_from_content(template_body)\n except Exception:\n logger.debug('encoding detection error', exc_info=True)\n # check encoding registered in Python\n try:\n b'test string'.decode(encoding)\n except LookupError:\n encoding = ''\n if not encoding:\n encoding = sys.getdefaultencoding()\n if encoding == 'ascii':\n encoding = 'utf-8'\n\n # cast string\n if isinstance(template_body, BytesType):\n template_body = template_body.decode(encoding)\n\n # save variables\n if hasattr(file, 'name'):\n self.name = file.name\n else:\n self.name = '<template-script#%d>' % self._name_counter\n self._name_counter += 1\n self.encoding = encoding\n self.features = 0\n self._template_body = template_body\n\n # loop vars\n self._lines = []\n self._indent = []\n self._current_position = (1, 0)\n self._firstmost_executable = True\n last = 0\n pattern = re.compile(\n re.escape(PREFIX) + '(?P<body>.*?)' + re.escape(SUFFIX), re.DOTALL)\n\n for match in pattern.finditer(template_body):\n start, end = match.span()\n\n # get pos\n _ = template_body[:start].splitlines()\n self._current_position = (len(_), len(_[-1])) if _ else (1, 0)\n del _\n\n # leading chunk\n chunk = template_body[last:start]\n if chunk:\n self._appendline('yield ' + literalize(chunk))\n last = end\n\n # insert marker\n self._appendline(\n '# -*- line %d, column %d -*-' % self._current_position)\n\n # process PI\n chunk = match.group('body')\n\n for i in sorted(i for i in dir(self) if i.startswith('_handle_')):\n handler = getattr(self, i)\n if re.match(handler.pattern, chunk):\n handler(chunk)\n if getattr(handler, 'executable', True):\n self._firstmost_executable = False\n break\n\n # not supported <?...?>\n else:\n chunk = PREFIX + chunk + SUFFIX\n self._appendline('yield ' + literalize(chunk))\n\n # trailing chunk\n chunk = template_body[last:]\n if chunk:\n self._appendline('yield ' + literalize(chunk))\n\n # check remaining indentation\n if self._indent:\n lineno, offset = self._indent[-1]\n raise IndentationError(\n 'brace is not closed', (\n self.name,\n lineno,\n offset,\n self._template_body.splitlines()[lineno - 1],\n ))\n\n # make a script\n prefix = [\n '__file__ = %s' % literalize(self.name),\n # '__name__ = \"__main__\"',\n '__encoding__ = %s' % literalize(self.encoding),\n # make a code as function for `yield` and `return`\n 'def __main__():',\n ]\n if not self._lines:\n self._lines.insert(0, 'pass')\n self.script = '\\n'.join(prefix) + '\\n' \\\n + '\\n'.join(TAB + i for i in self._lines)\n\n # cleanup\n del self._lines\n assert not self._indent\n del self._indent\n del self._current_position\n del self._firstmost_executable",
"def create_soft_ioc(\n filename: str, template_file: str, output_directory: str, config_name: str\n) -> None:\n\n templated_filename = create_force_pvs(\n filename, output_directory, template_file, config_name\n )\n summary_pv_filename = create_summary_pvs(filename, output_directory, config_name)\n\n # get working directory\n with open(f\"{output_directory}/st.cmd\", \"w\") as f:\n f.write(f'dbLoadRecords(\"{summary_pv_filename}\") \\n')\n f.write(f'dbLoadTemplate(\"{templated_filename}\") \\n')\n f.write(\"iocInit \\n\")\n\n template_base = str(template_file).split(\"/\")[-1]\n copyfile(template_file, f\"{output_directory}/{template_base}\")\n\n print(f\"Created {templated_filename}, {summary_pv_filename}, and st.cmd.\")",
"def create_driver_file(self, template_dir, model_string, desvar_shapes, desired_outputs, output_scalers):\r\n desvar_shapes_lines = []\r\n for key in desvar_shapes:\r\n desvar_shapes_lines.append(f'desvar_shapes[\"{key}\"] = {desvar_shapes[key]}')\r\n desvar_shapes_lines = \"\"\"\r\n{}\r\n \"\"\".format(\r\n \"\\n\".join(desvar_shapes_lines)\r\n )\r\n\r\n desired_outputs_string = \"desired_outputs = [\" + \" \".join(['\"' + key + '\"' for key in desired_outputs]) + \"]\"\r\n\r\n write_outputs_string = []\r\n for i, key in enumerate(desired_outputs):\r\n string = f'f.write(str(float(outputs[\"{key}\"]) * {output_scalers[i]}) + \"\\\\n\")'\r\n write_outputs_string.append(string)\r\n\r\n write_outputs_string = \"\"\"\r\n {}\r\n \"\"\".format(\r\n \"\\n \".join(write_outputs_string)\r\n )\r\n\r\n # Create openmdao_driver.py\r\n driver_file = (\r\n textwrap.dedent(\r\n \"\"\"\\\r\n # Import modules\r\n import sys\r\n from subprocess import call\r\n import numpy as np\r\n from yaml import safe_load\r\n\r\n\r\n #########################################\r\n # #\r\n # Step 1: Use Dakota created #\r\n # input files to prepare for #\r\n # model run. #\r\n # #\r\n #########################################\r\n input_template = \"input_template.yml\"\r\n inputs = \"inputs.yml\"\r\n call([\"dprepro\", sys.argv[1], input_template, inputs])\r\n call(['rm', input_template])\r\n\r\n #########################################\r\n # #\r\n # Step 2: Run Model #\r\n # #\r\n #########################################\r\n # Load parameters from the yaml formatted input.\r\n with open(inputs, \"r\") as f:\r\n desvars = safe_load(f)\r\n\r\n desvars_list = []\r\n for key in desvars:\r\n desvars_list.append(float(desvars[key]))\r\n flattened_desvars = np.array(desvars_list)\r\n\r\n desvar_shapes = {}\r\n \"\"\"\r\n )\r\n + desvar_shapes_lines\r\n + textwrap.dedent(\r\n \"\"\"\r\n size_counter = 0\r\n desvars = {}\r\n for key, shape in desvar_shapes.items():\r\n size = int(np.prod(shape))\r\n desvars[key] = flattened_desvars[\r\n size_counter : size_counter + size\r\n ].reshape(shape)\r\n size_counter += size\r\n\r\n print()\r\n print('Design variables:')\r\n print(desvars)\r\n \"\"\"\r\n )\r\n + model_string\r\n + \"\\n\"\r\n + textwrap.dedent(\r\n \"\"\"\\\r\n model_instance = model(desvars)\r\n outputs = model_instance.compute(desvars)\r\n #########################################\r\n # #\r\n # Step 3: Write Output in format #\r\n # Dakota expects #\r\n # #\r\n #########################################\r\n \"\"\"\r\n )\r\n + desired_outputs_string\r\n + \"\\n\"\r\n + textwrap.dedent(\r\n \"\"\"\\\r\n print('Outputs:')\r\n print(outputs)\r\n # Write it to the expected file.\r\n with open(sys.argv[2], \"w\") as f:\"\"\"\r\n )\r\n + write_outputs_string\r\n )\r\n\r\n with open(template_dir + \"openmdao_driver.py\", \"w\") as text_file:\r\n text_file.write(driver_file)",
"def writeDomainFile():\n writeTemplate(localTemplate)",
"def make_template():\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
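_GenerateIDLFile itself is not shown in this record; a minimal, hypothetical stand-in that only performs plain $placeholder substitution (the real marshaling-specific logic is unknown here) could be:

import string


def _GenerateIDLFile(idl_template_file, idl_output_file, substitutions=None):
    # Read the template, fill $placeholders, and write the generated IDL out.
    with open(idl_template_file, "r") as template_file:
        template = string.Template(template_file.read())
    with open(idl_output_file, "w") as output_file:
        output_file.write(template.safe_substitute(substitutions or {}))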
add ranking to each node using google pagerank algorithm | def add_pagerank(self):
query = '''
MATCH (c1:)-[r:INTERACTS]->(c2:)
RETURN c1.name, c2.name, r.weight AS weight
'''
ig = IGraph.TupleList(self.graph.run(query), weights=True)
pg = ig.pagerank()
pgvs = []
for p in zip(ig.vs, pg):
print(p)
pgvs.append({"name": p[0]["name"], "pg": p[1]})
write_clusters_query = '''
UNWIND {nodes} AS n
MATCH (c:) WHERE c.name = n.name
SET c.pagerank = n.pg
'''
self.graph.run(write_clusters_query, nodes=pgvs) | [
"def _run_pagerank_iteration(self):\r\n\r\n sink_nodes = self.recipients - self.senders\r\n S = sum([sink.pagerank for sink in sink_nodes])\r\n\r\n number_nodes = len(self.nodes)\r\n\r\n # The LHS of the PageRank addition is constant for each node, so can be\r\n # precomputed.\r\n random_jump_numerator = (1 - Graph._LAMBDA) + (Graph._LAMBDA * S)\r\n random_jump = random_jump_numerator / number_nodes\r\n\r\n # Calculate new pageranks and store in scratch space.\r\n for node in self.nodes.values():\r\n follow = Graph._LAMBDA * \\\r\n sum([n.pagerank / n.outs_count for n in node.ins.elements()])\r\n\r\n node.tmp_pagerank = random_jump + follow\r\n\r\n # Update the actual pageranks.\r\n for node in self.nodes.values():\r\n node.pagerank = node.tmp_pagerank",
"def _initialize_pagerank(self):\r\n\r\n number_nodes = len(self.nodes)\r\n for node in self.nodes.values():\r\n node.pagerank = 1.0 / number_nodes",
"def apply_pagerank(vector_embeddings):\n\n vlen = len(vector_embeddings)\n try:\n sim_mat = cosine_similarity(vector_embeddings)\n\n nx_graph = nx.from_numpy_array(sim_mat)\n scores = nx.pagerank(nx_graph, max_iter=100)\n page_ranks = [x for k, x in scores.items()]\n except ValueError:\n print('TextRank could not converge')\n page_ranks = [1/vlen]*vlen\n return page_ranks",
"def add_rank_text(self):\n rank_dict = {}\n key_set = set()\n dep_set= set()\n for n in self.nodes:\n # get list of all nodes\n key_set |= {n.name}\n # combine all node dependencies into one set\n dep_set |= n.deps\n\n # get only nodes that are not dependencies of others\n rank_dict[0] = key_set - dep_set\n # Rank all other nodes\n irank = 0\n done = False\n while not done:\n done = True\n irank_set = rank_dict[irank]\n irankn = irank + 1\n for irn in irank_set:\n node = self.node_by_name(irn)\n if node is None:\n continue\n if 0 == len(node.deps):\n continue\n if not irankn in rank_dict:\n rank_dict[irankn] = set()\n rank_dict[irankn] |= node.deps\n done = False\n\n irank = irankn\n\n # Ensure nodes are only specified in 1 rank\n for i in range(irank-1,-1,-1):\n for j in range(0,i):\n rank_dict[j] -= rank_dict[i]\n\n rank_text = ''\n for n in rank_dict:\n # Only rank sets with more than 1 node\n if 1 < len(rank_dict[n]):\n rank_text += '{' + 'rank=same {0}'.format(\" \".join(rank_dict[n])) + '}\\n'\n\n self.text += rank_text",
"def test_ranks(self):\n expected = ['P4', 'P6', 'P5', 'P2', 'P3', 'P1']\n self.graph.create_h_matrix()\n self.graph.create_s_matrix()\n self.graph.create_g_matrix(damping_factor=0.9)\n self.graph.create_pi_vector()\n self.graph.compute_page_rank()\n self.assertListEqual(self.graph.rankings, expected)",
"def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()",
"def iterate_pagerank(corpus, damping_factor):\n ipagerank = {}\n ipagerank_old = {}\n N = len(corpus)\n \n #start with pagerank value for each page 1/N\n for page in corpus:\n ipagerank[page] = 1 / N\n ipagerank_old[page] = 1 / N\n\n loop = True\n \n while loop:\n\n for page in ipagerank:\n ipagerank_old[page] = ipagerank[page]\n \n for page in ipagerank:\n sumpr = 0\n\n\n for page_i in corpus:\n #calculating a page’s PageRank based on the PageRanks of all pages that link to it\n if page in corpus[page_i]:\n sumpr += ipagerank_old[page_i] / len(corpus[page_i])\n \n #A page that has no links at all should be interpreted as having one link for every page in the corpus (including itself).\n if len(corpus[page_i]) == 0:\n sumpr += ipagerank_old[page_i] / len(corpus)\n\n\n total = (1 - damping_factor) / N + damping_factor * sumpr\n\n ipagerank[page] = total\n \n \n #This process should repeat until no PageRank value changes by more than 0.001 between the current rank values and the new rank values.\n for page in ipagerank:\n if math.isclose(ipagerank[page], ipagerank_old[page], abs_tol=0.001):\n loop = False\n \n\n\n return ipagerank",
"def calculate_page_rank(self, iterations=5):\n # clear out the current page rank tables\n self.con.execute('drop table if exists pagerank')\n self.con.execute('create table pagerank(urlid primary key,score)')\n\n # initialize every url with a page rank of 1\n for (urlid,) in self.con.execute('select rowid from urllist'):\n self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)\n self.dbcommit()\n\n for i in range(iterations):\n # Need multiple iterations, as the page ranks of pages linked to this\n # one will be consistently updated on each iteration\n print(\"Iteration %d\" % i)\n for (urlid,) in self.con.execute('select rowid from urllist'):\n # Default page rank\n page_rank = 0.15\n\n # Loop through all the pages that link to this one\n for (linker,) in self.con.execute('select distinct fromid from link where toid=%d'\n % urlid):\n # Get the page rank of the linker\n linkingpr = self.con.execute('select score from pagerank where urlid=%d'\n % linker).fetchone()[0]\n\n # Get the total number of links from the linker\n linkingcount = self.con.execute('select count(*) from link where fromid=%d'\n % linker).fetchone()[0]\n # add to page rank, accounting for the link count\n page_rank += 0.85 * (linkingpr / linkingcount)\n self.con.execute('update pagerank set score=%f where urlid=%d'\n % (page_rank, urlid))\n self.dbcommit()",
"def pagerank(self, limit=20):\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tself.all_scores[urlid] = 1.0\r\n\r\n\t\tfor i in range(limit):\r\n\t\t\tfor urlid in self.url_ids:\r\n\t\t\t\tscore = self.all_scores[urlid]\r\n\t\t\t\tfor fromid in self.from_ids[urlid]:\r\n\t\t\t\t\tscore += self.all_scores[fromid] / \\\r\n\t\t\t\t\t\t\t (len(self.from_ids[fromid])+len(self.to_ids[fromid]))\r\n\t\t\t\tscore *= 0.85\r\n\t\t\t\tscore += 0.15\r\n\t\t\t\tself.all_scores[urlid] = score\r\n\t\tself.save_pr()",
"def pagerank(self):\n\n raise NotImplementedError",
"def rank(result, metric):\n sr = sorted(result, key=lambda r: r[metric]['value'])\n for i,r in enumerate(sr):\n r[metric]['rank'] = i+1",
"def set_rank(self, rank, tourn):\n self.__ranking[tourn] = round(rank, 2)",
"def autogen_rank(self):\n # Stick rank at the front of the list if its not already there.\n if 'Rank' not in self.ordered_columns:\n self.ordered_columns.insert(0, 'Rank')\n for index, peak in enumerate(self.peaks):\n peak['Rank'] = index + self.startingpoint",
"def rank_transform(self):\n sorted_targets = sorted(self.genomes, key=lambda item: item.fitness)\n for index, target in enumerate(sorted_targets):\n target.fitness = index/len(sorted_targets) - 0.5",
"def pagerank(matrix, bias, d=0.85):\n n = matrix.shape[0]\n rank = 0\n new_rank = np.array([1.0 / n] * n)\n for i in range(0,200):\n print \"iteration: \"+str(i)\n rank = new_rank\n new_rank = np.array([(1.0-d)/n] * n) + d * np.dot(matrix, rank)\n# new_rank = (1.0-d) * bias + d * np.dot(matrix, rank)\n # new_rank = [(((1.0-d) / n) +\n # d * sum((rank[i] * link) for i, link in enumerate(row)))\n # for row in matrix]\n if(has_converged(rank, new_rank)):\n break\n return new_rank",
"def run_pagerank(self, number_iterations=10):\r\n\r\n if not self.pagerank_initialized:\r\n self._initialize_pagerank()\r\n self.pagerank_initialized = True\r\n\r\n # Sanity check.\r\n summed_pagerank = sum([node.pagerank for node in self.nodes.values()])\r\n if not (summed_pagerank > 0.99 and summed_pagerank < 1.01):\r\n print \"Initial PageRank sanity check failed! (%s)\" % summed_pagerank\r\n return\r\n\r\n for i in range(number_iterations):\r\n self._run_pagerank_iteration()\r\n\r\n summed_pagerank = sum([node.pagerank for node in self.nodes.values()])\r\n if not (summed_pagerank > 0.99 and summed_pagerank < 1.01):\r\n print(\"PageRank iteration %s sanity check failed! (%s)\" % \r\n (i + 1, summed_pagerank))\r\n return",
"def iterate_pagerank(corpus, damping_factor):\n # 1. Initialize PageRank dictionary to 1/N\n page_ranks = {key: 1/len(corpus) for key in list(corpus.keys())}\n\n # 2. Iterate until difference < .001 for all updates\n repeat = True\n while repeat:\n repeat = False\n for page in corpus:\n new_value = calculate_pagerank(page, corpus, page_ranks, damping_factor)\n if abs(new_value - page_ranks[page]) > .001:\n repeat = True\n page_ranks[page] = new_value\n return page_ranks",
"def ast_update_rank(node, mapper):\n\n symbols = FindInstances(Symbol).visit(node, ret=FindInstances.default_retval())[Symbol]\n for s in symbols:\n if mapper.get(s.symbol):\n # Add a dimension\n s.rank = mapper[s.symbol] + s.rank\n else:\n # Try to replace dimensions\n s.rank = tuple([r if r not in mapper else mapper[r] for r in s.rank])\n\n return node",
"def MapToRanks(t):\n # pair up each value with its index\n pairs = enumerate(t)\n \n # sort by value\n sorted_pairs = sorted(pairs, key=lambda pair: pair[1])\n\n # pair up each pair with its rank\n ranked = enumerate(sorted_pairs)\n\n # sort by index\n resorted = sorted(ranked, key=lambda trip: trip[1][0])\n\n # extract the ranks\n ranks = [trip[0]+1 for trip in resorted]\n return ranks"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
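The same load-weights-then-rank pattern can be exercised on a toy edge list without a Neo4j instance; a small self-contained sketch with python-igraph:

from igraph import Graph

# Build a weighted graph from (source, target, weight) tuples and rank vertices.
edges = [("a", "b", 1.0), ("b", "c", 2.0), ("c", "a", 1.0)]
g = Graph.TupleList(edges, weights=True)
for vertex, score in zip(g.vs, g.pagerank(weights="weight")):
    print(vertex["name"], round(score, 3))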
add community membership to each node using walktrap algorithm implemented in igraph | def add_communites(self):
query = '''
MATCH (c1:)-[r:INTERACTS]->(c2:)
RETURN c1.name, c2.name, r.weight AS weight
'''
ig = IGraph.TupleList(self.graph.run(query), weights=True)
clusters = IGraph.community_walktrap(ig, weights="weight").as_clustering()
nodes = [{"name": node["name"]} for node in ig.vs]
for node in nodes:
idx = ig.vs.find(name=node["name"]).index
node["community"] = clusters.membership[idx]
write_clusters_query = '''
UNWIND {nodes} AS n
MATCH (c:) WHERE c.name = n.name
SET c.community = toInt(n.community)
'''
self.graph.run(write_clusters_query, nodes=nodes) | [
"def insert(self, node, community, incident_weight):\n self.community_degrees[community] += self.degrees[node]\n self.community_self_loops[community] += incident_weight + self.self_loops[node]\n self.node_to_community_map[node] = community",
"def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)",
"def update_graph(self):\r\n for element in self.neighbours:\r\n element.neighbours.append(self)\r\n print('Graph update successful')",
"def extend_motifs(node_to_motifs, graph_dir):\n\n nodes = list(node_to_motifs.keys())\n\n neighbours = get_neighbours(graph_dir)\n\n print('extending motifs')\n for node in tqdm(nodes):\n for neighbour in neighbours[node]:\n node_to_motifs[node] = node_to_motifs[neighbour] | node_to_motifs[node]\n\n return node_to_motifs",
"def add_metrics(g):\n\t# Each function returns a dict keyed by node id with the computed metric as value\n\tdeg_cent = nx.degree_centrality(g)\n\tclose_cent = nx.closeness_centrality(g)\n\tbetween_cent = nx.betweenness_centrality(g)\n\tcom = community.best_partition(g)\n\t# Only interested in communities with more than one member - get a list\n\t# of multimember communities, sorted by community number\n\tsorted_coms = get_sorted_multimember_coms(com)\n\n\t# Loop through nodes in the graph and give them new attributes\n\tfor vertex in self.graph.node.keys():\n\t\tg.node[vertex][\"deg_cent\"] = deg_cent[vertex]\n\t\tg.node[vertex][\"close_cent\"] = close_cent[vertex]\n\t\tg.node[vertex][\"between_cent\"] = between_cent[vertex]\n\n\t\t# Only nodes in a multimember community get a community number\n\t\tif com[vertex] in sorted_coms:\n\t\t\t# So community numbers start at 1, change community numbers to their position in the sorted_coms\n\t\t\t# list, plus 1\n\t\t\t# e.g. first multimember community number may be 3, this makes it 0 (position in list) + 1\n\t\t\tnew_com_num = sorted_coms.index(com[vertex]) + 1\n\t\t\tg.node[vertex][\"com\"] = new_com_num\n\t\t# If node not in a multimember community, gets False as com number attribute\n\t\telse:\n\t\t\tg.node[vertex][\"com\"] = False\n\n\treturn g",
"def contract(self, adj):\n self.node += adj.node\n self.edge = [e for e in self.edge + adj.edge if e not in self.node]",
"def apply_ford_fulkerson(\n nodes, inner_capacities, source_capacities, sink_capacities, dir_name,\n G, pos\n):\n\n # build a complete matrix of capacities\n capacities = np.zeros((len(nodes) + 2, len(nodes) + 2))\n # add capacities between source and inner nodes\n capacities[0, 1:-1] = source_capacities\n # add capacities between inner nodes and sink\n capacities[1:-1, -1] = sink_capacities\n # add capacities between inner nodes\n capacities[1: len(nodes) + 1, 1: len(nodes) + 1] = inner_capacities\n\n # initial flow set to 0\n # the total number of vertices in the graph\n # is len(nodes)+2 since we must take into account\n # the source and the sink.\n flow = np.zeros((len(nodes) + 2, len(nodes) + 2))\n\n # Algorithm iterations\n step = 1\n while True:\n print(\"===================\\nAlgorithm step : {}\".format(step))\n\n # compute the residual capacities\n residual_capacities = capacities-flow\n # print(flow)\n\n # show the residual network\n # it might have more edges since we changed the capacities\n G_residual = build_residual_graph(G,\n nodes,\n residual_capacities,\n capacities)\n show_residual_network_nx(G_residual, pos, residual_capacities,\n capacities, nodes, dir_name, step)\n\n # first look for possible augmenting paths\n augmenting_paths = find_augmenting_path(residual_capacities)\n if augmenting_paths:\n print(\"found augmenting paths in residual graph\")\n # update the flow\n flow = augment_flow(flow,\n residual_capacities,\n augmenting_paths,\n dir_name,\n G_residual,\n pos,\n step,\n nodes)\n residual_capacities = capacities-flow\n # print(\"new flow\")\n # print(flow)\n\n # check if the flow matrix is really a flow\n check_flow(flow, nodes, capacities)\n\n # compute the value of the flow\n # it corresponds to what goes out of the source\n flow_value = 3\n print(\"flow value {}\\n\\n\\n\".format(flow_value))\n\n # print the flow\n # show_flow(flow, dot, dir_name, step, flow_value, nodes)\n\n # update algo step\n step += 1\n else:\n print(\"\\n=====================\\n\")\n print(\"found no augmenting path : flow is optimal\")\n print(\"stopping at step {}\".format(step))\n print(\"flow value: {}\".format(flow_value))\n print(\"\\n=====================\\n\")\n break",
"def convert_edge_membership_to_node_membership(graph, edge_membership,\n min_com_size=4):\n community_frequencies = Counter(edge_membership.values())\n communities = [com for com, freq in community_frequencies.items()\n if freq > min_com_size]\n print(\"Num non-trivial communities\", len(communities))\n print(\"Community distribution\")\n for com, freq in community_frequencies.items():\n if freq > min_com_size:\n print(\"com\", com, \"freq\", freq)\n\n community_index = {com: i for i, com in enumerate(communities)}\n node_membership_matrix = np.zeros((len(communities),\n nx.number_of_nodes(graph)),\n dtype=bool)\n for edge, membership in edge_membership.items():\n if membership in community_index:\n node_membership_matrix[community_index[membership],\n edge[0]] = True\n node_membership_matrix[community_index[membership],\n edge[1]] = True\n\n return node_membership_matrix",
"def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]",
"def _apply_infomap(self):\n infomapWrapper = Infomap(\"--two-level --directed\")\n print(\"Building Infomap network from a NetworkX graph...\")\n for e in self.graph.edges():\n infomapWrapper.addLink(*e)\n print(\"Find communities with Infomap...\")\n infomapWrapper.run()\n print(\"Found %d top modules with codelength: %f\" % (infomapWrapper.numTopModules(), infomapWrapper.codelength()))\n communities = {}\n for node in infomapWrapper.iterTree():\n if node.isLeaf():\n communities[node.physicalId] = node.moduleIndex()\n nx.set_node_attributes(self.graph, name='community', values=communities)\n self.graph = nx.relabel.relabel_nodes(self.graph, self.catalog, copy=True)\n self.num_modules = infomapWrapper.numTopModules()\n self.community_labels = set(nx.get_node_attributes(self.graph, \"community\").values())",
"def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())",
"def assign_louvain_communities(\n reddit_graph: nx.Graph,\n wiki_graph: nx.Graph = None,\n reddit_edge_weight: str = \"count\",\n others_threshold: int = 2,\n louvain_resolution_reddit: float = 1,\n) -> Union[nx.Graph, Tuple[nx.Graph, nx.Graph]]:\n reddit_dendrogram = community.generate_dendrogram(\n reddit_graph, weight=reddit_edge_weight, resolution=louvain_resolution_reddit\n )\n if wiki_graph:\n wiki_dendrogram = community.generate_dendrogram(\n wiki_graph,\n )\n\n # Iterate over reddit nodes to assign communities\n for node in reddit_graph:\n # Iterate over all levels of the dendrogram\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n reddit_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n if wiki_graph:\n # Also add the community from the other graph to allow comparing\n # Again, iterate over all levels in the dendrogram\n for level in range(len(wiki_dendrogram) - 1):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n\n try:\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n\n except:\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{level}\"\n ] = f\"L{level}-NONE\"\n if wiki_graph:\n for node in wiki_graph:\n for level in range(\n len(wiki_dendrogram) - 1,\n ):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n wiki_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n # Also add the community from the other graph to allow comparing\n\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n try:\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n except:\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{level}\"\n ] = f\"L{level}-NONE\"\n\n return (\n (reddit_graph, reddit_dendrogram, wiki_graph, wiki_dendrogram)\n if wiki_graph\n else (reddit_graph, reddit_dendrogram)\n )",
"def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity",
"def _connect_nodes(from_nodes, to_nodes, weight=0):\n for to_node in to_nodes:\n for from_node in from_nodes:\n Connection(from_node, to_node, weight)",
"def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities",
"def update(self):\n for d in range(1, 3):\n # d is 1 or 2\n if self.test:\n print('---------------------------------degree {}--------------------------'.format(d))\n nodes = self.get_node_by_degree(d=d) # a list of node id\n for node in nodes:\n if node in list(self.g.nodes):\n neighbors = self.get_neighbors(node) # a list of node id\n if self.test:\n print()\n print('## Current node is: {}'.format(node))\n print(' >>> Neighbors of this node are : {}'.format(','.join([str(i) for i in neighbors])))\n for neighbor in neighbors:\n # neighbor may be deleted on this process, so need to check if it exists\n if d == 1: # degree = 1, only leaves\n if self.test:\n print(' >>> Start to check if {} and {} can be merged...'.format(neighbor, node))\n if (neighbor in list(self.g.nodes)) and self.check_if_merge(neighbor, node):\n if self.test:\n print(' >>> Start to merge {} to {}...'.format(node, neighbor))\n self.merge_two_nodes(left_id=neighbor, right_id=node)\n if d == 2: # degree = 2, only merge with the neighbor which degree is 2\n if self.get_degree_by_node(neighbor) == 2:\n if self.test:\n print(' >the degree of neighbor {} is {}'.format(\n neighbor, self.get_degree_by_node(neighbor)))\n print(' >>> Start to check if {} and {} can be merged...'.format(neighbor, node))\n if (neighbor in list(self.g.nodes)) and self.check_if_merge(neighbor, node):\n if self.test:\n print(' >>> Start to merge {} to {}...'.format(neighbor, node))\n self.merge_two_nodes(left_id=node, right_id=neighbor)\n\n n2n = {n: list(self.g.neighbors(n)) for n in list(self.g.nodes())} # node 2 neighbors, {id: [], ... }\n id2smiles = nx.get_node_attributes(self.g, 'smiles')\n id2mol_inx = nx.get_node_attributes(self.g, 'mol_inx')\n return {'n2n': n2n, 'id2smiles': id2smiles, 'f2f': self.f2f, 'id2mol_inx': id2mol_inx}",
"def iter_uplinks(self):\n\n def get_node_uplinks(node):\n role = self.G.node[node].get(\"role\", \"cloud\")\n if role == \"uplink\":\n # Only downlinks matter\n return []\n elif role == \"downlink\":\n # All segment neighbors are uplinks.\n # As no inter-downlink segment's links are loaded\n # so all neigbors are from current segment\n return list(self.G.neighbors(node))\n # Segment role and clouds\n ups = {}\n for u in uplinks:\n for path in nx.all_simple_paths(self.G, node, u):\n lp = len(path)\n p = path[1]\n ups[p] = min(lp, ups.get(p, lp))\n # Shortest path first\n return sorted(ups, key=lambda x: ups[x])\n\n from noc.sa.models.objectdata import ObjectUplinks\n\n uplinks = self.get_uplinks()\n # @todo: Workaround for empty uplinks\n # Get uplinks for cloud nodes\n cloud_uplinks = dict(\n (o, [int(u) for u in get_node_uplinks(o)])\n for o in self.G.node\n if self.G.node[o][\"type\"] == \"cloud\"\n )\n # All objects including neighbors\n all_objects = set(o for o in self.G.node if self.G.node[o][\"type\"] == \"managedobject\")\n # Get objects uplinks\n obj_uplinks = {}\n obj_downlinks = defaultdict(set)\n for o in all_objects:\n mo = int(o)\n ups = []\n for u in get_node_uplinks(o):\n cu = cloud_uplinks.get(u)\n if cu is not None:\n # Uplink is a cloud. Use cloud's uplinks instead\n ups += cu\n else:\n ups += [int(u)]\n obj_uplinks[mo] = ups\n for u in ups:\n obj_downlinks[u].add(mo)\n # Calculate RCA neighbors and yield result\n for mo in obj_uplinks:\n # Filter out only current segment. Neighbors will be updated by their\n # segment's tasks\n if mo not in self.segment_objects:\n continue\n # All uplinks\n neighbors = set(obj_uplinks[mo])\n # All downlinks\n for dmo in obj_downlinks[mo]:\n neighbors.add(dmo)\n # And uplinks of downlinks\n neighbors |= set(obj_uplinks[dmo])\n # Not including object itself\n if mo in neighbors:\n neighbors.remove(mo)\n # Recalculated result\n yield ObjectUplinks(\n object_id=mo, uplinks=obj_uplinks[mo], rca_neighbors=list(sorted(neighbors))\n )",
"def additive_phylogeny(matrix, n, G):\n new_node = n\n\n def additive_recur_helper(matrix, n, G):\n\n nonlocal new_node\n\n if n == 2:\n print(\"d add_edge (%s,%s):%s\" % (0, 1, matrix[0, 1]))\n G.add_edge(0, 1, weight=matrix[0, 1])\n return\n\n limblen = limblength(n - 1, matrix)\n i, k = find_i_k(matrix, n - 1, limblen)\n x = matrix[i, n - 1] - limblen\n\n print(\"n=%s limblen=%s i=%s k=%s x=%s\" % (n, limblen, i, k, x))\n\n additive_recur_helper(matrix[0 : n - 1, 0 : n - 1], n - 1, G)\n\n v = node_at_distance(G, i, k, x, matrix[i, k], new_node)\n if v == new_node:\n new_node += 1\n\n print(\"node_at_distance %s from %s is %s\" % (x, i, v))\n\n print(\"e add_edge (%s,%s):%s\" % (v, n - 1, limblen))\n G.add_edge(v, n - 1, weight=limblen)\n\n # draw graph if small\n if len(G) < 30:\n global plot_cnt\n pos = nx.kamada_kawai_layout(G)\n labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw(G, pos, with_labels=True)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.draw()\n plt.savefig(\"Graph\" + str(plot_cnt) + \".png\", format=\"PNG\")\n plt.clf()\n plot_cnt += 1\n\n return\n\n additive_recur_helper(matrix, n, G)\n\n return",
"def make_communities(community_side, communities_per_side):\n community_size = community_side * community_side\n communities = []\n seed_node = 0\n for i in range(communities_per_side):\n for j in range(communities_per_side):\n community = []\n for k in range(community_side):\n for z in range(community_side):\n _id = (\n communities_per_side * community_size * i\n + community_side * j\n + z\n + k * (communities_per_side * community_side)\n )\n # print(f\"{_id} \", end=\"\")\n community.append(_id)\n # print(\"- \", end=\"\")\n communities.append(community)\n #print()\n return communities"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
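Setting the Neo4j round-trip aside, the igraph half of the add_communites document above can be exercised on its own; a minimal sketch with an invented weighted edge list (the names, weights, and variable names are placeholders, not data from the graph being queried):

import igraph as ig

# (source, target, weight) tuples stand in for the rows the Cypher MATCH would return.
edges = [("a", "b", 5.0), ("b", "c", 3.0), ("c", "a", 4.0), ("d", "e", 9.0)]
g = ig.Graph.TupleList(edges, weights=True)

# Walktrap yields a dendrogram; as_clustering() cuts it at the level with the best modularity.
clusters = g.community_walktrap(weights="weight").as_clustering()

# clusters.membership is aligned with vertex indices, which is what the UNWIND write-back relies on.
for vertex, community in zip(g.vs, clusters.membership):
    print(vertex["name"], community)
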
Advance the time reference by the given amount. | def advance_by(self, amount: float):
if amount < 0:
raise ValueError("cannot retreat time reference: amount {} < 0"
.format(amount))
self.__delta += amount | [
"def advanceTime(self, amount):\n self.currentSeconds += amount",
"def advance(self, amount):\n right_now = self.rightNow + amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= right_now:\n self.rightNow = self.calls[0].getTime()\n call = self.calls.pop(0)\n call.called = 1\n call.func(*call.args, **call.kw)\n self._sortCalls()\n self.rightNow = right_now",
"def advance(self, amount=1):\n self._current += amount\n self.redraw()",
"def advance(self, **kwargs):\n self._now = self._now + timedelta(**kwargs)",
"def incrementTimeStep(self):\n pass",
"def next_time_step(self):\n\n self.__time_step += 1",
"def advanceCompletely(self, amount):\n self.rightNow += amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= self.seconds():\n call = self.calls.pop(0)\n call.called = 1\n yield call.func(*call.args, **call.kw)\n self._sortCalls()",
"def _step(self):\n self._amount = self._incremental.add(\n self._amount, self._increment_amount)",
"def advance(self, time):\n raise \"use method advance of class ReactorNet\"\n #return _cantera.reactor_advance(self.__reactor_id, time)",
"def incrementTimer(self):\n if self.timer >= 0:\n self.timer += 1",
"def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()",
"def _advance_timer(self):\n self._rate_last_ts = self._rate_last_ts + self._rate_delay\n return self._rate_last_ts",
"def advance_to(self, timestamp: float):\n now = self.__original_time()\n if timestamp < now:\n raise ValueError(\"cannot retreat time reference: \"\n \"target {} < now {}\"\n .format(timestamp, now))\n self.__delta = timestamp - now",
"def _advance(self):\n self._current *= self._base",
"def advance(self):\n # Increment iteration counter\n self.currentIteration += 1\n if self._lastStep:\n # The timestep was adjusted to reach end in the previous call\n # So now the simulation is over\n self.isOver = True\n else:\n if self.currentIteration < self.iterMax:\n # Advance time for the iteration just ended\n self.tk = self.tkp1\n self.tkp1 = self.tk + self.timeStep\n\n # Adjust last timestep to reach self.end\n if self.tkp1 > self.end:\n self.timeStep = self.end - self.tk\n if self.timeStep <= self.tol:\n self.isOver = True\n else:\n self.tkp1 = self.end\n self._lastStep = True\n else:\n # iteration number is reached\n self.isOver = True\n\n self.time = self.tkp1",
"def _advance(self):\t\t# override inherited version\n self._current *= self._base",
"def advance(self):\n\t\tmax_days = Calendar1.months[self.__months-1]\n\t\tif(self.__months == 2 and Calendar1.leap_year(self.__years)):\n\t\t\tmax_days += 1\n\n\t\tif(self.__days == max_days):\n\t\t\tself.__days = 1\n\t\t\tif(self.__months == 12):\n\t\t\t\tself.__months = 1\n\t\t\t\tself.__years += 1\n\t\t\telse : self.__months += 1\n\t\telse : self.__days += 1",
"def add_time(self,time):\n self.time += float(time)",
"def _advance(self): #override inherited version\n self._current *= self._base"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Advance the time reference so that now is the given timestamp. | def advance_to(self, timestamp: float):
now = self.__original_time()
if timestamp < now:
raise ValueError("cannot retreat time reference: "
"target {} < now {}"
.format(timestamp, now))
self.__delta = timestamp - now | [
"def advance(self, **kwargs):\n self._now = self._now + timedelta(**kwargs)",
"def _update_time(self):\n self.prev_time = time.time()",
"def change_time(self, new_time):\r\n self.when = new_time",
"def setTimepoint(self, tp):\n\t\tif tp != self.timepoint:\n\t\t\tself.renew = True\n\t\tself.timepoint = tp",
"def advanceTime(self, amount):\n self.currentSeconds += amount",
"def current_time(self, current_time):\n self._current_time = current_time",
"def get_next_fire_time(self, previous_fire_time, now):",
"def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()",
"def increment_datetime(self):\n self.current_datetime += timedelta(seconds=self.step_size)",
"def _advance_timer(self):\n self._rate_last_ts = self._rate_last_ts + self._rate_delay\n return self._rate_last_ts",
"def update(self):\n (sec, msec) = self.ntptime()\n if sec is None or msec is None:\n return\n\n loct = time.localtime(sec)\n tup = (loct[0], loct[1], loct[2], 0, loct[3] + 1, loct[4], loct[5], msec)\n #print(\"loct\", loct, \"arg\", tup, \"len\", len(tup))\n self.RTC.datetime(tup)\n\n print(\"[*] Updated time to %02i-%02i-%02iT%02i:%02i:%02i.%04i\"%(\n loct[0], loct[1], loct[2], loct[3]+1, loct[4], loct[5], msec))",
"def set_time(self, time):\n pass",
"def _stamp(self) -> None:\n t = time.time()\n self.timestamps.append(t)",
"def setTimepoint(self, tp):\n\t\tpass",
"def update_timeval(self):\n self.timeval = self.get_timeval()",
"def update(self):\n self.last_time = self.current_time\n self.current_time = time.perf_counter()\n self.delta_time = self.current_time - self.last_time",
"def next_time_step(self):\n\n self.__time_step += 1",
"def seek_time(self, time):\n pass",
"def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
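Both advance_by and advance_to above manipulate a private __delta against an __original_time clock that is not shown in these rows; the wrapper below is only a guess at that surrounding class (its name, the use of time.time, and the adjusted time() accessor are assumptions), included to show how the offset behaves:

import time

class FakeTimeReference:
    def __init__(self):
        self.__original_time = time.time   # the real clock being wrapped
        self.__delta = 0.0                 # accumulated forward offset

    def time(self) -> float:
        return self.__original_time() + self.__delta

    def advance_by(self, amount: float):
        if amount < 0:
            raise ValueError("cannot retreat time reference: amount {} < 0".format(amount))
        self.__delta += amount

    def advance_to(self, timestamp: float):
        now = self.time()
        if timestamp < now:
            raise ValueError("cannot retreat time reference: target {} < now {}".format(timestamp, now))
        self.__delta += timestamp - now

ref = FakeTimeReference()
start = ref.time()
ref.advance_by(2.5)
assert ref.time() - start >= 2.5
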
Email the given document to the given email address. | def email_document(document, to, template='django_dms/email.txt', subject=''):
# Start a new thread to email the document
# This avoids a frozen screen while the email is being sent (particularly if the document is big).
t = threading.Thread(target=_email_document, args=[document, to, template, subject])
t.setDaemon(True)
t.start() | [
"def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()",
"def documents_email_address(self, documents_email_address):\n\n self._documents_email_address = documents_email_address",
"def send_document_by_email(self, send_document_by_email):\n\n self._send_document_by_email = send_document_by_email",
"def email_contact(self, email, name=None):\n if not name:\n name = email\n return self.contact(Contact.TYPE_EMAIL, email, name)",
"def send_contact_email(message, email, name):\n admin = User.query.filter_by(email=\"unhumanartist@gmail.com\").first()\n recipient_email = admin.email\n msg = message + f\"Sender email: {email}\"\n send_mail(\"Contact Form Submission\", msg, recipient_email)",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email",
"def email_address(self, email_address):\n self._email_address = email_address",
"def send_user_mail(self, form):\n address = form.cleaned_data.get(\"email\")\n if address:\n send_mail(\n self.subject,\n self.render_email(form),\n [address],\n self.from_address,\n )",
"def send_mail(self, address, title, message):\n pass",
"def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def documents_email_address_specified(self, documents_email_address_specified):\n\n self._documents_email_address_specified = documents_email_address_specified",
"def author_email(self, author_email):\n self._author_email = author_email",
"def author_email(self, author_email):\n\n self._author_email = author_email",
"def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)",
"def contact_business_email(self, contact_business_email):\n\n self._contact_business_email = contact_business_email",
"def send_mail(email):\n return email.send()",
"def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", 'no-reply@email.com', [\n self.customer.email], headers={'Reply-To': 'no-reply@email.com'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)",
"def do_doc(self, arg):\n try:\n doc_path = input(f\"{Fore.BLUE}Path to Document: {Fore.GREEN}\")\n with open(doc_path, \"rb\") as doc:\n doc_data = doc.read()\n doc_name = doc.name\n self.email_info[\"attachments\"].append([doc_data, \"application\", \"octet-stream\", doc_name])\n print(f\"Document attached successfully.{Style.RESET_ALL}\")\n except Exception as e:\n print(f\"{Fore.RED}{e}{Style.RESET_ALL}\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
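The email_document pattern above is simply "hand the slow send to a daemon thread so the request returns immediately"; the stripped-down sketch below shows that pattern with the modern daemon= keyword instead of the deprecated setDaemon call, and a stand-in send function (none of these names are the actual django_dms API):

import threading

def slow_send(document, to):
    # Placeholder for the real SMTP/backend call.
    print("sending", document, "to", to)

def send_in_background(document, to):
    t = threading.Thread(target=slow_send, args=(document, to), daemon=True)
    t.start()
    return t  # a test can join() this; in a web view it is fire-and-forget

send_in_background("report.pdf", ["someone@example.com"])
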
Helper function to email document in another thread. | def _email_document(document, to, template='django_dms/email.txt', subject=''):
# TODO: A really cool system would delay sending the email for 10 seconds or so,
# to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)
# Create the message
message = EmailMessage(to=to, subject=subject)
message.to = to
message.subject = subject
message.body = render_to_string(template, {'document': document})
message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)
# Send the message
message.send() | [
"def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()",
"def send_async_email(msg):\n with app.app_context():\n Mail.send(msg)",
"def send_async_email(self, msg):\n with app.app_context():\n result = mail.send(msg)\n print result",
"def send_async_email(msg):\n with app.app_context():\n print('sending mail')\n mail.send(msg)",
"def make_send_pdf(self):\n slim_content = edit_content(page.get_content(page.get_summary()), \"Weblinks\", \"Literatur\", \"Einzelnachweise\",\n \"Sonstiges\", \"Filme\", \"Auszeichnungen\", \"Filmdokumentationen\", \"Anmerkungen\",\n \"Biografien\",\n \"Weitere Texte\") # cuts out these sections from the article\n\n to_mail = 'send_to_this_mail@gmail.com' # testing email\n try:\n print(\"Progress:\")\n # create the word page\n add_title()\n print(\"Added title...\")\n add_logo()\n print(\"Added image...\")\n add_summary()\n print(\"Added summary...\")\n add_content(slim_content)\n print(\"Added content...\")\n document.save(get_doc_name() + \".docx\")\n print(\"Saving file...\")\n delete_pic_in_outer_folder()\n word_to_pdf.docx_to_pdf(get_doc_name() + \".docx\", get_doc_name())\n move_word_files()\n print(\"Sending email...\")\n email_sender.send_email(to_mail, get_doc_name() + \".pdf\", str(page.get_title())) # uncomment to send email\n word_to_pdf.move_pdf_to_folder()\n print(\"===Finished===\")\n except PermissionError:\n print(\n \"Bitte schließen sie Microsoft Word und versuchen sie es erneut.\") # \"Please close Microsoft Word and try again\"",
"def send_result_email(self): # pragma: no cover\n pass",
"def send_mail(self):\n context2 = self.env.context.copy()\n if self.model and self.id_active and self.env.context.get('send_mail_wkf_signal'):\n obj = self.env[self.model].browse(self.id_active)\n obj.signal_workflow(self.env.context['send_mail_wkf_signal'])\n context2['thread_model'] = self.model\n if self.model and self.id_active and self.env.context.get('send_mail_method_next'):\n obj = self.env[self.model].browse(self.id_active)\n getattr(obj, self.env.context['send_mail_method_next'])()\n \n return super(mail_compose_message, self.with_context(context2)).send_mail()",
"def send_ajax(self, request, id, tribe_slug):\n\n document = self.get_document(id, tribe_slug)\n\n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if not email and not form:\n form = EmailForm()\n \n if form:\n content = '<form class=\"ajax_update_email\" action=\"%s\" method=\"post\">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])\n content += '%s<input type=\"submit\" value=\"Send\"/></form>' % form['email']\n return HttpResponse(content)\n \n print \"Sending email to %s\" % email\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n\n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponse('Email sent to %s' % email)",
"def test_send_email_with_queue(self):\n pass",
"def send_all_prof_emails():\n current_app_context = current_app._get_current_object()\n msgs = list()\n threads = list()\n pdfs = list()\n for section in Section.query.all():\n t = None\n # create section data\n results_count = section.results.count()\n students_count = section.students.count()\n means = section.get_means()\n stds = section.get_stds()\n frq_responses = section.get_frq_responses()\n try:\n if results_count > 0:\n pdf = PDFPlotter(section)\n t = Thread(target=lambda m, app_con, s, r_c, s_c, me, st, f_r, f: \\\n m.insert(0, create_prof_emails(app_con, s, r_c, s_c, me, st, f_r, f)), \\\n args=(msgs, current_app_context, section, results_count, students_count, means, stds, frq_responses, pdf.file))\n pdfs.append(pdf)\n else:\n # don't create PDF if no results were submitted\n t = Thread(target=lambda m, app_con, s, r_c, s_c, me, st, f_r: \\\n m.insert(0, create_prof_emails(app_con, s, r_c, s_c, me, st, f_r)), \\\n args=(msgs, current_app_context, section, results_count, students_count, means, stds, frq_responses))\n threads.append(t)\n t.start()\n except Exception as e:\n print('ERROR: Failed to create message for {} {} - {}'.format(section.course_id, section.prof_email, e))\n # block until all threads are finished\n for t in threads:\n t.join()\n # delete pdfs after emails are sent\n for pdf in pdfs:\n pdf.deleteFile()\n # send emails\n print(log_header('PROFESSOR EMAILS'))\n Thread(target=send_async_emails, args=(current_app_context,), kwargs={'msgs': msgs}).start()\n print('SENT {} EMAILS'.format(len(msgs)))",
"def main():\n\n # start time in milliseconds to compare with last message time\n start_time = int(time.time()) * 1000\n\n # get credentials first and create gmail service object\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n while True:\n # receive email messages\n q_to_list = ['from:' + e_mail for e_mail in senders]\n q = 'in:inbox {}'.format(' OR '.join(q_to_list))\n messages = list_messages_matching_query(service, user_id,\n query=q,\n maxResults=1)\n if not messages:\n print(\"No messages to show\")\n time.sleep(seconds_between_checks)\n continue\n else:\n pprint.pprint('Messages to show: {}'.format(messages))\n\n # get thread of first document - so you can label the thread itself if need be\n thread_id = messages[0]['threadId']\n thread = get_thread(service, user_id, thread_id)\n\n msg_id = messages[0]['id']\n message = get_message(service, user_id, msg_id)\n\n msg_sender = field_from_message(message, 'From')\n canned_label_id = get_label_id(service, canned_label)\n thread_label_ids = thread['messages'][0][\"labelIds\"]\n\n # check that the date is later than starting, and emails match list\n if int(message[\"internalDate\"]) < start_time:\n print('internalDate earlier than start_time!')\n print(\"better luck next time\")\n # check if it's already replied to\n elif canned_label_id in thread_label_ids:\n print(\"you replied already to this one, even if it is later than startup\")\n print(\"better luck next time\")\n else:\n # check cleaned sender email in list\n sender_email = parseaddr(msg_sender)[1]\n if sender_email not in senders:\n print(\"emails don't match!!\")\n # after all tests passed, reply to message with same subject\n else:\n subject = 'Re: ' + field_from_message(message, 'Subject')\n msg = create_message(destination=msg_sender, origin=to,\n subject=subject,\n msg_txt=message_text, thr_id=thread_id)\n send_message(service, user_id, msg)\n print(\"Replied to message!\")\n start_time = int(time.time()) * 1000\n\n # then label the thread\n labels = create_msg_labels(service, addLabels=[canned_label_id])\n modify_thread(service, user_id, thread_id, labels)\n print(\"Added a label: {} \".format(canned_label))\n print('done!')\n\n # always print blank line and wait a few seconds\n print('=====\\n')\n time.sleep(seconds_between_checks)",
"def send_newsletter_task(self: ScheduleOnCommitTask, subject, preview_email, testmode, now_, import_subscribers, tags):\n\n # from celery.contrib import rdb ; rdb.set_trace()\n\n request = self.get_request()\n\n secrets = get_secrets(request.registry)\n\n if not now_:\n now_ = now()\n\n mailing_list = secrets[\"mailgun.mailing_list\"]\n\n if preview_email:\n to = preview_email\n subject = \"[PREVIEW] \" + subject\n else:\n to = mailing_list\n\n newsletter = request.registry.queryAdapter(request, INewsletterGenerator)\n\n state = NewsletterState(request)\n\n text = \"Please see the attached HTML mail.\"\n\n @retryable(tm=request.tm)\n def render_tx():\n \"\"\"Run HTML rendering in its own transaction, as it most likely reads database.\"\"\"\n return newsletter.render(since=state.get_last_send_timestamp())\n\n html = render_tx()\n html = premailer.transform(html)\n\n from_ = secrets[\"mailgun.from\"]\n domain = secrets[\"mailgun.domain\"]\n campaign = now().isoformat()\n\n mailgun = Mailgun(request.registry)\n\n if import_subscribers:\n # This may take a looooong time....\n logger.info(\"Importing subscribers\")\n import_all_users(mailgun, request.dbsession, mailing_list, tm=request.tm)\n\n logger.info(\"Sending out newsletter %s %s %s %s %s %s\", domain, subject, to, from_, campaign, tags)\n mailgun.send(domain, to, from_, subject, text, html, campaign, tags=tags)\n\n if not preview_email:\n # Only mark newsletter send if not preview\n state.set_last_send_timestamp(now_)",
"def retrieve_email_thread(es, index, message_id, restrict_to_same_group=True):\n def create_should_clause(p):\n return [\n {'prefix': {'headers.message_id.keyword': p}},\n {'prefix': {'headers.in_reply_to.keyword': p}},\n {'prefix': {'headers.references.keyword': p}}\n ]\n\n retrieved_ids = set()\n id_prefix = get_message_id_prefix(message_id)\n\n must_clause = []\n must_not_clause = []\n should_clause = create_should_clause(id_prefix)\n query = {\n 'query': {\n 'bool': {\n 'filter': {\n 'bool': {\n 'must': must_clause,\n 'must_not': must_not_clause,\n 'should': should_clause,\n 'minimum_should_match': 1\n }\n }\n }\n },\n 'sort': {\n 'headers.date': {'order': 'asc'}\n }\n }\n\n docs = []\n while True:\n if retrieved_ids:\n must_clause.clear()\n must_not_clause.append({'terms': {'headers.message_id.keyword': list(retrieved_ids)}})\n\n results = es.search(index=index, body=query, size=500)\n hits = results['hits']['hits']\n if not hits:\n break\n\n if not must_clause and restrict_to_same_group:\n must_clause.append({'term': {'group': hits[0]['_source']['group']}})\n\n references = set()\n for hit in hits:\n docs.append(hit)\n headers = hit['_source']['headers']\n\n if headers.get('message_id'):\n retrieved_ids.add(headers['message_id'])\n\n if headers.get('in_reply_to'):\n references.update(headers['in_reply_to']\n if type(headers['in_reply_to']) is list else [headers['in_reply_to']])\n\n if headers.get('references'):\n references.update(headers['references']\n if type(headers['references']) is list else [headers['references']])\n\n should_clause.clear()\n for message_id in (references - retrieved_ids):\n id_prefix = get_message_id_prefix(message_id)\n if id_prefix.rstrip():\n should_clause.extend(create_should_clause(id_prefix.rstrip()))\n\n if not should_clause:\n break\n\n return sorted(docs, key=lambda d: d['sort'])",
"def send_email(self, message):\n pass",
"def validthread(msg,allrecipients,from_email):\n ''' A useful thread identification algorithm is explained in \n http://www.jwz.org/doc/threading.html ...\n \n NOTES:\n References header: will contains list of all message id's for tentire converstion.\n Oldest MsgId being the first. Header contain a list of Message-IDs listing the parent, grandparent, great-grandparent, \n and so on, of this message, oldest first. \n That is, the direct parent of this message will be the last element of the References header.\n NewsGroups are allowed to truncate the refrences header.\n\n In-Reply-To Header will (most of times) contain the msgid of recent message for which reply is being sent to.\n \n New Mail will not contein In-ReplyTo and References header. It will contain MsgId header only.\n\n\n Design:\n 1) For each new mail received we need to store msgId as key and value \"\" (mails which does not contain In-Reply-To header)\n 2) For each reply mail, we need to get the refrences header and in-reply-to id and search in collections for existing entries.\n Get In-Reply-To header mostly contains the msgId of previous mail, search is int the collection and get the actual msgId\n and check if it is the head of references header. If these check passed the thread is valid.\n Now add the nre msgId of reply mail into the values section for the original msgId stored in the mongodb\n Or \n if in-reply-to value is part of any documents of existing treat this as valid thread and add the msgid of reply mail in to \n the document.\n 3) Handling duplicates, if the msgId of the new reply mail is part of any existing documents of db then treat this mail as \n duplicate. We need not handle this mail.\n\n For above logic we may think of adding subject as well. for all reply mails we can match the existing subject line excluding \"re:\" \n charactes of the reply mail subject line.\n\n Notes:\n 1) For new mails the existing gmail , other mail servers does not produce multiple inbound triggers for mail id's in cc header.\n 2) But for reply path the gmail and other servers does produce multiple inbound triggers (possibly with same msgId) for each\n mail id in cc header.\n\n '''\n\n msgId = msg.get(\"Message-ID\")\n if msgId is None:\n logger.info(\"Message-ID not found in mail\")\n return False\n\n msgId = msgId.strip()\n\n inreplyto = msg.get(\"In-Reply-To\")\n if inreplyto is not None:\n inreplyto = inreplyto.strip()\n\n ''' References are seperated by '\\n\\t' oldest thread id being the first id in references '''\n references = msg.get('References')\n if references is not None:\n references = references.strip()\n\n mailthread = db.findThread ( msgId )\n if mailthread is None:\n ''' no mail with msgId found in DB .. insert new entry in the db'''\n db.insertThread(msgId)\n logger.info(\"Inserting new doc {}\".format(msgId))\n return True\n else:\n logger.info(\"Possible Duplicate mail {}\".format(msgId))\n return False",
"def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, 'no-reply@cultrtoolkit.com',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)",
"def send_realtime_email(self,body_):\n import smtplib, ssl\n\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = self.fromaddr # Enter your address\n receiver_email = self.toaddr # Enter receiver address\n password = self.pswd\n message = f\"\"\"\\\nSubject: [Test] Twitter real time (half) hourly trending alert\n\n{body_}\"\"\"\n\n context = ssl.create_default_context()\n # send to multiple emails\n for receiver in receiver_email:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver, message)\n \n print(f'Email successfully sent to {receiver}')",
"def run_for(self, timeout):\n try:\n pcl_to = ph.base64pickle(timeout)\n pw_cmd = \"application mailClientThunderbird \" + str(self.window_id) + \" run_for \" + pcl_to\n self.is_busy = True\n self.guest_obj.send(pw_cmd)\n\n except Exception as e:\n raise Exception(\"error mailer::run_for: \" + str(e))",
"def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", 'no-reply@email.com', [\n self.customer.email], headers={'Reply-To': 'no-reply@email.com'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
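_email_document above leans on Django's EmailMessage plus attach(); the fragment below reduces that call pattern to its essentials (it assumes a configured Django email backend, and the function name, filename, and MIME type arguments are illustrative only):

from django.core.mail import EmailMessage

def send_with_attachment(to, subject, body, filename, payload, mimetype="application/pdf"):
    message = EmailMessage(subject=subject, body=body, to=to)
    # attach(filename, content, mimetype) mirrors the document.friendly_filename call above.
    message.attach(filename, payload, mimetype)
    return message.send()  # returns the number of messages delivered to the backend
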
Send the specified document to the user's email address (AJAX version). | def send_ajax(self, request, id, tribe_slug):
document = self.get_document(id, tribe_slug)
form = self._set_user_email_address(request)
email = self._get_user_email_address(request)
if not email and not form:
form = EmailForm()
if form:
content = '<form class="ajax_update_email" action="%s" method="post">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])
content += '%s<input type="submit" value="Send"/></form>' % form['email']
return HttpResponse(content)
print "Sending email to %s" % email
#email_document(document, to=[email], subject='Document: %s' % document.title)
# Send a signal to let everyone know about this document interaction
document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email)
return HttpResponse('Email sent to %s' % email) | [
"def send_document_by_email(self, send_document_by_email):\n\n self._send_document_by_email = send_document_by_email",
"def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()",
"def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()",
"def save_form(self, request, form, change):\n\n document = form.instance\n self.send_notification_email(document, request, \n 'email/document_modified.txt.django')\n\n document = super(DocumentAdmin, self).save_form(request, form, change)\n document.uploader = request.user\n return document",
"def do_doc(self, arg):\n try:\n doc_path = input(f\"{Fore.BLUE}Path to Document: {Fore.GREEN}\")\n with open(doc_path, \"rb\") as doc:\n doc_data = doc.read()\n doc_name = doc.name\n self.email_info[\"attachments\"].append([doc_data, \"application\", \"octet-stream\", doc_name])\n print(f\"Document attached successfully.{Style.RESET_ALL}\")\n except Exception as e:\n print(f\"{Fore.RED}{e}{Style.RESET_ALL}\")",
"def send_documents(bot, update, user_data):\n if update.message.text == \"/cancel\":\n return ConversationHandler.END\n\n else:\n user_data['subject'] = update.message.text[1:]\n index = int(user_data['subject'])\n tables = user_data['page'].find_all('table')\n all_links = tables[index].find_all('a') \n download_links = []\n for link in all_links:\n download_links.append(\"https://muquestionpapers.com/\" + link.get('href'))\n for download_link in download_links:\n bot.send_document(chat_id = update.message.chat_id, document = download_link)\n\n return ConversationHandler.END",
"def i_change_mail_to_garygarygmail(context):\n context[\"obj\"].edit_mongo_mail(\"garygary@gmail.com\")",
"async def send_document(self, chat_id, document, **kwarg):\n\n caption = kwarg.get(\"caption\")\n disable_notification = kwarg.get(\"disable_notification\")\n reply = kwarg.get(\"reply\")\n reply_markup = kwarg.get(\"reply_markup\")\n\n if type(document) is str:\n apiq = self.api_gen(\"sendDocument\",\n chat_id=chat_id,\n document=document,\n caption=caption,\n disable_notification=disable_notification,\n reply_to_message_id=reply,\n reply_markup=reply_markup)\n\n return await self._api_send(apiq)\n\n else:\n apiq = self.api_gen(\"sendDocument\",\n chat_id=chat_id,\n caption=caption,\n disable_notification=disable_notification,\n reply_to_message_id=reply,\n reply_markup=reply_markup)\n\n response = requests.post(apiq, files=dict(document=document))\n return response.content.decode(\"utf8\")",
"def send_user_mail(self, form):\n address = form.cleaned_data.get(\"email\")\n if address:\n send_mail(\n self.subject,\n self.render_email(form),\n [address],\n self.from_address,\n )",
"def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)",
"def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")",
"def put(self, *args, **kwargs):\n\n # validate email before saving\n self.validate_mail()\n\n if not self.is_saved() or self.resend:\n # attempt to send email\n self.send()\n\n super(Email, self).put(*args, **kwargs)",
"def data_email_send(result_id, user_id):\n user = CustomUser.objects.get(pk=user_id)\n result = Result.objects.get(pk=result_id)\n email = user.email\n subject, from_email, to = 'Twitter data extraction result', 'amithah.nithin@gmail.com', email\n\n html_content = render_to_string('scraper/email.html', {'user': user}) # render with dynamic value\n text_content = strip_tags(html_content) # Strip the html tag. So people can see the pure text at least.\n\n # create the email, and attach the HTML version as well.\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_file(f\"{result.result_file.path}\")\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n print(\"email sent successfullly\")",
"def documents_email_address(self, documents_email_address):\n\n self._documents_email_address = documents_email_address",
"def send_to_email(self, prospect_email=None, **kwargs):\n response = self._post(\n path='/do/send/prospect_email/{prospect_email}'.format(prospect_email=prospect_email),\n params=kwargs)\n return response",
"def moderate_document(request, document):\n\tif request.user.is_staff == False:\n\t\traise Http404('Document does not exist')\n\n\tdocument = get_object_or_404(Document, id=document)\n\n\tif request.method == 'POST':\n\t\t\"\"\"Save the form if the request is a POST\"\"\"\n\t\tform = AdminDocumentForm(request.POST, instance=document)\n\n\t\tif form.is_valid():\n\t\t\td = form.save(commit=False)\n\n\t\t\tpublished = request.POST.get('publish')\n\n\t\t\tif published != None:\n\t\t\t\t\"\"\"If the published checkbox isn't ticked (which it won't be an unless an editor's seen and checked it, put the document in pending.\"\"\"\n\t\t\t\tif published == 'Approve':\n\t\t\t\t\td.pending = False\n\t\t\t\telse:\n\t\t\t\t\td.pending = True\n\n\t\t\td.save()\n\t\t\tform.save_m2m()\n\n\t\t\tif published == 'Approve':\n\t\t\t\t\"\"\"If the 'published' box is checked, email the contributor to say thanks, otherwise just return the editor to the dashboard\"\"\"\n\t\t\t\tmessage = request.POST.get('email_thanks')\n\t\t\t\t# Check to see if the 'send email' box is ticked. If it is, send a confirmation email to the author, copying in the editors.\n\t\t\t\tsend = request.POST.get('send_email')\n\t\t\t\tif send:\n\t\t\t\t\tinform_user_of_content_publication(d.author, d.title, message)\n\t\t\t\treturn dashboard(request)\n\t\t\telse:\n\t\t\t\treturn dashboard(request)\n\n\telse:\n\t\t\"\"\"Display the form with its content if request is a GET\"\"\"\n\t\tform = AdminDocumentForm(instance=document)\n\n\treturn render(request, 'map/moderate_document.html', {'form': form, 'document': document })",
"def sendRedemptionEmail():\n return",
"def notifyUserEdit(self, email, projectNumbers, firstName, lastName, netID, course):\n\n renderArgs = {\n 'domain': self.domain,\n 'projectNumbers': projectNumbers,\n 'firstName': firstName,\n 'lastName': lastName,\n 'netID': netID,\n 'course': course\n }\n subject = \"You Have Been Edited!\"\n template = self.templateLookup.get_template('notifyUserEdit.html')\n body = template.render(**renderArgs)\n self.send(email, subject, body)",
"def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", 'no-reply@email.com', [\n self.customer.email], headers={'Reply-To': 'no-reply@email.com'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a custom defined or default email address for the current user. | def _get_user_email_address(self, request):
return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email) | [
"def get_user_email():\n try:\n return auth.get_current_user().email\n except Exception:\n return ''",
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def __default_email(self):\n email_default = lambda n, s: \"{name}{surname}@{name}.{surname}\".format(\n name=n, surname=s)\n name, surname = self.__split_social_name()\n if not self.email:\n return email_default(name, surname)\n\n # This was passed by user\n return self.email",
"def GetUserEmail():\n user = users.get_current_user()\n if user:\n return user.email()\n return None",
"def email_address(self) -> str:\n return pulumi.get(self, \"email_address\")",
"def get_user_email(self, user: U):\n return user.email",
"def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")",
"def _get_default_email(self, cr, uid, context=None):\n if not context.get('portal', False):\n return False\n user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n return user.user_email",
"def user_email(self):\n return self._user_email",
"def get_usermail():\n return _get_git_config(\"user.email\")",
"def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''",
"def getUserEmail(self, user):\n user_data = self.getUserData(user, \"email\")\n if user_data:\n return user_data[0]\n else:\n return \"\"",
"def email_address(self):\n return self.__email_address",
"def email_address(self) -> str:\n return self.__email_address",
"def Get_Email( self ):\n\n return self._email_address",
"def email_address(self):\n return self._email_address",
"def email_address(self) -> str:\n return self._email_address",
"def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None",
"def get_from_email_address():\n return getattr(settings, 'ENTITY_EMAILER_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a new email address is posted, remember it. | def _set_user_email_address(self, request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']
else:
return form | [
"def duplicate_email(user):\n return user.email",
"def email(self, email):\n if email == self.email:\n return\n\n email = email.lower()\n if self._email is None:\n self._email = email\n self.require_email_confirmation()\n else:\n self.email_new = email\n self.require_email_confirmation()",
"def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True",
"def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email",
"def change_email(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n return True",
"def update_email(self, email):\n email = email.lower()\n\n if email == \"\":\n return\n elif email == self.survivor[\"email\"]:\n return\n else:\n self.survivor[\"email\"] = email\n self.Settlement.log_event(\"%s is now managed by %s.\" % (self, email))\n self.logger.debug(\"[%s] changed survivor %s email to %s.\" % (self.User, self, email))",
"def test_user_logged_in_post_changes_email(self):\n form_data = {\n 'password': self.password,\n 'new_email': \"new@email.com\",\n 'new_email2': \"new@email.com\"\n }\n self.assertTrue(self.login())\n post_response = self.post_change_email(form_data)\n self.assertEqual(post_response.status_code, 302)\n self.assertRedirects(post_response, reverse('account:overview'))\n user = User.objects.get(pk=self.user.id)\n self.assertEqual(user.email, 'new@email.com')",
"def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key",
"def cancel_email_change(self):\n if not self.email_new:\n return\n\n self.email_new = None\n self.email_confirmed = True\n self.email_link = None\n self.email_new = None\n self.email_link_expires = None",
"def remember(self, response, request, identity):",
"def receiver_email(self, email):\n self.receiving_email = email",
"def email_address(self, email_address):\n self._email_address = email_address",
"async def update_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been updated successfully..!\")\n else:\n await ctx.send(\"There is no email address configured, \"\n \"Please use add command to add one..!\")\n return",
"def post_change_email(self, data=None):\n return self.client.post(self.change_email_url, data)",
"def test_user_confirm_email_duplicate(self, get_current):\n get_current.return_value.domain = 'su.mo.com'\n self.client.login(username='testuser', password='testpass')\n old_email = User.objects.get(username='testuser').email\n new_email = 'newvalid@email.com'\n response = self.client.post(reverse('users.change_email'),\n {'email': new_email})\n eq_(200, response.status_code)\n assert mail.outbox[0].subject.find('Please confirm your') == 0\n ec = EmailChange.objects.all()[0]\n\n # Before new email is confirmed, give the same email to a user\n other_user = User.objects.filter(username='testuser2')[0]\n other_user.email = new_email\n other_user.save()\n\n # Visit confirmation link and verify email wasn't changed.\n response = self.client.get(reverse('users.confirm_email',\n args=[ec.activation_key]))\n eq_(200, response.status_code)\n doc = pq(response.content)\n eq_('Unable to change email for user testuser',\n doc('.main h1').text())\n u = User.objects.get(username='testuser')\n eq_(old_email, u.email)",
"def update_replied(self,email:str):\n cell = self.wks.find(email)\n print (cell.row)\n print (email)\n #pdb.set_trace()\n\n self.wks.update_cell(cell.row,self.c[7]+1,1)",
"def email_post(request):\n if request.user.is_authenticated:\n messages.error(request, _(\"You are already logged in.\"))\n return redirect(ta_settings.LOGIN_REDIRECT)\n\n form = EmailForm(request.POST)\n if not form.is_valid():\n messages.error(request, _(\"The email address was invalid. Please check the address and try again.\"))\n return redirect(ta_settings.LOGIN_URL)\n\n email = ta_settings.NORMALIZE_EMAIL(form.cleaned_data[\"email\"])\n if not email:\n # The user's normalization function has returned something falsy.\n messages.error(\n request, _(\"That email address is not allowed to authenticate. Please use an alternate address.\")\n )\n return redirect(ta_settings.LOGIN_URL)\n\n email_login_link(request, email, next_url=request.GET.get(\"next\", \"\"))\n\n messages.success(request, _(\"Login email sent! Please check your inbox and click on the link to be logged in.\"))\n return redirect(ta_settings.LOGIN_URL)",
"def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError(\n \"That email is already in use. Please try again.\"\n )",
"def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
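A minimal sketch of how the address remembered by _set_user_email_address above could be read back later; EmailForm and SESSION_VAR_EMAIL_ADDRESS are assumptions standing in for the project's own definitions and are not part of the dataset.
from django import forms

SESSION_VAR_EMAIL_ADDRESS = "email_address"  # assumed session key name

class EmailForm(forms.Form):
    # Mirrors the single 'email' field accessed via form.cleaned_data above.
    email = forms.EmailField()

def get_remembered_email(request):
    # Return the stored address, or an unbound form so the caller can re-render it.
    email = request.session.get(SESSION_VAR_EMAIL_ADDRESS)
    if email:
        return email
    return EmailForm()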
Print list of instances with their attached volume id/size to console, ie | def list_ebss_by_instance():
ec2 = u.create_ec2_resource()
instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
sorted_instances = sorted(instances, key=itemgetter(0))
for (seconds, instance) in sorted_instances:
volumes = instance.volumes.all()
volume_strs = []
for v in volumes:
volume_strs.append("%s (%s)"%(v.id, v.size))
print("%s: %s" % (u.get_name(instance.tags), ','.join(volume_strs))) | [
"def do_show(cs, args):\n instance = _find_instance(cs, args.instance)\n instance._info['flavor'] = instance.flavor['id']\n if hasattr(instance, 'volume'):\n instance._info['volume'] = instance.volume['size']\n if hasattr(instance, 'ip'):\n instance._info['ip'] = ', '.join(instance.ip)\n _print_instance(instance)",
"def printvol(v, voldict, il, ssdroot):\n if v not in voldict:\n return\n subdict = voldict[v]\n for sn in sorted(subdict.keys()):\n indlev(il+2)\n par = \"\"\n if flag_terse_output:\n par = \"%s/%s -> \" % (ssdroot, v)\n if flag_showsize:\n sv = \"%s/%s\" % (ssdroot, sn)\n if sv in volsizes:\n par = \"%s \" % volsizes[sv]\n else:\n u.verbose(1, \"no size info for %s\" % sv)\n print(\"snapshot %s%s/%s\" % (par, ssdroot, sn))\n printvol(sn, voldict, il+2, ssdroot)",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print json.dumps({'data': lst})",
"def _display_oci_volume_list(volumes, output_mode, details, truncate):\n\n def _get_displayable_size(_,volume):\n return volume.get_size(format_str=OCI_VOLUME_SIZE_FMT.HUMAN.name)\n\n def _get_attached_instance_name(_,volume):\n global _this_instance_ocid\n if not volume.is_attached():\n return '-'\n _vol_instance_attach_to = volume.get_instance()\n if _vol_instance_attach_to.get_ocid() == _this_instance_ocid:\n return \"this instance\"\n else:\n pip=_vol_instance_attach_to.get_public_ip()\n if pip:\n return \"%s (%s)\" % (_vol_instance_attach_to.get_display_name(), _vol_instance_attach_to.get_public_ip())\n return _vol_instance_attach_to.get_display_name()\n return '-'\n\n def _get_comp_name(_,volume):\n \"\"\" keep track of compartment per ID as it may be expensive info to fetch \"\"\"\n _map = getattr(_get_comp_name,'c_id_to_name',{})\n if volume.get_compartment_id() not in _map:\n _map[volume.get_compartment_id()] = volume.get_compartment().get_display_name()\n setattr(_get_comp_name,'c_id_to_name',_map)\n return _map[volume.get_compartment_id()]\n\n\n _title='Block volumes information'\n _columns = [['Name',32,'get_display_name'],\n ['Size',6,_get_displayable_size],\n ['Attached to',32,_get_attached_instance_name],\n ['OCID',32,'get_ocid']]\n if details:\n _columns.extend((['IQN',14,'get_iqn'],\n ['Compartement',14,_get_comp_name],\n ['Availability domain',19,'get_availability_domain_name']))\n if output_mode == 'compat':\n printerKlass = get_row_printer_impl('text')\n else:\n printerKlass = get_row_printer_impl(output_mode)\n\n printer = printerKlass(title=_title, columns=_columns, text_truncate=truncate)\n printer.printHeader()\n for vol in volumes:\n printer.printRow(vol)\n printer.rowBreak()\n printer.printFooter()\n printer.finish()",
"def do_list():\n for src, mp, fs, opts, p1, p2 in self._get_mounted_fs():\n print(\"%25s mounted on %s\" % (src, mp))",
"def _print_instances(instance_list: list):\n for instance in instance_list:\n print('\\t{:<20} {:<20} {:<20}'.format(\n instance['Ec2InstanceId'],\n instance['PrivateIpAddress'],\n instance['PublicIpAddress'] if 'PublicIpAddress' in instance else '')\n )",
"def get_instance_vols(conn, instance_id):\n\n vols = conn.get_all_volumes(filters={'attachment.instance-id': instance_id})\n return vols",
"def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass",
"def display_instances(self):\n pass",
"def print_volume(self, volume, prefix=\"\"):\n self.verbose_print(\"%s:\" % volume['name'], prefix=prefix)\n self.verbose_print(\" Tags:\", prefix=prefix)\n\n for key, val in volume['labels'].items():\n self.verbose_print(\" %s: %s\" % (key, val), prefix=prefix)",
"def do_show(args):\n global _this_instance_ocid\n #\n _logger.debug('%s', where_am_i())\n # create an oci api session\n oci_sess = get_oci_api_session()\n # collect iscsi volume information\n system_disks = lsblk.list_blk_dev()\n # we are not touching boot volume in iscsi config\n iscsiadm_session = iscsiadm.session()\n # get the ocid of this instance\n _this_instance_ocid = get_this_instance_ocid(session=oci_sess)\n\n try:\n _ = show_volumes(oci_session=oci_sess,\n iscsiadm_session=iscsiadm_session,\n system_disks=system_disks,\n args=args)\n except Exception as e:\n _logger.debug('Failed to show block volumes: %s', str(e), stack_info=True)\n return False\n return True",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)",
"def show_vdcs(self):\n for v in self.vdcs:\n print v",
"def display_attached_volumes(oci_sess, iscsiadm_session, disks, args):\n #\n # todo: handle the None ocisession more elegantly.\n oci_vols = list()\n try:\n if bool(oci_sess):\n oci_vols = sorted(oci_sess.this_instance().all_volumes())\n except Exception as e:\n _logger.debug('Cannot get all volumes of this instance : %s', str(e))\n\n if not iscsiadm_session and len(oci_vols) > 0:\n #\n # iscsiadm does not show volumes, oci_api.session does, attached volumes but not connected.\n print(\"Local iSCSI info not available.\")\n print(\"List info from Cloud instead(No boot volume).\")\n print(\"\")\n _display_oci_volume_list(oci_vols, args)\n return\n\n _cols = ['Target',\n 'Volume Name',\n 'Volume OCID',\n 'Persistent Portal',\n 'Current Portal',\n 'Session State',\n 'Attached Device',\n 'Size',\n 'Mountpoint',\n 'Filesystem']\n _col_name = ['target',\n 'name',\n 'ocid',\n 'p_portal',\n 'c_portal',\n 's_state',\n 'dev',\n 'size',\n 'mountpoint',\n 'fstype']\n _cols_len = list()\n for col in _cols:\n _cols_len.append(len(col))\n\n volumes_data, _collen = collect_volumes_data(oci_sess, iscsiadm_session, disks, dict(zip(_col_name, _cols_len)))\n\n if not args.no_truncate:\n _collen = {'target': 32,\n 'name': 13,\n 'ocid': 32,\n 'p_portal': 20,\n 'c_portal': 20,\n 's_state': 13,\n 'dev': 15,\n 'size': 6,\n 'mountpoint': 12,\n 'fstype': 12}\n\n _columns = get_columns(args.details, args.output_mode, _collen)\n\n # this is only to be used in compatibility mode, text mode or parsable mode, for now.\n partitionPrinter = get_row_printer_impl(args.output_mode)(title='\\nPartitions:\\n',\n columns=(['Device', 8, 'dev_name'],\n ['Size', 6, 'size'],\n ['Filesystem', 12, 'fstype'],\n ['Mountpoint', 12, 'mountpoint']))\n\n iscsi_dev_printer = None\n if len(volumes_data) == 0:\n print('No iSCSI devices attached.')\n else:\n _title = 'Currently attached iSCSI devices:'\n iscsi_dev_printer = get_row_printer_impl(args.output_mode)(title=_title,\n columns=_columns,\n text_truncate=args.no_truncate)\n if bool(iscsi_dev_printer):\n iscsi_dev_printer.printHeader()\n for _item in volumes_data:\n iscsi_dev_printer.printRow(_item)\n if args.output_mode in ['compat', 'text', 'parsable']:\n if 'partitions' not in disks[_item['dev']]:\n #\n fstype = disks[_item['dev']]['fstype'] \\\n if bool(disks[_item['dev']]['fstype']) \\\n else 'Unknown'\n iscsi_dev_printer.printKeyValue('File system type', fstype)\n mntpoint = disks[_item['dev']]['mountpoint'] \\\n if bool(disks[_item['dev']]['mountpoint']) \\\n else 'Not mounted'\n iscsi_dev_printer.printKeyValue('Mountpoint', mntpoint)\n else:\n partitions = disks[_item['dev']]['partitions']\n partitionPrinter.printHeader()\n for part in sorted(list(partitions.keys())):\n # add it as we need it during the print\n partitions[part]['dev_name'] = part\n partitionPrinter.printRow(partitions[part])\n partitionPrinter.rowBreak()\n partitionPrinter.printFooter()\n if not args.output_mode == 'parsable':\n partitionPrinter.finish()\n iscsi_dev_printer.rowBreak()\n iscsi_dev_printer.printFooter()\n iscsi_dev_printer.finish()\n return",
"def do_show(self, arg):\n args = parse(arg)\n if len(args) < 2:\n print('Please provide the Class and the instance.id to search')\n else:\n print(args)\n print(storage.get(classes[args[0]], args[1]).to_dict())\n storage.close()",
"def show(vol_path):\n name = \"qemu-img\"\n image = \"breqwatr/qemu-img:latest\"\n path = Path(vol_path)\n vol_abspath = path.absolute().__str__()\n run = f\"qemu-img info {vol_abspath}\"\n mount = f\"-v {vol_abspath}:{vol_abspath}\"\n cmd = f\"docker run --rm -it --name {name} {mount} {image} {run}\"\n shell(cmd)",
"def do_volume_list(gc, *args, **kwargs):\n\n volumes = gc.volumes.list(**kwargs)\n\n columns = ['peer_id', 'host', 'port', 'iqn', 'lun']\n utils.print_list(volumes, columns)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
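For reference, a hedged equivalent of list_ebss_by_instance using boto3 directly; the 'u' helper module above is project-specific, so the name-tag lookup and the region used here are assumptions.
import boto3

def list_volumes_by_instance(region_name="us-east-1"):
    ec2 = boto3.resource("ec2", region_name=region_name)
    # Sort by launch time, oldest first, like the helper above.
    for instance in sorted(ec2.instances.all(), key=lambda i: i.launch_time):
        tags = instance.tags or []
        name = next((t["Value"] for t in tags if t["Key"] == "Name"), instance.id)
        volume_strs = ["%s (%s GiB)" % (v.id, v.size) for v in instance.volumes.all()]
        print("%s: %s" % (name, ",".join(volume_strs)))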
Grows EBS volume for given task. | def grow_ebs_for_task(task_fragment, target_size_gb):
ec2 = u.create_ec2_resource()
client = u.create_ec2_client()
# todo: don't crash on missing/duplicate names
instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
sorted_instances = reversed(sorted(instances, key=itemgetter(0)))
for (seconds, instance) in sorted_instances:
task_name = u.get_name(instance.tags)
hours_ago = (time.time()-seconds)/3600
hours_ago+=8 # adjust for time being in UTC
if task_fragment in task_name:
print("Found instance %s launched %.1f hours ago" %( task_name, hours_ago))
break
print(instance.id)
volumes = list(instance.volumes.all())
assert len(volumes)==1, "Must have 1 volume"
print("Growing %s to %s"%(volumes[0].id, target_size_gb))
response = client.modify_volume(
VolumeId=volumes[0].id,
Size=target_size_gb,
)
assert u.is_good_response(response) | [
"def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)",
"def extend_volume(self, volume, new_size):",
"def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n size_in_bytes = vipr_utils.to_bytes(str(new_size) + \"G\")\n\n try:\n self.volume_obj.expand(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project +\n \"/\" +\n volume_name,\n size_in_bytes,\n True)\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": expand failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s expand failed\") % volume_name)",
"def run(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs):\n volume = self.cinder.create_volume(size, **kwargs)\n self.cinder.extend_volume(volume, new_size=new_size)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.delete_volume(volume)",
"def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )",
"def grow_disks(fragment: str, target_size_gb=500):\n\n instance = u.lookup_instance(fragment)\n client = u.get_ec2_client()\n\n volumes = list(instance.volumes.all())\n for vol in volumes:\n volume: Volume = vol\n if volume.size < target_size_gb:\n print(\"Growing %s to %s\" % (volume.id, target_size_gb))\n response = client.modify_volume(VolumeId=volume.id, Size=target_size_gb)\n assert u.is_good_response(response)\n else:\n print(f\"Volume {volume.id} is already {volume.size} GB's, skipping\")",
"def growVolume(self, name, amount):\n result = self.ssh.run(['growvv', '-f', name, '%dg' % amount])\n\n if result:\n msg = result[0]\n else:\n msg = None\n\n if msg:\n if '%s not found' % name in msg:\n raise exceptions.HTTPNotFound(error={'desc': msg})\n else:\n raise exceptions.GrowVolumeException(message = msg)",
"def growVolume(self, name, amount):\n info = {'action': self.GROW_VOLUME,\n 'sizeMiB': amount}\n\n response, body = self.http.put('/volumes/%s' % name, body=info)\n return body",
"def extend_volume(self, volume, new_size):\n vol_name = volume['name']\n backing = self.volumeops.get_backing(vol_name, volume['id'])\n if not backing:\n LOG.info(\"There is no backing for volume: %s; no need to \"\n \"extend the virtual disk.\", vol_name)\n return\n\n # try extending vmdk in place\n try:\n self._extend_backing(backing, new_size,\n VMwareVcVmdkDriver._get_disk_type(volume))\n LOG.info(\"Successfully extended volume: %(vol)s to size: \"\n \"%(size)s GB.\",\n {'vol': vol_name, 'size': new_size})\n return\n except exceptions.NoDiskSpaceException:\n LOG.warning(\"Unable to extend volume: %(vol)s to size: \"\n \"%(size)s on current datastore due to insufficient\"\n \" space.\",\n {'vol': vol_name, 'size': new_size})\n\n # Insufficient disk space; relocate the volume to a different datastore\n # and retry extend.\n LOG.info(\"Relocating volume: %s to a different datastore due to \"\n \"insufficient disk space on current datastore.\",\n vol_name)\n try:\n create_params = {CREATE_PARAM_DISK_SIZE: new_size}\n (host, rp, folder, summary) = self._select_ds_for_volume(\n volume, create_params=create_params)\n self.volumeops.relocate_backing(backing, summary.datastore, rp,\n host)\n self.volumeops.move_backing_to_folder(backing, folder)\n self._extend_backing(backing, new_size,\n VMwareVcVmdkDriver._get_disk_type(volume))\n except exceptions.VMwareDriverException:\n with excutils.save_and_reraise_exception():\n LOG.error(\"Failed to extend volume: %(vol)s to size: \"\n \"%(size)s GB.\",\n {'vol': vol_name, 'size': new_size})\n\n LOG.info(\"Successfully extended volume: %(vol)s to size: \"\n \"%(size)s GB.\",\n {'vol': vol_name, 'size': new_size})",
"def grow(self, size):\n # size of the instance\n if size is not None and (type(size) == int or size.isdigit()):\n size = { 'size': int(size) }\n else:\n # TODO : proper error\n raise Exception()\n\n if self.size > size['size']:\n # TODO : proper error\n raise Exception((\"This instance has a data storage volume of %d GB and cannot \" + \\\n \"be shrunk. (Tried to specify %d GB as new size.)\") % (self.size, size['size']))\n\n self.client.post(self.path+'/action', { 'resize': {'volume': size} })\n return True",
"def put(self, task: Task, amount: float):\r\n\r\n heappush(self.container, (self.clock() + amount, self.sequence, task))\r\n self.sequence += 1",
"def grow(self):\n self.grow_partition()\n self.grow_filesystem()",
"def grow_ebs_volume(server_name, new_size, device_name):\n boto3.setup_default_session(region_name='us-west-2')\n # Order of operations\n # Find instance AMI ID by server name tag\n instance_id, instance_dict = get_instance_by_tagged_name(server_name)\n \n # Get EC2 instance object\n instance = boto3.resource('ec2').Instance(instance_id)\n\n # Get the device mappings for that instance and find the volume's ID\n mapping = instance.block_device_mappings\n if len(mapping) <= 0:\n print \"No map found. Please check the instance manually to ensure there is at least one valid volume currently attached\"\n exit(1)\n\n devices = {m[\"DeviceName\"]: m[\"Ebs\"][\"VolumeId\"] for m in mapping}\n devices_by_id= {m[\"Ebs\"][\"VolumeId\"]: m[\"DeviceName\"] for m in mapping}\n vol_device_name = \"\"\n if len(devices) > 1:\n print \"More than 1 device found.\"\n if device_name in devices.keys():\n show_attached_volumes(instance)\n print \"Using '{device_name}' (you can override this with the --device-name flag)\".format(device_name=device_name)\n vol_id=devices[device_name]\n vol_device_name = devices_by_id[vol_id]\n else:\n print \"Select a device:\"\n index=0\n vols={}\n for name, vol_id in devices.iteritems():\n vol = boto3.resource('ec2').Volume(vol_id)\n print \"\\t{index}:\\t{name}:\\t{vol_id}\\t{vol_size} GiB\".format(index=index, name=name, vol_id=vol_id, vol_size=vol.size)\n vols[index] = vol_id\n index=index+1\n print \"Re-run this job with one of the selected devices above.\"\n exit(1)\n #Commenting this out since we can't be interactive in Jenkins\n #selected_vol_index = click.prompt(\"Select a device for '{server_name}'\".format(server_name=server_name),type=int)\n #vol_id = vols[selected_vol_index]\n #vol_device_name = devices_by_id[vol_id]\n else:\n # If there's just a single entry, then just pop the item out of the dict (it's the only item)\n vol_device_name, vol_id = devices.popitem()\n print \"Using '{vol_name}'/'{vol_id}' since it was the only attached volume found.\".format(vol_name=vol_device_name, vol_id=vol_id)\n\n # Now you have the volume\n vol = boto3.resource('ec2').Volume(vol_id)\n\n # Snapshot the volume\n print \"Creating a snapshot of the instance...\"\n snapshot = vol.create_snapshot()\n while snapshot.state != \"completed\":\n time.sleep(5)\n snapshot.reload()\n print \"Creating snapshot: {progress} complete\".format(progress=snapshot.progress)\n # This \"waiter\" will only wait 10 minutes and error out...which is far too short.\n # snapshot.wait_until_completed()\n\n # Create a new volume from that snapshot - TODO MATCH AVAILABILITY ZONE WITH INSTANCE\n print \"Creating a new volume using the snapshot\"\n new_volume = boto3.resource('ec2').create_volume(Size=int(new_size), SnapshotId=snapshot.id, AvailabilityZone=vol.availability_zone)\n while new_volume.state != \"available\":\n time.sleep(5)\n new_volume.reload()\n\n # Stop the instance\n stop_instance(instance)\n # Detach the old volume noting where it was stored, waiting for it to fully detach\n print \"Detaching the old volume\"\n vol.detach_from_instance()\n while vol.state != \"available\":\n time.sleep(5)\n vol.reload()\n\n # Attach the new volume to the same place\n print \"Attaching the new volume\"\n new_volume.attach_to_instance(InstanceId=instance_id, Device=vol_device_name)\n\n # Delete the old volume\n print \"Deleting the old volume\"\n vol.delete()\n # Delete the snapshot\n print \"Deleting the snapshot\"\n snapshot.delete()\n\n # Start the instance\n start_instance(instance)\n\n print \"Instance 
restarted. Here are the devices -\"\n show_attached_volumes(instance)\n\n tags = {x['Key']: x['Value'] for x in instance.tags}\n user=tags['DeployUser']\n host=tags['Name']\n\n print \"Waiting for SSH to become available...\"\n wait_for_ssh(server_name)\n\n print \"Resizing the device.\"\n resize2fs(user, host, vol_device_name)",
"def extend_volume(self, volume, new_size):\n spdk_name = self._get_spdk_volume_name(volume.name)\n params = {'name': spdk_name, 'size': new_size * units.Gi}\n self._rpc_call('bdev_lvol_resize', params)",
"def resize_root_volume(instance):\n\n # TODO: could we avoid this by fixing our Packer image?\n\n # See https://www.elastic.co/blog/autoresize-ebs-root-volume-on-aws-amis\n ssh(instance,\n 'yum update -y && ' +\n 'yum-config-manager --enable epel && ' +\n 'yum install -y cloud-utils-growpart && ' +\n '/usr/bin/growpart /dev/xvda 1 && ' +\n 'resize2fs /dev/xvda1'\n )",
"def guest_grow_root_volume(self, userid, os_version):\n LOG.debug('Begin to punch grow partition commands to guest: %s',\n userid)\n linuxdist = self._dist_manager.get_linux_dist(os_version)()\n # get configuration commands\n config_cmds = linuxdist.get_extend_partition_cmds()\n # Creating tmp file with these cmds\n temp_folder = self._pathutils.get_guest_temp_path(userid)\n file_path = os.path.join(temp_folder, 'gpartvol.sh')\n LOG.debug('Creating file %s to contain root partition extension '\n 'commands' % file_path)\n with open(file_path, \"w\") as f:\n f.write(config_cmds)\n try:\n self._smtclient.punch_file(userid, file_path, \"X\")\n finally:\n LOG.debug('Removing the folder %s ', temp_folder)\n shutil.rmtree(temp_folder)",
"def extend_share(self, name, new_size):\n # first get the original capacity\n old_size = None\n fsname = self.get_fsname_by_name(name)\n for fs in self.ssh.lsfs():\n if fs['fs_name'] == fsname:\n old_size = self.size_to_gb(fs['total_capacity'])\n break\n\n if old_size is None:\n msg = _('share %s is not available') % name\n raise exception.ShareBackendException(msg=msg)\n\n LOG.debug('Extend fs %s from %dGB to %dGB', fsname, old_size, new_size)\n self.ssh.expandfs(fsname, new_size - old_size)",
"def reserve_volume(self, volume):\n volume = self._get_resource(_volume.Volume, volume)\n volume.reserve(self)",
"def _add_volumes_to_task(task_dict, volumes):\n\n if not volumes:\n return\n\n if 'volumes' in task_dict:\n task_volumes = task_dict['volumes']\n else:\n task_volumes = {}\n task_dict['volumes'] = task_volumes\n\n for volume in volumes:\n if volume.is_host:\n vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'host',\n 'host_path': volume.host_path}\n else:\n vol_dict = {'container_path': volume.container_path, 'mode': volume.mode, 'type': 'volume'}\n if volume.driver:\n vol_dict['driver'] = volume.driver\n if volume.driver_opts:\n vol_dict['driver_opts'] = volume.driver_opts\n task_volumes[volume.name] = vol_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
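A stripped-down sketch of the resize step in grow_ebs_for_task with plain boto3; the instance/volume lookup is omitted and the region is an assumption. After modify_volume succeeds, the filesystem on the instance still has to be grown separately (e.g. growpart/resize2fs).
import boto3

def grow_volume(volume_id, target_size_gb, region_name="us-east-1"):
    client = boto3.client("ec2", region_name=region_name)
    volume = boto3.resource("ec2", region_name=region_name).Volume(volume_id)
    if volume.size >= target_size_gb:
        print("Volume %s is already %d GiB, skipping" % (volume_id, volume.size))
        return
    print("Growing %s to %d GiB" % (volume_id, target_size_gb))
    response = client.modify_volume(VolumeId=volume_id, Size=target_size_gb)
    assert response["ResponseMetadata"]["HTTPStatusCode"] == 200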
This class tests the PyTorchYolo object detector. | def get_pytorch_yolo(get_default_cifar10_subset):
import cv2
import torch
from pytorchyolo import models
from pytorchyolo.utils.loss import compute_loss
from art.estimators.object_detection.pytorch_yolo import PyTorchYolo
model_path = "/tmp/PyTorch-YOLOv3/config/yolov3.cfg"
weights_path = "/tmp/PyTorch-YOLOv3/weights/yolov3.weights"
model = models.load_model(model_path=model_path, weights_path=weights_path)
class YoloV3(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, targets=None):
if self.training:
outputs = self.model(x)
# loss is averaged over a batch. Thus, for patch generation use batch_size = 1
loss, loss_components = compute_loss(outputs, targets, self.model)
loss_components_dict = {"loss_total": loss}
return loss_components_dict
else:
return self.model(x)
model = YoloV3(model)
object_detector = PyTorchYolo(
model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=("loss_total",)
)
n_test = 10
(_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset
x_test_cifar10 = x_test_cifar10[0:n_test]
x_test = cv2.resize(
x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC
).transpose((2, 0, 1))
x_test = np.expand_dims(x_test, axis=0)
x_test = np.repeat(x_test, repeats=2, axis=0)
# Create labels
result = object_detector.predict(x=x_test)
y_test = [
{
"boxes": result[0]["boxes"],
"labels": result[0]["labels"],
"scores": np.ones_like(result[0]["labels"]),
},
{
"boxes": result[1]["boxes"],
"labels": result[1]["labels"],
"scores": np.ones_like(result[1]["labels"]),
},
]
yield object_detector, x_test, y_test | [
"def yolo_test_file(self):\n # Detect objects\n annotatedImage, predictedObjects = self.detect_from_file(\n self.inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(10)\n # Save annotated image\n if self.saveAnnotatedImage:\n cv2.imwrite(self.outputFile, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n self.textOutputFolder,\n self.outputFile.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)",
"def yolo(image, classes=\"src/yolo/classes.txt\", config=\"src/yolo/yolo.cfg\", weights=\"src/yolo/yolov3.weights\"):\n\n with open(classes, 'r') as in_file:\n classes = [line.strip() for line in in_file.readlines()]\n\n Width = image.shape[1]\n Height = image.shape[0]\n scale = 0.00392\n\n net = cv2.dnn.readNet(weights, config)\n\n blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(get_output_layers(net))\n\n class_ids = []\n confidences = []\n boxes = []\n conf_threshold = 0.5\n nms_threshold = 0.4\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n\n img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #convert it to RGB channel\n return_val = []\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = box[0]\n y = box[1]\n w = box[2]\n h = box[3]\n\n color = draw_prediction(image, classes, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))\n average = get_bbox_average(img_rgb, round(x), round(y), round(x+w), round(y+h))\n\n return_val.append({\n \"x\": x,\n \"y\": y,\n \"w\": w,\n \"h\": h,\n # \"class\": classes[class_ids[i]],\n \"class\": \"person\",\n \"confidence\": confidences[i],\n \"color\": color,\n \"centroid\": np.array([x + w/2, y + h/2]),\n \"average\": average,\n \"index\": i\n })\n\n return return_val",
"def yolo_test_video(self):\n # Open the input video, blocking call\n inputVideo = cv2.VideoCapture(self.inputFile)\n\t\t\n # Get infomration about the input video\n codec = int(inputVideo.get(cv2.CAP_PROP_FOURCC))\n fps = int(inputVideo.get(cv2.CAP_PROP_FPS))\n frameWidth = int(inputVideo.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(inputVideo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Open the output stream\n outputVideo = cv2.VideoWriter(self.outputFile,\n codec,\n fps,\n (frameWidth,frameHeight))\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n totalFrames = inputVideo.get(cv2.CAP_PROP_FRAME_COUNT)\n \t \n\tavgGrabTime = 0\n\tavgYoloTime = 0\n\tavgWriteTime = 0\n \n # For each frame in the video\n while True:\n \n startTime = time.time()\n \n # Calculate the time it takes to grab a frame\n startGrabTime = time.time()\n grabbed, frame = inputVideo.read()\n endGrabTime = time.time() \n\t avgGrabTime+=(endGrabTime-startGrabTime)\n\t \n\n if grabbed:\n\t\t\n # Calculate the time it takes to run YOLO pipeline \n\t\tstartYoloTime = time.time()\n annotatedFrame, predictedObjects = self.detect_from_image(frame)\n\t\tendYoloTime = time.time()\n\t\tavgYoloTime+= ( endYoloTime - startYoloTime)\n\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n \t\n\t\tcurrentTime = time.time()\n\t\telapsedTime = currentTime - startTime\n\t\tcurrentFPS = (1)/elapsedTime \n\t\t \t\n #cv2.rectangle(annotatedFrame, (0, 0), (30, 30), (0,0,0), -1)\n cv2.putText(\n annotatedFrame, 'FPS' + ': %.2f' % currentFPS,\n (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n\t\t\n # Calculate the time it takes to write an annotated frame to video\n\t\tstartWriteTime = time.time()\n outputVideo.write(annotatedFrame)\n\t\tendWriteTime = time.time()\n\t\tavgWriteTime +=(endWriteTime - startWriteTime)\n\t\n else:\n inputVideo.set(cv2.CAP_PROP_POS_FRAMES, frameIndex-1)\n cv2.waitKey(100)\n\n if frameIndex==totalFrames:\n break\n\t\t\n inputVideo.release()\n outputVideo.release()\n cv2.destroyAllWindows()\n \n avgGrabTime/=totalFrames\n avgYoloTime/=totalFrames\n avgWriteTime/=totalFrames\n\n if self.verbose:\n print ('Average time for extracting compressed video frame : %.3f' %avgGrabTime)\n print ('Average time for YOLO object detection : %.3f' %avgYoloTime )\n print ('Average time for writing frame to video : %.3f' %avgWriteTime)",
"def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]",
"def detect():\n parser = argparse.ArgumentParser(description=\"\"\"\n Pads image and performs detection of particles on the image with YOLOv3.\n Note that results for x and y coordinates are given relative to the padded image. \n Returns particle positions and characteristics as tensor of shape (*, 6) where 6\n corresponds to (confidence, x, y, x, radius, refractive index)\"\"\")\n parser.add_argument(\n \"-w\",\n \"--weights\",\n type=str,\n default=\"weights.pth.tar\",\n help=\"Path to weights or checkpoint file .pth.tar\",\n )\n parser.add_argument(\n \"-i\",\n \"--image\",\n type=str,\n default=\"../data/img.npy\",\n help=\"Path to directory with images to inference (npy format expected)\",\n )\n parser.add_argument(\n \"-b\", \"--batch_size\", type=int, default=1, help=\"Size of each image batch\"\n )\n parser.add_argument(\n \"--conf_threshold\", type=float, default=0.5, help=\"Object confidence threshold\"\n )\n parser.add_argument(\n \"--nms_threshold\",\n type=float,\n default=5,\n help=\"Pixel threshold for non-maximum suppression\",\n )\n parser.add_argument(\n \"--z_unit\",\n type=str,\n default=\"\",\n help=\"if 'micro' the z predictions will be converted to micrometres according to simulation settings\\\n used in our experiments. Do not use if your images differ.\",\n )\n parser.add_argument(\n \"--toggle_eval\",\n type=bool,\n default=False,\n help=\"boolean to indicate whether to set model to eval or train mode for inference i.e \\\n whether to use batch statistics from training or not in batch normalization\",\n )\n parser.add_argument(\n \"--device\", type=str, default=\"cuda\", help=\"device to run model on\"\n )\n parser.add_argument(\n \"--plot_result\",\n type=bool,\n default=True,\n help=\"Whether the result should be plotted with Matplotlib\",\n )\n args = parser.parse_args()\n\n image = np.load(args.image)\n pad_side = config.PAD_SIDE\n transform = A.Compose(\n [\n A.PadIfNeeded(\n min_width=pad_side, min_height=pad_side, border_mode=cv2.BORDER_CONSTANT\n ),\n A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=1.0,),\n ToTensorV2(),\n ]\n )\n\n image = transform(image=image)[\"image\"].permute(1, 2, 0)\n model = YOLOv3(in_channels=3, num_classes=config.NUM_CLASSES)\n optimizer = optim.Adam(\n model.parameters(), lr=config.LEARNING_RATE, weight_decay=config.WEIGHT_DECAY\n )\n load_checkpoint(args.weights, model, optimizer, config.LEARNING_RATE)\n\n model = model.to(args.device)\n results = run_on_patches(\n image,\n model,\n conf_threshold=args.conf_threshold,\n nms_threshold=args.nms_threshold,\n batch_size=args.batch_size,\n z_unit=args.z_unit,\n toggle_eval=args.toggle_eval,\n device=args.device,\n plot_result=args.plot_result,\n )\n return results",
"def run_detect(**kwargs):\n cmd = 'python yolov3/detect.py'\n pms_list = [\n 'image_folder', 'model_def', \n 'weights_path', 'class_path', \n 'conf_thres', 'nms_thres',\n 'batch_size', 'n_cpu', \n 'img_size', 'checkpoint_model'\n ]\n call_command(pms_list, cmd, kwargs)",
"def object_detection(presets: str = DEFAULT):\n hyperparameters = {\n \"model.names\": [\"mmdet_image\"],\n \"model.mmdet_image.checkpoint_name\": \"yolox_s\",\n \"env.eval_batch_size_ratio\": 1,\n \"env.precision\": 32,\n \"env.strategy\": \"ddp\",\n \"env.auto_select_gpus\": False, # Have to turn off for detection!\n \"env.num_gpus\": -1,\n \"env.per_gpu_batch_size\": 8, # Works on 8G GPU\n \"env.num_workers\": 2,\n \"optimization.learning_rate\": 1e-4,\n \"optimization.lr_decay\": 0.9,\n \"optimization.lr_mult\": 100,\n \"optimization.lr_choice\": \"two_stages\",\n \"optimization.top_k\": 1,\n \"optimization.top_k_average_method\": \"best\",\n \"optimization.warmup_steps\": 0.0,\n \"optimization.patience\": 10,\n \"optimization.val_check_interval\": 0.5,\n \"optimization.check_val_every_n_epoch\": 1,\n }\n hyperparameter_tune_kwargs = {}\n\n presets, use_hpo = parse_presets_str(presets)\n if use_hpo:\n default_tunable_hyperparameters, default_hyperparameter_tune_kwargs = get_default_hpo_setup()\n hyperparameters.update(default_tunable_hyperparameters)\n hyperparameter_tune_kwargs.update(default_hyperparameter_tune_kwargs)\n\n if presets == MEDIUM_QUALITY:\n hyperparameters.update(\n {\n \"model.mmdet_image.checkpoint_name\": \"yolox_l\",\n \"env.per_gpu_batch_size\": 2, # Works on 8G GPU\n \"optimization.learning_rate\": 5e-5,\n \"optimization.patience\": 3,\n \"optimization.max_epochs\": 50,\n \"optimization.val_check_interval\": 1.0,\n \"optimization.check_val_every_n_epoch\": 3,\n }\n )\n elif presets in [DEFAULT, HIGH_QUALITY]:\n hyperparameters.update(\n {\n \"model.mmdet_image.checkpoint_name\": \"dino-4scale_r50_8xb2-12e_coco.py\",\n \"model.mmdet_image.frozen_layers\": [\"backbone\", \"model.level_embed\"],\n \"env.per_gpu_batch_size\": 1, # Works on 16G GPU\n \"optimization.learning_rate\": 1e-4,\n \"optimization.patience\": 20,\n \"optimization.max_epochs\": 50,\n \"optimization.val_check_interval\": 1.0,\n \"optimization.check_val_every_n_epoch\": 1,\n }\n )\n elif presets == BEST_QUALITY:\n hyperparameters.update(\n {\n \"model.mmdet_image.checkpoint_name\": \"dino-5scale_swin-l_8xb2-36e_coco.py\",\n \"model.mmdet_image.frozen_layers\": [\"backbone\", \"model.level_embed\"],\n \"env.per_gpu_batch_size\": 1, # Works on 24G GPU\n \"optimization.learning_rate\": 1e-4,\n \"optimization.patience\": 20,\n \"optimization.max_epochs\": 50,\n \"optimization.val_check_interval\": 1.0,\n \"optimization.check_val_every_n_epoch\": 1,\n }\n )\n else:\n raise ValueError(f\"Unknown preset type: {presets}\")\n\n return hyperparameters, hyperparameter_tune_kwargs",
"def test_detection_on_image(self, image_path):\n image = TLClassifier.load_image(image_path)\n result = self.infer_image(image)\n TLClassifier.display_image(result)",
"def test_predictor():",
"def object_detector(model, should_log=True):\n if model is None:\n raise Exception(\n \"alwaysai.py: object_detector: model name parameter not found\")\n od = edgeiq.ObjectDetection(model)\n e = engine()\n od.load(e)\n if should_log == True:\n print(\"alwaysai.py: object_detector: Engine: {}\".format(od.engine))\n print(\"alwaysai.py: object_detector: Accelerator: {}\\n\".format(\n od.accelerator))\n print(\"alwaysai.py: object_detector: Model:\\n{}\\n\".format(od.model_id))\n return od",
"def user_test_cam_yolo(dev_idx, _user_id, test_loop):\n is_raw_output = True\n image_source_h = 480\n image_source_w = 640\n app_id = constants_kl520.APP_TINY_YOLO3\n image_size = image_source_w * image_source_h * 2\n frames = []\n\n # Setup video capture device.\n capture = kdp_wrapper.setup_capture(0, image_source_w, image_source_h)\n if capture is None:\n return -1\n\n # Start ISI mode.\n if kdp_wrapper.start_isi_parallel_ext(dev_idx, app_id, image_source_w, image_source_h, is_raw_output):\n return -1\n\n start_time = time.time()\n # Fill up the image buffers.\n ret, img_id_tx, img_left, buffer_depth = kdp_wrapper.fill_buffer(\n dev_idx, capture, image_size, frames)\n if ret:\n return -1\n\n # Send the rest and get result in loop, with 2 images alternatively\n print(\"Companion image buffer depth = \", buffer_depth)\n kdp_wrapper.pipeline_inference(\n dev_idx, app_id, test_loop - buffer_depth, image_size,\n capture, img_id_tx, img_left, buffer_depth, frames, handle_result, is_raw_output)\n\n end_time = time.time()\n diff = end_time - start_time \n estimate_runtime = float(diff/test_loop)\n fps = float(1/estimate_runtime) \n print(\"Parallel inference average estimate runtime is \", estimate_runtime)\n print(\"Average FPS is \", fps)\n\n kdp_wrapper.kdp_exit_isi(dev_idx)\n\n return 0",
"def setup_detector(weights_path: str):\n model = torch.hub.load('ultralytics/yolov5', 'custom', weights_path)\n if torch.cuda.is_available():\n print(f\"Using {torch.cuda.get_device_name(torch.cuda.current_device())}.\")\n model.cuda()\n torch.backends.cudnn.benchmark = True\n else:\n print(\"Using CPU.\")\n return model",
"def predict_from_cv2(yolo, inputfilepath):\n\n print(\"call func of predict_from_cv2\")\n img = cv2.imread(inputfilepath)\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())",
"def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)",
"def createDetector(self):\n pass",
"def testTextureCrawler(self):\n crawler = Crawler.create(PathHolder(self.__exrFile))\n self.assertIsInstance(crawler, TextureCrawler)\n crawler = Crawler.create(PathHolder(self.__tifFile))\n self.assertIsInstance(crawler, TextureCrawler)\n crawler = Crawler.create(PathHolder(self.__badExrFile))\n self.assertNotIsInstance(crawler, TextureCrawler)",
"def invocation(\n image: bytes = File(None),\n models_check: Tuple[bool, bool] = Depends(health_check_models),\n):\n try:\n # Create yolo input object\n yolo_input = YoloInput.from_bytes_image(\n image, new_shape=(224, 224)\n )\n # Detect objects\n return yolo_input.detect_products(conf_thres=0.2)\n except Exception as e:\n raise HTTPException(\n status_code=500,\n detail={\n \"error\": str(e),\n \"message\": \"There was an error on the decoding bytes image or in the detections\"\n \", check your image file is correct or valid!\",\n },\n )",
"def app_object_detection():\n MODEL_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel\" # noqa: E501\n MODEL_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.caffemodel\"\n PROTOTXT_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt\" # noqa: E501\n PROTOTXT_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.prototxt.txt\"\n\n CLASSES = [\n \"background\",\n \"aeroplane\",\n \"bicycle\",\n \"bird\",\n \"boat\",\n \"bottle\",\n \"bus\",\n \"car\",\n \"cat\",\n \"chair\",\n \"cow\",\n \"diningtable\",\n \"dog\",\n \"horse\",\n \"motorbike\",\n \"person\",\n \"pottedplant\",\n \"sheep\",\n \"sofa\",\n \"train\",\n \"tvmonitor\",\n ]\n COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)\n download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)\n\n DEFAULT_CONFIDENCE_THRESHOLD = 0.5\n\n class Detection(NamedTuple):\n name: str\n prob: float\n\n class MobileNetSSDVideoTransformer(VideoTransformerBase):\n confidence_threshold: float\n result_queue: \"queue.Queue[List[Detection]]\"\n\n def __init__(self) -> None:\n self._net = cv2.dnn.readNetFromCaffe(\n str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)\n )\n self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD\n self.result_queue = queue.Queue()\n\n def _annotate_image(self, image, detections):\n # loop over the detections\n (h, w) = image.shape[:2]\n result: List[Detection] = []\n for i in np.arange(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n if confidence > self.confidence_threshold:\n # extract the index of the class label from the `detections`,\n # then compute the (x, y)-coordinates of the bounding box for\n # the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n name = CLASSES[idx]\n result.append(Detection(name=name, prob=float(confidence)))\n\n # display the prediction\n label = f\"{name}: {round(confidence * 100, 2)}%\"\n cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(\n image,\n label,\n (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n COLORS[idx],\n 2,\n )\n return image, result\n\n def transform(self, frame: av.VideoFrame) -> np.ndarray:\n image = frame.to_ndarray(format=\"bgr24\")\n blob = cv2.dnn.blobFromImage(\n cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5\n )\n self._net.setInput(blob)\n detections = self._net.forward()\n annotated_image, result = self._annotate_image(image, detections)\n\n # NOTE: This `transform` method is called in another thread,\n # so it must be thread-safe.\n self.result_queue.put(result)\n\n return annotated_image\n\n webrtc_ctx = webrtc_streamer(\n key=\"object-detection\",\n mode=WebRtcMode.SENDRECV,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n video_transformer_factory=MobileNetSSDVideoTransformer,\n async_transform=True,\n )\n\n confidence_threshold = st.slider(\n \"Confidence threshold\", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05\n )\n if webrtc_ctx.video_transformer:\n webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold\n\n if st.checkbox(\"Show the detected labels\", value=True):\n if webrtc_ctx.state.playing:\n labels_placeholder = st.empty()\n # NOTE: The video transformation with object detection and\n # this loop displaying the 
result labels are running\n # in different threads asynchronously.\n # Then the rendered video frames and the labels displayed here\n # are not strictly synchronized.\n while True:\n if webrtc_ctx.video_transformer:\n try:\n result = webrtc_ctx.video_transformer.result_queue.get(\n timeout=1.0\n )\n except queue.Empty:\n result = None\n labels_placeholder.table(result)\n else:\n break\n\n st.markdown(\n \"This demo uses a model and code from \"\n \"https://github.com/robmarkcole/object-detection-app. \"\n \"Many thanks to the project.\"\n )",
"def predict_from_pil(yolo, inputfilepath):\n\n print(\"call func of predict_from_pil\")\n img = np.array(Image.open(inputfilepath))\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
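A hedged sketch of a test that consumes the get_pytorch_yolo generator above, assuming it is registered as a pytest fixture; the boxes/labels/scores keys follow the prediction format already used when building y_test.
import numpy as np

def test_pytorch_yolo_predict(get_pytorch_yolo):
    object_detector, x_test, y_test = get_pytorch_yolo
    predictions = object_detector.predict(x=x_test)
    # One prediction dict per input image.
    assert len(predictions) == x_test.shape[0]
    for pred in predictions:
        assert {"boxes", "labels", "scores"} <= set(pred.keys())
        # Each box is expected to be (x1, y1, x2, y2).
        assert pred["boxes"].shape[-1] == 4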
Splits an image into tiles of a given size. tile_w: tile width; tile_h: tile height | def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):
x_axis = -1
y_axis = -2
arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]
x_ntiles = (
arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1
)
y_ntiles = (
arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1
)
tiles = []
# row
for i in range(0, y_ntiles):
# height of this tile
ver_f = tile_h * i
ver_t = ver_f + tile_h
# col
for j in range(0, x_ntiles):
# width of this tile
hor_f = tile_w * j
hor_t = hor_f + tile_w
tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)
tiles.append(tile)
tile_shape = [tile_h, tile_w]
ntiles = dict(x=x_ntiles, y=y_ntiles)
padding = dict(left=0, right=0, top=0, bottom=0)
if arr_width % tile_w == 0:
padding["right"] = 0
else:
padding["right"] = tile_w - (arr_width % tile_w)
if arr_height % tile_h == 0:
padding["bottom"] = 0
else:
padding["bottom"] = tile_h - (arr_height % tile_h)
info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)
return tiles, info | [
"def splitImgs(self, tile_size, n_tiles):\n\n if n_tiles%2!=0 or tile_size%16!=0:\n print(\"Incorrect number of tiles or tile size not divisible by 16.\\nAborting\")\n exit()\n\n\n path_train = self.train_path\n path_label = self.label_path\n path_raw = self.raw_path\n\n for img in os.listdir(path_raw + \"/image\"):\n\n\n read_img = cv2.imread(path_raw + \"/image/\" + img, -1)\n\n if np.sum(read_img) == 0:\n print(\"Problem with reading image.\\nAborting\")\n exit()\n\n elif np.max(read_img) > 255:\n print(\"Image bit depth is 16 or higher. Please convert images to 8-bit first.\\nAborting\")\n exit()\n\n read_lab = cv2.imread(path_raw + \"/label/\" + img, cv2.IMREAD_GRAYSCALE)\n y, x = read_img.shape\n\n if tile_size > max(y,x)/2+16:\n print(\"Tile size to big.\\nAborting\")\n exit()\n\n\n # splitting image into n tiles of predefined size\n #############\n\n start_y = 0\n start_x = 0\n end_y = tile_size\n end_x = tile_size\n\n column = 0\n row = 0\n\n for i in range(n_tiles):\n\n start_x, end_x, start_y, end_y, column, row = self.find_tile_pos(x, y, tile_size, start_x, end_x, start_y, end_y,\n column, row)\n\n image_tile_train = read_img[start_y:end_y, start_x:end_x]\n image_tile_label = read_lab[start_y:end_y, start_x:end_x]\n\n cv2.imwrite(path_train + \"/\" + str(i) + \"_\" + img, image_tile_train)\n cv2.imwrite(path_label + \"/\" + str(i) + \"_\" + img, image_tile_label)\n\n #############",
"def split_main_image(image: Image, tile_size: int) -> list:\n rgb_img = image.convert('RGB')\n img_grid = list()\n\n if rgb_img.width % tile_size == 0 and rgb_img.height % tile_size == 0:\n for row in range(int(rgb_img.height/tile_size)):\n img_grid.append(list())\n for col in range(int(rgb_img.width/tile_size)):\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) * tile_size))\n img_grid[row].append(cropped)\n elif rgb_img.width % tile_size != 0 and rgb_img.height % tile_size == 0:\n for row in range(rgb_img.height/tile_size):\n img_grid.append(list())\n for col in range(int(math.ceil(rgb_img.width/tile_size))):\n if col == range(int(math.ceil(rgb_img.width/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, (row + 1) * tile_size))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n elif rgb_img.width % tile_size == 0 and rgb_img.height % tile_size != 0:\n for row in range(int(math.ceil(rgb_img.height/tile_size))):\n img_grid.append(list())\n for col in range(int(rgb_img.width/tile_size)):\n if row == range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, rgb_img.height))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n else:\n for row in range(int(math.ceil(rgb_img.height/tile_size))):\n img_grid.append(list())\n for col in range(int(math.ceil(rgb_img.width/tile_size))):\n if col == range(int(math.ceil(rgb_img.width/tile_size)))[-1] and row == \\\n range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, rgb_img.height))\n elif col == range(int(math.ceil(rgb_img.width/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, (row + 1) * tile_size))\n elif row == range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, rgb_img.height))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n\n return img_grid",
"def poss_tile_sizes(self):\n\n path_raw = self.raw_path\n\n for img in os.listdir(path_raw + \"/image\"):\n read_img = cv2.imread(path_raw + \"/image/\" + img, -1)\n y,x = read_img.shape\n\n break\n\n size = 16\n\n while size < max([y, x]) / 2 + 16:\n\n x_tile = math.ceil(x / size)\n y_tile = math.ceil(y / size)\n\n x_overlap = (np.abs(x - x_tile * size)) / (x_tile - 1)\n y_overlap = (np.abs(y - y_tile * size)) / (y_tile - 1)\n\n if (x_overlap.is_integer() and y_overlap.is_integer()) and (x_tile * y_tile) % 2 == 0:\n print(\"tile size (px):\", size, \"number of tiles: \", x_tile * y_tile)\n\n size += 16",
"def split_image(image, size):\n width, height = image.size[0], image.size[1]\n m, n = size\n tile_width, tile_height = int(width/m), int(height/n)\n # Image list\n imgs = []\n for j in range(m):\n for i in range(n):\n # Append cropped image\n imgs.append(image.crop((i*tile_width, j*tile_height,\n (i+1)*tile_width, (j+1)*tile_height)))\n return imgs",
"def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)",
"def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles",
"def tiles2images(tiles: List[np.ndarray], im_shape: tuple, h: int, w: int):\n im_height, im_width, channels = im_shape\n n_h = math.ceil(im_height / h)\n n_w = math.ceil(im_width / w)\n im = []\n for y_index in range(n_h):\n im_row = tiles[y_index * n_w:(y_index + 1) * n_w]\n dw = im_width % w\n im_row[-1] = im_row[-1][:, -dw:]\n im_row = np.concatenate(im_row, axis=1)\n im.append(im_row)\n dh = im_height % h\n im[-1] = im[-1][-dh:, :]\n im = np.concatenate(im, axis=0)\n return im",
"def splitImage(image, size):\n\n W, H = image.size[0], image.size[1]\n m, n = size\n w, h = int(W / n), int(H / m)\n imgs = []\n # 先按行再按列裁剪出 m * n 个小图像\n for j in range(m):\n for i in range(n):\n # 坐标原点在图像左上角\n imgs.append(image.crop((i * w, j * h, (i + 1) * w, (j + 1) * h)))\n return imgs",
"def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)",
"def extract_tiles(image_sample, tile_size=100, step_size=100):\n # Potential fix: force all tiles to be same size, find an optimal tiling size.\n h, w = image_sample.shape\n rows = np.arange(0, h, step_size)\n cols = np.arange(0, w, step_size)\n g = np.meshgrid(cols, rows)\n cols_rows = list(zip(*(x.flat for x in g)))\n tiles = []\n for p in cols_rows:\n tiles.append(\n image_sample[\n p[1]: p[1] + tile_size,\n p[0]: p[0] + tile_size,\n ]\n )\n return tiles",
"def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))",
"def tile(img):\n rows, cols, res = img.rows, img.cols, img.res\n pixels, pixsize = img.pixels, channels[img.pixtype] # assumes 8-bit channels\n width, height = cols/res, rows/res\n\n def tiled(x, y):\n h = (x + width/2.0) % width # horz, vert offset from top left\n v = (height/2.0 - y) % height \n r, c = int(v*res), int(h*res)\n offset = (cols*r + c)*pixsize\n return pixels[offset:offset+pixsize]\n return (tiled, img.pixtype)",
"def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile",
"def break_tile_map(img, tile_size):\n\n # create a 2D array for each tile in the image\n w = int(img.width / tile_size)\n h = int(img.height / tile_size)\n arr = [[0 for i in range(w)] for i in range(h)]\n\n # set non empty tiles to 1\n for t, x, y in next_tile(img, tile_size):\n if not is_empty_tile(t):\n r = int(y / tile_size)\n c = int(x / tile_size)\n arr[r][c] = 1\n\n # a list of rect partitions\n broken = []\n\n # find the minimal rect partitioning for this image\n for r in range(len(arr)):\n for c, tile in enumerate(arr[r]):\n if tile == 1:\n box = _find_max_rect(arr, r, c, tile_size)\n broken.append(img.crop(box))\n\n # clear this rect from the array\n for i in range(int(box[1] / tile_size), int(box[3] / tile_size)):\n for j in range(int(box[0] / tile_size), int(box[2] / tile_size)):\n arr[i][j] = 0\n\n return broken",
"def im_tilecut(img, tile_no=None, tile_size=None):\n dx,dy = img.shape[0:2]\n if tile_no is not None and tile_size is None:\n nx,ny = entuple(tile_no)\n wx,wy = ceil(dx/nx),ceil(dy/ny)\n elif tile_no is None and tile_size is not None:\n wx,wy = entuple(tile_size)\n nx,ny = ceil(dx/wx),ceil(dy/wy)\n else:\n return None\n sx,sy = (dx-wx)//(nx-1),(dy-wy)//(ny-1) # TODO: fix a problem when nx=1 or ny=1\n for i in range(0,dx,sx):\n for j in range(0,dy,sy):\n if i+wx>=dx or j+wy>=dy: continue\n yield img[i:i+wx,j:j+wy]",
"def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out",
"def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()",
"def calculateTierSize(imageWidth, imageHeight, tileSize=256):\n tierSizeInTiles = []\n while (imageWidth > tileSize or imageHeight > tileSize):\n tileWidth = float(imageWidth) / tileSize\n tileHeight = float(imageHeight) / tileSize\n tierSizeInTiles.append([math.ceil(tileWidth), math.ceil(tileHeight)])\n tileSize += tileSize\n tierSizeInTiles.append([1.0, 1.0]) \n tierSizeInTiles.reverse() \n return tierSizeInTiles",
"def _assemble_tiles(self,images,X,Y,Z,C,T):\n self._buffer_supertile(X[0][0],X[0][1])\n \n if X[-1][0] - self._tile_x_offset > self._TILE_SIZE:\n split_ind = 0\n while X[split_ind][0] - self._tile_x_offset < self._TILE_SIZE:\n split_ind += 1\n else:\n split_ind = len(X)\n \n # Untile the data\n num_rows = Y[0][1] - Y[0][0]\n num_cols = X[0][1] - X[0][0]\n num_tiles = len(X)\n \n for ind in range(split_ind):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n if split_ind != num_tiles:\n self._buffer_supertile(X[-1][0],X[-1][1])\n for ind in range(split_ind,num_tiles):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n self._tile_last_column = c_max\n \n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits an image into tiles by number of tiles. x_ntiles: number of tiles horizontally; y_ntiles: number of tiles vertically. | def split_image_into_number_of_tiles(
arr: Image, x_ntiles: int, y_ntiles: int, overlap: int
):
img_width, img_height = arr.shape[-1], arr.shape[-2]
tile_w = img_width // x_ntiles
tile_h = img_height // y_ntiles
return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap) | [
"def splitImgs(self, tile_size, n_tiles):\n\n if n_tiles%2!=0 or tile_size%16!=0:\n print(\"Incorrect number of tiles or tile size not divisible by 16.\\nAborting\")\n exit()\n\n\n path_train = self.train_path\n path_label = self.label_path\n path_raw = self.raw_path\n\n for img in os.listdir(path_raw + \"/image\"):\n\n\n read_img = cv2.imread(path_raw + \"/image/\" + img, -1)\n\n if np.sum(read_img) == 0:\n print(\"Problem with reading image.\\nAborting\")\n exit()\n\n elif np.max(read_img) > 255:\n print(\"Image bit depth is 16 or higher. Please convert images to 8-bit first.\\nAborting\")\n exit()\n\n read_lab = cv2.imread(path_raw + \"/label/\" + img, cv2.IMREAD_GRAYSCALE)\n y, x = read_img.shape\n\n if tile_size > max(y,x)/2+16:\n print(\"Tile size to big.\\nAborting\")\n exit()\n\n\n # splitting image into n tiles of predefined size\n #############\n\n start_y = 0\n start_x = 0\n end_y = tile_size\n end_x = tile_size\n\n column = 0\n row = 0\n\n for i in range(n_tiles):\n\n start_x, end_x, start_y, end_y, column, row = self.find_tile_pos(x, y, tile_size, start_x, end_x, start_y, end_y,\n column, row)\n\n image_tile_train = read_img[start_y:end_y, start_x:end_x]\n image_tile_label = read_lab[start_y:end_y, start_x:end_x]\n\n cv2.imwrite(path_train + \"/\" + str(i) + \"_\" + img, image_tile_train)\n cv2.imwrite(path_label + \"/\" + str(i) + \"_\" + img, image_tile_label)\n\n #############",
"def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info",
"def poss_tile_sizes(self):\n\n path_raw = self.raw_path\n\n for img in os.listdir(path_raw + \"/image\"):\n read_img = cv2.imread(path_raw + \"/image/\" + img, -1)\n y,x = read_img.shape\n\n break\n\n size = 16\n\n while size < max([y, x]) / 2 + 16:\n\n x_tile = math.ceil(x / size)\n y_tile = math.ceil(y / size)\n\n x_overlap = (np.abs(x - x_tile * size)) / (x_tile - 1)\n y_overlap = (np.abs(y - y_tile * size)) / (y_tile - 1)\n\n if (x_overlap.is_integer() and y_overlap.is_integer()) and (x_tile * y_tile) % 2 == 0:\n print(\"tile size (px):\", size, \"number of tiles: \", x_tile * y_tile)\n\n size += 16",
"def split_main_image(image: Image, tile_size: int) -> list:\n rgb_img = image.convert('RGB')\n img_grid = list()\n\n if rgb_img.width % tile_size == 0 and rgb_img.height % tile_size == 0:\n for row in range(int(rgb_img.height/tile_size)):\n img_grid.append(list())\n for col in range(int(rgb_img.width/tile_size)):\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) * tile_size))\n img_grid[row].append(cropped)\n elif rgb_img.width % tile_size != 0 and rgb_img.height % tile_size == 0:\n for row in range(rgb_img.height/tile_size):\n img_grid.append(list())\n for col in range(int(math.ceil(rgb_img.width/tile_size))):\n if col == range(int(math.ceil(rgb_img.width/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, (row + 1) * tile_size))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n elif rgb_img.width % tile_size == 0 and rgb_img.height % tile_size != 0:\n for row in range(int(math.ceil(rgb_img.height/tile_size))):\n img_grid.append(list())\n for col in range(int(rgb_img.width/tile_size)):\n if row == range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, rgb_img.height))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n else:\n for row in range(int(math.ceil(rgb_img.height/tile_size))):\n img_grid.append(list())\n for col in range(int(math.ceil(rgb_img.width/tile_size))):\n if col == range(int(math.ceil(rgb_img.width/tile_size)))[-1] and row == \\\n range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, rgb_img.height))\n elif col == range(int(math.ceil(rgb_img.width/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, rgb_img.width, (row + 1) * tile_size))\n elif row == range(int(math.ceil(rgb_img.height/tile_size)))[-1]:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, rgb_img.height))\n else:\n cropped = image.crop((col * tile_size, row * tile_size, (col + 1) * tile_size, (row + 1) *\n tile_size))\n img_grid[row].append(cropped)\n\n return img_grid",
"def tile_iterator(x,axis,n_tiles,block_size,n_block_overlap):\n n = x.shape[axis]\n\n n % block_size == 0 or _raise(ValueError(\"'x' must be evenly divisible by 'block_size' along 'axis'\"))\n n_blocks = n // block_size\n\n n_tiles_valid = int(np.clip(n_tiles,1,n_blocks))\n if n_tiles != n_tiles_valid:\n warnings.warn(\"invalid value (%d) for 'n_tiles', changing to %d\" % (n_tiles,n_tiles_valid))\n n_tiles = n_tiles_valid\n\n s = n_blocks // n_tiles # tile size\n r = n_blocks % n_tiles # blocks remainder\n assert n_tiles * s + r == n_blocks\n\n # list of sizes for each tile\n tile_sizes = s*np.ones(n_tiles,int)\n # distribute remaning blocks to tiles at beginning and end\n if r > 0:\n tile_sizes[:r//2] += 1\n tile_sizes[-(r-r//2):] += 1\n\n # n_block_overlap = int(np.ceil(92 / block_size))\n # n_block_overlap -= 1\n # print(n_block_overlap)\n\n # (pre,post) offsets for each tile\n off = [(n_block_overlap if i > 0 else 0, n_block_overlap if i < n_tiles-1 else 0) for i in range(n_tiles)]\n\n # tile_starts = np.concatenate(([0],np.cumsum(tile_sizes[:-1])))\n # print([(_st-_pre,_st+_sz+_post) for (_st,_sz,(_pre,_post)) in zip(tile_starts,tile_sizes,off)])\n\n def to_slice(t):\n sl = [slice(None) for _ in x.shape]\n sl[axis] = slice(\n t[0]*block_size,\n t[1]*block_size if t[1]!=0 else None)\n return tuple(sl)\n\n start = 0\n for i in range(n_tiles):\n off_pre, off_post = off[i]\n\n # tile starts before block 0 -> adjust off_pre\n if start-off_pre < 0:\n off_pre = start\n # tile end after last block -> adjust off_post\n if start+tile_sizes[i]+off_post > n_blocks:\n off_post = n_blocks-start-tile_sizes[i]\n\n tile_in = (start-off_pre,start+tile_sizes[i]+off_post) # src in input image / tile\n tile_out = (start,start+tile_sizes[i]) # dst in output image / s_dst\n tile_crop = (off_pre,-off_post) # crop of src for output / s_src\n\n yield x[to_slice(tile_in)], to_slice(tile_crop), to_slice(tile_out)\n start += tile_sizes[i]",
"def _assemble_tiles(self,images,X,Y,Z,C,T):\n self._buffer_supertile(X[0][0],X[0][1])\n \n if X[-1][0] - self._tile_x_offset > self._TILE_SIZE:\n split_ind = 0\n while X[split_ind][0] - self._tile_x_offset < self._TILE_SIZE:\n split_ind += 1\n else:\n split_ind = len(X)\n \n # Untile the data\n num_rows = Y[0][1] - Y[0][0]\n num_cols = X[0][1] - X[0][0]\n num_tiles = len(X)\n \n for ind in range(split_ind):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n if split_ind != num_tiles:\n self._buffer_supertile(X[-1][0],X[-1][1])\n for ind in range(split_ind,num_tiles):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n self._tile_last_column = c_max\n \n return True",
"def extract_tiles(image_sample, tile_size=100, step_size=100):\n # Potential fix: force all tiles to be same size, find an optimal tiling size.\n h, w = image_sample.shape\n rows = np.arange(0, h, step_size)\n cols = np.arange(0, w, step_size)\n g = np.meshgrid(cols, rows)\n cols_rows = list(zip(*(x.flat for x in g)))\n tiles = []\n for p in cols_rows:\n tiles.append(\n image_sample[\n p[1]: p[1] + tile_size,\n p[0]: p[0] + tile_size,\n ]\n )\n return tiles",
"def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles",
"def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)",
"def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))",
"def split_image(image, size):\n width, height = image.size[0], image.size[1]\n m, n = size\n tile_width, tile_height = int(width/m), int(height/n)\n # Image list\n imgs = []\n for j in range(m):\n for i in range(n):\n # Append cropped image\n imgs.append(image.crop((i*tile_width, j*tile_height,\n (i+1)*tile_width, (j+1)*tile_height)))\n return imgs",
"def split_tiles(module_data):\n raise NotImplementedError",
"def calculateTierSize(imageWidth, imageHeight, tileSize=256):\n tierSizeInTiles = []\n while (imageWidth > tileSize or imageHeight > tileSize):\n tileWidth = float(imageWidth) / tileSize\n tileHeight = float(imageHeight) / tileSize\n tierSizeInTiles.append([math.ceil(tileWidth), math.ceil(tileHeight)])\n tileSize += tileSize\n tierSizeInTiles.append([1.0, 1.0]) \n tierSizeInTiles.reverse() \n return tierSizeInTiles",
"def _get_tiles(self,X,Y,Z,C,T):\n \n self._buffer_supertile(X[0][0],X[0][1])\n \n if X[-1][0] - self._tile_x_offset > self._TILE_SIZE:\n shift_buffer = True\n split_ind = 0\n while X[split_ind][0] - self._tile_x_offset < self._TILE_SIZE:\n split_ind += 1\n else:\n shift_buffer = False\n split_ind = len(X)\n \n # Tile the data\n num_rows = Y[0][1] - Y[0][0]\n num_cols = X[0][1] - X[0][0]\n num_tiles = len(X)\n images = np.zeros((num_tiles,num_rows,num_cols,1),dtype=self.pixel_type())\n \n for ind in range(split_ind):\n images[ind,:,:,0] = self._pixel_buffer[Y[ind][0]-self._tile_y_offset:Y[ind][1]-self._tile_y_offset,\n X[ind][0]-self._tile_x_offset:X[ind][1]-self._tile_x_offset]\n \n if split_ind != num_tiles:\n self._buffer_supertile(X[-1][0],X[-1][1])\n for ind in range(split_ind,num_tiles):\n images[ind,:,:,0] = self._pixel_buffer[Y[ind][0]-self._tile_y_offset:Y[ind][1]-self._tile_y_offset,\n X[ind][0]-self._tile_x_offset:X[ind][1]-self._tile_x_offset]\n \n return images",
"def splitCanvasIntoTiles(self, canvas_img_file, canvas_img, paste_img_files,\n paste_label_dir, save_img_dir, save_label_dir,\n width_multiple, height_multiple, canvas_idx, out_labels):\n\n # Go through and get tile count for width\n width_tiles = int(math.ceil(width_multiple))\n height_tiles = int(math.ceil(height_multiple))\n\n if width_tiles < 1:\n width_tiles = 1\n if height_tiles < 1:\n height_tiles = 1\n\n canvas_width = canvas_img.shape[1]\n canvas_height = canvas_img.shape[0]\n\n tile_width = self.final_img_width\n tile_height = self.final_img_height\n rotate_deg = 0\n\n tile_idx = 1\n for width_idx in range(0, width_tiles):\n for height_idx in range(0, height_tiles):\n cut_x = width_idx * tile_width\n cut_y = height_idx * tile_height\n\n if cut_x + tile_width > canvas_width:\n cut_x = canvas_width - tile_width\n rotate_deg = self.getForcedRandomRotationValue()\n\n if cut_y + tile_height > canvas_height:\n cut_y = canvas_height - tile_height\n rotate_deg = self.getForcedRandomRotationValue()\n\n cut_canvas_img = canvas_img[cut_y:(cut_y+tile_height), cut_x:(cut_x+tile_width)].copy()\n\n flipped_canvas_img = utils.randomFlipImage(cut_canvas_img)\n if rotate_deg != 0:\n rotated_canvas_img = self.rotateCanvasImage(flipped_canvas_img, rotate_deg)\n\n # This fills in any black spots from rotation with pixels from the original flipped image.\n where = np.array(np.where(rotated_canvas_img))\n\n flipped_canvas_img[where[0], where[1]] = rotated_canvas_img[where[0], where[1]]\n\n rotated_canvas_img = flipped_canvas_img\n else:\n rotated_canvas_img = flipped_canvas_img\n\n self.addPastedImages(canvas_img_file, rotated_canvas_img, paste_img_files,\n paste_label_dir, save_img_dir, save_label_dir, canvas_idx,\n tile_idx, out_labels)\n tile_idx += 1\n\n return tile_idx",
"def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile",
"def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))",
"def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles",
"def tiles2images(tiles: List[np.ndarray], im_shape: tuple, h: int, w: int):\n im_height, im_width, channels = im_shape\n n_h = math.ceil(im_height / h)\n n_w = math.ceil(im_width / w)\n im = []\n for y_index in range(n_h):\n im_row = tiles[y_index * n_w:(y_index + 1) * n_w]\n dw = im_width % w\n im_row[-1] = im_row[-1][:, -dw:]\n im_row = np.concatenate(im_row, axis=1)\n im.append(im_row)\n dh = im_height % h\n im[-1] = im[-1][-dh:, :]\n im = np.concatenate(im, axis=0)\n return im"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
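The row above pairs a "split by number of tiles" description with a thin wrapper that derives a tile size and delegates to split_image_into_tiles_of_size (shown in full among the negatives). A minimal, self-contained sketch of the same idea follows; the function name, the NumPy-based slicing and the example sizes are illustrative assumptions, not part of the dataset, and overlap handling is deliberately omitted.

import numpy as np

def split_by_ntiles_sketch(arr: np.ndarray, x_ntiles: int, y_ntiles: int) -> list:
    # Derive the tile size from the requested tile counts, as in the document above.
    img_h, img_w = arr.shape[-2], arr.shape[-1]
    tile_h, tile_w = img_h // y_ntiles, img_w // x_ntiles
    tiles = []
    for i in range(y_ntiles):          # rows of tiles
        for j in range(x_ntiles):      # columns of tiles
            tiles.append(arr[..., i * tile_h:(i + 1) * tile_h,
                                  j * tile_w:(j + 1) * tile_w])
    return tiles                       # remainder pixels (if any) are simply dropped

# Usage: a 4 x 6 grid of 100 x 100 tiles from a 400 x 600 single-channel image
tiles = split_by_ntiles_sketch(np.zeros((400, 600)), x_ntiles=6, y_ntiles=4)
assert len(tiles) == 24 and tiles[0].shape == (100, 100)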
Generates an array of ppxf_util.gaussian emission lines to be used as gas templates in PPXF. Generally, these templates represent the instrumental line spread function (LSF) at the set of wavelengths of each emission line. In this case, pPXF will return the intrinsic (i.e. astrophysical) dispersion of the gas lines. Alternatively, one can input FWHM_gal=0, in which case the emission lines are delta functions and pPXF will return a dispersion which includes both the instrumental and the intrinsic dispersion. Additional lines can be easily added by editing the code of this procedure, which is meant as a template to be modified by the user where needed. For accuracy the ppxf_util.gaussians are integrated over the pixel boundaries. This can be changed by setting `pixel`=False. The [OI], [OIII] and [NII] doublets are fixed at a theoretical flux ratio of ~3. The [OII] and [SII] doublets can be restricted to a physical range of ratios. The Balmer series can be fixed to the theoretically predicted decrement. | def emission_lines(logLam_temp, lamRange_gal, FWHM_gal, pixel=True,
tie_balmer=False, limit_doublets=False, vacuum=False):
if tie_balmer:
# Balmer decrement for Case B recombination (T=1e4 K, ne=100 cm^-3)
# Table 4.4 of Dopita & Sutherland 2003 https://www.amazon.com/dp/3540433627
# Balmer: Htheta Heta Hzeta Heps Hdelta Hgamma Hbeta Halpha
wave = np.array([3797.90, 3835.39, 3889.05, 3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
ratios = np.array([0.0530, 0.0731, 0.105, 0.159, 0.259, 0.468, 1, 2.86])
ratios *= wave[-2]/wave # Account for varying pixel size in Angstrom
emission_lines = gauss @ ratios
line_names = ['Balmer']
w = (wave > lamRange_gal[0]) & (wave < lamRange_gal[1])
line_wave = np.mean(wave[w]) if np.any(w) else np.mean(wave)
else:
# Use fewer lines here, as the weak ones are difficult to measure
# Balmer: Hdelta Hgamma Hbeta Halpha
line_wave = [4101.76, 4340.47, 4861.33, 6562.80] # air wavelengths
if vacuum:
line_wave = ppxf_util.air_to_vac(line_wave)
line_names = ['Hdelta', 'Hgamma', 'Hbeta', 'Halpha']
emission_lines = ppxf_util.gaussian(logLam_temp, line_wave, FWHM_gal, pixel)
if limit_doublets:
# The line ratio of this doublet lam3729/lam3726 is constrained by
# atomic physics to lie in the range 0.28--1.47 (e.g. fig.5.8 of
# Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/).
# We model this doublet as a linear combination of two doublets with the
# maximum and minimum ratios, to limit the ratio to the desired range.
# -----[OII]-----
wave = [3726.03, 3728.82] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[OII]3726_d1', '[OII]3726_d2']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
doublets = gauss @ [[1, 1], [0.28, 1.47]] # produces *two* doublets
emission_lines = np.column_stack([emission_lines, doublets])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
# The line ratio of this doublet lam6716/lam6731 is constrained by
# atomic physics to lie in the range 0.44--1.43 (e.g. fig.5.8 of
# Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/).
# We model this doublet as a linear combination of two doublets with the
# maximum and minimum ratios, to limit the ratio to the desired range.
# -----[SII]-----
wave = [6716.47, 6730.85] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[SII]6731_d1', '[SII]6731_d2']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
doublets = gauss @ [[0.44, 1.43], [1, 1]] # produces *two* doublets
emission_lines = np.column_stack([emission_lines, doublets])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
else:
# Here the doublets are free to have any ratio
# -----[OII]----- -----[SII]-----
wave = [3726.03, 3728.82, 6716.47, 6730.85] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
emission_lines = np.column_stack([emission_lines, gauss])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[OIII]-----
wave = [4958.92, 5006.84] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[OIII]5007_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[OI]-----
wave = [6300.30, 6363.67] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.33]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[OI]6300_d') # single template for this doublet
line_wave = np.append(line_wave, wave[0])
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[NII]-----
wave = [6548.03, 6583.41] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[NII]6583_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
#added by anja to ppxf_util.emission_lines version
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[NI]-----
wave = [5197.90, 5200.39] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.7]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[NI]5200_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
#----------------------
# Only include lines falling within the estimated fitted wavelength range.
#
w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1])
emission_lines = emission_lines[:, w]
line_names = line_names[w]
line_wave = line_wave[w]
print('Emission lines included in gas templates:')
print(line_names)
return emission_lines, line_names, line_wave | [
"def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template",
"def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n 
### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list",
"def gaussian(x, x0, I, HWHM_l, HWHM_r):\n# numerator = (x-x0)**2\n# denominator = 2*HWHM**2\n# return I*np.exp(-numerator/denominator)\n\n x_l = x[(x<=x0)]\n x_r = x[(x>x0)]\n \n numerator_l = (x_l-x0)**2 \n denominator_l = 2*HWHM_l**2\n y_l = I*np.exp(-numerator_l/denominator_l)\n \n numerator_r = (x_r-x0)**2 \n denominator_r = 2*HWHM_r**2 \n y_r = I*np.exp(-numerator_r/denominator_r)\n \n return np.hstack((y_l, y_r)) #return combination of both Gaussians",
"def make_templates(grism='G141', return_lists=False, fsps_templates=False,\n line_list=DEFAULT_LINE_LIST):\n\n from .multifit import MultiBeam\n\n if grism == 'G141': # WFC3/IR\n fwhm = 1100\n elif grism == 'G800L': # ACS/UVIS\n fwhm = 1400\n elif grism == 'G280': # WFC3/UVIS\n fwhm = 1500\n elif grism == 'GRISM': # WFIRST\n fwhm = 350\n else:\n fwhm = 700 # G102\n\n # Line complex templates\n t_complexes = utils.load_templates(fwhm=fwhm, line_complexes=True,\n fsps_templates=fsps_templates)\n\n # Individual lines\n # line_list = ['SIII', 'SII', 'Ha', 'OI-6302', 'OIII', 'Hb',\n # 'OIII-4363', 'Hg', 'Hd', 'NeIII', 'OII', 'MgII']\n\n t_lines = utils.load_templates(fwhm=fwhm, line_complexes=False,\n full_line_list=line_list,\n fsps_templates=fsps_templates)\n\n if return_lists:\n return t_complexes, t_lines\n else:\n # Save them to a file\n np.save('templates_{0}.npy'.format(fwhm), [t_complexes, t_lines])\n print('Wrote `templates_{0}.npy`'.format(fwhm))",
"def gaussian_expe(n_expe, n_arms, T, methods, param_dic, labels, colors, doplot=True, track_ids=False):\n mu = np.random.normal(0, 1, size=n_expe*n_arms).reshape(n_expe, n_arms)\n sigma = np.ones(n_arms*n_expe).reshape(n_expe, n_arms)\n P = [[[m[i], s[i]] for i in range(n_arms)] for m, s in zip(mu, sigma)]\n models = [GaussianMAB(p) for p in P]\n if track_ids:\n for m in models:\n m.store_IDS = True\n results = storeRegret(models, methods, param_dic, n_expe, T)\n if doplot:\n plotRegret(labels, results['mean_regret'], colors, 'Gaussian rewards')\n if track_ids:\n plot_IDS_results(T, n_expe, results['IDS_results'])\n return results",
"def array_templates(templates, max_R=5000):\n from grizli.utils_c.interp import interp_conserve_c\n \n wave = np.unique(np.hstack([templates[t].wave for t in templates]))\n clipsum, iter = 1, 0\n while (clipsum > 0) & (iter < 10):\n clip = np.gradient(wave)/wave < 1/max_R\n idx = np.arange(len(wave))[clip]\n wave[idx[::2]] = np.nan\n wave = wave[np.isfinite(wave)]\n iter += 1\n clipsum = clip.sum()\n #print(iter, clipsum)\n \n NTEMP = len(templates)\n flux_arr = np.zeros((NTEMP, len(wave)))\n \n for i, t in enumerate(templates):\n flux_arr[i,:] = interp_conserve_c(wave, templates[t].wave,\n templates[t].flux)\n \n is_line = np.array([t.startswith('line ') for t in templates])\n \n return wave, flux_arr, is_line",
"def Build_Background_Template(numBGPhotons, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS = False,outputSize=300,angularSize=10.0):\r\n \r\n numPhotons = numBGPhotons\r\n numHigh = int(round(.32 *numPhotons))\r\n numLow = numPhotons-numHigh\r\n \r\n bgEventsX = []\r\n bgEventsY = []\r\n \r\n bgTemplate = bgTemplate *(1.0-flatLevel) + flatLevel\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(bgTemplate,'jet',vmin=0, vmax=1)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n app=float(angularSize)/float(outputSize) # angle per pixel\r\n for i in range(numPhotons):\r\n x ,y = 0, 0\r\n while True:\r\n x,y = np.random.randint(0,high = len(bgTemplate)),np.random.randint(0,high = len(bgTemplate))\r\n if (np.random.ranf() < bgTemplate[y][x]):\r\n break\r\n # Shift and scale coordinates to output map and then compute PSF modification to the position.\r\n psfMod = PSF_Spread(PSFTableFront,PSFTableBack, HESS =HESS)\r\n dx = psfMod[0]*math.cos(psfMod[1]) # PSF shift in deg\r\n dy = psfMod[0]*math.sin(psfMod[1]) # PSF shift in deg\r\n \r\n bgEventsX.append((x-outputSize/2.0)*app + dx)\r\n bgEventsY.append((y-outputSize/2.0)*app + dy)\r\n \r\n return (bgEventsX, bgEventsY)",
"def make_alf_template():\n import alf.alf\n import fsps\n \n ssp = alf.alf.Alf()\n \n sp = fsps.StellarPopulation(zcontinuous=1)\n sp.params['logzsol'] = 0.2\n\n # Alf\n m = ssp.get_model(in_place=False, logage=0.96, zh=0.2, mgh=0.2)\n \n # FSPS\n w, spec = sp.get_spectrum(tage=10**0.96, peraa=True)\n \n # blue\n blue_norm = spec[w > 3600][0] / m[ssp.wave > 3600][0]\n red_norm = spec[w > 1.7e4][0] / m[ssp.wave > 1.7e4][0]\n \n templx = np.hstack([w[w < 3600], ssp.wave[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], w[w > 1.7e4]])\n temply = np.hstack([spec[w < 3600]/blue_norm, m[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], spec[w > 1.7e4]/red_norm])\n \n np.savetxt('alf_SSP.dat', np.array([templx, temply]).T, fmt='%.5e', header='wave flux\\nlogage = 0.96\\nzh=0.2\\nmgh=0.2\\nfsps: w < 3600, w > 1.7e4')",
"def gaussianfg(ctx):\n\n import numpy as np\n\n from cora.core import skysim\n from cora.util import hputil\n from cora.foreground import galaxy\n\n fsyn = galaxy.FullSkySynchrotron()\n fpol = galaxy.FullSkyPolarisedSynchrotron()\n\n # Set frequency parameters\n fsyn.frequencies = ctx.obj.freq\n nfreq = len(fsyn.frequencies)\n\n nside = ctx.obj.nside\n lmax = 3 * nside\n npol = 4 if ctx.obj.full_pol else 1\n\n cv_fg = np.zeros((lmax+1, npol, nfreq, npol, nfreq))\n\n cv_fg[:, 0, :, 0, :] = skysim.clarray(fsyn.angular_powerspectrum, lmax, fsyn.nu_pixels)\n\n if ctx.obj.full_pol:\n cv_fg[:, 1, :, 1, :] = skysim.clarray(fpol.angular_powerspectrum, lmax, fsyn.nu_pixels)\n cv_fg[:, 2, :, 2, :] = skysim.clarray(fpol.angular_powerspectrum, lmax, fsyn.nu_pixels)\n\n cv_fg = cv_fg.reshape(lmax+1, npol*nfreq, npol*nfreq)\n\n alms = skysim.mkfullsky(cv_fg, nside, alms=True).reshape(npol, nfreq, lmax+1, lmax+1)\n alms = alms.transpose((1, 0, 2, 3))\n\n maps = hputil.sphtrans_inv_sky(alms, nside)\n write_map(ctx.obj.filename, maps, fsyn.frequencies, ctx.obj.freq_width, ctx.obj.include_pol)",
"def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):\n abund_offset_range = (-1,1)\n species_offset_range = (-1,1)\n ew_dist_width = 30\n ep_range = (0,12)\n loggf_range = (-6.0,0.5) \n \n theta = 5040.0/teff\n \n # # TODO: remove this calculation???\n # # # fix to a particular line which should be by the turnoff\n # # # Fe I 88.2 2.22 EP -4.2 loggf\n # loggf = -4.2\n # ep = 2.22\n # x_turnoff = abund_standard['Fe']['abundance']+loggf-theta*ep\n # x-x_turnoff = -5\n # \n # based on the model abundance used in the cog file\n xnorm = -6.5\n ynorm = -2.0\n \n # read in the parameters \n if species_params is None:\n species_params = _elements_params\n el_params = species_params.copy()\n for el,pars in _elements_params.items():\n el_params.setdefault(el,pars)\n \n\n coeffs, knots, centers, scales = np.array(cog_ppol_hf[\"coefficients\"]), np.array(cog_ppol_hf[\"knots\"]), np.array(cog_ppol_hf[\"centers\"]), np.array(cog_ppol_hf[\"scales\"])\n iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)\n iqp_deriv = iqp.deriv()\n \n # calc the linelist\n linelist = {}\n element_abund = {}\n for species,pars in list(species_params.items()):\n wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])\n solar_abund_offset = np.random.uniform(*abund_offset_range)\n \n # get the abundance for this element, ignore species\n abund = abund_standard[species]['abundance']+solar_abund_offset\n element_abund.setdefault(abund_standard[species]['element'],abund) \n \n species_offset = np.random.uniform(*species_offset_range) \n species_abund = element_abund[abund_standard[species]['element']]+species_offset\n species_abund = np.repeat(species_abund,pars['n'])\n \n # generate the parameters for the lines\n spe_col = np.repeat(abund_standard.species_id(species),pars['n'])\n ew = np.random.exponential(ew_dist_width,pars['n'])\n ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])\n loggf = np.random.uniform(loggf_range[0],loggf_range[1],pars['n'])\n \n # calculate the line strengths from the COG\n #x = species_abund + loggf - theta*ep + xnorm\n logrw = np.log10(ew/wvs)\n x = iqp.inverse(logrw-ynorm)\n loggf = species_abund - x - theta*ep + xnorm\n\n # estimate the lorzentian and gaussian widths for this line\n lorz_width = estimate_lorentz_width(x, iqp_deriv)\n gauss_width = np.repeat(99.9,pars['n'])\n \n # add to the linelist\n linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]\n \n if filepath is not None:\n # save moog file\n f = open(filepath,'w')\n header = \"# Fake linelist created THIMBLES with teff {} # \"\n header += \"wvs species ep loggf ew gauss_width lorz_width # \"\n header += \"guassian and lorentzian widths are estimate\\n\"\n f.write(header.format(teff))\n \n fmt = \"{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}\"+20*\" \"+\" {4:>9.2f}\"+10*\" \"\n fmt += \" {5:>9.2f} {6:>9.2f} FAKE_LINE\\n\"\n for species,ll in linelist.items():\n for row in ll:\n f.write(fmt.format(*row)) \n return linelist",
"def generate_gexf(G, encoding='utf-8',prettyprint=True,version='1.1draft'):\n writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,\n version=version)\n writer.add_graph(G)\n for line in str(writer).splitlines():\n yield line",
"def gp_ex_fixed_period():\n start, end = -4.0, 0.0\n X = infpy.gp_1D_X_range( start, end, 1.3 )\n # X = [ ]\n # X = [ [ 0.0 ] ]\n # X = [ [ 0.0 ], [ 1.0 ] ]\n # X = [ [ 0.0 ], [ -1.0 ], [ -2.0 ], [ -3.0 ], ]\n y = numpy.asarray( [ math.sin( 2.0 * math.pi * x[0] ) for x in X ] )\n # pylab.plot( [ x[0] for x in X ], [ y1 for y1 in y ] )\n # pylab.show()\n LN = infpy.LogNormalDistribution\n k = (\n infpy.FixedPeriod1DKernel( 1.0 )\n + infpy.noise_kernel( 0.1 )\n )\n gp = infpy.GaussianProcess( X, y, k )\n sample_X = infpy.gp_1D_X_range( start, end, 0.03 )\n y = infpy.gp_sample_from( gp, sample_X )\n ( y, V_f_star, log_p_y_given_X ) = gp.predict( sample_X )\n infpy.gp_plot_prediction( sample_X, y )\n infpy.gp_title_and_show( gp )",
"def generate_6D_Gaussian_bunch_matched(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z=None, epsn_z=None\n ):\n if self.longitudinal_mode == 'linear':\n assert(sigma_z is not None)\n bunch = self.generate_6D_Gaussian_bunch(n_macroparticles, intensity,\n epsn_x, epsn_y, sigma_z)\n elif self.longitudinal_mode == \"non-linear\":\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.RF_bucket_distribution(\n self.longitudinal_map.get_bucket(gamma=self.gamma),\n sigma_z=sigma_z,\n epsn_z=epsn_z,\n ),\n ).generate()\n else:\n raise ValueError('Unknown longitudinal mode!')\n\n return bunch",
"def plotting_gaussian_curves():\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n x = np.arange(1, 100)\n xbar = 50.0\n s = 15.0\n a = 20.0\n c = 0.0\n singlecurve = zoom_and_gauss_general.gauss(x, xbar, s, a, c)\n\n # Balmer Emission Lines\n x = np.arange(1, 100)\n xbar = 50.0\n s1 = 15.0\n a1 = 20.0\n s2 = 25.0\n a2 = -2.0\n c = 0.0\n\n doublecurve = zoom_and_gauss_general.double_gauss(x, xbar,\n s1, a1, c, s2, a2)\n\n positive = zoom_and_gauss_general.gauss(x, xbar, s1, a1, doublecurve[0])\n negative = zoom_and_gauss_general.gauss(x, xbar, s2, a2, c)\n\n # Oxygen Two Line\n x = np.arange(1, 100)\n xbar = 40.0\n s1 = 8.0\n a1 = 20.0\n s2 = 8.0\n a2 = 30.0\n c = 0.0\n\n oxycurve = oxy2_gauss(x, xbar, s1, a1, c, s2, a2)\n\n xbar3 = 40.0\n xbar4 = 63.5\n s3 = 8.0\n a3 = 20.0\n\n s4 = 8.0\n a4 = 30.0\n\n positive1 = zoom_and_gauss_general.gauss(x, xbar3, s3, a3, oxycurve[0])\n positive2 = zoom_and_gauss_general.gauss(x, xbar4, s4, a4, oxycurve[0])\n\n ax1.plot(x, singlecurve)\n ax2.plot(x, doublecurve)\n ax2.plot(x, positive, 'r', linestyle='--')\n ax2.plot(x, negative, 'g', linestyle='--')\n ax3.plot(x, oxycurve)\n ax3.plot(x, positive1, 'r', linestyle='--')\n ax3.plot(x, positive2, 'r', linestyle='--', )\n ax1.set_yticklabels([])\n ax2.set_yticklabels([])\n ax1.set_ylim(-3, 25.5)\n ax2.set_ylim(-3, 20.5)\n ax3.set_ylim(-3, 30.5)\n ax3.set_yticklabels([])\n ax1.set_title('Single Gaussian Curve')\n ax2.set_title('Balmer Fitting with Gaussian Curves')\n ax3.set_title('[OII] Fitting with Gaussian Curves')\n txt1 = '(A)'\n txt2 = '(B)'\n txt3 = '(C)'\n ax1.annotate(txt1, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n ax2.annotate(txt2, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n ax3.annotate(txt3, [0.95, 0.95], xycoords='axes fraction', va='top',\n ha='right', fontsize='10')\n\n plt.show()",
"def growth_curve(userinputs, filter, catalog):\n logging.info('Running growth curve analysis on {}'.format(catalog))\n # Load the photometry results from the catalog (that is returned by the phot\n # function)\n aper_st, flux_st = np.loadtxt(catalog, unpack=True, usecols=(0,3))\n\n #Growth curve is only done on the ref image so we get the filter from userinp.\n ref_filter = filter\n\n ratio_st = np.empty(len(aper_st))\n\n #number of apertures\n naper = 20\n\n # Calculate the number of stars, make sure it is an integer\n nstar = int(len(aper_st)/naper)\n logging.info('Number of stars used: {}'.format(nstar))\n aper_ind = naper - 1\n\n for k in range(nstar):\n\n for i in range(naper):\n\n ratio_st[i + k*naper] = flux_st[i + k*naper]/flux_st[aper_ind + k*naper]\n\n\n # Find median ratio at each aperture between all the stars and all the clusters\n med_st = np.empty(naper)\n\n for i in range(naper):\n\n med_st[i] = np.median(ratio_st[i::naper])\n\n\n # Plot growth curves\n logging.info('Creating Growth curve plots')\n fig = plt.figure(figsize = (7,7))\n\n aper_x = np.arange(naper) + 1\n\n for i in range(nstar):\n\n ratio_y = ratio_st[i*naper:(i + 1)*naper]\n plt.plot(aper_x, ratio_y, 'y-')\n plt.annotate(str(i + 1), xy=(8.0, ratio_y[7]),\n horizontalalignment='left', verticalalignment='top', fontsize=6)\n\n\n plt.plot(aper_x, med_st, 'r-' , linewidth=4.0)\n plt.hlines(0.5, 0, 20, color='black', linewidth=2, zorder=10)\n plt.vlines(4, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(5, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(6, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n\n plt.ylabel('Normalized Flux ' + ref_filter.upper())\n plt.xlabel('Radius (pix)')\n plt.xlim(1,20)\n plt.minorticks_on()\n\n fig.savefig(userinputs['OUTDIR'] + '/plots/plot_growth_curve_{}.pdf'.format(ref_filter))",
"def generate_6D_Gaussian_bunch(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z\n ):\n if self.longitudinal_mode == \"linear\":\n check_inside_bucket = lambda z, dp: np.array(len(z) * [True])\n Q_s = self.longitudinal_map.Q_s\n elif self.longitudinal_mode == \"non-linear\":\n bucket = self.longitudinal_map.get_bucket(\n gamma=self.gamma, mass=self.mass, charge=self.charge\n )\n check_inside_bucket = bucket.make_is_accepted(margin=0.05)\n Q_s = bucket.Q_s\n else:\n raise NotImplementedError(\"Something wrong with self.longitudinal_mode\")\n\n eta = self.longitudinal_map.alpha_array[0] - self.gamma ** -2\n beta_z = np.abs(eta) * self.circumference / 2.0 / np.pi / Q_s\n sigma_dp = sigma_z / beta_z\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.cut_distribution(\n generators.gaussian2D_asymmetrical(sigma_u=sigma_z, sigma_up=sigma_dp),\n is_accepted=check_inside_bucket,\n ),\n ).generate()\n\n return bunch",
"def gaussian_line(w, w0, sigma):\n return 2/sigma*(np.log(2)/np.pi)**0.5*np.exp(\n -4*np.log(2)*((w-w0)/sigma)**2)",
"def GenerateHSGrid(lrho,ltheta,lphi):\n lhs = []\n for rho in lrho:\n for theta in ltheta:\n for phi in lphi:\n hc = geometry.Geometry(rho,theta,phi)\n lhs.append(hc)\n return lhs",
"def create_photom_fgs_image(value):\n\n photmjsr = [value]\n uncertainty = [0.0]\n\n dtype = np.dtype([('photmjsr', '<f4'),\n ('uncertainty', '<f4')])\n reftab = np.array(list(zip(photmjsr, uncertainty)),\n dtype=dtype)\n ftab = datamodels.FgsImgPhotomModel(phot_table=reftab)\n\n return ftab"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combine SSP traces to obtain mass/luminosity-weighted properties | def weighted_traces(parnames, trace, nssps):
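    # per-SSP weights w_1..w_nssps taken from the trace; used below to average each parameter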
weights = np.array([trace["w_{}".format(i+1)].data for i in range(
nssps)])
wtrace = []
for param in parnames:
data = np.array([trace["{}_{}".format(param, i+1)].data
for i in range(nssps)])
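        # weighted average of this parameter over the SSP components, using the SSP weights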
t = np.average(data, weights=weights, axis=0)
wtrace.append(Table([t], names=["{}_weighted".format(param)]))
return hstack(wtrace) | [
"def modifyTraces(self,traces,samplerate,noiseinfo,smodel):\n noiseshape,noise = noiseinfo\n newtraces = []\n for trace in traces:\n sampledtrace = self.sampleTrace(trace,smodel,samplerate)\n if noise != 0.0:\n newtraces.append(self.makeTracePartial(sampledtrace,noiseinfo,smodel))\n else:\n newtraces.append(sampledtrace)\n return newtraces",
"def weighted_sum_extraction(cutout, trace, psf, ron = 12, gain = 1.2):\n ###NEW VERSION BELOW\n # width = len(cutout[0]) #we have square cutout\n # #buffer area on either ends of the trace\n # buffer = int(round(0.85*slit_length/2)) #imported from constant\n\n #width = len(cutout[0])\n spec = []\n var = []\n for i in range(len(trace)): #loop through x\n #print(i)\n #put psf at this location\n dim = np.array(cutout.shape) + np.array(psf.shape)\n #print(dim)\n weight = np.zeros( dim) #padded, to be cropped later\n\n #case where trace i is not in the image\n if trace[i] < 0 or trace[i] > cutout.shape[0]:\n spec += [0]\n var += [0]\n else:\n x = i + int(psf.shape[1]//2)\n #print(trace[i], psf.shape[0]//2)\n y = int(trace[i] + psf.shape[0]//2)\n #print(i, x, y - int(psf.shape[0]//2), y + int(psf.shape[0]//2)+1, np.shape(weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1]))\n weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1] = psf\n weight = weight[ int(psf.shape[0]//2): int(-psf.shape[0]//2), int(psf.shape[1]//2): int(-psf.shape[1]//2)]\n #print(weight.shape, cutout.shape)\n #plt.imshow(weight*cutout,origin = 'lower')\n #plt.show()\n\n spec += [np.sum(weight * cutout)/np.sum(weight)]\n #TODO: Is the variance calculation correct? Might need another weighted term. \n var += [np.sum(weight * (cutout/gain + (ron/gain)**2))] #variance assuming readout noise and photon noise\n\n return np.array(spec[::-1]), np.array(var[::-1]) #flip so long wavelenght is to the right",
"def stack_all(st1, st2, pws=False):\n\n print()\n print('Stacking ALL traces in streams')\n\n # Copy stats from stream\n str_stats = st1[0].stats\n\n # Initialize arrays\n tmp1 = np.zeros(len(st1[0].data))\n tmp2 = np.zeros(len(st2[0].data))\n weight1 = np.zeros(len(st1[0].data), dtype=complex)\n weight2 = np.zeros(len(st2[0].data), dtype=complex)\n\n # Stack all traces\n for tr in st1:\n tmp1 += tr.data\n hilb1 = hilbert(tr.data)\n phase1 = np.arctan2(hilb1.imag, hilb1.real)\n weight1 += np.exp(1j*phase1)\n\n for tr in st2:\n tmp2 += tr.data\n hilb2 = hilbert(tr.data)\n phase2 = np.arctan2(hilb2.imag, hilb2.real)\n weight2 += np.exp(1j*phase2)\n\n # Normalize\n tmp1 = tmp1/np.float(len(st1))\n tmp2 = tmp2/np.float(len(st2))\n\n # Phase-weighting\n if pws:\n weight1 = weight1/np.float(len(st1))\n weight2 = weight2/np.float(len(st2))\n weight1 = np.real(abs(weight1))\n weight2 = np.real(abs(weight2))\n else:\n weight1 = np.ones(len(st1[0].data))\n weight2 = np.ones(len(st1[0].data))\n\n # Put back into traces\n stack1 = Trace(data=weight1*tmp1,header=str_stats)\n stack2 = Trace(data=weight2*tmp2,header=str_stats)\n\n return stack1, stack2",
"def spindle_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n\n dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])\n self.spindle_events = {}\n self.spindle_rejects = {}",
"def _TraceFormatting(PDTrace, PDFETMap, MaxPDValue, AvgRange=25, FWHM=4):\n MeasuredOutputPulseShape=[]\n WeightList=[math.exp(-4*math.log(2)*((ii+1-round(AvgRange/2.))/FWHM)**2) for ii in range(AvgRange)]\n WSum = sum(WeightList)\n MX , B = PDFETMap\n for FETNo in range(140):\n Loc = round(MX*FETNo + B)\n WSample=sum([PDTrace[int(Loc+(ii+1-round(AvgRange/2.)))]*WeightList[ii] for ii in range(AvgRange)])/WSum\n MeasuredOutputPulseShape+=[WSample/MaxPDValue]\n return MeasuredOutputPulseShape",
"def _merge_sounding(self, parts):\n merged = {'STID': parts['STID'],\n 'STNM': parts['STNM'],\n 'SLAT': parts['SLAT'],\n 'SLON': parts['SLON'],\n 'SELV': parts['SELV'],\n 'STAT': parts['STAT'],\n 'COUN': parts['COUN'],\n 'DATE': parts['DATE'],\n 'TIME': parts['TIME'],\n 'PRES': [],\n 'HGHT': [],\n 'TEMP': [],\n 'DWPT': [],\n 'DRCT': [],\n 'SPED': [],\n }\n\n # Number of parameter levels\n num_man_levels = len(parts['TTAA']['PRES']) if 'TTAA' in parts else 0\n num_man_wind_levels = len(parts['PPAA']['PRES']) if 'PPAA' in parts else 0\n num_trop_levels = len(parts['TRPA']['PRES']) if 'TRPA' in parts else 0\n num_max_wind_levels = len(parts['MXWA']['PRES']) if 'MXWA' in parts else 0\n num_sigt_levels = len(parts['TTBB']['PRES']) if 'TTBB' in parts else 0\n num_sigw_levels = len(parts['PPBB']['SPED']) if 'PPBB' in parts else 0\n num_above_man_levels = len(parts['TTCC']['PRES']) if 'TTCC' in parts else 0\n num_above_trop_levels = len(parts['TRPC']['PRES']) if 'TRPC' in parts else 0\n num_above_max_wind_levels = len(parts['MXWC']['SPED']) if 'MXWC' in parts else 0\n num_above_sigt_levels = len(parts['TTDD']['PRES']) if 'TTDD' in parts else 0\n num_above_sigw_levels = len(parts['PPDD']['SPED']) if 'PPDD' in parts else 0\n num_above_man_wind_levels = len(parts['PPCC']['SPED']) if 'PPCC' in parts else 0\n\n total_data = (num_man_levels\n + num_man_wind_levels\n + num_trop_levels\n + num_max_wind_levels\n + num_sigt_levels\n + num_sigw_levels\n + num_above_man_levels\n + num_above_trop_levels\n + num_above_max_wind_levels\n + num_above_sigt_levels\n + num_above_sigw_levels\n + num_above_man_wind_levels\n )\n if total_data == 0:\n return None\n\n # Check SIG wind vertical coordinate\n # For some reason, the pressure data can get put into the\n # height array. Perhaps this is just a artifact of Python,\n # as GEMPAK itself just uses array indices without any\n # names involved. 
Since the first valid pressure of the\n # array will be negative in the case of pressure coordinates,\n # we can check for it and place data in the appropriate array.\n ppbb_is_z = True\n if num_sigw_levels:\n if 'PRES' in parts['PPBB']:\n ppbb_is_z = False\n else:\n for z in parts['PPBB']['HGHT']:\n if z != self.prod_desc.missing_float and z < 0:\n ppbb_is_z = False\n parts['PPBB']['PRES'] = parts['PPBB']['HGHT']\n break\n\n ppdd_is_z = True\n if num_above_sigw_levels:\n if 'PRES' in parts['PPDD']:\n ppdd_is_z = False\n else:\n for z in parts['PPDD']['HGHT']:\n if z != self.prod_desc.missing_float and z < 0:\n ppdd_is_z = False\n parts['PPDD']['PRES'] = parts['PPDD']['HGHT']\n break\n\n # Process surface data\n if num_man_levels < 1:\n merged['PRES'].append(self.prod_desc.missing_float)\n merged['HGHT'].append(self.prod_desc.missing_float)\n merged['TEMP'].append(self.prod_desc.missing_float)\n merged['DWPT'].append(self.prod_desc.missing_float)\n merged['DRCT'].append(self.prod_desc.missing_float)\n merged['SPED'].append(self.prod_desc.missing_float)\n else:\n merged['PRES'].append(parts['TTAA']['PRES'][0])\n merged['HGHT'].append(parts['TTAA']['HGHT'][0])\n merged['TEMP'].append(parts['TTAA']['TEMP'][0])\n merged['DWPT'].append(parts['TTAA']['DWPT'][0])\n merged['DRCT'].append(parts['TTAA']['DRCT'][0])\n merged['SPED'].append(parts['TTAA']['SPED'][0])\n\n merged['HGHT'][0] = merged['SELV']\n\n first_man_p = self.prod_desc.missing_float\n if num_man_levels >= 1:\n for mp, mt, mz in zip(parts['TTAA']['PRES'],\n parts['TTAA']['TEMP'],\n parts['TTAA']['HGHT']):\n if self.prod_desc.missing_float not in [\n mp,\n mt,\n mz\n ]:\n first_man_p = mp\n break\n\n surface_p = merged['PRES'][0]\n if surface_p > 1060:\n surface_p = self.prod_desc.missing_float\n\n if (surface_p == self.prod_desc.missing_float\n or (surface_p < first_man_p\n and surface_p != self.prod_desc.missing_float)):\n merged['PRES'][0] = self.prod_desc.missing_float\n merged['HGHT'][0] = self.prod_desc.missing_float\n merged['TEMP'][0] = self.prod_desc.missing_float\n merged['DWPT'][0] = self.prod_desc.missing_float\n merged['DRCT'][0] = self.prod_desc.missing_float\n merged['SPED'][0] = self.prod_desc.missing_float\n\n if (num_sigt_levels >= 1\n and self.prod_desc.missing_float not in [\n parts['TTBB']['PRES'][0],\n parts['TTBB']['TEMP'][0]\n ]):\n first_man_p = merged['PRES'][0]\n first_sig_p = parts['TTBB']['PRES'][0]\n if (first_man_p == self.prod_desc.missing_float\n or np.isclose(first_man_p, first_sig_p)):\n merged['PRES'][0] = parts['TTBB']['PRES'][0]\n merged['DWPT'][0] = parts['TTBB']['DWPT'][0]\n merged['TEMP'][0] = parts['TTBB']['TEMP'][0]\n\n if num_sigw_levels >= 1:\n if ppbb_is_z:\n if (parts['PPBB']['HGHT'][0] == 0\n and parts['PPBB']['DRCT'][0] != self.prod_desc.missing_float):\n merged['DRCT'][0] = parts['PPBB']['DRCT'][0]\n merged['SPED'][0] = parts['PPBB']['SPED'][0]\n else:\n if self.prod_desc.missing_float not in [\n parts['PPBB']['PRES'][0],\n parts['PPBB']['DRCT'][0]\n ]:\n first_man_p = merged['PRES'][0]\n first_sig_p = abs(parts['PPBB']['PRES'][0])\n if (first_man_p == self.prod_desc.missing_float\n or np.isclose(first_man_p, first_sig_p)):\n merged['PRES'][0] = abs(parts['PPBB']['PRES'][0])\n merged['DRCT'][0] = parts['PPBB']['DRCT'][0]\n merged['SPED'][0] = parts['PPBB']['SPED'][0]\n\n # Merge MAN temperature\n bgl = 0\n qcman = []\n if num_man_levels >= 2 or num_above_man_levels >= 1:\n if merged['PRES'][0] == self.prod_desc.missing_float:\n plast = 2000\n else:\n plast = merged['PRES'][0]\n\n if 
num_man_levels >= 2:\n bgl, plast = self._merge_mandatory_temps(merged, parts, 'TTAA',\n qcman, bgl, plast)\n\n if num_above_man_levels >= 1:\n bgl, plast = self._merge_mandatory_temps(merged, parts, 'TTCC',\n qcman, bgl, plast)\n\n # Merge MAN wind\n if num_man_wind_levels >= 1 and num_man_levels >= 1 and len(merged['PRES']) >= 2:\n self._merge_mandatory_winds(merged, parts, 'PPAA', qcman)\n\n if num_above_man_wind_levels >= 1 and num_man_levels >= 1 and len(merged['PRES']) >= 2:\n self._merge_mandatory_winds(merged, parts, 'PPCC', qcman)\n\n # Merge TROP\n if num_trop_levels >= 1 or num_above_trop_levels >= 1:\n if merged['PRES'][0] != self.prod_desc.missing_float:\n pbot = merged['PRES'][0]\n elif len(merged['PRES']) > 1:\n pbot = merged['PRES'][1]\n if pbot < parts['TRPA']['PRES'][1]:\n pbot = 1050\n else:\n pbot = 1050\n\n if num_trop_levels >= 1:\n pbot = self._merge_tropopause_data(merged, parts, 'TRPA', pbot)\n\n if num_above_trop_levels >= 1:\n pbot = self._merge_tropopause_data(merged, parts, 'TRPC', pbot)\n\n # Merge SIG temperature\n if num_sigt_levels >= 1 or num_above_sigt_levels >= 1:\n if merged['PRES'][0] != self.prod_desc.missing_float:\n pbot = merged['PRES'][0]\n elif len(merged['PRES']) > 1:\n pbot = merged['PRES'][1]\n if pbot < parts['TTBB']['PRES'][1]:\n pbot = 1050\n else:\n pbot = 1050\n\n if num_sigt_levels >= 1:\n pbot = self._merge_significant_temps(merged, parts, 'TTBB', pbot)\n\n if num_above_sigt_levels >= 1:\n pbot = self._merge_significant_temps(merged, parts, 'TTDD', pbot)\n\n # Interpolate heights\n _interp_moist_height(merged, self.prod_desc.missing_float)\n\n # Merge SIG winds on pressure surfaces\n if not ppbb_is_z or not ppdd_is_z:\n if num_sigw_levels >= 1 or num_above_sigw_levels >= 1:\n if merged['PRES'][0] != self.prod_desc.missing_float:\n pbot = merged['PRES'][0]\n elif len(merged['PRES']) > 1:\n pbot = merged['PRES'][1]\n else:\n pbot = 0\n\n if num_sigw_levels >= 1 and not ppbb_is_z:\n pbot = self._merge_winds_pressure(merged, parts, 'PPBB', pbot)\n\n if num_above_sigw_levels >= 1 and not ppdd_is_z:\n pbot = self._merge_winds_pressure(merged, parts, 'PPDD', pbot)\n\n # Merge max winds on pressure surfaces\n if num_max_wind_levels >= 1 or num_above_max_wind_levels >= 1:\n if merged['PRES'][0] != self.prod_desc.missing_float:\n pbot = merged['PRES'][0]\n elif len(merged['PRES']) > 1:\n pbot = merged['PRES'][1]\n else:\n pbot = 0\n\n if num_max_wind_levels >= 1:\n pbot = self._merge_winds_pressure(merged, parts, 'MXWA', pbot)\n\n if num_above_max_wind_levels >= 1:\n _ = self._merge_winds_pressure(merged, parts, 'MXWC', pbot)\n\n # Interpolate height for SIG/MAX winds\n _interp_logp_height(merged, self.prod_desc.missing_float)\n\n # Merge SIG winds on height surfaces\n if ppbb_is_z or ppdd_is_z:\n nsgw = num_sigw_levels if ppbb_is_z else 0\n nasw = num_above_sigw_levels if ppdd_is_z else 0\n if (nsgw >= 1 and (parts['PPBB']['HGHT'][0] == 0\n or parts['PPBB']['HGHT'][0] == merged['HGHT'][0])):\n istart = 1\n else:\n istart = 0\n\n self._merge_winds_height(merged, parts, nsgw, nasw, istart)\n\n # Interpolate missing pressure with height\n _interp_logp_pressure(merged, self.prod_desc.missing_float)\n\n # Interpolate missing data\n _interp_logp_data(merged, self.prod_desc.missing_float)\n\n # Add below ground MAN data\n if merged['PRES'][0] != self.prod_desc.missing_float and bgl > 0:\n size = len(merged['PRES'])\n for ibgl in range(1, num_man_levels):\n press = parts['TTAA']['PRES'][ibgl]\n if press > merged['PRES'][0]:\n loc = size - 
bisect.bisect_left(merged['PRES'][1:][::-1], press)\n merged['PRES'].insert(loc, press)\n merged['TEMP'].insert(loc, parts['TTAA']['TEMP'][ibgl])\n merged['DWPT'].insert(loc, parts['TTAA']['DWPT'][ibgl])\n merged['DRCT'].insert(loc, parts['TTAA']['DRCT'][ibgl])\n merged['SPED'].insert(loc, parts['TTAA']['SPED'][ibgl])\n merged['HGHT'].insert(loc, parts['TTAA']['HGHT'][ibgl])\n size += 1\n\n # Add text data, if it is included\n if 'TXTA' in parts:\n merged['TXTA'] = parts['TXTA']['TEXT']\n if 'TXTB' in parts:\n merged['TXTB'] = parts['TXTB']['TEXT']\n if 'TXTC' in parts:\n merged['TXTC'] = parts['TXTC']['TEXT']\n if 'TXPB' in parts:\n merged['TXPB'] = parts['TXPB']['TEXT']\n\n return merged",
"def sum_spectra_weighted_ave(obj, **kwargs):\n \n if obj is None:\n return None\n\n # import the helper functions\n import hlr_utils\n\n # set up for working through data\n (result, res_descr) = hlr_utils.empty_result(obj)\n o_descr = hlr_utils.get_descr(obj)\n\n result = hlr_utils.copy_som_attr(result, res_descr, obj, o_descr)\n\n # Get the number of axis channels\n len_axis = len(obj[0])\n\n import nessi_list\n import SOM\n import utils\n\n # Empty SO for final spctrum\n so = SOM.SO()\n\n len_som = hlr_utils.get_length(obj)\n\n # Slice data, calculate weighted average and repackage spectra\n for i in xrange(len_axis):\n\n sliced_data = nessi_list.NessiList()\n sliced_data_err2 = nessi_list.NessiList()\n\n for j in xrange(len_som):\n obj1 = hlr_utils.get_value(obj, j, o_descr, \"all\")\n if i == 0 and j == 0:\n map_so = hlr_utils.get_map_so(obj, None, j)\n hlr_utils.result_insert(so, \"SO\", map_so, None, \"all\")\n \n sliced_data.append(obj1.y[i])\n sliced_data_err2.append(obj1.var_y[i])\n\n len_fit = len(sliced_data)\n\n value = utils.weighted_average(sliced_data, sliced_data_err2,\n 0, len_fit-1)\n so.y[i] = value[0]\n so.var_y[i] = value[1]\n\n hlr_utils.result_insert(result, res_descr, so, None, \"all\")\n\n return result",
"def spindle_attributes(self):\n # check if channel list exists\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n\n dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])\n self.spindle_events = {}\n self.spindle_rejects_t = {}\n self.spindle_rejects_f = {}",
"def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12",
"def _extract_weight(self, stamp):\n var_image = stamp.variance.array\n weight = var_image.copy()\n\n weight[:,:]=0\n\n zlogic = var_image > 0\n\n no_data_logic = numpy.logical_not(\n stamp.mask.array & stamp.mask.getPlaneBitMask(\"NO_DATA\")\n )\n w=numpy.where(zlogic & no_data_logic)\n\n if w[0].size > 0:\n medvar = numpy.median(var_image[w])\n weight[w] = 1.0/medvar\n\n return weight",
"def extract(self, traces):\n if len(traces) > 0:\n # Loop through traces\n sci_pack = SciencePack()\n\n for trace_index in range(len(traces)):\n # Initial checks\n history_headers = []\n new_header = self.header.copy()\n if self.science_object.lamp_count > 0:\n all_lamps = [np.array([]) for lamp_index in range(self.science_object.lamp_count)]\n else:\n log.warning('There are no lamps available for this Target.')\n\n # Extraction of data parsed as argument\n chebyshev, width, _ = traces[trace_index]\n # log.debug(\"Offset for Background: %s\", offset)\n if width % 2 == 1:\n half_width = int((width - 1) / 2)\n else:\n half_width = int(width / 2)\n\n # Define background subtraction zones\n # TODO(simon): Make the background subtraction zones to follow the trace\n background = []\n limits = []\n for i in range(len(self.region) - 1):\n if self.region[i] != self.region[i + 1]:\n log.debug(\"Found a mask limit.\")\n limits.append(i + 1)\n\n for limit_index in range(0, len(limits) - 1, 2):\n if limits[limit_index + 1] - limits[limit_index] == width and\\\n -1 not in self.region[limits[limit_index]:limits[limit_index + 1]]:\n background.append([limits[limit_index], limits[limit_index + 1]])\n hist = \"Defining background extraction zone [%s:%s] for target.\" % (limits[limit_index],\n limits[limit_index + 1])\n history_headers.append(hist)\n log.debug(hist)\n\n # Getting data shape\n data_y = self.data.shape[1]\n sci = []\n\n unsubtracted = []\n subtracted_background = []\n\n # Actual extraction\n # Avoid printing inside this loop since it goes through all the columns\n for i in range(data_y):\n # Define limits of aperture for spectrum\n x_min = int(round(chebyshev(i))) - half_width\n x_max = int(round(chebyshev(i))) + half_width\n apnum1 = '%s %s %s %s'%(trace_index + 1, 1, x_min, x_max)\n if i == int(data_y/2):\n hist = 'Aperture for extraction [%s:%s] at %s' % (x_min, x_max, i)\n history_headers.append(hist)\n log.debug(hist)\n log.debug('APNUM1 = %s', apnum1)\n\n # If there are background extraction zones here are prepared to subtract\n if len(background) > 1:\n background_part = []\n for back in background:\n part = np.sum(self.data[back[0]:back[1], i])\n # background_median = np.median(self.data[back[0]:back[1], i])\n # part2 = abs(x_max - x_min) * background_median\n background_part.append(part)\n background_data = np.mean(background_part)\n elif len(background) == 1:\n background_data = np.sum(self.data[background[0][0]:background[0][1], i])\n else:\n background_data = 0\n\n # Stored for debugging process\n # print background_data\n subtracted_background.append(background_data)\n unsubtracted.append(np.sum(self.data[x_min:x_max, i]))\n\n data_point = np.sum(self.data[x_min:x_max, i]) - background_data\n # print('DATA POINT ', np.sum(self.data[x_min:x_max, i]), background_data)\n sci.append(data_point)\n\n # Lamp extraction\n if len(self.lamps_data) > 0:\n for limit_index in range(self.science_object.lamp_count):\n lamp_point = np.sum(self.lamps_data[limit_index][x_min:x_max, i])\n all_lamps[limit_index] = np.append(all_lamps[limit_index], lamp_point)\n # Construction of extracted_object (to be returned)\n # extracted_object.append(np.array(sci))\n sci_pack.add_data(np.array(sci))\n # if int(trace_index + 1) > 1:\n # new_header.rename_keyword('APNUM1', 'APNUM%s' % str(int(trace_index + 1)))\n new_header['APNUM1'] = apnum1\n if history_headers != []:\n for hist in history_headers:\n new_header['HISTORY'] = hist\n # headers.append(new_header)\n sci_pack.add_header(new_header)\n if 
len(self.lamps_data) > 0:\n for lamp_index in range(self.science_object.lamp_count):\n # extracted_object.append(np.array(all_lamps[lamp_index]))\n sci_pack.add_lamp(np.array(all_lamps[lamp_index]))\n self.lamps_header[lamp_index]['APNUM1'] = apnum1\n sci_pack.add_lamp_header(self.lamps_header[lamp_index])\n # headers.append(self.lamps_header[lamp_index])\n #\n # Plot background subtraction\n if self.args.plots_enabled:\n # sci_sample = sci[int(len(sci) / 2.) - 30:int(len(sci) / 2.) + 30]\n fig = plt.figure(1)\n fig.canvas.set_window_title('Subtraction')\n plt.title('Background Subtraction\\n' + self.science_object.name)\n plt.xlabel('Pixels (dispersion direction)')\n plt.ylabel('Intensity (counts)')\n plt.xlim((0, len(sci)))\n # plt.yscale('log')\n plt.plot(sci, color='k', alpha=1, label='Background Subtracted')\n plt.plot(unsubtracted, color='k', alpha=0.5, label='Unsubtracted')\n plt.plot(subtracted_background, color='r', label='Background')\n # plt.plot(all_lamps[0],label='lamp 1')\n # plt.plot(all_lamps[1],label='lamp 2')\n plt.legend(loc='best')\n plt.tight_layout()\n # plt.savefig('background-subtraction_'\n # + self.science_object.name\n # + '_'\n # + str(int(chebyshev(10)))\n # + '.png', dpi=300)\n plt.show()\n\n return sci_pack\n else:\n log.error(\"There are no traces discovered here!!.\")\n return None",
"def coadd(self, sp, method='pixel'):\n\t\tif method == 'pixel':\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tif self.apply_sigma_mask:\n\t\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\telse:\n\t\t\t\tself.mask = []\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\telif method == 'wavelength':\n\t\t\tself_supers = copy.deepcopy(self)\n\t\t\tg = interpolate.interp1d(self.wave, self.flux)\n\t\t\tsp_supers = copy.deepcopy(sp)\n\t\t\tf = interpolate.interp1d(sp.wave, sp.flux)\n\t\t\t## 10x supersample the average difference of \n\t\t\t## the wavelength\n\t\t\t#step0 = np.mean(np.diff(self.wave))/10\n\t\t\t#self_supers.wave = np.arange(self.wave[0],\n\t\t\t#\tself.wave[-1],step0)\n\t\t\tself_supers.flux = g(self_supers.wave)\n\t\t\tself_supers.oriWave = np.arange(self.oriWave[0],\n\t\t\t\tself.oriWave[-1],(self.oriWave[-1]-self.oriWave[0])/10240)\n\t\t\tg1 = interpolate.interp1d(self.oriWave, self.oriFlux)\n\t\t\tself_supers.oriFlux = g1(self_supers.oriWave)\n\n\t\t\t#step = np.mean(np.diff(sp.wave))/10\n\t\t\t#sp_supers.wave = np.arange(sp.wave[0],sp.wave[-1],step)\n\t\t\t#sp_supers.flux = f(sp_supers.wave)\n\t\t\tsp_supers.oriWave = np.arange(sp.oriWave[0],\n\t\t\t\tsp.oriWave[-1],(sp.oriWave[-1]-sp.oriWave[0])/10240)\n\t\t\tf1 = interpolate.interp1d(sp.oriWave, sp.oriFlux)\n\t\t\tsp_supers.oriFlux = f1(sp_supers.oriWave)\n\n\t\t\t## calculate the max cross correlation value\n\t\t\tdef xcorr(a0,b0,shift):\n\t\t\t\t\"\"\"\n\t\t\t\tShift is the index number after supersampling \n\t\t\t\tboth of the spectra.\n\t\t\t\t\"\"\"\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]\n\n\t\t\txcorr_list = []\n\t\t\t## mask the ending pixels\n\t\t\tself_supers2 = copy.deepcopy(self_supers)\n\t\t\tsp_supers2 = copy.deepcopy(sp_supers)\n\t\t\tself_supers2.wave = self_supers2.wave[1000:-1000]\n\t\t\tself_supers2.flux = 
self_supers2.flux[1000:-1000]\n\t\t\tsp_supers2.wave = sp_supers2.wave[1000:-1000]\n\t\t\tsp_supers2.flux = sp_supers2.flux[1000:-1000]\n\t\t\tfor shift in np.arange(-10,10,1):\n\t\t\t\txcorr_list.append(xcorr(self_supers2,sp_supers2,shift))\n\n\t\t\t## dignostic plot for cc result\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(np.arange(-10,10,1),np.array(xcorr_list),'k-')\n\t\t\tplt.show()\n\t\t\tplt.close()\n\n\t\t\tstep = np.absolute(np.mean(np.diff(sp_supers.wave)))\n\t\t\tbestshift = np.arange(-10*step,10*step,step)[np.argmax(xcorr_list)]\n\t\t\tsp_supers.oriWave += bestshift\n\t\t\t## discard the points where the wavelength values\n\t\t\t## are larger\n\t\t\tcondition = (self.oriWave > sp_supers.oriWave[0])\\\n\t\t\t& (self.oriWave < sp_supers.oriWave[-1])\n\n\t\t\tself.oriFlux = self.oriFlux[np.where(condition)]\n\t\t\tself.oriWave = self.oriWave[np.where(condition)]\n\t\t\tself.oriNoise = self.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriNoise = sp_supers.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriFlux = np.array(smart.integralResample(xh=sp_supers.oriWave, \n\t\t\t\tyh=sp_supers.oriFlux, xl=self.oriWave))\n\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp_supers.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp_supers.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self",
"def weight_rollup(self, tech_factors, mission):\n\n pp_wts = {}\n ngroups = self.ngroups\n for i in range(ngroups):\n group = self.groups[i]\n if(group.type == 'turboshaft'):\n tech_fac = tech_factors.powerplant \n elif(group.type == 'battery'):\n tech_fac = tech_factors.battery \n else:\n quit('unknown powerplant group type')\n\n# calculate engine weight\n engine = group.weight(tech_fac, mission)\n key = 'group'+str(i) + 'engine'\n pp_wts[key] = engine['total']\n\n#add air intake anti-icing and accessories for fuel-burning engines\n if(group.type in ['turboshaft','piston']):\n anti_icing = group.icing_weight(tech_factors.anti_icing)\n key2 = 'group'+str(i) + 'intake_heater'\n pp_wts[key2] = anti_icing\n\n accessories = group.engine_accessories()\n for k,v in accessories.items():\n key3 = 'group'+str(i) + k\n pp_wts[key3] = v\n\n#accumulate total masses for this powerplant group and return dictionary\n pp_wts['total'] = dict_accumulation(pp_wts)\n\n# call summarization function for batteries \n self.summarize_batteries()\n \n return pp_wts",
"def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n 
sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)",
"def one_transition_spectrum_gauss(self,tr):\n \n \n fa = tr[\"fa\"] # Frequency axis\n HWHH = tr[\"HWHH\"] # Half width at the half hight (maximum)\n dd = tr[\"dd\"] # transition dipole strength\n rr = tr[\"rr\"] # transition dipole strength\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"]+self.rwa # frequency\n \n # LineShape = lambda p, x: (x/(p[1]*np.sqrt(2*m.pi))*np.exp(-0.5*((x-p[0])/p[1])**2))\n # broad = broad/np.sqrt(2*np.log(2))\n sigma = HWHH/numpy.sqrt(2*numpy.log(2))\n \n # x = ta.data\n \n data = (fa.data/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((fa.data-om)/sigma)**2))\n data_abs = dd*data\n data_CD = rr*data\n data_LD = ld*data\n \n return data_abs,data_CD, data_LD",
"def reduce_waveforms(self, waveforms):",
"def _combine_weights(self, axis_weights: AxisWeights) -> xr.DataArray:\n region_weights = reduce((lambda x, y: x * y), axis_weights.values())\n\n coord_keys = sorted(region_weights.dims) # type: ignore\n region_weights.name = \"_\".join(coord_keys) + \"_wts\" # type: ignore\n\n return region_weights",
"def get_scale_mutual_info(series, recovered_series):\n pass",
"def features_combine():\n\n\n\t# PROCESSING AUDIO"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
update learning rate of optimizers | def updatelearningrate(self, epoch):
self.lr = getlearningrate(epoch=epoch, opt=self.opt)
# update learning rate of model optimizer
if isinstance(self.model, list):
count = 0
for param_group in self.optimzer.param_groups:
# if type(model) is <list> then update modules with different learning rate
param_group['lr'] = self.lr
count += 1
# print ">>> count is:", count-1
else:
for param_group in self.optimzer.param_groups:
param_group['lr'] = self.lr | [
"def update_learning_rate(self):\n self._lr *= self._lr_decay\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self._lr\n\n # Display status message\n success_message = 'Learning rate updated to {:.1e}'.format(self._lr)\n print(success_format(success_message))",
"def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)",
"def learning_rate_adjustment(optimizer):\r\n for g in optimizer.param_groups:\r\n g['lr'] = g['lr'] / 1.1",
"def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return",
"def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)",
"def learning_rate_warmup(optimizer, epoch, args):\n\n lr = args.lr\n lr /= 10\n lr *= (epoch+1)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr",
"def adjust_learning_rate_warmup(optimizer, epoch, args):\n lr = args.lr * (epoch + 1) / args.warmup_epoch\n global current_lr\n current_lr = lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return current_lr",
"def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == \"constant_learning_rate\":\n constant_lr = optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == \"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)",
"def adjust_learning_rate(start_lr, optimizer, epoch, total_epoch_num):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n if epoch==total_epoch_num:\n lr = lr * 0.3\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr",
"def set_learning_rate(self, lr):\n self.lr = lr",
"def sgd_update(trainables, learning_rate=1e-2):\n for t in trainables:\n p = t.gradients[t]\n t.value = t.value - learning_rate*p",
"def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // args.lr_drop))\n print('lr= '+str(lr), flush=True)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr",
"def assign_learning_rate(session, lr_update, lr_placeholder, new_lr):\n session.run(lr_update, feed_dict={lr_placeholder: new_lr})",
"def update_weights(self):\n layers = self.layers\n for layer in layers:\n layer.weights += self.learning_rate * layer.d_weights\n self.layers = layers",
"def parameters_update(self, learning_rate):\n for i in range(1, self.L):\n #print('dW' + str(i))\n #print(self.grads['dW' + str(i)])\n #print(learning_rate * self.grads['dW' + str(i)])\n self.params['W' + str(i)] = self.params['W' + str(i)] - learning_rate * self.grads['dW' + str(i)]\n self.params['b' + str(i)] = self.params['b' + str(i)] - learning_rate * self.grads['db' + str(i)]",
"def adjust_learning_rate(optimizer, shrink_factor):\n\n print(\"\\nDECAYING learning rate.\")\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * shrink_factor\n print(\"The new learning rate is %f\\n\" % (optimizer.param_groups[0]['lr'],))",
"def adjust_learning_rate(optimizer, shrink_factor):\r\n\r\n print(\"\\nDECAYING learning rate.\")\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = param_group['lr'] * shrink_factor\r\n print(\"The new learning rate is %f\\n\" % (optimizer.param_groups[0]['lr'],))",
"def adjust_learning_rate(lr, decay, optimizer, cur_epoch, every_n_epochs):\n new_lr = lr * (decay ** (cur_epoch // every_n_epochs))\n\n # if cur_epoch % every_n_epochs == 0:\n # new_lr = lr * 0.1\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr",
"def adjust_learning_rate(optimizer, epoch, n):\n lr = args.lr * (0.1 ** (epoch // n))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return apitools message object for given message name. | def GetApiMessage(message_name):
messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)
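  # look the requested message class up by name on the generated apitools messages module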
return getattr(messages, message_name) | [
"def get_message_by_name(self, name):\r\n mh = ct.c_void_p(None)\r\n dll.kvaDbGetMsgByName(self._handle, name.encode('utf-8'), ct.byref(mh))\r\n message = Message(self, mh)\r\n return message",
"def get_message(self, id=None, name=None):\r\n message = None\r\n if (id is not None) and (name is not None):\r\n # Both arguments were given\r\n message = self.get_message_by_id(id, id & MessageFlag.EXT)\r\n if message.name == name:\r\n return message\r\n else:\r\n raise KvdNoMessage()\r\n else:\r\n if id is not None:\r\n return self.get_message_by_id(id, id & MessageFlag.EXT)\r\n else:\r\n return self.get_message_by_name(name)",
"def get_message(self, name):\n request = sample_pb2.GetMessageRequest(\n name=name\n )\n\n try:\n response = self.stub.GetMessage(request)\n print('Message fetched.')\n print(response)\n except grpc.RpcError as err:\n print(err.details()) # pylint: disable=no-member\n print('{}, {}'.format(err.code().name, err.code().value)) # pylint: disable=no-member",
"def get_by_name(message_id: uuid.UUID) -> Optional[MessageDBO]:\n return MessageDBO.objects(name=message_id).first()",
"def resolveMessageByName(self, name):\n self.logger.debug('Resolving message %s' % (str(name)))\n (prefix, leafName) = utils.NamespacePath.splitFullName(name)\n resolved = self.resolveByName_(leafName, prefix, \"messagesByName\", set())\n return resolved",
"def _get_message_class_by_name(class_name):\n if class_name in _lazy_dependent_class_to_package_map:\n module_path = _lazy_dependent_class_to_package_map[class_name]\n elif class_name in _lazy_class_to_package_map:\n module_path = _lazy_class_to_package_map[class_name]\n else:\n raise AttributeError(f\"unknown sub-module {class_name!r}.\")\n\n try:\n module = _load_module(module_path)\n message = getattr(module, class_name)\n except AttributeError:\n raise AttributeError(f\"unknown message class {class_name!r}.\")\n\n if class_name.endswith(\"Service\"):\n message.__module__ = \"google.ads.google_ads.v2.types\"\n\n globals()[class_name] = message\n return message",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Message':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = MessageArgs.__new__(MessageArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"data\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"hl7_v2_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"message_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"parsed_data\"] = None\n __props__.__dict__[\"patient_ids\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"schematized_data\"] = None\n __props__.__dict__[\"send_facility\"] = None\n __props__.__dict__[\"send_time\"] = None\n return Message(resource_name, opts=opts, __props__=__props__)",
"def parse_message(msg):\n if type(msg) is bytes:\n return Message.from_bytes(msg)\n elif isinstance(msg, str):\n return Message.from_bytes(msg)\n elif type(msg) is RawMessage:\n return msg.msg\n elif type(msg) is Message:\n return msg\n elif isinstance(msg, tuple):\n return Message(*msg)\n else:\n raise NotImplementedError",
"def get( msgid ):\n return MESSAGES[ msgid ] if is_valid( msgid ) else None",
"def get_msggen(name):\n if name == \"Signal\":\n mg = signal_service\n elif name == \"DBus\":\n from jeepney.bus_messages import message_bus\n mg = message_bus\n elif name in (\"Stats\", \"Monitoring\"):\n import jeepney.bus_messages as bm\n mg = getattr(bm, name)()\n else:\n raise ValueError(\"Unable to determine target object\")\n return mg",
"def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj",
"def makeMessage( name, *structure ):\n return X12Message( name, *structure )",
"def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj",
"def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg",
"def get_message(self, label_message: str) -> Optional[BaseMessage]:\n return next(iter(x for x in self._message_queue if x.label == label_message), None)",
"def UnpackMessage(swig_obj_pointer, msg_name):\n\n ptr = int(swig_obj_pointer)\n c_array = ctypes.c_char * aio.GetPackMessageSize(msg_name)\n received = c_array.from_address(ptr)\n\n msg_type = MESSAGE_TYPE_HELPER.Value(msg_name)\n return c_helpers.Unpack(received[:], MESSAGE_STRUCTS[msg_type])",
"def get_message(self, id):\n\n return self.messages[id]",
"def get_message(self, message_id):\n r = requests.get('https://outlook.office.com/api/v2.0/me/messages/' + message_id, headers=self._headers)\n check_response(r)\n return Message._json_to_message(self, r.json())",
"def load_message(message_id):\n pathname = \"messages/{}.json\".format(message_id)\n return _load_message(pathname)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a bigquery AccessValueListEntry array from an input file. Expects a YAML- or JSON-formatted file. | def PermissionsFileProcessor(input_file):
access_value_msg = GetApiMessage('Dataset').AccessValueListEntry
try:
permissions_array = []
permissions_from_file = yaml.load(input_file[0])
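    # yaml.load also parses JSON, so both formats named in the docstring are accepted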
permissions_from_file = permissions_from_file.get('access', None)
if not permissions_from_file or not isinstance(permissions_from_file, list):
raise PermissionsFileError(
'Error parsing permissions file: no access list defined in file')
for access_yaml in permissions_from_file:
permission = encoding.PyValueToMessage(access_value_msg, access_yaml)
if _ValidatePermission(permission):
permissions_array.append(permission)
else:
raise PermissionsFileError(('Error parsing permissions file:'
' invalid permission definition'
' [{}]'.format(permission)))
return sorted(permissions_array, key=lambda x: x.role)
except yaml.YAMLParseError as ype:
raise PermissionsFileError('Error parsing permissions file [{}]'.format(
ype)) | [
"def build_accession_parser(rules_file):\n\n rules_data = json.load(rules_file)\n rules_by_prefix_len = {}\n for prefix_list, database, molecule_type, type_description in rules_data:\n for prefix in prefix_list:\n prefix_length = len(prefix)\n if REFSEQ_PREFIX_RE.match(prefix) is not None:\n # RefSeq whose accessions start with XX_ has its own rules\n if 'RefSeq' not in rules_by_prefix_len:\n rules_by_prefix_len['RefSeq'] = []\n rules_by_prefix_len['RefSeq'].append((prefix, database, molecule_type, type_description))\n elif '-' in prefix or '_' in prefix:\n (prefix_length, matcher) = make_range_matcher(prefix)\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((matcher, database, molecule_type, type_description))\n else:\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((prefix, database, molecule_type, type_description))\n return rules_by_prefix_len",
"def get_fc_entries(file: str) -> List[FCMetadata]:\n\n def parse(line: str) -> FCMetadata:\n fc_meta = cast(FCMetadata, json.loads(line.strip()))\n for key in POP_KEYS:\n fc_meta.pop(key, None)\n if \"uuid\" not in fc_meta:\n raise Exception(f\"FC entry is missing a `uuid` field: {fc_meta}\")\n logging.debug(fc_meta)\n return fc_meta\n\n fc_entries = [parse(ln) for ln in open(file)]\n logging.info(f\"Parsed {len(fc_entries)} FC entries from {file}\")\n return fc_entries",
"def getLocations(file, userInput):\r\n\tstartingPoint = Location(name=userInput['starting name'],address = userInput['starting_address'])\r\n\tlocations = [startingPoint] # Initializes the list of locations with the starting location.\r\n\twith open(file) as bookmarks:\r\n\t\tdata = json.load(bookmarks)\r\n\t\tfor i in range(len(data['features'])):\r\n\t\t\taddress = data['features'][i]['properties']['Location']['Address'] # JSON data can be accessed with keys, like nested dictionaries.\r\n\t\t\tname = data['features'][i]['properties']['Location']['Business Name']\r\n\t\t\tlocations.append(Location(name,address)) # Make a location out of each name-address pair, add to a list\r\n\treturn locations",
"def read_auditlog(file: str):\n with open(file, \"r\") as fp:\n for line in fp:\n yield Auditlog.parse(json.loads(line))",
"def parse_edef_schema(schema_file):\r\n # Each entry is a tuple of enum value and comment\r\n # Example: ('DISABLED', 'The Account is not permitted to create guaranteed Stop Loss Orders.')\r\n values = []\r\n\r\n f = open(schema_file)\r\n comments = \"\"\r\n i = 0\r\n\r\n for l in f:\r\n i += 1\r\n line = l.strip()\r\n\r\n if not line:\r\n continue\r\n\r\n # start / end definition\r\n if line == \"[\" or line == \"]\":\r\n continue\r\n elif line.startswith(\"#\"):\r\n # don't append empty comment line\r\n c = line[1:].strip()\r\n if not c:\r\n continue\r\n # multiline comment, add space\r\n if comments:\r\n comments += \" \"\r\n comments += c\r\n else:\r\n if line.endswith(\",\"):\r\n values.append((line[:-1], comments))\r\n else:\r\n values.append((line, comments))\r\n\r\n comments = \"\"\r\n\r\n for value in values:\r\n print(\"value=[{}] comments=[{}]\".format(value[0], value[1]))\r\n\r\n return values",
"def get_reference_list(filepath):\n database = []\n try:\n import csv\n csv_file = open(filepath, 'r')\n logging.debug('Reading %s' % filepath)\n reader = csv.reader(csv_file, delimiter=' ', skipinitialspace=True, strict=True)\n for row in reader:\n # <= 6.0.5\n if len(row) == 2:\n id_code, name, version = int(row[0]), str(row[1]), None\n # >= 6.1.0\n elif len(row) == 3:\n id_code, name, version = int(row[0]), str(row[1]), int(row[2])\n else:\n raise ValueError\n database.append(SetInfo(name, id_code, version))\n except IOError:\n logging.error('Could not open %s' % filepath)\n except (ValueError, csv.Error):\n logging.error('Corrupted file on line %d: %s' % (reader.line_num, filepath))\n csv_file.close()\n database = []\n else:\n csv_file.close()\n return database",
"def read_cfg(file):\n result = []\n if isfile(file):\n with open(file) as f:\n cfg = json.load(f)\n for entry in cfg:\n if \"start\" in entry:\n filter = (entry[\"start\"], entry.get(\"end\", None))\n result.append(filter)\n return result",
"def generate_localization_value_to_entry_dictionary_from_file(file_path):\n return __generate_localization_dictionary_from_file(file_path, \"value\")",
"def read_list_data(input_file_path: str) -> List[str]:\n if input_file_path.startswith('gs://'):\n hl.hadoop_copy(input_file_path, 'file:///' + input_file_path.split(\"/\")[-1])\n f = gzip.open(\"/\" + os.path.basename(input_file_path)) if input_file_path.endswith('gz') else open(\"/\" + os.path.basename(input_file_path))\n else:\n f = gzip.open(input_file_path) if input_file_path.endswith('gz') else open(input_file_path)\n output = []\n for line in f:\n output.append(line.strip())\n f.close()\n return output",
"def readfaidxbed(f):\n from collections import deque\n import pybedtools as bt\n fabed = deque()\n with open(f, 'r') as fin:\n for line in fin:\n line = line.strip().split()\n fabed.append([line[0], 1, int(line[1])])\n return list(fabed)",
"def read_refs_from_file(filename: Path) -> list:\n with open(filename, \"r\") as fp:\n payload = json.load(fp)\n\n return [record[\"0\"] for record in payload]",
"def loadfileconfig():\n zones = []\n for infile in INPUT_FILES:\n # read file into array\n rawinput = []\n with open(infile) as inputfile:\n rawinput = inputfile.readlines()\n inputfile.close()\n\n selectedinput = []\n for line in rawinput:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # remove double quotes, \"\n line = line.replace('\"', '')\n\n # select the lines we need and extract\n # the second field\n if re.search(INPUT_SELECTOR, line):\n line = line.split(' ')\n line = line[1]\n selectedinput.append(line)\n\n\n # Merge two and two items into a list of tuples\n # put into zones\n iterator = iter(selectedinput)\n zones.extend(zip(iterator, iterator))\n\n return zones",
"def read_inputfile():\n # Structure of dictionary should be something like:\n # inputfile = {0:{'BASIC': ...}, ...}\n # with the different namelists being copy.deepcopy() of the namelist objects!\n # Then we can have multiple copies of the same namelist.",
"def read_fieldlist(self, filename):\n with open(filename, 'r') as fieldlistfile:\n for line in fieldlistfile:\n fields = line.split()\n if not fields or fields[0][0] == '#':\n continue\n\n try:\n self.add_subnet(fields[0])\n except ValueError:\n print('*** WARNING *** could not parse IP range: ', line)",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def parse_datafile(file):\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data",
"def load_builtin_data(name):\n\t\n\tpath = Path(resource_filename('pyospray', f'data/{name}.txt'))\n\tret = {}\n\tvalues = None\n\twith path.open('r') as f:\n\t\tlines = (line.rstrip('\\n') for line in f)\n\t\tfor token, content in tokenize(lines):\n\t\t\tif token == 'key':\n\t\t\t\tvalues = []\n\t\t\t\tret[content] = values\n\t\t\t\n\t\t\telif token == 'values':\n\t\t\t\tvalues.extend(content)\n\t\t\t\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\t\n\treturn ret",
"def listBigFile(in_filename, my_list):\n \n fcsv = open(in_filename, 'r')\n for line in csv.DictReader(fcsv):\n d = dict(line)\n my_list.append(d)",
"def parse_file(input_file: str) -> list[Tree]:\n parsed_trees = []\n with open(input_file) as f:\n for line in f:\n tree_def = eval(line.strip())\n parsed_trees.append(Tree.parse(tree_def))\n return parsed_trees"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set projectId value for a BigQueryXXXRequest. | def SetProjectId(ref, args, request):
del ref
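  # fall back to the configured core/project property when --project is not supplied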
project = args.project or properties.VALUES.core.project.Get(required=True)
project_ref = resources.REGISTRY.Parse(project,
collection='bigquery.projects')
request.projectId = project_ref.Name()
return request | [
"def qtest_project_id(self, value):\n self._qtest_project_id = value",
"def setProjectId(self, id):\n self.__current_project_id = id",
"def set_project_quotas(self, project_id, request_model, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.post(\n 'project-quotas/' + project_id,\n request_model=request_model,\n response_model_type=quota_models.ProjectQuotaModel,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp",
"def setProjectid(self, projectid):\r\n self.projectid = projectid",
"def set_projectid(self, projectid):\n self._projectid = projectid",
"def project_id(self, project_id):\n self._project_id = project_id",
"def project_id(self, project_id):\n \n self._project_id = project_id",
"def set_project_id(self, project_id):\n self._project_id = project_id",
"def project_ids(self, project_ids):\n\n self._project_ids = project_ids",
"def _add_project_id(self):\n\n self.params[self.EventParams.PROJECT_ID] = self.config.get_project_id()",
"def setProject(self, projectname):\r\n projectdict = self.projectdict()\r\n for p in projectdict:\r\n if projectdict[p] == projectname:\r\n self.projectid = p",
"def set_project(self, project: str) -> None:\n if project:\n self.project = project",
"def set_project(self):\r\n \r\n self.d_proyecto = self.get_project()",
"def set_or_create_project(conn: BlitzGateway, project: Union[str, int],\n across_groups: Optional[bool] = True) -> int:\n if isinstance(project, str):\n project_id = post_project(conn, project)\n print(f'Created new Project:{project_id}')\n elif (isinstance(project, int)):\n project_id = project\n else:\n raise TypeError(\"'project' must be str or int\")\n return project_id",
"def change_project(self, project, project_format='id'):\n name = 'tenant' if self.api_version == 2 else 'project'\n self.creds['%s_%s' % (name, project_format)] = project\n opposite_format = 'name' if project_format == 'id' else 'id'\n del self.creds['%s_%s' % (name, opposite_format)]",
"def project_name(self, project_name):\n \n self._project_name = project_name",
"def project_name(self, project_name):\n self._project_name = project_name",
"def updateProject(self, projectId,payload):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.put(uri,payload)\n return response",
"def project_name(self, project_name):\n\n self._project_name = project_name"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that view parameters are set properly on the tables create request. | def SetViewParameters(ref, args, request):
del ref # unused
if not args.view:
request.table.view = None
return request | [
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)",
"def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)",
"def create_view(\n request_id: str,\n context_id: str,\n command_id: str,\n table_name: str,\n columns: List[str],\n filters: dict,\n) -> str:\n view_name = create_table_name(\n TableType.VIEW,\n node_config.identifier,\n context_id,\n command_id,\n )\n return views.create_view(\n view_name=view_name,\n table_name=table_name,\n columns=columns,\n filters=filters,\n minimum_row_count=MINIMUM_ROW_COUNT,\n ).json()",
"def _create_views(self, db_session: Session, job_version: Job) -> None:\n self._create_latest_view(db_session=db_session, job_version=job_version)\n self._create_all_view(db_session=db_session, job_version=job_version)",
"def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return",
"def create_table(self):\n pass",
"def create(self,\n view,\n ):\n return self._invoke('create',\n {\n 'view': view,\n })",
"def pre_route_table_create(self, resource_dict):\n pass",
"def create_table(self):\n return None",
"def create_views():\n \"\"\"\n DROP VIEW SUMMARY_LOSS;\nCREATE VIEW\n SUMMARY_LOSS AS\n SELECT A.id, target_names, hidden_neurons, standardization, cost_l2_scale, early_stop_after, best_rms_test, best_rms_validation, l2_norm_validation, walltime, hostname FROM\n (\n SELECT network.id, network.target_names, hyperparameters.hidden_neurons, hyperparameters.standardization, hyperparameters.cost_l2_scale, hyperparameters.early_stop_after, networkmetadata.rms_test as best_rms_test, networkmetadata.rms_validation as best_rms_validation\n FROM network\n INNER JOIN hyperparameters\n ON network.id = hyperparameters.network_id\n INNER JOIN networkmetadata\n ON network.id = networkmetadata.network_id\n WHERE hyperparameters.early_stop_measure = 'loss'\n ) A\n INNER JOIN\n (\n SELECT network.id AS id_C, trainmetadata.l2_norm[networkmetadata.best_epoch + 1] as l2_norm_validation, trainmetadata.walltime[array_length(trainmetadata.walltime, 1)], trainmetadata.hostname\n FROM network\n INNER JOIN trainmetadata\n ON network.id = trainmetadata.network_id\n INNER JOIN networkmetadata\n ON network.id = networkmetadata.network_id\n WHERE trainmetadata.set = 'validation'\n ) C\n ON A.id = C.id_C\n\"\"\"",
"def txn_createTables(self):\r\n self.db_create_nonce()\r\n self.db_create_assoc()\r\n self.db_create_settings()",
"def create_table(self, *args, **kwargs):\n raise NotImplementedError",
"def setUp(self):\n self.factory = RequestFactory()\n (self.existing_variant, self.existing_variant_materialized_view) = create_variant_and_materialized_view(test_data.existing_variant())\n self.existing_clinvar_report = create_report_and_associate_to_variant(test_data.existing_clinvar_report(), self.existing_variant)\n self.existing_lovd_report = create_report_and_associate_to_variant(test_data.existing_lovd_report(), self.existing_variant)",
"def create_table_request_info(self):\n table_query = f\"\"\"\n Create Table If Not Exists Request_Info(\n {self.__fields[0]} INT AUTO_INCREMENT PRIMARY KEY,\n {self.__fields[1]} TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n {self.__fields[2]} CHAR(30),\n {self.__fields[3]} CHAR(30),\n {self.__fields[4]} CHAR(30) NULL,\n {self.__fields[5]} DATE,\n {self.__fields[6]} CHAR(15),\n {self.__fields[7]} CHAR(30),\n {self.__fields[8]} CHAR(30),\n {self.__fields[9]} CHAR(30),\n {self.__fields[10]} INT(32),\n {self.__fields[11]} CHAR(30),\n {self.__fields[12]} INT(32),\n {self.__fields[13]} VARCHAR(30))\n \"\"\"\n self.execute(table_query)",
"def do_fullview(self, view):\n if self.table_exists(view, self.db):\n # If it does, create or replace the view for it.\n logging.info(\"[{}] \".format(view))\n if (\n not self.table_exists(view, self.db_p) or\n self._confirm('View already exists. Replace?')\n ):\n # Can't use pymysql to build this\n self.write_execute(\"\"\"\n CREATE OR REPLACE\n DEFINER={0}\n VIEW `{1}`.`{2}`\n AS SELECT * FROM `{3}`.`{2}`;\n \"\"\".format(self.definer, self.db_p, view, self.db))\n else:\n # Some views only exist in CentralAuth, some only in MediaWiki,\n # etc.\n logging.debug(\n (\"Skipping full view {} on database {} as the table does not\"\n \" seem to exist.\")\n .format(view, self.db)\n )",
"def create_view_stmt(self) -> Iterable[str]:\n return (f\"\"\"\n CREATE SCHEMA IF NOT EXISTS {self.schema}\n \"\"\",\n f\"\"\"\n DROP VIEW IF EXISTS {self.name} CASCADE\n \"\"\",\n f\"\"\"\n CREATE VIEW {self.name}\n AS\n {self.select_stmt()}\n \"\"\")",
"def build_table_setup(self, items, format_method,\n limit, build_urls=True):\n view_entries = []\n for item in items:\n view_entries.append(ViewEntry(item=item))\n self.build_table(view_entries,\n limit=limit,\n format_method=format_method,\n build_urls=build_urls)",
"def ensure_transaction_delta_view_exists():\n transaction_delta_view_path = os.path.join(\n settings.BASE_DIR, \"usaspending_api/database_scripts/etl/transaction_delta_view.sql\"\n )\n with open(transaction_delta_view_path) as f:\n transaction_delta_view = f.read()\n with connection.cursor() as cursor:\n cursor.execute(transaction_delta_view)",
"def prepare_transaction(\n self, request: HttpRequest, view: Optional[View] = None, **kwargs\n ) -> HttpResponse:\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the overwrite flag on tables create. | def ProcessTableOverwrite(ref, args, request):
dataset_id = ref.datasetId
table_id = ref.Name()
project_id = ref.projectId
if args.overwrite:
if _TableExists(dataset_id, table_id, project_id):
_TryDeleteTable(dataset_id, table_id, project_id)
return request | [
"def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True",
"def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request",
"def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)",
"def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True",
"def txn_createTables(self):\r\n self.db_create_nonce()\r\n self.db_create_assoc()\r\n self.db_create_settings()",
"def __new_tables_statement(self):\n new_tables = self.__new_tables()\n for table in new_tables:\n with open('./update/create_tables.sql', 'a') as f:\n create_statement = self.source.query_create_table_statement(table.name)\n f.write(create_statement)\n f.write('\\n')",
"def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )",
"def enterCreate_table_stmt(self, ctx: SQLiteParser.Create_table_stmtContext):\n self.is_ddl = True",
"def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")",
"def create_table_if_needed(self):\n if not self.table_exists():\n self.create_table()",
"def check_tables(self, dcon, tables):\r\n\r\n dcur = dcon.cursor()\r\n for tbl in tables.keys():\r\n if not skytools.exists_table(dcur, tbl):\r\n if not self.part_template:\r\n raise Exception('Dest table does not exists and no way to create it.')\r\n\r\n sql = self.part_template\r\n sql = sql.replace(DEST_TABLE, skytools.quote_fqident(tbl))\r\n\r\n # we do this to make sure that constraints for \r\n # tables who contain a schema will still work\r\n schema_table = tbl.replace(\".\", \"__\")\r\n sql = sql.replace(SCHEMA_TABLE, skytools.quote_ident(schema_table))\r\n\r\n dcur.execute(sql)\r\n dcon.commit()\r\n self.log.info('%s: Created table %s' % (self.job_name, tbl))",
"def on_doctype_update():\n\tif not dataent.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_defkey_index\" \"\"\"):\n\t\tdataent.db.commit()\n\t\tdataent.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_defkey_index(parent, defkey)\"\"\")\n\n\tif not dataent.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_parenttype_index\" \"\"\"):\n\t\tdataent.db.commit()\n\t\tdataent.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_parenttype_index(parent, parenttype)\"\"\")",
"def data_modification_sql_text(self):\n if self.overwrite_target_partition:\n return \"OVERWRITE\"\n else:\n return \"INTO\"",
"def recreateTables(self, connection):\n\t\tif self.parseOptions.updateMode or self.dd.updating:\n\t\t\tif self.parseOptions.dropIndices:\n\t\t\t\tfor t in self:\n\t\t\t\t\tif t.tableDef.onDisk:\n\t\t\t\t\t\tt.dropIndices()\n\t\t\treturn\n\n\t\tfor t in self:\n\t\t\tif t.tableDef.system and not self.parseOptions.systemImport:\n\t\t\t\tcontinue\n\t\t\tif t.tableDef.onDisk:\n\t\t\t\tt.runScripts(\"preImport\")\n\t\t\t\tt.recreate()",
"def create(self):\n for t in Database.tables:\n t.create(checkfirst=True)",
"def pre_create(self, record):",
"def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()",
"def on_doctype_update():\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_defkey_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_defkey_index(parent, defkey)\"\"\")\n\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_parenttype_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_parenttype_index(parent, parenttype)\"\"\")",
"def _process_create(existing, change):\n if existing is not None:\n # You cannot create if it exists a already.\n raise _error\n return change"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the overwrite flag on tables copy. | def ProcessTableCopyOverwrite(ref, args, request):
del ref # Unused
if args.overwrite:
request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'
return request | [
"def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request",
"def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True",
"def is_overwrite_all(self):\n return self._tag == 'overwrite_all'",
"def data_modification_sql_text(self):\n if self.overwrite_target_partition:\n return \"OVERWRITE\"\n else:\n return \"INTO\"",
"def test_overwrites(self):\n\n extra_con = set([Constraint('fake', ['OVERWRITE'])])\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/%fake%/%file%/%pattern%.txt',\n 'echo', extra_constraints=extra_con)\n\n ds_result = the_process_unit.execute(simulate=True)\n\n expected_in_cons = set([Constraint('fake', ['fake_1']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n expected_out_cons = set([Constraint('fake', ['OVERWRITE']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n\n self.assertEqual(expected_in_cons, self.a_pattern_ds.constraints)\n self.assertEqual(expected_out_cons, ds_result.constraints)",
"def test__ApplicationCommandPermissionOverwrite__copy():\n allow = True\n target_id = 202302210007\n target_type = ApplicationCommandPermissionOverwriteTargetType.role\n \n application_command_permission_overwrite = ApplicationCommandPermissionOverwrite(\n allow = allow,\n target = (target_type, target_id)\n )\n copy = application_command_permission_overwrite.copy()\n \n _asert_fields_set(copy)\n vampytest.assert_is_not(application_command_permission_overwrite, copy)\n \n vampytest.assert_eq(application_command_permission_overwrite, copy)",
"def copy_table_data(self):\n\t\tself.my_eng.copy_table_data(self.pg_eng, self.global_config.copy_max_memory)\n\t\tself.pg_eng.save_master_status(self.my_eng.master_status)",
"def needPartitionTableUpdate(self):\n n_table=list()\n d_table=self.destination.getPartitionTable()\n s_table=self.source.getPartitionTable()\n for i in range(len(s_table)):\n n_table.append(re.sub(self.source.getDeviceName(), \\\n self.destination.getDeviceName(), \\\n s_table[i]))\n if d_table == n_table:\n return False\n else:\n return True",
"def load(self):\n\n self.starttime = datetime.datetime.utcnow()\n\n # Initializing Data\n delete_clause = self.get_delete_sql()\n staging_table = self.staging_table_name\n destination_table = self.destination_table_name\n is_normal_load = self._destination_table_status == self.DESTINATION_TABLE_OK\n is_rebuild = self._destination_table_status == self.DESTINATION_TABLE_REBUILD\n is_dne = self._destination_table_status == self.DESTINATION_TABLE_DNE\n\n with get_redshift().cursor() as cur:\n self.set_search_path(cur)\n\n # If table does not exist, create it\n if is_dne:\n create_table = self.query_to_redshift_create_table(\n self.get_query_sql(), self.destination_table_name\n )\n cur.execute(create_table)\n elif not is_normal_load and not is_rebuild:\n raise RuntimeError(\n \"Invalid table status in redshift_copy: {}\".format(\n self._destination_table_status\n )\n )\n\n # If there is no row updates, just skip copy and return\n if self.row_count == 0:\n return\n\n cur.execute(\"BEGIN TRANSACTION;\")\n # Lock the table early to avoid deadlocks in many-to-one pipelines.\n query = generate_lock_query(destination_table)\n cur.execute(query)\n\n query = generate_drop_exists_query(staging_table)\n cur.execute(query)\n\n if is_rebuild:\n # Build staging table anew and grant it appropriate permissions\n self.logger.info(\n \"Creating staging table to rebuild %s\", destination_table\n )\n create_staging_table = self.query_to_redshift_create_table(\n self.get_query_sql(), staging_table\n )\n permissions_sql = self.get_grant_sql(cur)\n cur.execute(create_staging_table)\n if permissions_sql:\n self.logger.info(\n \"Copying permissions onto %s:\\n%s\",\n staging_table,\n permissions_sql,\n )\n cur.execute(permissions_sql)\n else:\n # If not rebuilding, create staging with LIKE\n self.logger.info(\"Creating staging table %s\", staging_table)\n query = generate_create_table_like_query(\n staging_table, destination_table\n )\n cur.execute(query)\n\n # Issuing Copy Command\n self.logger.info(\"Issuing copy command\")\n query = generate_copy_query(\n staging_table,\n self.copy_target_url,\n get_redshift().iam_copy_role,\n self.manifest_mode,\n )\n self.logger.debug(query)\n cur.execute(query)\n\n # Row delete and count logic\n if is_rebuild or (self.append_only and not self.full_refresh):\n self.rows_deleted = 0\n else:\n cur.execute(delete_clause)\n self.rows_deleted = cur.rowcount\n\n # Row insert and count logic\n if is_rebuild:\n self.logger.info(\"Swapping staging table into %s\", destination_table)\n # DNE overrides rebuild, so we can assume the table exists\n query = generate_drop_query(destination_table)\n self.logger.debug(query)\n cur.execute(query)\n query = generate_rename_query(staging_table, destination_table)\n self.logger.debug(query)\n cur.execute(query)\n query = generate_count_query(destination_table)\n self.logger.debug(query)\n cur.execute(query)\n self.rows_inserted = cur.fetchall()[0]\n else:\n query = generate_insert_all_query(staging_table, destination_table)\n self.logger.debug(query)\n cur.execute(query)\n self.rows_inserted = cur.rowcount\n query = generate_drop_query(staging_table)\n self.logger.debug(query)\n cur.execute(query)\n cur.execute(\"END TRANSACTION;\")\n self.register_and_cleanup()",
"def test__ApplicationCommandPermissionOverwrite__copy_with__0(): \n allow = True\n target_id = 202302210008\n target_type = ApplicationCommandPermissionOverwriteTargetType.role\n \n application_command_permission_overwrite = ApplicationCommandPermissionOverwrite(\n allow = allow,\n target = (target_type, target_id)\n )\n copy = application_command_permission_overwrite.copy_with()\n \n _asert_fields_set(copy)\n vampytest.assert_is_not(application_command_permission_overwrite, copy)\n \n vampytest.assert_eq(application_command_permission_overwrite, copy)",
"def MergeUndo():\n pass",
"def compare_schema_patch_ifneeded(copy_driver, table_name):\n srctable_ref = copy_driver.source_client.dataset(copy_driver.source_dataset).table(table_name)\n try:\n srctable = copy_driver.source_client.get_table(srctable_ref)\n except exceptions.NotFound:\n copy_driver.get_logger().warning(\"Table {}.{}.{} has been deleted between list comparison and detail sync skipping\".format(copy_driver.source_project,\n copy_driver.source_dataset,\n table_name))\n return\n dsttable_ref = copy_driver.destination_client.dataset(copy_driver.destination_dataset).table(\n table_name)\n dsttable = copy_driver.destination_client.get_table(dsttable_ref)\n\n # if different table types thats not good need to sort\n # drop and recreate this handles TABLE->MODEL and MODEL->TABLE\n if dsttable.table_type != srctable.table_type \\\n or srctable.partitioning_type is None and dsttable.partitioning_type is not None \\\n or srctable.partitioning_type != dsttable.partitioning_type:\n copy_driver.get_logger().warning(\n \"Change in table_type source {0}.{1}.{tablename} is type {2} and destination {3}.{\"\n \"4}.{tablename} is type {5}\".format(\n copy_driver.source_project,\n copy_driver.source_dataset,\n srctable.table_type,\n copy_driver.destination_project,\n copy_driver.destination_dataset,\n dsttable.table_type,\n tablename=table_name))\n remove_deleted_destination_table(copy_driver, table_name)\n create_and_copy_table(copy_driver, table_name)\n return\n\n if srctable.table_type == \"MODEL\":\n compare_model_patch_ifneeded(copy_driver, table_name)\n return\n\n NEW_SCHEMA = list(srctable.schema)\n OLD_SCHEMA = list(dsttable.schema)\n\n fields = []\n # Only check encryption if missing if its has been updated but exists left as is\n if srctable.encryption_configuration is not None and dsttable.encryption_configuration is None:\n dsttable.encryption_configuration = copy_driver.calculate_target_cmek_config(\n srctable.encryption_configuration)\n fields.append(\"encryption_configuration\")\n if dsttable.description != srctable.description:\n dsttable.description = srctable.description\n fields.append(\"description\")\n if dsttable.friendly_name != srctable.friendly_name:\n dsttable.friendly_name = srctable.friendly_name\n fields.append(\"friendly_name\")\n if dsttable.labels != srctable.labels:\n dsttable.labels = srctable.labels\n fields.append(\"labels\")\n if dsttable.partition_expiration != srctable.partition_expiration:\n dsttable.partition_expiration = srctable.partition_expiration\n fields.append(\"partition_expiration\")\n if dsttable.expires != srctable.expires:\n dsttable.expires = srctable.expires\n fields.append(\"expires\")\n\n\n # if fields added lengths will differ\n # as initial copy used original these will be same order\n # merge and comare schemas\n def compare_and_merge_schema_fields(input):\n working_schema = []\n changes = 0\n\n field_names_found = {}\n match = True\n for schema_item in input[\"oldchema\"]:\n match = False\n for tgt_schema_item in input[\"newschema\"]:\n if tgt_schema_item.name == schema_item.name:\n field_names_found[schema_item.name] = True\n match = True\n # cannot patch type changes so have to recreate\n if tgt_schema_item.field_type != schema_item.field_type:\n changes += 1\n input[\"workingchema\"] = working_schema\n input[\"changes\"] = changes\n input[\"deleteandrecreate\"] = True\n return input\n if tgt_schema_item.description != schema_item.description:\n changes += 1\n if tgt_schema_item.field_type != \"RECORD\":\n tag_change,tgt_schema_item2 = 
copy_driver.map_policy_tag(tgt_schema_item,schema_item)\n if tag_change:\n changes += 1\n tgt_schema_item = tgt_schema_item2\n working_schema.append(tgt_schema_item2)\n else:\n # cannot change mode for record either repeated or not\n if tgt_schema_item.mode != schema_item.mode:\n changes += 1\n input[\"workingchema\"] = working_schema\n input[\"changes\"] = changes\n input[\"deleteandrecreate\"] = True\n return input\n\n output = compare_and_merge_schema_fields(\n {\"oldchema\": list(schema_item.fields),\n \"newschema\": list(tgt_schema_item.fields),\n \"changes\": 0})\n changes += output[\"changes\"]\n # if changes then need to create a new schema with new fields\n # field is immutable so convert to api rep\n # alter and convert back\n if output[\"changes\"] > 0:\n newfields = []\n for schema_item in output[\"workingchema\"]:\n newfields.append(schema_item.to_api_repr())\n tmp_work = tgt_schema_item.to_api_repr()\n tmp_work[\"fields\"] = newfields\n working_schema.append(bigquery.SchemaField.from_api_repr(tmp_work))\n else:\n tmp_work = tgt_schema_item.to_api_repr()\n tmp_work[\"fields\"] = [i.to_api_repr() for i in output['workingchema']]\n working_schema.append(bigquery.SchemaField.from_api_repr(tmp_work))\n if \"deleteandrecreate\" in output and output[\"deleteandrecreate\"]:\n input[\"deleteandrecreate\"] = output[\"deleteandrecreate\"]\n input[\"changes\"] = changes\n input[\"workingchema\"] = working_schema\n return input\n\n break\n\n # retain stuff that existed previously\n # nominally a change but as not an addition deemed not to be\n if not match:\n working_schema.append(schema_item)\n\n # add any new structures\n for tgt_schema_item in input[\"newschema\"]:\n if tgt_schema_item.name not in field_names_found:\n _,new_field = copy_driver.map_policy_tag(tgt_schema_item)\n working_schema.append(new_field)\n changes += 1\n\n if len(working_schema) < len(input[\"newschema\"]):\n pass\n\n input[\"workingchema\"] = working_schema\n input[\"changes\"] = changes\n return input\n\n output = compare_and_merge_schema_fields({\"oldchema\": OLD_SCHEMA,\n \"newschema\": NEW_SCHEMA,\n \"changes\": 0})\n if \"deleteandrecreate\" in output:\n remove_deleted_destination_table(copy_driver, table_name)\n create_and_copy_table(copy_driver, table_name)\n return\n else:\n if output[\"changes\"] > 0:\n dsttable.schema = output[\"workingchema\"]\n fields.append(\"schema\")\n\n # and update the table\n if len(fields) > 0:\n try:\n if dsttable.clustering_fields != srctable.clustering_fields:\n dsttable.clustering_fields = srctable.clustering_fields\n table_api_rep = dsttable.to_api_repr()\n fields.append(\"clustering\")\n copy_driver.discovery_update_table(table_api_rep,copy_driver)\n else:\n copy_driver.destination_client.update_table(dsttable,\n fields)\n except exceptions.BadRequest as e:\n if \"encryption_configuration\" in fields and \\\n str(e).find(\n \"Changing from Default to Cloud KMS encryption key and back must be \"\n \"done via table.copy job\") != -1:\n pass\n else:\n copy_driver.get_logger().exception(\n \"Bad Request when patching table {}.{}.{} {}\".format(copy_driver.destination_project,\n copy_driver.destination_dataset,\n table_name, \",\".join(fields)))\n copy_driver.increment_tables_failed_sync()\n raise\n except Exception as e:\n copy_driver.increment_tables_failed_sync()\n raise\n\n dsttable = copy_driver.destination_client.get_table(dsttable_ref)\n copy_driver.get_logger().info(\n \"Patched table {}.{}.{} {}\".format(copy_driver.destination_project,\n 
copy_driver.destination_dataset,\n table_name, \",\".join(fields)))\n\n copy_driver.add_bytes_synced(srctable.num_bytes)\n copy_driver.add_rows_synced(srctable.num_rows)\n\n # as not possible to patch on day partition with data time to rebuild this table\n # this should be feasible with an in region copy\n if \"encryption_configuration\" in fields and \\\n dsttable.num_rows != 0 and \\\n (dsttable.partitioning_type in PARTITIONING_BY_TIME or\n dsttable.encryption_configuration is None): # going from none to some needs to\n # happen via a copy\n update_table_cmek_via_copy(copy_driver.destination_client,\n dsttable,\n copy_driver.calculate_target_cmek_config(\n srctable.encryption_configuration),\n copy_driver.get_logger())\n dsttable = copy_driver.destination_client.get_table(dsttable_ref)\n\n if dsttable.num_rows != srctable.num_rows or \\\n dsttable.num_bytes != srctable.num_bytes or \\\n srctable.modified >= dsttable.modified or \\\n copy_driver.table_data_change(srctable, dsttable):\n export_import_type = copy_driver.export_import_format_supported(srctable, dsttable)\n copy_driver.copy_q.put((-1 * srctable.num_rows, BQSyncTask(copy_table_data,\n [copy_driver, table_name,\n srctable.partitioning_type,\n dsttable.num_rows,\n srctable.num_rows,\n export_import_type])))\n else:\n copy_driver.increment_tables_avoided()\n copy_driver.add_bytes_avoided(srctable.num_bytes)\n copy_driver.add_rows_avoided(srctable.num_rows)",
"def _apply(self):\n s = [(iptables_save, iptables_restore, self.ipv4)]\n if self.use_ipv6:\n s += [(ip6tables_save, ip6tables_restore, self.ipv6)]\n\n for save, restore, tables in s:\n all_tables, _err = save()\n all_lines = all_tables.split('\\n')\n for table_name, table in six.iteritems(tables):\n start, end = self._find_table(all_lines, table_name)\n all_lines[start:end] = self._modify_rules(\n all_lines[start:end], table, table_name)\n table.dirty = False\n restore('\\n'.join(all_lines))",
"def _conflict(self, srcdata, dst, dstdata):\n dst = dst.relpath()\n while True:\n choice = self.ui.choose(\n \"How do you wish to proceed with this file?\\n\"\n \"over(w)rite, (s)kip, (r)ender, (d)iff, (a)bort: \",\n [\"w\", \"s\", \"r\", \"d\", \"a\"])\n if choice == \"w\":\n self.ui.write(\"Overwritten\\n\")\n return True\n elif choice == \"s\":\n self.ui.write(\"Skipped\\n\")\n return False\n elif choice == \"r\":\n self.ui.write(\"Rendering %s\\n\\n%s\\n\" % (dst, srcdata))\n elif choice == \"d\":\n self.ui.write(\"Showing differences for %s\\n\\n\" % dst)\n self.ui.diff(dstdata, srcdata, \"old\", \"new\")\n self.ui.write(\"\\n\")\n elif choice == \"a\":\n raise error.AbortError(\"user\")",
"def enableOverwriteOnConflict(self, enabled: bool) -> None:\n ...",
"def pre_osc_check(self):\n # Make sure temporary table we will use during copy doesn't exist\n self.table_check()\n self.decide_pk_for_filter()\n\n # Check if we can have indexes in new table to efficiently look up\n # current old pk combinations\n if not self.validate_post_alter_pk():\n self.table_size = self.get_table_size(self.table_name)\n if self.skip_pk_coverage_check:\n log.warning(\n \"Indexes on new table cannot cover current PK of \"\n \"the old schema, which will make binary logs replay \"\n \"in an inefficient way.\"\n )\n elif self.table_size < self.pk_coverage_size_threshold:\n log.warning(\n \"No index on new table can cover old pk. Since this is \"\n \"a small table: {}, we fallback to a full table dump\".format(\n self.table_size\n )\n )\n # All columns will be chosen if we are dumping table without\n # chunking, this means all columns will be used as a part of\n # the WHERE condition when replaying\n self.is_full_table_dump = True\n self._pk_for_filter = [col.name for col in self._old_table.column_list]\n self._pk_for_filter_def = self._old_table.column_list.copy()\n elif self.is_full_table_dump:\n log.warning(\n \"Skipping coverage index test, since we are doing \"\n \"full table dump\"\n )\n else:\n old_pk_names = \", \".join(\n \"`{}`\".format(col.name)\n for col in self._old_table.primary_key.column_list\n )\n raise OSCError(\"NO_INDEX_COVERAGE\", {\"pk_names\": old_pk_names})\n\n log.info(\n \"PK filter for replaying changes later: {}\".format(self._pk_for_filter)\n )\n\n self.foreign_key_check()\n self.trigger_check()\n self.init_range_variables()\n self.get_table_chunk_size()\n self.make_chunk_size_odd()\n self.check_disk_size()\n self.ts_bootstrap_check()\n self.drop_columns_check()",
"def test__ApplicationCommandPermissionOverwrite__copy_with__1():\n old_allow = True\n old_target_id = 202302210009\n old_target_type = ApplicationCommandPermissionOverwriteTargetType.role\n \n new_allow = False\n new_target_id = 202302210010\n new_target_type = ApplicationCommandPermissionOverwriteTargetType.channel\n \n application_command_permission_overwrite = ApplicationCommandPermissionOverwrite(\n allow = old_allow,\n target = (old_target_type, old_target_id)\n )\n \n copy = application_command_permission_overwrite.copy_with(\n allow = new_allow,\n target = (new_target_type, new_target_id)\n )\n \n _asert_fields_set(copy)\n vampytest.assert_is_not(application_command_permission_overwrite, copy)\n vampytest.assert_eq(copy.allow, new_allow)\n vampytest.assert_eq(copy.target_id, new_target_id)\n vampytest.assert_is(copy.target_type, new_target_type)",
"def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)",
"def set_overwrite_status(self, enabled):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build JobConfigurationTableCopy from request resource args. | def ProcessTableCopyConfiguration(ref, args, request):
del ref # Unused
source_ref = args.CONCEPTS.source.Parse()
destination_ref = args.CONCEPTS.destination.Parse()
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.datasetId',
destination_ref.Parent().Name())
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.projectId',
destination_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.destinationTable.tableId',
destination_ref.Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.datasetId',
source_ref.Parent().Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.projectId',
source_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.tableId',
source_ref.Name())
return request | [
"def GetTableCopyResourceArgs():\n table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb='to copy from', name='source', required=True, prefixes=True,\n attribute_overrides={'table': 'source'}, positional=False,\n resource_data=table_spec_data.GetData()),\n resource_args.GetResourcePresentationSpec(\n verb='to copy to', name='destination',\n required=True, prefixes=True,\n attribute_overrides={'table': 'destination'}, positional=False,\n resource_data=table_spec_data.GetData())]\n fallthroughs = {\n '--source.dataset': ['--destination.dataset'],\n '--destination.dataset': ['--source.dataset']\n }\n return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]",
"def __init__(self, args, copy_thread = 1):\r\n\r\n Replicator.__init__(self, args)\r\n\r\n if not copy_thread:\r\n raise Exception(\"Combined copy not supported\")\r\n\r\n if len(self.args) != 3:\r\n self.log.error(\"londiste copy requires table name\")\r\n sys.exit(1)\r\n self.copy_table_name = self.args[2]\r\n\r\n sfx = self.get_copy_suffix(self.copy_table_name)\r\n self.old_consumer_name = self.consumer_name\r\n self.pidfile += sfx\r\n self.consumer_name += sfx\r\n self.copy_thread = 1\r\n self.main_worker = False",
"def create_table_from_args(definition, options, where, args, tc):\n table = _Table.create_table_from_string(definition, options, tc)\n table.set_where_condition(where)\n table.set_column_range(args.column_range)\n if args.columns is not None:\n table.set_chosen_columns(args.columns)\n if args.ignore_columns is not None:\n table.set_ignore_columns(args.ignore_columns)\n if args.decodeCP1252_columns is not None:\n table.set_decode_cp1252_columns(args.decodeCP1252_columns)\n table.set_group_by_column(args.group_by_column) # if not defined, then it's None and we'll compute it later\n\n return table",
"def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args",
"def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request",
"def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)",
"def _GetMigrationJob(\n self,\n source_ref,\n destination_ref,\n conversion_workspace_ref,\n cmek_key_ref,\n args,\n ):\n migration_job_type = self.messages.MigrationJob\n labels = labels_util.ParseCreateArgs(\n args, self.messages.MigrationJob.LabelsValue\n )\n type_value = self._GetType(migration_job_type, args.type)\n source = source_ref.RelativeName()\n destination = destination_ref.RelativeName()\n params = {}\n if args.IsSpecified('peer_vpc'):\n params['vpcPeeringConnectivity'] = self._GetVpcPeeringConnectivity(args)\n elif args.IsSpecified('vm_ip'):\n params['reverseSshConnectivity'] = self._GetReverseSshConnectivity(args)\n elif args.IsSpecified('static_ip'):\n params['staticIpConnectivity'] = self._GetStaticIpConnectivity()\n\n migration_job_obj = migration_job_type(\n labels=labels,\n displayName=args.display_name,\n state=migration_job_type.StateValueValuesEnum.CREATING,\n type=type_value,\n dumpPath=args.dump_path,\n source=source,\n destination=destination,\n **params)\n if conversion_workspace_ref is not None:\n migration_job_obj.conversionWorkspace = self._GetConversionWorkspaceInfo(\n conversion_workspace_ref, args\n )\n if cmek_key_ref is not None:\n migration_job_obj.cmekKeyName = cmek_key_ref.RelativeName()\n\n if args.IsKnownAndSpecified('filter'):\n args.filter, server_filter = filter_rewrite.Rewriter().Rewrite(\n args.filter\n )\n migration_job_obj.filter = server_filter\n\n if args.IsKnownAndSpecified('dump_parallel_level'):\n migration_job_obj.performanceConfig = self._GetPerformanceConfig(args)\n\n return migration_job_obj",
"def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)",
"def AddCopyBackupResourceArgs(parser):\n arg_specs = [\n presentation_specs.ResourcePresentationSpec(\n '--source',\n GetBackupResourceSpec(),\n 'TEXT',\n required=True,\n flag_name_overrides={\n 'instance': '--source-instance',\n 'backup': '--source-backup'\n }),\n presentation_specs.ResourcePresentationSpec(\n '--destination',\n GetBackupResourceSpec(),\n 'TEXT',\n required=True,\n flag_name_overrides={\n 'instance': '--destination-instance',\n 'backup': '--destination-backup',\n }),\n ]\n\n concept_parsers.ConceptParser(arg_specs).AddToParser(parser)",
"def initiate_build(self, config: Union[TableConfig, str, UUID],\n version: Union[str, UUID] = None) -> JobSubmissionResponse:\n if isinstance(config, TableConfig):\n if version is not None:\n logger.warning('Ignoring version {} since config object was provided.'\n .format(version))\n if config.version_number is None:\n raise ValueError('Cannot build table from config which has no version. '\n 'Try registering the config before building.')\n if config.config_uid is None:\n raise ValueError('Cannot build table from config which has no uid. '\n 'Try registering the config before building.')\n uid = config.config_uid\n version = config.version_number\n else:\n if version is None:\n raise ValueError('Version must be specified when building by config uid.')\n uid = config\n job_id = uuid4()\n logger.info('Building table from config {} version {} with job ID {}...'\n .format(uid, version, job_id))\n path = 'projects/{}/ara-definitions/{}/versions/{}/build'.format(\n self.project_id, uid, version\n )\n response = self.session.post_resource(\n path=path,\n json={},\n params={\n 'job_id': job_id\n }\n )\n submission = JobSubmissionResponse.build(response)\n logger.info('Build job submitted with job ID {}.'.format(submission.job_id))\n return submission",
"def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|",
"def __init__(self, job_template_name, job_input, device_list,\n api_server_config, logger, amqp_client,\n transaction_id, transaction_descr, args):\n self._job_template_name = job_template_name\n self._job_input = job_input\n self._device_list = device_list\n self._api_server_config = api_server_config\n self._logger = logger\n self._job_id = None\n self._job_status = None\n self._amqp_client = amqp_client\n self._transaction_id = transaction_id\n self._transaction_descr = transaction_descr\n self._args = args\n super(JobHandler, self).__init__()",
"def __copy__(self):\n from bn.distribs.distribution_builder import MultivariateTableBuilder\n builder = MultivariateTableBuilder()\n for assignment in self._table.keys():\n builder.add_row(copy(assignment), self._table[assignment])\n\n return builder.build()",
"def load(self):\n\n self.starttime = datetime.datetime.utcnow()\n\n # Initializing Data\n delete_clause = self.get_delete_sql()\n staging_table = self.staging_table_name\n destination_table = self.destination_table_name\n is_normal_load = self._destination_table_status == self.DESTINATION_TABLE_OK\n is_rebuild = self._destination_table_status == self.DESTINATION_TABLE_REBUILD\n is_dne = self._destination_table_status == self.DESTINATION_TABLE_DNE\n\n with get_redshift().cursor() as cur:\n self.set_search_path(cur)\n\n # If table does not exist, create it\n if is_dne:\n create_table = self.query_to_redshift_create_table(\n self.get_query_sql(), self.destination_table_name\n )\n cur.execute(create_table)\n elif not is_normal_load and not is_rebuild:\n raise RuntimeError(\n \"Invalid table status in redshift_copy: {}\".format(\n self._destination_table_status\n )\n )\n\n # If there is no row updates, just skip copy and return\n if self.row_count == 0:\n return\n\n cur.execute(\"BEGIN TRANSACTION;\")\n # Lock the table early to avoid deadlocks in many-to-one pipelines.\n query = generate_lock_query(destination_table)\n cur.execute(query)\n\n query = generate_drop_exists_query(staging_table)\n cur.execute(query)\n\n if is_rebuild:\n # Build staging table anew and grant it appropriate permissions\n self.logger.info(\n \"Creating staging table to rebuild %s\", destination_table\n )\n create_staging_table = self.query_to_redshift_create_table(\n self.get_query_sql(), staging_table\n )\n permissions_sql = self.get_grant_sql(cur)\n cur.execute(create_staging_table)\n if permissions_sql:\n self.logger.info(\n \"Copying permissions onto %s:\\n%s\",\n staging_table,\n permissions_sql,\n )\n cur.execute(permissions_sql)\n else:\n # If not rebuilding, create staging with LIKE\n self.logger.info(\"Creating staging table %s\", staging_table)\n query = generate_create_table_like_query(\n staging_table, destination_table\n )\n cur.execute(query)\n\n # Issuing Copy Command\n self.logger.info(\"Issuing copy command\")\n query = generate_copy_query(\n staging_table,\n self.copy_target_url,\n get_redshift().iam_copy_role,\n self.manifest_mode,\n )\n self.logger.debug(query)\n cur.execute(query)\n\n # Row delete and count logic\n if is_rebuild or (self.append_only and not self.full_refresh):\n self.rows_deleted = 0\n else:\n cur.execute(delete_clause)\n self.rows_deleted = cur.rowcount\n\n # Row insert and count logic\n if is_rebuild:\n self.logger.info(\"Swapping staging table into %s\", destination_table)\n # DNE overrides rebuild, so we can assume the table exists\n query = generate_drop_query(destination_table)\n self.logger.debug(query)\n cur.execute(query)\n query = generate_rename_query(staging_table, destination_table)\n self.logger.debug(query)\n cur.execute(query)\n query = generate_count_query(destination_table)\n self.logger.debug(query)\n cur.execute(query)\n self.rows_inserted = cur.fetchall()[0]\n else:\n query = generate_insert_all_query(staging_table, destination_table)\n self.logger.debug(query)\n cur.execute(query)\n self.rows_inserted = cur.rowcount\n query = generate_drop_query(staging_table)\n self.logger.debug(query)\n cur.execute(query)\n cur.execute(\"END TRANSACTION;\")\n self.register_and_cleanup()",
"def CopyTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('copyTapSettings', payload=payload, response_object=None)",
"def __copy__(self):\n # prepare unnamed arguments\n args = [getattr(self, arg) for arg in self._copy_conf['args']]\n\n # prepare named arguments\n kwargs = {}\n for arg in self._copy_conf['kwargs']:\n # if arg is a tuple, the first entry will be the named kwargs, and\n # the second will be the name of the attribute to copy\n name = arg\n if isinstance(arg, tuple):\n name, arg = arg\n if hasattr(self, arg):\n kwargs[name] = getattr(self, arg)\n\n # create the new instance\n new_copy = self.__class__(*args, **kwargs)\n\n # then copy attributes\n for attr_name in self._copy_conf['attrs']:\n if hasattr(self, attr_name):\n setattr(new_copy, attr_name, getattr(self, attr_name))\n\n return new_copy",
"def _ConstructCreateSettingsFromArgs(cls,\n sql_messages,\n args,\n instance=None,\n release_track=DEFAULT_RELEASE_TRACK):\n original_settings = instance.settings if instance else None\n settings = cls._ConstructBaseSettingsFromArgs(sql_messages, args, instance,\n release_track)\n\n backup_configuration = (\n reducers.BackupConfiguration(\n sql_messages,\n instance,\n backup=args.backup,\n backup_start_time=args.backup_start_time,\n enable_bin_log=args.enable_bin_log))\n if backup_configuration:\n cls.AddBackupConfigToSettings(settings, backup_configuration)\n\n settings.databaseFlags = (\n reducers.DatabaseFlags(\n sql_messages, original_settings,\n database_flags=args.database_flags))\n\n settings.maintenanceWindow = (\n reducers.MaintenanceWindow(\n sql_messages,\n instance,\n maintenance_release_channel=args.maintenance_release_channel,\n maintenance_window_day=args.maintenance_window_day,\n maintenance_window_hour=args.maintenance_window_hour))\n\n if args.storage_type:\n settings.dataDiskType = _ParseStorageType(\n sql_messages, STORAGE_TYPE_PREFIX + args.storage_type)\n\n # BETA args.\n if _IsBetaOrNewer(release_track):\n settings.userLabels = labels_util.ParseCreateArgs(\n args, sql_messages.Settings.UserLabelsValue)\n\n return settings",
"def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|",
"def from_mapping(context: CreateCommandsContext, dry_run):\n if dry_run:\n logger.info(\"** Dry run, nothing will be sent to server **\")\n\n # Make sure no jobs are actually created\n context.client_tool.create_path_job = mock_create\n context.client_tool.create_pacs_job = mock_create\n\n job_sets = extract_job_sets(\n context.default_parameters(), context.get_mapping()\n )\n\n # inspect project name and destination to present the next question to the user\n project_names = set()\n destination_paths = set()\n for job_set in job_sets:\n project_names.add(job_set.get_param_by_type(Project).value)\n destination_paths.add(job_set.get_param_by_type(DestinationPath).value)\n\n question = (\n f\"This will create {len(job_sets)} jobs on \"\n f\"{context.get_active_server().name},\"\n f\" for projects '{list(project_names)}', writing data to \"\n f\"'{[str(x) for x in destination_paths]}'. Are you sure?\"\n )\n if not click.confirm(question):\n logger.info(\"Cancelled\")\n return\n\n created_job_ids = create_jobs(context, job_sets)\n\n if created_job_ids:\n context.add_to_batch(created_job_ids)\n\n logger.info(\"Done\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process schema updates (additions/mode changes) for the request. Retrieves the current table schema for ref and attempts to merge in the schema provided in the request. This is necessary since the API backend does not handle PATCH semantics for schema updates (i.e. processing the deltas), so we must always send the fully updated schema in the request. | def ProcessSchemaUpdate(ref, args, request):
table = request.table
relaxed_columns = args.relax_columns
if not table.schema and not relaxed_columns: # if not updating schema,
return request # then just return.
original_schema = _TryGetCurrentSchema(ref.Parent().Name(),
ref.Name(),
ref.projectId)
new_schema_columns = table.schema
updated_fields = _GetUpdatedSchema(original_schema,
new_schema_columns,
relaxed_columns)
table_schema_type = GetApiMessage('TableSchema')
request.table.schema = table_schema_type(fields=updated_fields)
return request | [
"def UpdateSchema(request, schema):\n handler = DetermineHandlerModule(request)\n \n result = handler.UpdateSchema(request, schema)\n \n return result",
"async def upgradeSchema(self) -> None:",
"def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry",
"def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)",
"def batch_operations_default_mode_with_one_schema(\n self,\n app,\n schema_name,\n fields_to_send,\n expected_fields_from_get_operation,\n fields_to_update,\n ):\n #\n # Create and feed documents\n #\n num_docs = len(fields_to_send)\n schema = schema_name\n docs = [{\"id\": fields[\"id\"], \"fields\": fields} for fields in fields_to_send]\n update_docs = [\n {\"id\": fields[\"id\"], \"fields\": fields} for fields in fields_to_update\n ]\n\n app.feed_batch(batch=docs)\n\n #\n # Verify that all documents are fed\n #\n result = app.query(\n body={\n \"yql\": 'select * from sources * where (userInput(\"sddocname:{}\"))'.format(\n schema_name\n ),\n \"ranking\": {\"profile\": \"default\", \"listFeatures\": \"false\"},\n }\n )\n self.assertEqual(result.number_documents_indexed, num_docs)\n\n #\n # get batch data\n #\n result = app.get_batch(batch=docs)\n for idx, response in enumerate(result):\n self.assertDictEqual(\n response.json[\"fields\"], expected_fields_from_get_operation[idx]\n )\n\n #\n # Update data\n #\n result = app.update_batch(batch=update_docs)\n for idx, response in enumerate(result):\n self.assertEqual(\n response.json[\"id\"],\n \"id:{}:{}::{}\".format(schema, schema, fields_to_update[idx][\"id\"]),\n )\n\n #\n # Get updated data\n #\n result = app.get_batch(batch=docs)\n for idx, response in enumerate(result):\n expected_updated_fields = {\n k: v for k, v in expected_fields_from_get_operation[idx].items()\n }\n expected_updated_fields.update(fields_to_update[idx])\n self.assertDictEqual(response.json[\"fields\"], expected_updated_fields)\n\n #\n # Delete data\n #\n result = app.delete_batch(batch=docs)\n for idx, response in enumerate(result):\n self.assertEqual(\n response.json[\"id\"],\n \"id:{}:{}::{}\".format(schema, schema, docs[idx][\"id\"]),\n )\n\n #\n # get batch deleted data\n #\n result = app.get_batch(batch=docs)\n for idx, response in enumerate(result):\n self.assertEqual(response.status_code, 404)",
"async def refetch_schema(self):\n await self._protocol.refetch_schema()",
"def process(*, schemas: types.Schemas) -> None:\n # Retrieve back references\n backrefs = process_helper.get_artifacts(\n schemas=schemas, get_schema_artifacts=_get_schema_backrefs\n )\n # Map to a schema for each grouped back references\n backref_schemas = process_helper.calculate_outputs(\n artifacts=backrefs, calculate_output=_backrefs_to_schema\n )\n # Convert to list to resolve iterator\n backref_schema_list = list(backref_schemas)\n # Add backreferences to schemas\n for name, backref_schema in backref_schema_list:\n schemas[name] = {\"allOf\": [schemas[name], backref_schema]}",
"def _schema_update(self, schema_func, *args):\r\n while True:\r\n try:\r\n schema_version = schema_func(*args)\r\n except SchemaDisagreementException:\r\n self._wait_for_agreement()\r\n else:\r\n break\r\n return schema_version",
"def merge_schema_entry(self, old_schema_entry, new_schema_entry):\n if not old_schema_entry:\n return new_schema_entry\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n ('Unexpected schema_entry type, this should never happen: '\n 'old (%s); new (%s)') % (old_status, new_status))\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n raise Exception(\n 'old_name (%s) != new_name(%s), should never happen' %\n (old_name, new_name))\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n ('Converting schema for \"%s\" from NULLABLE RECORD '\n 'into REPEATED RECORD') % old_name)\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n 'Leaving schema for \"%s\" as REPEATED RECORD' % old_name)\n\n # RECORD type needs a recursive merging of sub-fields. We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n merged_entry = self.merge_schema_entry(old_entry, new_entry)\n old_fields[key] = merged_entry\n return old_schema_entry\n\n # For all other types, the old_mode must be the same as the new_mode. It\n # might seem reasonable to allow a NULLABLE {primitive_type} to be\n # upgraded to a REPEATED {primitive_type}, but currently 'bq load' does\n # not support that so we must also follow that rule.\n if old_mode != new_mode:\n raise Exception(('Mismatched mode for non-RECORD: '\n 'old=(%s,%s,%s,%s); new=(%s,%s,%s,%s)') %\n (old_status, old_name, old_mode, old_type,\n new_status, new_name, new_mode, new_type))\n\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n raise Exception(\n 'Mismatched type: old=(%s,%s,%s,%s); new=(%s,%s,%s,%s)' %\n (old_status, old_name, old_mode, old_type, new_status,\n new_name, new_mode, new_type))\n\n new_info['type'] = candidate_type\n return new_schema_entry",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def upgrade_schema():\n\n db_version = get_db_version()\n\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)",
"def _build_schema(self, schema):\n\n for key in list(schema.keys()):\n\n value = schema[key]\n if isinstance(value, dict):\n if u'$ref' in value:\n schema[key].update(self._schema_cache[value[u'$ref']])\n self._build_schema(value)\n elif key == '$ref':\n prop = self._schema_cache[value][u'properties']\n schema[u'properties'] = self._build_schema(prop)\n\n return schema",
"def schema_update_script(self, version):",
"def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode",
"def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )",
"def updateSchema(update_types=[],\n update_all=False,\n remove_inst_schemas=True):\n portal = getSite()\n portal = makerequest(portal)\n req = portal.REQUEST\n req.form['update_all'] = update_all\n req.form['remove_instance_schemas'] = remove_inst_schemas\n for obj_type in update_types:\n req.form[obj_type] = True\n portal.archetype_tool.manage_updateSchema(req)",
"def _onSchemaFromServer(self, schemadata):\n msg = 'POST schema.json?schema={}'.format(json.dumps(schemadata))\n asyncio.ensure_future(self._sendToAllConnectedSockets(msg))",
"def _update(self, schema: 'Schema'):\n for method in schema._get_methods():\n if method.id in self:\n raise ValueError(\n f\"Duplicate method id for {method.method} id: {method.id}\"\n )\n\n for combinator in schema._get_combinators():\n if combinator.id in self:\n raise ValueError(\n f\"Duplicate combinator id for {combinator.predicate} \" +\n f\"id: {combinator.id}\"\n )\n\n self.constructors += schema.constructors\n self.functions += schema.functions\n\n self._build_schema_data()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
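The merge_schema_entry and merge_mode variants collected in the negatives above all hinge on the same mode-reconciliation rule: a REQUIRED field may stay REQUIRED only while every newly observed value is filled, and may relax to NULLABLE only when mode inference is enabled. The snippet below is a minimal standalone paraphrase of that rule using simplified dict-shaped entries; it is illustrative only and does not reproduce any one of the listed implementations.

```python
# Illustrative paraphrase of the mode-merging rule seen in the snippets above.
# Entries are simplified to plain dicts; None signals an incompatible merge.
def merge_mode(old_entry, new_entry, infer_mode=False):
    old_mode = old_entry["info"]["mode"]
    new_mode = new_entry["info"]["mode"]
    if old_mode == "REQUIRED" and new_mode == "NULLABLE":
        if new_entry["filled"]:
            return old_mode                       # no missing values yet: keep REQUIRED
        return new_mode if infer_mode else None   # relax only when inference is enabled
    if old_mode != new_mode:
        return None
    return old_mode


old = {"info": {"mode": "REQUIRED"}, "filled": True}
new = {"info": {"mode": "NULLABLE"}, "filled": False}
print(merge_mode(old, new))        # None -> mismatched mode without inference
print(merge_mode(old, new, True))  # NULLABLE
```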
Try to retrieve the current BigQuery TableSchema for a table_ref. Tries to fetch the schema of an existing table. Raises SchemaUpdateError if table is not found or if table is not of type 'TABLE'. | def _TryGetCurrentSchema(dataset_id, table_id, project_id):
client = GetApiClient()
service = client.tables
get_request_type = GetApiMessage('BigqueryTablesGetRequest')
get_request = get_request_type(datasetId=dataset_id,
tableId=table_id,
projectId=project_id)
try:
table = service.Get(get_request)
if not table or table.type != 'TABLE':
raise SchemaUpdateError('Schema modifications only supported '
'on TABLE objects received [{}]'.format(
table))
except apitools_exceptions.HttpNotFoundError:
raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
project_id, dataset_id, table_id))
return table.schema | [
"def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema",
"def get_table_schema(dataset, table):\r\n logging.info('getting table schema')\r\n bigquery_client = bigquery.Client()\r\n dataset_ref = bigquery_client.dataset(dataset)\r\n bg_tableref = bigquery.table.TableReference(dataset_ref, table)\r\n bg_table = bigquery_client.get_table(bg_tableref)\r\n return bg_table.schema",
"def get_table_schema(schema):\n if schema is None:\n return schema\n elif isinstance(schema, str):\n return bigquery_tools.parse_table_schema_from_json(schema)\n elif isinstance(schema, dict):\n return bigquery_tools.parse_table_schema_from_json(json.dumps(schema))\n else:\n raise TypeError('Unexpected schema argument: %s.' % schema)",
"def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None",
"def get_table(client_name, dataset_ref, table_id, incl_schema=False):\n table_ref = dataset_ref.table(table_id)\n \n table = client_name.get_table(table_ref)\n \n if table:\n print('---------------------------------------')\n print(f'Table ID: {table.table_id}')\n print(f'Friendly Name: {table.friendly_name}')\n print(f'Full ID: {table.full_table_id}')\n print(f'Type: {table.table_type}')\n print(f'Rows: {table.num_rows}')\n if incl_schema == True:\n print(f'\\nSchema:\\n{table.schema}') \n \n else:\n print(f'{table_id} not present in dataset {dataset}')\n \n return table",
"def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))",
"def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request",
"def tableref(project, dataset_id, table_id):\n dataset_ref = bigquery.dataset.DatasetReference(project=project, dataset_id=dataset_id)\n return bigquery.table.TableReference(dataset_ref=dataset_ref, table_id=table_id)",
"def get_table_def(df: DataFrame, schema_name: str = 'Extract', table_name: str = 'Extract') -> TableDefinition:\n schema = df.schema\n cols = list(map(HyperUtils.convert_struct_field, schema))\n return TableDefinition(\n table_name=TableName(schema_name, table_name),\n columns=cols\n )",
"def getSchema (self, table):\n\t\tcursor = self._getCursor()\n\t\tquery = \"DESCRIBE %s\" % table\n\t\tschema = []\n\t\tcursor.execute (query)\n\t\tfor row in cursor.fetchall():\n\t\t\tschema.append( row[0] )\n\t\treturn schema",
"def create_bq_table(client, dataset, table, schema):\n \n print('Creating table %s.%s' % (dataset, table))\n exists = client.check_table(dataset, table)\n if exists:\n raise AssertionError(\"Table already exists: %s.%s\" % (dataset,table))\n created = client.create_table(dataset, table, schema)\n # Check that the empty table was created\n exists = client.check_table(dataset, table)\n if not exists:\n raise RuntimeError('Table creation failed: %s.%s' % (dataset, table))",
"def get_query_schema(self, job_id):\n\n job_collection = self.bigquery.jobs()\n query_reply = self._get_query_results(\n job_collection, self.project_id, job_id, offset=0, limit=0)\n\n if not query_reply['jobComplete']:\n logger.warning('BigQuery job %s not complete' % job_id)\n raise UnfinishedQueryException()\n\n return query_reply['schema']['fields']",
"def get_table_schema(context, data_dict):\n resource_id = data_dict.get(\"resource_id\")\n data_dict = {'id': resource_id}\n _check_access('resource_show', context, data_dict)\n resource = t.get_action('resource_show')(context, data_dict)\n if not resource:\n raise NotFound(_('Resource not found.'))\n schema_name = resource.get('schema')\n schema = False\n if schema_name:\n schema = validation_load_json_schema(schema_name)\n if not schema:\n schema = {}\n return schema",
"def _get_schema(name):\n item = _get_notebook_item(name)\n if not item:\n item = _get_table(name)\n\n if isinstance(item, gcp.bigquery.Schema):\n return item\n if hasattr(item, 'schema') and isinstance(item.schema, gcp.bigquery._schema.Schema):\n return item.schema\n return None",
"def __check_table(input_table):\n\n try:\n table = TABLE_TYPES[input_table]\n return table\n except KeyError:\n raise InvalidTableType(input_table)",
"def validate_bigquery_table_exists(self, table_name):\n table_ref = self.dataset.table(table_name)\n try:\n self.bigquery_client.get_table(table_ref)\n print(f\"Bigquery table exists: {table_name}\")\n return True\n except NotFound:\n print(f\"Bigquery table does NOT exist: {table_name}\")\n return False",
"def CreateTableFromFile(self, table_name, schema_path):\n try:\n schema_file = open(schema_path)\n schema_json = schema_file.read()\n schema_file.close()\n except IOError, e:\n raise SchemaError('Could not read file (%s):\\n%s' %\n (schema_path, str(e)))\n return self.CreateTableFromJson(table_name, schema_json)",
"def table_schema(self):\n return self._table_schema",
"def save_df_to_bq_table(\n df: pd.DataFrame,\n table_id: str,\n schema: Optional[List[bigquery.SchemaField]] = None,\n project: str = \"dg-dp-bqondemand-dev\",\n):\n bqclient = bigquery.Client(project=project, location=\"EU\")\n job_config = bigquery.LoadJobConfig(\n source_format=bigquery.SourceFormat.PARQUET,\n )\n\n try:\n bqclient.get_table(table_id)\n except NotFound:\n\n if not schema:\n schema = infer_bq_table_schema_from_df(df)\n\n # Create table\n table = bigquery.Table(table_id, schema=schema)\n table = bqclient.create_table(table)\n print(\"Table created with schema:\")\n pprint(schema)\n\n load_job = bqclient.load_table_from_dataframe(df, table_id, job_config=job_config)\n load_job.result()\n print(f\"{load_job.output_rows} written to table {table_id}.\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
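The positive document for the query above uses the apitools-generated client from the gcloud surface; it checks that the object exists and is a plain TABLE before handing back its schema. As a hedged sketch only, the same check can be expressed with the google-cloud-bigquery client; the project, dataset, and table identifiers are placeholders and the exception class is redefined locally.

```python
# Sketch with google-cloud-bigquery, not the apitools client used above.
from google.api_core.exceptions import NotFound
from google.cloud import bigquery


class SchemaUpdateError(Exception):
    """Local stand-in for the error type raised in the document above."""


def try_get_current_schema(project_id, dataset_id, table_id):
    client = bigquery.Client(project=project_id)
    try:
        table = client.get_table(f"{project_id}.{dataset_id}.{table_id}")
    except NotFound:
        raise SchemaUpdateError(
            f"Table with id [{project_id}:{dataset_id}:{table_id}] not found."
        )
    if table.table_type != "TABLE":
        raise SchemaUpdateError(
            f"Schema modifications only supported on TABLE objects, got [{table.table_type}]."
        )
    return table.schema
```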
Change mode to `NULLABLE` for columns in an existing schema. Tries to set the mode of existing columns in orig_schema_map to `NULLABLE`. Raises SchemaUpdateError if a column is not found in orig_schema_map. | def _GetRelaxedCols(relaxed_columns, orig_schema_map):
updated_schema_map = orig_schema_map.copy()
for col in relaxed_columns:
if col in orig_schema_map:
updated_schema_map[col].mode = 'NULLABLE'
else:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
return updated_schema_map | [
"def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode",
"def _visit_column_nullable(self, table, column, delta):\n # TODO: http://www.firebirdfaq.org/faq103/\n raise exceptions.NotSupportedError(\n \"Firebird does not support altering NULL bevahior.\")",
"def test_nullable_reflection(self, metadata, connection):\n meta = metadata\n\n # this is ideally one table, but older MySQL versions choke\n # on the multiple TIMESTAMP columns\n row = connection.exec_driver_sql(\n \"show variables like '%%explicit_defaults_for_timestamp%%'\"\n ).first()\n explicit_defaults_for_timestamp = row[1].lower() in (\"on\", \"1\", \"true\")\n\n reflected = []\n for idx, cols in enumerate(\n [\n [\n \"x INTEGER NULL\",\n \"y INTEGER NOT NULL\",\n \"z INTEGER\",\n \"q TIMESTAMP NULL\",\n ],\n [\"p TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP\"],\n [\"r TIMESTAMP NOT NULL\"],\n [\"s TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP\"],\n [\"t TIMESTAMP\"],\n [\"u TIMESTAMP DEFAULT CURRENT_TIMESTAMP\"],\n ]\n ):\n Table(\"nn_t%d\" % idx, meta) # to allow DROP\n\n connection.exec_driver_sql(\n \"\"\"\n CREATE TABLE nn_t%d (\n %s\n )\n \"\"\"\n % (idx, \", \\n\".join(cols))\n )\n\n reflected.extend(\n {\n \"name\": d[\"name\"],\n \"nullable\": d[\"nullable\"],\n \"default\": d[\"default\"],\n }\n for d in inspect(connection).get_columns(\"nn_t%d\" % idx)\n )\n\n if connection.dialect._is_mariadb_102:\n current_timestamp = \"current_timestamp()\"\n else:\n current_timestamp = \"CURRENT_TIMESTAMP\"\n\n eq_(\n reflected,\n [\n {\"name\": \"x\", \"nullable\": True, \"default\": None},\n {\"name\": \"y\", \"nullable\": False, \"default\": None},\n {\"name\": \"z\", \"nullable\": True, \"default\": None},\n {\"name\": \"q\", \"nullable\": True, \"default\": None},\n {\"name\": \"p\", \"nullable\": True, \"default\": current_timestamp},\n {\n \"name\": \"r\",\n \"nullable\": False,\n \"default\": None\n if explicit_defaults_for_timestamp\n else (\n \"%(current_timestamp)s \"\n \"ON UPDATE %(current_timestamp)s\"\n )\n % {\"current_timestamp\": current_timestamp},\n },\n {\"name\": \"s\", \"nullable\": False, \"default\": current_timestamp},\n {\n \"name\": \"t\",\n \"nullable\": True\n if explicit_defaults_for_timestamp\n else False,\n \"default\": None\n if explicit_defaults_for_timestamp\n else (\n \"%(current_timestamp)s \"\n \"ON UPDATE %(current_timestamp)s\"\n )\n % {\"current_timestamp\": current_timestamp},\n },\n {\n \"name\": \"u\",\n \"nullable\": True\n if explicit_defaults_for_timestamp\n else False,\n \"default\": current_timestamp,\n },\n ],\n )",
"def nullable(self):\n _columns = []\n if not isinstance(self._last_column, list):\n _columns = [self._last_column]\n\n for column in _columns:\n column.nullable()\n return self",
"def hide_null_values(table):\n\n for name, col in list(table.columns.items()):\n if col.dtype.kind == 'O':\n good_values = [x for x in col if x is not None]\n good_kind = np.array(good_values).dtype.kind\n null = null_values[good_kind]\n good_col = np.array([null if x is None else x for x in col])\n table[name] = Column(good_col)",
"def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)",
"def to_null(self, value_to_null, table_name):\n\n if type(value_to_null) == str:\n value_to_null = \"\\\"\" + value_to_null + \"\\\"\"\n\n try:\n with sql.connect(self.db_file) as conn:\n cur = conn.cursor()\n columns = self.tables[table_name]\n # print(columns.keys())\n for col in columns.keys():\n # print(col)\n cur.execute(f\"\"\"UPDATE {table_name} SET {col}=NULL \n WHERE {col}=={value_to_null}\"\"\")\n finally:\n conn.close()",
"def null_removal_mode(dataframe, colname):\n\n col = dataframe[colname]\n\n dataframe[colname] = col.fillna(col.mode().get(0, None))\n\n return dataframe",
"def check_nullable(self, check_obj, schema):\n raise NotImplementedError",
"def test_rewriteOracleNULLs_SelectAllColumns(self):\n rows = resultOf(\n Select(From=self.schema.NULLCHECK).on(NullTestingOracleTxn())\n )[0]\n self.assertEquals(rows, [[\"\", None]])",
"def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request",
"def upgrade_schema(schema: FlatSchema) -> FlatSchema:\n\n cls_fields = {}\n for py_cls in so.ObjectMeta.get_schema_metaclasses():\n if isinstance(py_cls, adapter.Adapter):\n continue\n\n fields = py_cls._schema_fields.values()\n cls_fields[py_cls] = sorted(fields, key=lambda f: f.index)\n\n id_to_data = schema._id_to_data\n fixes = {}\n for id, typ_name in schema._id_to_type.items():\n data = id_to_data[id]\n obj = so.Object.schema_restore((typ_name, id))\n typ = type(obj)\n\n tfields = cls_fields[typ]\n exp_len = len(tfields)\n if len(data) < exp_len:\n ldata = list(data)\n for i in range(len(ldata), exp_len):\n ldata.append(tfields[i].get_default())\n\n fixes[id] = tuple(ldata)\n\n return schema._replace(id_to_data=id_to_data.update(fixes))",
"def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry",
"def fill_nulls_with_none(df):\n new_df = df.copy()\n for col in df.columns:\n new_df[col] = new_df[col].where(new_df[col].notnull(), None)\n return new_df",
"def set_null_default_value(self):\n raise NotImplementedError(\n 'operation set_null_default_value(...) not yet implemented')",
"def _alter_set_defaults(self, field, name, params, sqls):\n type = params['type']\n # MySQL does not support defaults for geometry columns also\n is_geom = True in [ type.find(t) > -1 for t in self.geom_types ]\n is_text = True in [ type.find(t) > -1 for t in self.text_types ]\n if not is_geom and not is_text:\n super(DatabaseOperations, self)._alter_set_defaults(field, name, params, sqls)",
"def __modify_schema__(cls, field_schema):\n field_schema.update(type='string')",
"def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None",
"def edit_columns(self):\n return [input_col_spec_from_kolumne(k) for k in\n self.modell.kolumnes if not k.is_primary_key()]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
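The relax operation above only flips the mode attribute on the generated schema messages, and BigQuery then accepts REQUIRED-to-NULLABLE as a metadata-only change. A hedged equivalent with the google-cloud-bigquery client is sketched below; the table id and the column names being relaxed are placeholders.

```python
# Sketch: relax REQUIRED columns to NULLABLE via google-cloud-bigquery.
from google.cloud import bigquery

client = bigquery.Client()
table = client.get_table("my-project.my_dataset.my_table")  # placeholder id

to_relax = {"name", "address"}  # placeholder column names
table.schema = [
    bigquery.SchemaField(f.name, f.field_type, mode="NULLABLE",
                         description=f.description, fields=f.fields)
    if f.name in to_relax and f.mode == "REQUIRED" else f
    for f in table.schema
]
client.update_table(table, ["schema"])  # REQUIRED -> NULLABLE is an allowed update
```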
Add new columns to an existing schema. Tries to add new fields to an existing schema. Raises SchemaUpdateError if a column already exists in the orig_schema_map. | def _AddNewColsToSchema(new_fields, orig_schema_map):
updated_schema_map = orig_schema_map.copy()
for new_field in new_fields:
if new_field.name in orig_schema_map:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
updated_schema_map[new_field.name] = new_field
return updated_schema_map | [
"def add_column(self, schema):\n self[schema.name] = schema.copy()",
"def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()",
"def test_add_columns(self):\n schema = 'test_schema'\n table = 'test_table'\n adding_columns = {'col1': 'type1', 'col2': 'type2'}\n columns_query = ', '.join([f'{col_name} {col_type}' for col_name, col_type in adding_columns.items()])\n expected_query = f'ALTER TABLE {schema}.\"{table.upper()}\" ADD {columns_query}'\n\n self.snowflake.add_columns(schema, table, adding_columns)\n self.assertListEqual(self.snowflake.executed_queries, [expected_query])",
"def addColumn(columnDef, changeSchema=False, connection=None):",
"def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n if col in orig_schema_map:\n updated_schema_map[col].mode = 'NULLABLE'\n else:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n return updated_schema_map",
"def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(types[i])\n self.cols.extend(new_cols)\n self.types.extend(new_types)\n\n self._alter_table(new_cols, new_types)\n\n row_ids = self.get_values('__ROWID')\n \n for col in new_cols :\n new_vals = source.get_values(col)\n if len(row_ids) == 0 :\n for val in new_vals :\n self._insert_internal(['__ROWID', col], [0, val])\n\n row_ids = self.get_values('__ROWID')\n\n else :\n binds = zip(new_vals, row_ids)\n q = self._quoter(col)\n sql_base = 'UPDATE \"%s\" SET \"%s\" = %s WHERE \"__ROWID\" = %%d' % (self.name, col, q)\n cur = self.con.cursor()\n for bind in binds :\n if bind[0] :\n update_sql = sql_base % (str(bind[0]), bind[1])\n cur.execute(update_sql)\n\n self.version += 1",
"def add_info_table_cols(self, new_cols):\n\n cols = set([x.header for x in self.info_table.columns])\n missing = set(new_cols) - cols\n if len(missing) == 0:\n return\n\n # iterate on new_cols since they are in order\n for c in new_cols:\n if c in missing:\n self.info_table.add_column(c)",
"def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)",
"def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )",
"def insert_meta_fields_into_existing_schema(graphql_schema):\n root_type_name = graphql_schema.get_query_type().name\n\n for type_name, type_obj in six.iteritems(graphql_schema.get_type_map()):\n if type_name.startswith('__') or type_name == root_type_name:\n # Ignore the types that are built into GraphQL itself, as well as the root query type.\n continue\n\n if not isinstance(type_obj, (GraphQLObjectType, GraphQLInterfaceType)):\n # Ignore definitions that are not interfaces or types.\n continue\n\n for meta_field_name, meta_field in six.iteritems(EXTENDED_META_FIELD_DEFINITIONS):\n if meta_field_name in type_obj.fields:\n raise AssertionError(u'Unexpectedly encountered an existing field named {} while '\n u'attempting to add a meta-field of the same name. Make sure '\n u'you are not attempting to add meta-fields twice.'\n .format(meta_field_name))\n\n type_obj.fields[meta_field_name] = meta_field",
"def show_schema_updates(self):\n for mode in ['source', 'target']:\n deltas = self.database['deltas']['new_columns_in_' + mode]\n working_db = self.source['alias'] if mode == 'source' else self.target['alias']\n other_db = self.target['alias'] if mode == 'source' else self.source['alias']\n con = self.source['connection'] \\\n if working_db == self.source['alias'] else self.target['connection']\n for column in deltas:\n logging.info('')\n logging.info(self._get_log_break('|Schema Change - Column: {0}|'.format(column)))\n logging.info(\n ' Column \\'%s\\' exists in the %s database but not in %s',\n column, working_db, other_db\n )\n info = con.get_column_sql(self.database['table'], column)\n logging.info(\n ' To Add Column \\'%s\\' to %s, run the following SQL on %s:',\n column, other_db, other_db\n )\n logging.warning(' ' + info['add_sql'])\n logging.info(\n ' To Drop Column \\'%s\\' from %s, run the following SQL on %s:',\n column, working_db, working_db\n )\n logging.warning(' ' + info['drop_sql'])\n logging.info(self._get_log_break())\n logging.info('')\n if self.config.getboolean('clone_row', 'schema_only'):\n # we're done if only diffing schema\n self.exit()",
"def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()",
"def add_obs_columns(self, column_defs, ignore_duplicates=True, commit=True):\n current_cols = self.conn.execute('pragma table_info(\"obs\")').fetchall()\n current_cols = [r[1] for r in current_cols]\n if isinstance(column_defs, str):\n column_defs = column_defs.split(',')\n for column_def in column_defs:\n if isinstance(column_def, str):\n column_def = column_def.split()\n name, typestr = column_def\n if typestr is float:\n typestr = 'float'\n elif typestr is int:\n typestr = 'int'\n elif typestr is str:\n typestr = 'text'\n check_name = name\n if name.startswith('`'):\n check_name = name[1:-1]\n else:\n name = '`' + name + '`'\n if check_name in current_cols:\n if ignore_duplicates:\n continue\n raise ValueError(\"Column %s already exists in table obs\" % check_name)\n self.conn.execute('ALTER TABLE obs ADD COLUMN %s %s' % (name, typestr))\n current_cols.append(check_name)\n if commit:\n self.conn.commit()\n return self",
"def add_table_column(self, schema, column):\n if not column[\"name\"] or not constants.NAME_RX.match(column[\"name\"]):\n raise ValueError(\"invalid column name\")\n if utils.name_in_nocase(column[\"name\"], [c[\"name\"] for c in schema[\"columns\"]]):\n raise ValueError(\"non-unique column name\")\n if column[\"type\"] not in constants.COLUMN_TYPES:\n raise ValueError(\"invalid column type\")\n sql = (\n f'''ALTER TABLE \"{schema['name']}\"'''\n f\"\"\" ADD COLUMN \"{column['name']}\" {column['type']}\"\"\"\n )\n if column.get(\"notnull\"):\n notnull = [\"NOT NULL\"]\n if column[\"type\"] == constants.INTEGER:\n notnull.append(\"DEFAULT 0\")\n elif column[\"type\"] == constants.REAL:\n notnull.append(\"DEFAULT 0.0\")\n elif column[\"type\"] in (constants.TEXT, constants.BLOB):\n notnull.append(\"DEFAULT ''\")\n sql += \" \" + \" \".join(notnull)\n self.dbcnx.execute(sql)\n schema[\"columns\"].append(column)\n self.update_table(schema)",
"def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)",
"def add_new_column_by_copying_values_from_another_column(\n self,\n df,\n list_of_existing_col_names,\n list_of_new_col_names):\n if not (isinstance(list_of_existing_col_names, list)\n and isinstance(list_of_new_col_names, list)):\n raise transform_errors.InputDataTypeError(\n f\"List of existing and new column names must be \"\n f\"of list type.\")\n\n if len(list_of_existing_col_names) != len(list_of_new_col_names):\n raise transform_errors.InputDataLengthError(\n f\"The length of existing column list: \"\n f\"{len(list_of_existing_col_names)} \"\n f\"is NOT the same as the length of new column \"\n f\"name list: {len(list_of_new_col_names)}\")\n\n for i, new_col_name in enumerate(list_of_new_col_names):\n df[new_col_name] = df[list_of_existing_col_names[i]]\n\n return df",
"def addColumnsFromDatabase(connection=None):",
"def add_schema_fields(self, fields):\n if not fields:\n return\n\n data = json.dumps(fields)\n\n try:\n return self.client.post(\n self._get_collection_url('schema/fields'),\n body=data\n )\n except solr_errors.SolrError as e:\n raise solr_errors.SolrSchemaUpdateError(fields, message=e.args[0])",
"def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
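Appending columns is the complementary metadata-only schema change to relaxing them, with the same duplicate-name guard as the document above. A hedged sketch against the google-cloud-bigquery client, again with placeholder identifiers:

```python
# Sketch: append a new NULLABLE column via google-cloud-bigquery.
from google.cloud import bigquery

client = bigquery.Client()
table = client.get_table("my-project.my_dataset.my_table")  # placeholder id

new_field = bigquery.SchemaField("loaded_at", "TIMESTAMP", mode="NULLABLE")  # placeholder column
if any(f.name == new_field.name for f in table.schema):
    raise ValueError(f"Column {new_field.name!r} already exists")

table.schema = list(table.schema) + [new_field]
client.update_table(table, ["schema"])  # adding NULLABLE columns is an allowed update
```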
Try to delete a dataset, propagating error on failure. | def _TryDeleteDataset(dataset_id, project_id):
client = GetApiClient()
service = client.datasets
delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
delete_request = delete_request_type(datasetId=dataset_id,
projectId=project_id,
deleteContents=True)
service.Delete(delete_request)
log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id)) | [
"def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)",
"def delete_dataset(dataset_id: int):\n db = get_db()\n cur = db.execute(f'DELETE FROM dataset WHERE id = {dataset_id}')\n db.commit()\n cur.close()",
"def on_dataset_delete(dataset_id):\n # todo: write this",
"def delete(_id):\n dataset = ESDataset.get(id=_id, ignore=404)\n\n if not dataset:\n raise NoEntityError(f\"dataset {_id} does not exist.\")\n\n dataset.delete()\n\n return dataset.name",
"def delete_dataset(self, dataset_id):\n try:\n datasets = self.bigquery.datasets()\n request = datasets.delete(projectId=self.project_id,\n datasetId=dataset_id)\n request.execute()\n return True\n except Exception, e:\n logger.error('Cannot delete dataset %s: %s' % (dataset_id, e))\n return None",
"def delete_dataset(self, identifier):\n # Delete the dataset directory if it exists. Otherwise return False\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return False\n shutil.rmtree(dataset_dir)\n return True",
"def test_delete_detail_unauthenticated(self):\n dataset = create_external_dataset(**self.dataset_attrs[0])\n self.assertEqual(DataSet.objects.count(), 1)\n uri = '/api/0.1/datasets/%s/' % (dataset.dataset_id)\n resp = self.api_client.delete(uri, format='json')\n self.assertHttpUnauthorized(resp)\n self.assertEqual(DataSet.objects.count(), 1)",
"def DeleteDataset(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def test_delete_training_dataset(self):\n pass",
"def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")",
"def delete_dataset(dataset:str,domain:str):\n from .config import get_config\n cfg = get_config()\n dataset,domain = dataset.lower(),domain.lower()\n datasets = pd.DataFrame(cfg['domains'])\n if not (datasets == np.array([dataset,domain])).all(axis=1).any():\n raise Exception(f\"Sorry, {dataset} is not a registered dataset in {domain} domain in dcarte\")\n else:\n print(f'ARE YOU SURE YOU WANT TO DELETE DATASET {dataset}?') \n answer = input(\"Enter yes or no: \") \n while answer not in (\"yes\", \"no\"): \n print(\"Please only enter yes or no.\")\n answer = input(\"Enter yes or no: \") \n\n if answer == \"yes\": \n delete_dataset_(cfg,dataset,domain)\n print(f'DATASET {dataset} from DOMAIN {domain} is now deleted')",
"def delete_dataset_controller(self, request):\n try:\n logging.info(\"executing delete_dataset_controller function\")\n delete_dataset_url = (\n self.gcp_config.get(\"automl\").get(\"common\").get(\"delete_dataset\")\n )\n delete_dataset_request = request.dict(exclude_none=True)\n response, status_code = APIInterface.post(\n route=delete_dataset_url,\n data=delete_dataset_request,\n )\n if status_code == 200:\n self.CRUDDataset.update(\n dataset_id=response.get(\"dataset_id\"), status=\"Deleting\"\n )\n self.create_operation_record(api_response=response)\n return response\n else:\n raise Exception({\"status\": \"delete dataset failed\"})\n except Exception as error:\n logging.error(f\"Error in delete_dataset_controller function: {error}\")\n raise error",
"def delete_dataset(self, dataset_id: str) -> dict:\n return self.make_request({}, f\"dataset/{dataset_id}\", requests.delete)",
"def delete_data(self, data_id):",
"def test_delete_detail_for_asset(self):\n for dataset_attr in self.dataset_attrs:\n self.datasets.append(create_external_dataset(**dataset_attr))\n self.asset.datasets.add(self.datasets[0], self.datasets[1])\n self.asset.save()\n self.assertEqual(len(self.asset.datasets.all()), 2)\n disassociate_dataset = self.datasets[0]\n self.api_client.client.login(username=self.username,\n password=self.password)\n uri = '/api/0.1/datasets/assets/%s/%s/' % (self.asset.asset_id,\n disassociate_dataset.dataset_id)\n resp = self.api_client.delete(uri)\n self.assertHttpAccepted(resp)\n try:\n # Test that we didn't delete the dataset\n dataset = DataSet.objects.get(\n dataset_id=disassociate_dataset.dataset_id)\n self.assertNotEqual(dataset.status, 'deleted') \n # Refresh the asset\n asset = Asset.objects.get_subclass(asset_id=self.asset.asset_id)\n # Check that the dataset is no longer associated\n self.assertNotIn(disassociate_dataset, asset.datasets.all())\n # Check that we didn't accidently clobber other\n # datasets associated with the asset\n self.assertEqual(asset.datasets.all().count(), 1)\n self.assertIn(self.datasets[1], asset.datasets.select_subclasses())\n except DataSet.DoesNotExist:\n self.fail(\"Data set was removed, it should only have been disassociated\")",
"def destroy(self, request, pk=None):\n organization_id = int(request.query_params.get('organization_id', None))\n dataset_id = pk\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization_id=organization_id, pk=dataset_id\n )\n if not d.exists():\n return JsonResponse({\n 'status': 'error',\n 'message': 'user does not have permission to delete dataset',\n }, status=status.HTTP_403_FORBIDDEN)\n d = d[0]\n d.delete()\n return JsonResponse({'status': 'success'})",
"def raise_exception_for_dataset(dataset_reference):\n if dataset_reference.dataset_id == non_existing_dataset_id:\n raise cloud.exceptions.NotFound('')",
"def delete(self, dataset: SomeResourceIds, **kwargs):\n data = kwargs\n if not isinstance(dataset, list):\n dataset = [dataset]\n\n data['datasets'] = dataset\n self._provider.post('delete-datasets', data=data, as_json=False)",
"def test_delete_detail_for_asset_unauthorized(self):\n for dataset_attr in self.dataset_attrs:\n self.datasets.append(create_external_dataset(**dataset_attr))\n self.asset.datasets.add(self.datasets[0], self.datasets[1])\n self.asset.owner = self.user2\n self.asset.save()\n self.assertEqual(len(self.asset.datasets.all()), 2)\n disassociate_dataset = self.datasets[0]\n self.api_client.client.login(username=self.username,\n password=self.password)\n uri = '/api/0.1/datasets/assets/%s/%s/' % (self.asset.asset_id,\n disassociate_dataset.dataset_id)\n resp = self.api_client.delete(uri)\n self.assertHttpUnauthorized(resp)\n self.assertEqual(len(self.asset.datasets.all()), 2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
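The delete-with-contents behaviour in the document above maps directly onto the google-cloud-bigquery client; a minimal sketch with a placeholder project and dataset id:

```python
# Sketch: delete a dataset and everything in it via google-cloud-bigquery.
from google.cloud import bigquery

client = bigquery.Client(project="my-project")  # placeholder project
client.delete_dataset("my_dataset", delete_contents=True, not_found_ok=False)
print("Deleted dataset [my-project:my_dataset]")
```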
Get Table resource args (source, destination) for copy command. | def GetTableCopyResourceArgs():
table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')
arg_specs = [
resource_args.GetResourcePresentationSpec(
verb='to copy from', name='source', required=True, prefixes=True,
attribute_overrides={'table': 'source'}, positional=False,
resource_data=table_spec_data.GetData()),
resource_args.GetResourcePresentationSpec(
verb='to copy to', name='destination',
required=True, prefixes=True,
attribute_overrides={'table': 'destination'}, positional=False,
resource_data=table_spec_data.GetData())]
fallthroughs = {
'--source.dataset': ['--destination.dataset'],
'--destination.dataset': ['--source.dataset']
}
return [concept_parsers.ConceptParser(arg_specs, fallthroughs)] | [
"def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request",
"def AddCopyBackupResourceArgs(parser):\n arg_specs = [\n presentation_specs.ResourcePresentationSpec(\n '--source',\n GetBackupResourceSpec(),\n 'TEXT',\n required=True,\n flag_name_overrides={\n 'instance': '--source-instance',\n 'backup': '--source-backup'\n }),\n presentation_specs.ResourcePresentationSpec(\n '--destination',\n GetBackupResourceSpec(),\n 'TEXT',\n required=True,\n flag_name_overrides={\n 'instance': '--destination-instance',\n 'backup': '--destination-backup',\n }),\n ]\n\n concept_parsers.ConceptParser(arg_specs).AddToParser(parser)",
"def get_copy_command(src_file, dst_file, use_links):\n copy_command = ['ln', '-f', '-s'] if use_links else ['cp']\n copy_command.extend([src_file, dst_file])\n return copy_command",
"def gen_copy_tbl(self, src_curs, dst_curs, where):\r\n self.pkey_list = skytools.get_table_pkeys(src_curs, self.table_name)\r\n dst_pkey = skytools.get_table_pkeys(dst_curs, self.table_name)\r\n if dst_pkey != self.pkey_list:\r\n self.log.error('pkeys do not match')\r\n sys.exit(1)\r\n\r\n src_cols = skytools.get_table_columns(src_curs, self.table_name)\r\n dst_cols = skytools.get_table_columns(dst_curs, self.table_name)\r\n field_list = []\r\n for f in self.pkey_list:\r\n field_list.append(f)\r\n for f in src_cols:\r\n if f in self.pkey_list:\r\n continue\r\n if f in dst_cols:\r\n field_list.append(f)\r\n\r\n self.common_fields = field_list\r\n\r\n fqlist = [skytools.quote_ident(col) for col in field_list]\r\n\r\n tbl_expr = \"select %s from %s\" % (\",\".join(fqlist), self.fq_table_name)\r\n if where:\r\n tbl_expr += ' where ' + where\r\n tbl_expr = \"COPY (%s) TO STDOUT\" % tbl_expr\r\n\r\n self.log.debug(\"using copy expr: %s\" % tbl_expr)\r\n\r\n return tbl_expr",
"def _cp_commands(src_file):\n # File linking is more efficient than copying.\n dst_file = get_relative_path(possible_prefixes, src_file)\n dst_dir = os.path.dirname(dst_file)\n copy_command = get_copy_command(src_file, dst_file, use_links)\n result = []\n if dst_dir not in dst_dir_list:\n result.append(get_mkdir_command(dst_dir))\n dst_dir_list.append(dst_dir)\n result.append(copy_command)\n return result",
"def command_copy(args):\n sources = args.sources\n destpath = args.destpath\n source_files = []\n for file_ in sources:\n if \"*\" in file_:\n selected = glob(file_)\n source_files.extend(selected)\n elif os.path.isfile(file_):\n source_files.append(file_)\n\n if destpath.endswith(\"/\") or os.path.isdir(destpath) or len(sources) > 1:\n # -- DESTDIR-MODE: Last argument is a directory.\n destdir = destpath\n else:\n # -- DESTFILE-MODE: Copy (and rename) one file.\n assert len(source_files) == 1\n destdir = os.path.dirname(destpath)\n\n # -- WORK-HORSE: Copy one or more files to destpath.\n if not os.path.isdir(destdir):\n sys.stdout.write(\"copy: Create dir %s\\n\" % destdir)\n os.makedirs(destdir)\n for source in source_files:\n destname = os.path.join(destdir, os.path.basename(source))\n sys.stdout.write(\"copy: %s => %s\\n\" % (source, destname))\n shutil.copy(source, destname)\n return 0",
"def supported_table_args(self) -> t.Tuple[str, ...]:",
"def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):\r\n sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),\r\n ','.join(src_columns), self.encodeTableName(src_schema, src_table))\r\n return self.runSql(sql)",
"def svn_client_copy_source_t_path_get(svn_client_copy_source_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_source_system_profile_params(argv):\n with get_audit_db(argv) as audit_db:\n if audit_db is None:\n if not argv.tablelist:\n return []\n\n if len(argv.tablelist) == 1:\n # A file containing table names\n if os.path.isfile(argv.tablelist[0]):\n with open(argv.tablelist[0]) as f:\n return [(argv.sourceschema,\n t.strip(),\n argv.targetschema,\n None) for t in f]\n\n return [(argv.sourceschema, table, argv.targetschema, None)\n for table in argv.tablelist]\n\n sql = \"\"\"\n SELECT source_region, object_name, target_region, query_condition\n FROM {audit_schema}.source_system_profile\n WHERE profile_name = %s\n AND version = %s\n AND active_ind = 'Y'\n ORDER BY object_seq\"\"\".format(audit_schema=argv.auditschema)\n\n bind_values = [argv.profilename, argv.profileversion]\n result = audit_db.execute_query(sql, argv.arraysize, bind_values)\n\n return [(row[0], row[1], row[2], row[3]) for row in result]",
"def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)",
"def svn_fs_copy(*args):\r\n return _fs.svn_fs_copy(*args)",
"def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args",
"def copy_args_files(self, args: Union[str, List[str]]):\n if isinstance(args, (str, bytes)):\n args = shlex.split(str(args))\n\n for arg in args:\n arg_path = os.path.normpath(\n os.path.abspath(os.path.join(self._source_dir, arg)))\n\n if os.path.isfile(arg_path) and \\\n any(p.match(arg_path) for p in self._includes) and \\\n not any(p.match(arg_path) for p in self._excludes):\n try:\n arg_relpath = normalize_relpath(\n os.path.relpath(arg_path, self._source_dir))\n except ValueError:\n pass # not a file in `source_dir`\n else:\n dst_path = os.path.join(self._dest_dir, arg_relpath)\n dst_dir = os.path.dirname(dst_path)\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir, exist_ok=True)\n shutil.copyfile(arg_path, dst_path)",
"def copyCommand(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n if len(rows) == 4:\n model = self.proxyModel.sourceModel()\n row = rows[3]\n column = columns[3]\n command = model.dataset.data[row][column].cell\n QApplication.clipboard().setText(command)",
"def __init__(self, args, copy_thread = 1):\r\n\r\n Replicator.__init__(self, args)\r\n\r\n if not copy_thread:\r\n raise Exception(\"Combined copy not supported\")\r\n\r\n if len(self.args) != 3:\r\n self.log.error(\"londiste copy requires table name\")\r\n sys.exit(1)\r\n self.copy_table_name = self.args[2]\r\n\r\n sfx = self.get_copy_suffix(self.copy_table_name)\r\n self.old_consumer_name = self.consumer_name\r\n self.pidfile += sfx\r\n self.consumer_name += sfx\r\n self.copy_thread = 1\r\n self.main_worker = False",
"def _copy_file ( self, source, dest ):\n return",
"def print_dry_run_copy_info(source, dest):\n\n def shorten_home(path):\n expanded_home = os.path.expanduser(\"~\")\n path = str(path)\n if path.startswith(expanded_home):\n return path.replace(expanded_home, \"~\")\n return path\n\n def truncate_middle(path: str, acceptable_len: int):\n \"\"\"Middle truncate a string\n https://www.xormedia.com/string-truncate-middle-with-ellipsis/\n \"\"\"\n if len(path) <= acceptable_len:\n return path\n # half of the size, minus the 3 .'s\n n_2 = int(acceptable_len / 2 - 3)\n # whatever's left\n n_1 = int(acceptable_len - n_2 - 3)\n return f\"{path[:n_1]}...{path[-n_2:]}\"\n\n trimmed_source = shorten_home(source)\n trimmed_dest = shorten_home(dest)\n longest_allowed_path_len = 87\n if len(trimmed_source) + len(trimmed_dest) > longest_allowed_path_len:\n trimmed_source = truncate_middle(trimmed_source, longest_allowed_path_len)\n trimmed_dest = truncate_middle(trimmed_dest, longest_allowed_path_len)\n print(\n Fore.YELLOW + Style.BRIGHT + trimmed_source + Style.NORMAL,\n \"->\",\n Style.BRIGHT + trimmed_dest + Style.RESET_ALL,\n )",
"def get_formatted_copy_sql(table, s3_data_path, aws_key, aws_secret, aws_region):\n\n return f\"\"\"\n COPY {table}\n FROM {s3_data_path}\n ACCESS_KEY_ID {aws_key}\n SECRET_ACCESS_KEY {aws_secret}\n FORMAT AS JSON 'auto'\n REGION {aws_region};\n \"\"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show all the models that were printed. | def show_completed_models(completed_models):
print("\nThe following models have been printed:")
for completed_model in completed_models:
print(completed_model) | [
"def show_models(completed_models):\r\n\t\r\n\tprint(\"\\nPrinted models: \")\r\n\t\r\n\tfor design in completed_models:\r\n\t\tprint(design)",
"def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for model in completed_models:\n print(model)",
"def show_completed_models(completed_models): \n print(\"\\nThe following models have been printed:\") \n for completed_model in completed_models:\n print(completed_model)",
"def show_completed_models(completed_models):\n print(\"\\n以下型号已经被打印出来:\")\n for completed_model in completed_models:\n print(completed_model)",
"def print(self):\n self.model.summary()",
"def print_models (unprinted_designs,completed_models):\n \n while unprinted_designs:\n current_design = unprinted_designs.pop()\n #Simulate creating a 3D print from the desig.\n print (\"printing model: \" + current_design)\n completed_models.append (current_design)",
"def print_model(self, name_filter=None):\n raise NotImplementedError",
"def print_models( unprinted_designs, completed_models ):\r\n while unprinted_designs:\r\n current_design = unprinted_designs.pop()\r\n print( 'Printing model: ' + current_design )\r\n completed_models.append( current_design )",
"def print_models(unprinted_designs, complete_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n\n # Simula a criação de uma impressão 3D a partir do design\n print('\\nPrinting model: ' + current_design)\n complete_models.append(current_design)",
"def print_models(unprinted_desings, completed_models):\n while unprinted_desings:\n current_design = unprinted_desings.pop()\n\n # 模拟根据设计制作 3D 打印模型的过程\n print(\"Printing model with a auto method: \" + current_design)\n completed_models.append(current_design)",
"def print_models(unprinted_designs, completed_models):\r\n while unprinted_designs:\r\n current_designs = unprinted_designs.pop()\r\n\r\n # simulate creating a 3d print from the design\r\n print(\"Printing model: \" + current_designs)\r\n completed_models.append(current_designs)",
"def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n print(f\"3D印刷中: {current_design}\")\n completed_models.append(current_design)",
"def test_print_models_showing(self):\n self.assertIn('tasks42.models.Person', self.out.getvalue())\n self.assertIn('error: tasks42.models.Person', self.err.getvalue())",
"def displayModel(cls,\n model,\n config=None):\n printer = model.metamodel.modelPrinterClass(\n theModel=model,\n config=config)\n printer.display()",
"def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n\n # Simulation\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)",
"def print_summary(self):\n self.model.summary()",
"def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)",
"def test_print_all_models_writes_to_stdout(self):\n stdout_value = self.stdout.getvalue()\n for model in self.all_models:\n self.assertIn(\n 'Model `%s` - %d instances' % (model.__name__,\n model.objects.count()),\n stdout_value, 'Should write names of models and the count of '\n 'objects in every model to stdout'\n )",
"def display_instances(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
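A minimal usage sketch for the query/function pair above; the model names are purely illustrative and not taken from the dataset.

completed_models = ["dodecahedron case", "robot pendant", "phone stand"]
show_completed_models(completed_models)
# The following models have been printed:
# dodecahedron case
# robot pendant
# phone stand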
Register the message handlers that every journal should support. | def register_message_handlers(journal):
journal.dispatcher.register_message_handler(
DumpQuorumMessage, _dumpquorumhandler) | [
"def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DumpJournalBlocksMessage,\n _dumpjournalblockshandler)\n journal.dispatcher.register_message_handler(\n DumpJournalValueMessage,\n _dumpjournalvaluehandler)",
"def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n QuorumTransactionBlockMessage,\n transaction_block_message.transaction_block_message_handler)",
"def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DevModeTransactionBlockMessage,\n transaction_block_message.transaction_block_message_handler)",
"def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)",
"def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]",
"def _register_handlers(self):\n _LOGGER.debug(\"%s: Registering internal handlers.\", self._ip_address)\n # Register the callback for messages being received\n self._listener()\n\n # Register callback for connection.\n self.add_event_handler('connected',\n self._connected_handler,\n disposable=False,\n )\n\n # Register callback for disconnections.\n self.add_event_handler('disconnected',\n self._disconnected_handler,\n disposable=False,\n )",
"def add_handlers(self, logger, handlers):\n for h in handlers:\n try:\n logger.addHandler(self.config['handlers'][h])\n except StandardError as e:\n raise ValueError('Unable to add handler %r: %s' % (h, e))",
"def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)",
"def _set_handlers(self):\n for handler_name in self.handler_names:\n\n if self.config[handler_name]['active'] is False:\n continue\n\n handler = self._get_handler(handler_name)\n\n level = self.config[handler_name].get('level')\n formatter = self.config[handler_name].get('formatter')\n self._config_handler(handler, formatter=formatter, level=level)\n\n if not self._handler_exist(handler):\n self.logger.addHandler(handler)",
"def register_chat_handlers(handlers, bot):\n for command, meta in handlers.items():\n response = meta['response']\n aliases = meta['aliases']\n\n bot.register_handler(command, ChatHandler(response), *aliases)",
"def on_register_event_handlers(self):\n pass",
"def register_multiprocess_handlers(logger: logging=None):\n if logger is None:\n logger = logging.getLogger()\n\n for i, orig_handler in enumerate(list(logger.handlers)):\n handler = MultiProcessingHandler('mp-handler-{0}'.format(i), sub_handler=orig_handler)\n logger.removeHandler(orig_handler)\n logger.addHandler(handler)",
"def _register_listeners (self):\n for event in self._container._eventMixin_events:\n handler_name = \"_handle_\" + event.__class__.__name__\n if hasattr(self, handler_name):\n self._container.addListener(event, getattr(self, handler_name),\n weak=True)\n else:\n self._container.addListener(event, self._log_event, weak=True)",
"def register_service(self, service):\n for message_handler in service.iter_message_handlers():\n self.message_handlers[message_handler.name] = message_handler",
"def add_handlers(self, host_pattern, host_handlers):\n pass",
"def RegisteredHandlers(self) -> _n_0_t_2[ITrackingHandler]:",
"def get_handler_registry():\n return HANDLERS",
"def get_message_handlers(self):\n return [\n (\"normal\", self.message),\n ]",
"def init_handlers(self):\n self.dispatcher.add_handler(MessageHandler(Filters.text, self.text_handler))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor for the DumpQuorumMessage class. | def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(DumpQuorumMessage, self).__init__(minfo)
self.IsSystemMessage = False
self.IsForward = True
self.IsReliable = True | [
"def dump(self):\n result = super(DumpQuorumMessage, self).dump()\n return result",
"def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpJournalValueMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n\n self.TransactionType = minfo.get('TransactionType')\n self.Name = minfo.get('Name')",
"def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpJournalBlocksMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n\n self.Count = minfo.get(\"Count\", 0)",
"def __init__(self, msg_type, message, queue):\n self.type = msg_type\n self.message = message\n self.queue = queue",
"def __init__(self, message):\n\n self.message = message",
"def __init__(self, bytes = None):\n id = pcs.Field(\"id\", 16)\n seq = pcs.Field(\"sequence\", 16)\n pcs.Packet.__init__(self, [id, seq], bytes)\n self.description = \"ICMPv4 Echo\"\n\n if (bytes != None):\n offset = id.width + seq.width\n self.data = payload.payload(bytes[offset:len(bytes)])\n else:\n self.data = None",
"def __init__(self, message=None):\n self._subdir = 'new'\n self._info = ''\n self._date = time.time()\n Message.__init__(self, message)",
"def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(QuorumTransactionBlock, self).__init__(minfo)\n self.BlockNumber = minfo.get('BlockNumber', 0)",
"def __init__(self, strict_redis, prefix=\"pyslate_\"):\n\n self.redis = strict_redis\n self.prefix = prefix",
"def __init__(self, key, max_messages, zmq_client, level=logging.NOTSET):\n logging.Handler.__init__(self, level)\n self.key = key\n self.zmq_client = zmq_client\n self.formatter = ZeroMQFormatter()\n self.max_messages = max_messages",
"def __init__(self, buf=None, *args, **kwargs):\n super(Message, self).__init__(buf, *args, **kwargs)\n self.__initialized = True",
"def __init__(self, username=None, hostname=None, action=None,\n channel=None, msg=None):\n\n self.raw_msg = \"\"\n self.username = username or \"\"\n self.hostname = hostname or \"\"\n self.action = action or \"\"\n self.channel = channel or \"\"\n self.msg = msg or \"\"",
"def __init__(self, message=''):\n self.time_start = time.clock()\n self.message = message",
"def __init__(self, msg, timer=0):\n self.ackmap = None\n self.msg = msg\n self.timer = timer\n self.checksum = AckMap.checksum(msg.SerializeToString())",
"def __init__(self, bytes = None):\n type = pcs.Field(\"type\", 8)\n code = pcs.Field(\"code\", 8)\n cksum = pcs.Field(\"checksum\", 16)\n pcs.Packet.__init__(self, [type, code, cksum], bytes)\n self.description = \"ICMPv4\"\n\n if (bytes != None):\n offset = type.width + code.width + cksum.width\n self.data = payload.payload(bytes[offset:len(bytes)])\n else:\n self.data = None",
"def __init__(self, *args):\n this = _ida_hexrays.new_qstring_printer_t(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__( self, \n workerId, \n logPath, \n nameserver, \n qin = None, \n sqout = None, \n eqout = None,\n mqout = None,\n metaQin = None, \n metaQout = None, \n geoip = None ):\n\n super( dnsBroker, self ).__init__( workerId = workerId, \n workerPurpose = \"Probe\",\n logPath = logPath,\n qin = qin, \n metaQin = metaQin,\n metaQout = metaQout )\n\n self.state.update( {\n\n # DNS Probe\n 'probe' : Probe( workerId = workerId, \n logPath = logPath, \n nameserver = nameserver ),\n\n # Google MX Regex\n 'rgmx' : reg_compile( \"([0-9]+)\\s(.*\\.google(?:mail)?\\.com$)\" ),\n\n # SPF Regex\n 'rgspf' : reg_compile( '^\"v\\=(spf[0-9].*)\"$' ),\n \n # Output Queues\n 'qout' : [ sqout, eqout, mqout ],\n\n # GeoIp Db Wrapper\n 'geoip' : geoip,\n \n } )",
"def __init__(self, topic, partition, offset, key, message):\n self.topic = topic\n self.partition = partition\n self.offset = offset\n self._rawKey = key\n self._rawMessage = message\n self._keyDecoder = utf8_decoder\n self._valueDecoder = utf8_decoder",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(KomodoSpeechRecCommand, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.cmd is None:\n self.cmd = ''\n if self.cat is None:\n self.cat = ''\n else:\n self.header = std_msgs.msg.Header()\n self.cmd = ''\n self.cat = ''"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dict with information about the dump quorum message. | def dump(self):
result = super(DumpQuorumMessage, self).dump()
return result | [
"def dumps(self) -> Dict[str, Any]:\n return {\n \"commitId\": self.commit_id,\n \"parentCommitId\": self.parent_commit_id,\n \"message\": self.message,\n \"committer\": self.committer.dumps(),\n }",
"def dump(self):\n result = super(QuorumTransactionBlock, self).dump()\n result['BlockNumber'] = self.BlockNumber\n\n return result",
"def dump(self):\n result = super(DumpJournalValueMessage, self).dump()\n\n result['TransactionType'] = self.TransactionType\n result['Name'] = self.Name\n\n return result",
"def dump(self):\n result = super(DumpJournalBlocksMessage, self).dump()\n result['Count'] = self.Count\n\n return result",
"def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpQuorumMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True",
"def dump(self):\n return {\n 'session': self.session.dump(), 'user': self.user,\n 'uuid': self._uuid}",
"def dump(self) -> bytes:\n return self.mailbox.socket_dump(self.id)",
"def messages(self):\n return {}",
"async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj",
"def DumpCommand(database):\n if(database.Keys()):\n return \", \".join(database.Keys())\n else:\n return \"Nothing to dump\"",
"def get_dump_status(self, uid: str) -> Dict[str, str]:\n return self.http.get(\n self.config.paths.dumps + '/' + str(uid) + '/status'\n )",
"def get_database_file_info(self):\n status, output, message = self.backup_list()\n file_info = {}\n unit_size = [\"MB\", \"GB\"]\n if status:\n if output:\n for x in output:\n if \"Shards\" in x:\n if x.strip().split()[0][-2:] in unit_size:\n file_info[\"file_size\"] = \\\n int(x.strip().split()[0][:-2].split(\".\")[0])\n\n file_info[\"items\"] = int(x.strip().split()[1])\n print((\"output content \", file_info))\n return file_info\n else:\n print(message)",
"def dump(self):\n\t\tdictionary = {}\n\t\twith self.env.begin(write=False) as txn:\n\t\t\tcursor = txn.cursor()\n\t\t\tfor key, value in cursor:\n\t\t\t\tdictionary[key] = value\n\t\treturn dictionary",
"def qtile_info(self) -> dict:\n dictionary = {\n \"config_path\": self.config.file_path,\n \"version\": VERSION,\n \"log_level\": self.loglevelname(),\n }\n if isinstance(logger.handlers[0], RotatingFileHandler):\n log_path = logger.handlers[0].baseFilename\n dictionary[\"log_path\"] = log_path\n return dictionary",
"def dump_db_content():\n\n Response = namedtuple('Response', \"return_code return_data\")\n\n redis_client = APP.config.get('REDIS')\n if not redis_client:\n return Response(return_code=400, return_data={\n \"dataBody\": {\n \"response\": \"---\",\n \"reason\": \"Unknown\"\n },\n \"error\": True\n })\n\n dump_content = dict()\n for key in redis_client.scan_iter():\n dump_content.update({key: loads(redis_client.get(key))})\n\n if not dump_content:\n return Response(return_code=200, return_data={\"dataBody\": \"EMPTY\", \"error\": False})\n\n return Response(return_code=200, return_data=dump_content)",
"def _dump_queue(self):\n outfile = self.registryValue('dumpFile')\n with open(outfile, 'w') as h:\n i = 1\n for nick, msg in self._queue:\n if msg is None:\n msg = '[no message]'\n h.write(\"% 2d\\t%s\\t%s\\n\" % (i, nick, msg))\n i += 1",
"def dump(self):\n\n\t\treturn {\n\t\t\t\"package\" : self.name,\n\t\t\t\"diversions\" : [\n\t\t\t\tdiversion.dump()\n\t\t\t\tfor diversion in self.diversions\n\t\t\t]\n\t\t}",
"def dump(self):\n avps = self.get_all_avps_contents()\n auth = self.compute_authenticator(avps)\n header = struct.pack(RadiusMessage.RADIUS_HDR_TMPL, self.code,\n self.pid, len(self), auth)\n return b\"\".join([header, avps])",
"def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve a known stored filter object from the database. | def retrieve_filter(self, filter_id):
LOG.debug("Retrieve filter {}".format(filter_id))
filter_obj = self.filter_collection.find_one({"_id": ObjectId(filter_id)})
# use _id to preselect the currently loaded filter, and drop it while we are at it
filter_obj.update([("filters", filter_obj.pop("_id", None))])
return filter_obj | [
"def get(cls, **filters: Dict[str, Any]) -> \"Model\":\n pks = communicator.filter_objects(\n cls._app_name, cls._model_name, filters, [\"id\"]\n )\n if len(pks) > 1:\n raise RuntimeError(\n f\"Exactly one object should match the given criteria, received {len(pks)}.\"\n )\n elif len(pks) == 0:\n raise RuntimeError(\n \"No objects match the given criteria or no permission to read object.\"\n )\n return cls(pks[0][0])",
"def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None",
"def get_instance(self, data):\n filters = {\n key: data[key]\n for key in self.fields.keys() if key in self.lookup_fields}\n\n if None not in filters.values():\n return self.session.query(\n self.opts.model\n ).filter_by(\n **filters\n ).first()\n return None",
"def get_filter(self, name):\n for flter in self.get_filters():\n if flter.name == name:\n return flter",
"def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))",
"def filterInfo(self, name):\n return self.__filters[name] if name in self.__filters else None",
"def filter(self):\n return self._filter",
"def getFilter(self):\n\n return self.currentFilter",
"def model(self) -> StringFilter:\n return self.__model",
"def test_get_saved_filter(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[\"name\"], \"Devops\")",
"def get_filters(self):",
"def get_user_filter():",
"def filtering(self):\n if not hasattr(self, \"__filtering\"):\n raise RuntimeError(\"Filtering for the model {} is not defined yet.\".format(self.name))\n return getattr(self, \"__filtering\")",
"def get_exact_filter_by_name(self, name):\n for entry in self.filters:\n if (entry['type'] == 'filter' and entry['name'] == name and\n entry['comparator'] == 'equals'):\n return entry",
"def test_get_saved_filters(self):\n url = reverse('xds_api:saved-filters')\n\n saved_config = SavedFilter(owner=self.user_1,\n name=\"Devops\", query=\"randomQuery\")\n saved_config.save()\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[0][\"name\"], \"Devops\")",
"def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters",
"def getFilter(self, type: int) -> int:\n ...",
"def get(cls, **filters) -> dict:\n errors = cls.validate_query(filters)\n if errors:\n raise ValidationFailed(filters, errors)\n\n cls.deserialize_query(filters)\n\n if cls.__collection__.count_documents(filters) > 1:\n raise ValidationFailed(\n filters, message=\"More than one result: Consider another filtering.\"\n )\n\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Query document matching {filters}...\")\n document = cls.__collection__.find_one(filters)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(\n f'{\"1\" if document else \"No corresponding\"} document retrieved.'\n )\n return cls.serialize(document)",
"def filter_type(self):\n return self._filter_type"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain a cursor for all filters available to an institute in a category. | def filters(self, institute_id, category="snv"):
filters_res = self.filter_collection.find(
{"institute_id": institute_id, "category": category}
)
return filters_res | [
"def get_filters(self):",
"def get_queryset(self):\n queryset = Article.objects.all()\n category = self.request.query_params.get('category')\n if category is not None:\n queryset = queryset.filter(category=category)\n return queryset",
"def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )",
"def scg_find_all(context, filters=None, transaction=None):\n return IMPL.scg_find_all(context, filters=filters,\n transaction=transaction)",
"def get_all_possible_filters(item_category):\n\n\tpk_lists = []\n\n\tfor filter_category in FilterCategory.objects.filter(item_category=item_category):\n\t\tfilter_option_set = filter_category.filteroption_set.all()\n\t\ttemp_list = list(filter_option_set.values_list('pk', flat=True))\n\n\t\tpk_lists.append(temp_list)\n\n\treturn pk_lists",
"def browse_categories():\n print(\"***** Find Businesses by Categories *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n business_objects = cursor.limit(10)\n \n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print_business(business_object)",
"def from_category_and_interval(self, category, interval):\n self.interval = interval\n self.results = self.filter(\n category=category,\n request_datetime__gte=self.get_interval()\n )\n return self.results",
"def _start_query_filters(self):\n filters = []\n if hasattr(self.model_class, \"id\"):\n filters.append(self.model_class.id == self.model_id)\n if hasattr(self.model_class, \"owner\"):\n filters.append(self.model_class.owner == None)\n return filters",
"def _get_category_filter_key_set(category_filter_group):\n keys = {'lot'}\n\n if category_filter_group is not None and category_filter_group['filters']:\n keys.update({f['name'] for f in category_filter_group['filters']})\n\n return keys",
"def show_init_filters():\n if (PICKLE_FS):\n fs = pickle.loads(f.session['fs'])\n else:\n guid = f.session['fs']\n fs = sm.get_FilterSystem(guid, app)\n\n # filter kind to list of each category & number of project IDs\n return f.jsonify({\"filter_options_dict\": fs.get_add_filter_options_str_dict(),\n \"total_participants\": len(fs.get_result_pids())})",
"def getfilters():\n # GET QUERY\n function = request.args.get('function', '')\n rq_uri = request.args.get('rq_uri', '')\n graph_uri = request.args.get('graph_uri', '')\n mode = request.args.get('mode', '')\n query = Qry.get_filter(rq_uri, graph_uri)\n\n if (mode == 'added'):\n style = 'background-color:lightblue'\n else:\n style = ''\n\n # RUN QUERY AGAINST ENDPOINT\n data = sparql(query, strip=False)\n # print data\n\n return render_template('list_group_description.html',\n function = function,\n style = style,\n list = data)",
"def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories",
"def filter(self, filters):",
"def starwars_search(self, category, attribute, filters):\n self.load_response(category)\n while self.counter != int(self.response_info['count']):\n self.attribute_search(attribute, filters)\n self.load_next_response()",
"def GetEntries(self, cat):\n if cat == 'All':\n return self.entries\n else:\n return [e for e in self.entries if e.category == cat]",
"def test_filter_category_search_by_page(self, rf):\n category_type = choice(CATEGORY_TYPES)[0]\n # Test the second page\n CategoryFactory.create_batch(26, category=category_type)\n request = rf.get(f\"item/api/v1/category/{category_type}/\", {\"page\": 2})\n response = search_category(request, category_type)\n assert response.status_code == 200\n data = json.loads(response.content)\n assert (\n len(data[\"data\"]) == 1\n ), \"The paginator should show only 1 organization since page size is 25.\"",
"async def fetch_filtering_terms(connection):\n # async with connection.transaction():\n query = \"SELECT DISTINCT ontology, term, label FROM ontology_term ORDER BY ontology, term;\"\n response = await connection.fetch(query)\n for record in response:\n yield record",
"def get_userfilters():\n return all_user_filters.values()",
"def search_iter(self, name=None, author=None, category=None, offset=0,\n limit=deck_list_max):\n s = self.search(name, author, category, offset, limit)\n\n while s.count > 0:\n yield s\n\n offset += s.count\n s = self.search(name, author, category, offset, limit)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
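A hedged harness for the two Mongo-backed adapter methods above (retrieve_filter and filters). It assumes both functions are available at module level, and it relies on mongomock plus bson.ObjectId so the sketch runs without a real MongoDB server; the FilterHandler class, institute id and filter document are illustrative assumptions rather than part of the dataset entries.

import logging
import mongomock
from bson import ObjectId  # referenced by retrieve_filter above

LOG = logging.getLogger(__name__)  # referenced by retrieve_filter above

class FilterHandler:
    # Bind the two module-level functions from the preceding entries as methods.
    retrieve_filter = retrieve_filter
    filters = filters

    def __init__(self, collection):
        self.filter_collection = collection

handler = FilterHandler(mongomock.MongoClient().scout.filter)
inserted = handler.filter_collection.insert_one(
    {"institute_id": "cust000", "category": "snv", "display_name": "Rare candidates"}
)

# Cursor over all snv filters stored for the institute
for stored in handler.filters("cust000", category="snv"):
    print(stored["display_name"])

# Fetch one filter back by its string id
print(handler.retrieve_filter(str(inserted.inserted_id)))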
Copy `in_tree` to `out_tree`, keeping only the events for which `selection(event)` is true. | def tree_copy_selection(in_tree, out_tree, selection):
for entry in in_tree:
if selection(entry):
out_tree.Fill() | [
"def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True",
"def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else (src.length or node.length\n if None in (src.length, node.length)\n else src.length + node.length) if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n if move in ('down', 'bottom'):\n if get_support(node) is not None:\n res.support = node.support\n elif get_support(src) is not None:\n res.support = src.support\n\n # append children except for src (if applies)\n res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res",
"def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res",
"def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):\n for entry in in_tree:\n key_value = getattr(entry, key)\n if not key_value in keys:\n out_tree.Fill()\n keys.add(key_value)",
"def _copy_file(self, input_url, output_path):\n with open_root(input_url, 'r') as infile, open_root(output_path, 'w'):\n for key in infile.GetListOfKeys():\n if key.GetName() == 'tree':\n continue\n obj = key.ReadObj()\n obj.Write()\n intree = infile.Get('tree')\n n_total = intree.GetEntriesFast()\n outtree = intree.CopyTree(self.cut)\n n_selected = outtree.GetEntriesFast()\n outtree.Write()\n return n_selected, n_total",
"def deep_copy_tree(tree: Tree):\n #print(\"enter bartpy/bartpy/tree.py Tree deep_copy_tree\")\n output = Tree([deep_copy_node(x) for x in tree.nodes])\n #print(\"-exit bartpy/bartpy/tree.py Tree deep_copy_tree\")\n return output",
"def convertTreeToCoveringTree( self, tree ):\n\n self.debug( \"convertTreeToCoveringTree: tree at start\" )\n if E.getLogLevel() >= 2: self.printTree( tree )\n \n ntree = self.addChildren( tree )\n \n #######\n # descend tree and add new domains\n # if domain has only a single child: delete the child and\n # rewire\n for t in ntree:\n info, children = t\n \n if info:\n node, parent, level, ranges = info\n \n if len(children) == 1:\n ntree[children[0]][0] = None\n ntree[node][1] = ntree[children[0]][1]\n \n #######\n # build new tree with new node identifiers\n current_node = 0\n covering_tree = []\n \n levels = map( lambda x: [], [0] * len(tree))\n \n for t in ntree:\n info, children = t\n \n if not info: continue\n node, parent, level, ranges = info\n \n if len(children) == 2:\n \n # add new node to tree, rename parent in children and\n # set borders\n leftchild = children[0]\n rightchild = children[1] \n \n # change left child\n lnode, lparent, llevel, lranges = ntree[leftchild][0]\n rnode, rparent, rlevel, rranges = ntree[rightchild][0] \n \n if ranges:\n lranges, rranges = self.getCoveringRanges( lranges, rranges, ranges )\n else:\n continue\n \n # change left child\n ntree[leftchild][0]= (None, current_node, level + 1, lranges) \n \n # change right child \n # cnode, cparent, clevel, cranges = ntree[rightchild][0]\n ntree[rightchild][0]= (None, current_node, level + 1, rranges )\n \n covering_tree.append( [level, parent, 0, 0, ranges] )\n levels[level].append( current_node )\n \n current_node += 1\n \n max_range = covering_tree[0][4][0][1]\n \n self.debug( \"convertTreeToCoveringTree: tree before removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n ###################################\n ## remove small fragments\n ## has to be done per level in order to be consistent\n ## done here and not during matrix decomposition, so that\n ## matrix needs not to be permuted more than once.\n for l in range(0, len(levels)):\n if len(levels[l]) == 0: break\n # collect all domains per level in a list of the form\n # (from, to, node)\n ranges = []\n for node in levels[l]:\n ranges += map(lambda x: (x[0], x[1], node), covering_tree[node][4])\n covering_tree[node][4] = []\n \n # and remove small fragments\n new_ranges = self.removeSmallRanges( ranges )\n \n # and put back into tree if there is more than one range\n for (xfrom, xto, node) in new_ranges:\n covering_tree[node][4].append( (xfrom, xto) )\n \n ###################################\n ## delete nodes with empty ranges or only a single child.\n ## renumber nodes so that there are no gaps\n\n self.debug( \"convertTreeToCoveringTree: after removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n return self.collapseTree( covering_tree )",
"def test_copy_button_clicked_with_no_selection_on_to_task_tree_view(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select a task from <b>To Task</b> list')",
"def run_diff_on_subtree(self, event=None):\n ns = self.get_subtree()\n if ns is None:\n g.es('nodediff.py: Make sure that the selected node has exactly two children.',\n color='red')\n return\n self.run_appropriate_diff(ns)",
"def prepare(self, left_tree, right_tree):",
"def makeTree(self):\n return makeTree(self.events,self.outTree)",
"def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v for k, v in d_tree.items() if v}\n # By creating a new binding for 'd_tree', we have effectively\n # severed the connection back to the original dictionary.\n # We now need to copy this d_tree to the self.d_inputTree\n # self.d_outputTree structures\n self.d_inputTree = d_tree\n self.d_outputTree = self.d_inputTree.copy()",
"def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)",
"def copy_tree(t):\r\n return tree(label(t), [copy_tree(b) for b in branches(t)])",
"def convert(srctree, dsttree=dsttree, readonly=False, dumpall=False,\n ignore_exceptions=False, fullcomp=False):\n\n if fullcomp:\n allow_ast_comparison()\n\n parse_file = code_to_ast.parse_file\n find_py_files = code_to_ast.find_py_files\n srctree = os.path.normpath(srctree)\n\n if not readonly:\n dsttree = os.path.normpath(dsttree)\n logging.info('')\n logging.info('Trashing ' + dsttree)\n shutil.rmtree(dsttree, True)\n\n unknown_src_nodes = set()\n unknown_dst_nodes = set()\n badfiles = set()\n broken = []\n\n oldpath = None\n\n allfiles = find_py_files(srctree, None if readonly else dsttree)\n for srcpath, fname in allfiles:\n # Create destination directory\n if not readonly and srcpath != oldpath:\n oldpath = srcpath\n if srcpath >= srctree:\n dstpath = srcpath.replace(srctree, dsttree, 1)\n if not dstpath.startswith(dsttree):\n raise ValueError(\"%s not a subdirectory of %s\" %\n (dstpath, dsttree))\n else:\n assert srctree.startswith(srcpath)\n dstpath = dsttree\n os.makedirs(dstpath)\n\n srcfname = os.path.join(srcpath, fname)\n logging.info('Converting %s' % srcfname)\n try:\n srcast = parse_file(srcfname)\n except SyntaxError:\n badfiles.add(srcfname)\n continue\n\n try:\n dsttxt = to_source(srcast)\n except:\n if not ignore_exceptions:\n raise\n dsttxt = ''\n\n if not readonly:\n dstfname = os.path.join(dstpath, fname)\n try:\n with open(dstfname, 'wb') as f:\n f.write(out_prep(dsttxt))\n except UnicodeEncodeError:\n badfiles.add(dstfname)\n\n # As a sanity check, make sure that ASTs themselves\n # round-trip OK\n try:\n dstast = ast.parse(dsttxt) if readonly else parse_file(dstfname)\n except SyntaxError:\n dstast = []\n if fullcomp:\n unknown_src_nodes.update(strip_tree(srcast))\n unknown_dst_nodes.update(strip_tree(dstast))\n bad = srcast != dstast\n else:\n bad = not fast_compare(srcast, dstast)\n if dumpall or bad:\n srcdump = dump_tree(srcast)\n dstdump = dump_tree(dstast)\n logging.warning(' calculating dump -- %s' %\n ('bad' if bad else 'OK'))\n if bad:\n broken.append(srcfname)\n if dumpall or bad:\n if not readonly:\n try:\n with open(dstfname[:-3] + '.srcdmp', 'wb') as f:\n f.write(out_prep(srcdump))\n except UnicodeEncodeError:\n badfiles.add(dstfname[:-3] + '.srcdmp')\n try:\n with open(dstfname[:-3] + '.dstdmp', 'wb') as f:\n f.write(out_prep(dstdump))\n except UnicodeEncodeError:\n badfiles.add(dstfname[:-3] + '.dstdmp')\n elif dumpall:\n sys.stdout.write('\\n\\nAST:\\n\\n ')\n sys.stdout.write(srcdump.replace('\\n', '\\n '))\n sys.stdout.write('\\n\\nDecompile:\\n\\n ')\n sys.stdout.write(dsttxt.replace('\\n', '\\n '))\n sys.stdout.write('\\n\\nNew AST:\\n\\n ')\n sys.stdout.write('(same as old)' if dstdump == srcdump\n else dstdump.replace('\\n', '\\n '))\n sys.stdout.write('\\n')\n\n if badfiles:\n logging.warning('\\nFiles not processed due to syntax errors:')\n for fname in sorted(badfiles):\n logging.warning(' %s' % fname)\n if broken:\n logging.warning('\\nFiles failed to round-trip to AST:')\n for srcfname in broken:\n logging.warning(' %s' % srcfname)\n\n ok_to_strip = 'col_offset _precedence _use_parens lineno _p_op _pp'\n ok_to_strip = set(ok_to_strip.split())\n bad_nodes = (unknown_dst_nodes | unknown_src_nodes) - ok_to_strip\n if bad_nodes:\n logging.error('\\nERROR -- UNKNOWN NODES STRIPPED: %s' % bad_nodes)\n logging.info('\\n')\n return broken",
"def copytree(self, src, dst, **kwargs):\n logger.more(f\"Copy tree '{_safe_relpath(src)}' -> '{_safe_relpath(dst)}'.\")\n shutil.copytree(src, dst, copy_function=self.copy, **kwargs)",
"def test_copy_button_clicked_with_same_task_is_selected_in_both_sides(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n # Select Task4 in to_task_tree_view\n selection_model = self.dialog.to_task_tree_view.selectionModel()\n model = self.dialog.to_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.to_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.to_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select two different tasks')",
"def range(self, event):\r\n \r\n p = (event.x, self.toCartesian(event.y))\r\n \r\n if self.selectedRegion is None:\r\n self.selectedStart = Region(p[X],p[Y], p[X],p[Y])\r\n self.selectedRegion = self.selectedStart.unionPoint(p)\r\n \r\n self.paint()\r\n \r\n # return (node,sub-tree) where sub-tree is True if draining entire tree\r\n # rooted at node. Draw these as shaded red rectangle to identify whole\r\n # sub-tree is selected.\r\n for pair in self.tree.range(self.selectedRegion):\r\n p = pair[0].point\r\n \r\n if pair[1]:\r\n self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min), \r\n pair[0].region.x_max, self.toTk(pair[0].region.y_max),\r\n fill='Red', stipple='gray12')\r\n else:\r\n self.canvas.create_rectangle(p[X] - BoxSize, self.toTk(p[Y]) - BoxSize, \r\n p[X] + BoxSize, self.toTk(p[Y]) + BoxSize, fill='Red')\r\n\r\n self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min), \r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))",
"def update_tree(self):\n if self.state == OI_STARTED:\n for ancestor in self.ancestors.filter(state__lt = OI_STARTED):\n #start all ancestors not yet started if task is started\n if not ancestor.switch_to(OI_STARTED, None):\n return False\n if self.state == OI_DELIVERED:\n for descendant in self.descendants.filter(state__lt = OI_DELIVERED):\n #update all descendants not yet delivered if project is delivered\n if not descendant.switch_to(OI_DELIVERED, None):\n return False\n if self.state == OI_VALIDATED:\n for descendant in self.descendants.filter(state__lt = OI_VALIDATED):\n #update all descendants not yet delivered if project is delivered\n if not descendant.switch_to(OI_VALIDATED, None):\n return False\n \n if self.start_date:\n for ancestor in self.ancestors.filter(start_date__gt = self.start_date):\n #ancestor start date should be before task start date\n ancestor.start_date = self.start_date\n ancestor.check_dates()\n ancestor.save()\n if self.due_date:\n for ancestor in self.ancestors.filter(due_date__lt = self.due_date):\n #ancestor due date should be after task due date\n ancestor.due_date = self.due_date\n ancestor.check_dates()\n ancestor.save()\n\n if self.start_date:\n for descendant in self.descendants.filter(start_date__lt = self.start_date):\n #descendant start date should be after project start date\n descendant.start_date = self.start_date\n descendant.check_dates()\n descendant.save()\n if self.due_date:\n for descendant in self.descendants.filter(due_date__gt = self.due_date):\n #descendant due date should be before project due date\n descendant.due_date = self.due_date\n descendant.check_dates()\n descendant.save()\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
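A hedged PyROOT sketch showing how tree_copy_selection above is typically driven; the file name, tree name and the pt branch are assumptions. It relies on CloneTree(0) sharing branch addresses with the source tree, so that out_tree.Fill() inside the helper records the entry currently loaded by the iteration over in_tree.

import ROOT

in_file = ROOT.TFile.Open("events.root")
in_tree = in_file.Get("events")

out_file = ROOT.TFile("selected.root", "RECREATE")
out_tree = in_tree.CloneTree(0)  # same branch structure, zero entries

# Keep only events passing the (assumed) pt cut
tree_copy_selection(in_tree, out_tree, selection=lambda event: event.pt > 20.0)

out_tree.Write()
out_file.Close()
in_file.Close()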
Copy `in_tree` to `out_tree` for events whose `key` value does not already exist in `keys`; `keys` is the set of key values seen so far. | def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):
for entry in in_tree:
key_value = getattr(entry, key)
if not key_value in keys:
out_tree.Fill()
keys.add(key_value) | [
"def remove_missing_values(events):\n ret = deepcopy(events)\n srchd, key_events = [], []\n for evt in events:\n _tmp = [(j, e) for j, e in enumerate(events) if e['key']\n == evt['key'] and not e['key'] in srchd]\n if _tmp != []:\n key_events.append(_tmp)\n srchd.append(evt['key'])\n dels = []\n for di_evts in key_events:\n if di_evts[0][1]['event'] == 'keystrokeUp':\n dels.append(di_evts[0][0])\n if di_evts[len(di_evts) - 1][1]['event'] == 'keystrokeDown':\n dels.append(di_evts[len(di_evts) - 1][0])\n if dels != []:\n for i in sorted(dels, reverse=True):\n del ret[i]\n return ret",
"def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value",
"def omit(d, keys):\n return {key: d[key] for key in d if key not in keys}",
"def deletekeys(self, keys):\n with self.lock.acquire(): \n data = self.read()\n _old = {}\n for key in keys:\n try:\n _old[key] = data.pop(key)\n except KeyError:\n pass\n self.write(data)\n return _old",
"def keepkeys(d, keys):\n\n ks = set(list(keys))\n to_rm = [k for k in d.keys() if k not in ks]\n for k in to_rm:\n del d[k]\n return d",
"def key_not_in(self, key_not_in):\n\n self._key_not_in = key_not_in",
"def exclude(self, keys):\n self.fields = {k: v for k, v in self.fields.items() if k not in keys}",
"def copy_keys(table1, keys):\n table2 = {}\n for key in keys:\n if table1[key]:\n table2[key] = table1[key]\n else:\n print(\"key does not exist in table 2\")\n break\n return table2",
"def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)",
"def collectKeys(boxes, keys, key=0):\n for newKey in boxes[key]:\n if newKey not in keys and newKey < len(boxes):\n keys.append(newKey)\n collectKeys(boxes, keys, newKey)",
"def remove_outlier(keys):\n for key in keys:\n data_dict.pop(key, 0)",
"def assertNotHasKeys(self, obj, keys):\n for k in keys:\n self.assertFalse(k in obj, 'Object contains key \"%s\"' % k)",
"def update_ifnotin(d1, d2):\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1",
"def __merge_keys(\n self, kv_src_bucket, kv_dest_bucket, kvs_num=1, filter_exp=None):\n valid_keys_src, deleted_keys_src = kv_src_bucket[\n kvs_num].key_set()\n valid_keys_dest, deleted_keys_dest = kv_dest_bucket[\n kvs_num].key_set()\n\n self.log.info(\"src_kvstore has %s valid and %s deleted keys\"\n % (len(valid_keys_src), len(deleted_keys_src)))\n self.log.info(\"dest kvstore has %s valid and %s deleted keys\"\n % (len(valid_keys_dest), len(deleted_keys_dest)))\n\n if filter_exp:\n # If key based adv filter\n if \"META().id\" in filter_exp:\n filter_exp = filter_exp.split('\\'')[1]\n\n filtered_src_keys = [key for key in valid_keys_src if re.search(str(filter_exp), key) is not None]\n valid_keys_src = filtered_src_keys\n self.log.info(\n \"{0} keys matched the filter expression {1}\".format(\n len(valid_keys_src),\n filter_exp))\n\n for key in valid_keys_src:\n # replace/add the values for each key in src kvs\n if key not in deleted_keys_dest:\n partition1 = kv_src_bucket[kvs_num].acquire_partition(key)\n partition2 = kv_dest_bucket[kvs_num].acquire_partition(key)\n # In case of lww, if source's key timestamp is lower than\n # destination than no need to set.\n if self.__lww and partition1.get_timestamp(\n key) < partition2.get_timestamp(key):\n continue\n key_add = partition1.get_key(key)\n partition2.set(\n key,\n key_add[\"value\"],\n key_add[\"expires\"],\n key_add[\"flag\"])\n kv_src_bucket[kvs_num].release_partition(key)\n kv_dest_bucket[kvs_num].release_partition(key)\n\n for key in deleted_keys_src:\n if key not in deleted_keys_dest:\n partition1 = kv_src_bucket[kvs_num].acquire_partition(key)\n partition2 = kv_dest_bucket[kvs_num].acquire_partition(key)\n # In case of lww, if source's key timestamp is lower than\n # destination than no need to delete.\n if self.__lww and partition1.get_timestamp(\n key) < partition2.get_timestamp(key):\n continue\n partition2.delete(key)\n kv_src_bucket[kvs_num].release_partition(key)\n kv_dest_bucket[kvs_num].release_partition(key)\n\n valid_keys_dest, deleted_keys_dest = kv_dest_bucket[\n kvs_num].key_set()\n self.log.info(\"After merging: destination bucket's kv_store now has {0}\"\n \" valid keys and {1} deleted keys\".\n format(len(valid_keys_dest), len(deleted_keys_dest)))",
"def keep_in_dictionary(self, dictionary, *keys):\n self._validate_dictionary(dictionary)\n remove_keys = [k for k in dictionary if k not in keys]\n self.remove_from_dictionary(dictionary, *remove_keys)",
"def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)",
"def _filter_dict(d, keys):\n if keys is None:\n return d\n else:\n keys = set(keys)\n present_keys = keys.intersection(d.keys())\n missing_keys = keys.difference(d.keys())\n res = {k: d[k] for k in present_keys}\n if len(missing_keys) != 0:\n warnings.warn(\"Missing expected keys: {}\".format(missing_keys), stacklevel=2)\n return res",
"def fromkeys():\n st = time.clock()\n d = {key: 0 for key in keys}\n e1 = time.clock() - st\n\n st = time.clock()\n d = dict.fromkeys(keys, 0)\n e2 = time.clock() - st\n\n assert e1 > e2",
"def removekeys(self, *keys) -> None:\n for k in keys:\n for i in self.list:\n i.pop(k)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
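The companion sketch for tree_copy_duplicate_removal, under the same PyROOT assumptions as above; event_id is an assumed branch name used as the deduplication key.

import ROOT

in_file = ROOT.TFile.Open("events.root")
in_tree = in_file.Get("events")
out_file = ROOT.TFile("deduplicated.root", "RECREATE")
out_tree = in_tree.CloneTree(0)

seen_keys = set()  # grows as unique key values are encountered
tree_copy_duplicate_removal(in_tree, out_tree, key="event_id", keys=seen_keys)
print("kept {} of {} entries".format(out_tree.GetEntries(), in_tree.GetEntries()))

out_tree.Write()
out_file.Close()
in_file.Close()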
Convert the numpy array representing the Game of Life (GOL) grid to a QImage. | def numpy_to_qimage(np_array: np.ndarray, show_age: bool):
# Only support 2D array of bytes
assert len(np_array.shape) == 2 and np_array.dtype == np.uint8
width = np_array.shape[1]
height = np_array.shape[0]
bytes_per_line = width
image = QImage(np_array, width, height, bytes_per_line, QImage.Format_Indexed8)
# Maps array values to color
if show_age:
image.setColorTable(colors.AGE_COLOR_TABLE)
else:
image.setColorTable(colors.BINARY_COLOR_TABLE)
return image | [
"def rgb2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n #h, w, channels = rgb.shape\n\n\n\n ## Qt expects 32bit BGRA data for color images:\n #bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')\n #bgra[...,0] = rgb[...,2]\n #bgra[...,1] = rgb[...,1]\n #bgra[...,2] = rgb[...,0]\n #if rgb.shape[2] == 3:\n #bgra[...,3].fill(255)\n #fmt = QImage.Format_RGB32\n #else:\n #bgra[...,3] = rgb[...,3]\n #fmt = QImage.Format_ARGB32\n\n #result = QImage(bgra.data, w, h, fmt)\n #result.ndarray = bgra\n #return result\n height, width, bytesPerComponent = rgb.shape\n bytesPerLine = bytesPerComponent * width\n result = QImage(rgb, width, height, bytesPerLine, QImage.Format_RGB888)\n return result.rgbSwapped()",
"def convertNumpy2Image(self, array):\n cv2image = cv2.cvtColor(array, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n return imgtk",
"def readQImage(self):\n w = self.width()\n h = self.height()\n self.repaint()\n pixels = np.empty((h, w, 4), dtype=np.ubyte)\n pixels[:] = 128\n pixels[..., 0] = 50\n pixels[..., 3] = 255\n\n glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels)\n\n # swap B,R channels for Qt\n tmp = pixels[..., 0].copy()\n pixels[..., 0] = pixels[..., 2]\n pixels[..., 2] = tmp\n pixels = pixels[::-1] # flip vertical\n\n img = fn.makeQImage(pixels, transpose=False)\n return img",
"def array2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n h, w, channels = rgb.shape\n\n # Qt expects 32bit BGRA data for color images:\n bgra = np.empty((h, w, 4), np.uint8, 'C')\n bgra[...,0] = rgb[...,2]\n bgra[...,1] = rgb[...,1]\n bgra[...,2] = rgb[...,0]\n if rgb.shape[2] == 3:\n bgra[...,3].fill(255)\n else:\n bgra[...,3] = rgb[...,3]\n\n fmt = QImage.Format_ARGB32\n result = QImage(bgra.data, w, h, fmt)\n result.ndarray = bgra\n return result",
"def make_image(self, save=False):\n\n # image_grid = np.full((self.size_x, self.size_y), '#888888', dtype=str)\n image_grid = np.full((self.size_x, self.size_y, 3), 0, dtype=np.uint8)\n\n # self.grid = np.flip(self.grid, 1)\n\n # self.grid = np.swapaxes(self.grid, 0, 0)\n \"\"\"\n image_grid[self.grid == 0] = 'FFFFFF'\n image_grid[self.grid == 1] = '000000'\n image_grid[self.grid == 2] = '00FF00'\n image_grid[self.grid == 3] = '0000FF'\n image_grid[self.grid == 4] = 'FFFF00'\n image_grid[self.grid == 5] = '00FFFF'\n image_grid[self.grid == 6] = 'FF00FF'\n \"\"\"\n image_grid[self.grid == 0] = (1, 1, 1)\n image_grid[self.grid == 1] = (0, 0, 0)\n image_grid[self.grid == 2] = (1, 0, 1)\n image_grid[self.grid == 3] = (0, 1, 0)\n image_grid[self.grid == 4] = (0, 0, 1)\n image_grid[self.grid == 5] = (0, 1, 1)\n image_grid[self.grid == 6] = (1, 1, 0)\n\n #for ant in self.ants:\n # image_grid[ant.x, ant.y] = (1, 0, 0)\n\n # image_grid = image_grid.swapaxes(0, 1)\n # self.grid = self.grid.swapaxes(0, 1)\n\n\n\n DPI = 100\n width, height = 1000, 1000\n fig = plt.figure(figsize=(width / DPI, height / DPI), dpi=DPI, facecolor='k')\n ax = fig.add_subplot()\n\n plt.axis('equal')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n\n for y in range(self.size_x):\n for x in range(self.size_y):\n if self.grid[x, y] != 0:\n # Only plot a hexagon if its state is not zero.\n plot_hex(ax, x, y, image_grid[x, y])\n\n ax.set_xlim(0, self.size_x)\n ax.set_ylim(0, self.size_y)\n\n plt.show()\n\n logging.info(\"Finished Image Processing\")",
"def nparrayToQPixmap(array_image):\n\n pil_image = toimage(array_image)\n qtImage = ImageQt(pil_image)\n if len(array_image.shape) == 3:\n frm = QtGui.QImage.Format_ARGB32\n else:\n frm = QtGui.QImage.Format_Mono\n q_image = QtGui.QImage(qtImage).convertToFormat(frm)\n q_pixmap = QtGui.QPixmap(q_image)\n return q_pixmap",
"def render(arr):\n mode = \"RGB\" if arr.shape[-1] == 3 else \"L\"\n img = Image.fromarray(np.squeeze(arr), mode)\n img.show()",
"def transformed(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QImage",
"def _mat_as_gtk_pixbuf(self):\n if self.type() != CV_8UC3:\n raise TypeError('The source image is not of type CV_8UC3.')\n \n return gtk.gdk.pixbuf_new_from_data(self.data, gtk.gdk.COLORSPACE_RGB,\n False, 8, self.cols, self.rows, self.step)",
"def rgb2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\n \"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n h, w, channels = rgb.shape\n\n # Qt expects 32bit BGRA data for color images:\n bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')\n bgra[..., 0] = rgb[..., 2]\n bgra[..., 1] = rgb[..., 1]\n bgra[..., 2] = rgb[..., 0]\n if rgb.shape[2] == 3:\n bgra[..., 3].fill(255)\n fmt = QImage.Format_RGB32\n else:\n bgra[..., 3] = rgb[..., 3]\n fmt = QImage.Format_ARGB32\n\n result = QImage(bgra.data, w, h, fmt)\n result.ndarray = bgra\n return result",
"def gray2qimage(gray):\n if len(gray.shape) != 2:\n raise ValueError(\"gray2QImage can only convert 2D arrays\")\n\n gray = numpy.require(gray, numpy.uint8, 'C')\n\n h, w = gray.shape\n\n result = QImage(gray.data, w, h, QImage.Format_Indexed8)\n result.ndarray = gray\n for i in range(256):\n result.setColor(i, QColor(i, i, i).rgb())\n return result",
"def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()",
"def convert_frame(self):\n if self.colorspace == \"rgb\":\n self.qimage = QImage(\n self.camera.frame.data,\n self.camera.frame.shape[1],\n self.camera.frame.shape[0],\n self.camera.frame.shape[1] * 3,\n QImage.Format_RGB888\n )\n elif self.colorspace == \"gray\":\n self.qimage = QImage(\n self.camera.frame.data,\n self.camera.frame.shape[1],\n self.camera.frame.shape[0],\n self.camera.frame.shape[1] * 1,\n QImage.Format_Grayscale8)\n\n self.pixmap.convertFromImage(self.qimage)",
"def fromData(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QImage",
"def gray2qimage(gray):\n if len(gray.shape) != 2:\n raise ValueError(\"gray2QImage can only convert 2D arrays\")\n\n gray = numpy.require(gray, numpy.uint8, 'C')\n\n h, w = gray.shape\n\n result = QImage(gray.data, w, h, QImage.Format_Indexed8)\n result.ndarray = gray\n for i in range(256):\n result.setColor(i, QColor(i, i, i).rgb())\n return result",
"def display(self, array) :\n plt.imshow(array)\n plt.show()",
"def show_image_from_array(array: numpy.ndarray):\n image = Image.fromarray(array)\n image.show()",
"def convertToFormat(self, QImage_Format, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QImage",
"def _prepare_image(self, grid):\n grid = np.array(grid, dtype=np.uint8)\n\n width = int(grid.shape[1] * self.scale_percent)\n height = int(grid.shape[0] * self.scale_percent)\n grid = cv2.resize(grid, (width, height), interpolation=cv2.INTER_AREA)\n return grid"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare paths specified as config. The input is a list of either strings, or 2-tuples (source, target). Where single strings are supplied, the basenames are used as targets. Where targets are given explicitly, they must not be absolute paths. Returns a list of 2-tuples, or throws ConfigError if something is wrong in the input. | def process_path_specs(specs):
processedSpecs = []
for spec in specs:
if not isinstance(spec, (list, tuple)):
source = spec
target = None
elif len(spec) != 2:
raise ConfigError("path spec must be a list or tuple of "
"length two")
else:
source, target = spec
source = os.path.normpath(source)
if not target:
target = os.path.basename(source)
elif os.path.isabs(target):
raise ConfigError("target path for include file may not be "
"an absolute path")
processedSpecs.append((source, target))
return processedSpecs | [
"def _prepare_files(self, targets, storage_folder=None):\n target_root = self.env.target\n\n targets = [\n helpers.get_relative_path(target_root, target, base=target_root)\n for target in targets\n ]\n\n if storage_folder:\n storage_folder = helpers.get_relative_path(self.env.root, storage_folder, base=self.env.root)\n\n return [\n (os.path.join(storage_folder, os.path.basename(t)), t)\n for t in targets\n ]\n\n else:\n return [(t, t) for t in targets]",
"def populate_paths(cls, settings: Settings) -> List[\"Target\"]:\n file_paths = crawl_in(settings.targets, settings.recurse)\n file_paths = filter_blacklist(file_paths, settings.ignore)\n file_paths = filter_extensions(file_paths, settings.mask)\n targets = [cls(file_path, settings) for file_path in file_paths]\n targets = list(dict.fromkeys(targets)) # unique values\n targets = list(filter(cls._matches_media, targets))\n return targets",
"def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )",
"def _resolve_paths(self, rel_base, paths):\n\n # meta targets are composed of already-resolved paths\n if not paths or self.is_meta:\n return paths\n\n def flatten_paths(*items):\n \"\"\"Flattens one or more items into a list. If the item is iterable each of its items is\n flattened. If an item is callable, it is called and the result is flattened. Otherwise the\n atom is appended to the flattened list. These rules are applied recursively such that the\n returned list will only contain non-iterable, non-callable atoms.\"\"\"\n\n flat = []\n\n def flatmap(item):\n if isinstance(item, Compatibility.string):\n flat.append(item)\n else:\n try:\n for i in iter(item):\n flatmap(i)\n except:\n if callable(item):\n flatmap(item())\n else:\n flat.append(item)\n\n for item in items:\n flatmap(item)\n\n return flat\n\n src_relpath = os.path.relpath(self.address.buildfile.parent_path,\n os.path.join(get_buildroot(), self.target_base))\n\n resolve_basepath = os.path.join(get_buildroot(), rel_base, src_relpath)\n with pushd(resolve_basepath):\n return [ os.path.normpath(os.path.join(src_relpath, path)) for path in flatten_paths(paths) ]",
"def process_config_files(config_file_paths, default_ext='.txt'):\n if isinstance(config_file_paths, str):\n config_file_paths = [config_file_paths]\n\n paths = []\n # Expand globs and directories.\n for path in config_file_paths:\n if '*' in path:\n paths.extend(glob.glob(path))\n elif os.path.isdir(path):\n paths.extend(glob.glob(os.path.join(path, '*' + default_ext)))\n else:\n paths.append(path)\n # Add jobs from config files.\n configs = []\n for path in sorted(paths):\n path_configs = process_config_file(path)\n configs.extend(path_configs)\n print('Added {} configs from {}'.format(len(path_configs), path))\n return configs",
"def _copy_paths(self, paths, source, destination, output_path,\r\n final_path=None):\r\n for path in paths:\r\n if final_path:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, final_path))\r\n else:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, path))",
"def _resolvePathPatterns(self, sources, source):\n kept = []\n pattern = re.compile(source['pathPattern'])\n basedir = self._basePath / source['path']\n if (self._basePath.name == Path(self._largeImagePath).name and\n (self._basePath.parent / source['path']).is_dir()):\n basedir = self._basePath.parent / source['path']\n basedir = basedir.resolve()\n for entry in basedir.iterdir():\n match = pattern.search(entry.name)\n if match:\n if entry.is_file():\n kept.append((entry.name, entry, match))\n elif entry.is_dir() and (entry / entry.name).is_file():\n kept.append((entry.name, entry / entry.name, match))\n for idx, (_, entry, match) in enumerate(sorted(kept)):\n subsource = copy.deepcopy(source)\n # Use named match groups to augment source values.\n for k, v in match.groupdict().items():\n if v.isdigit():\n v = int(v)\n if k.endswith('1'):\n v -= 1\n if '.' in k:\n subsource.setdefault(k.split('.', 1)[0], {})[k.split('.', 1)[1]] = v\n else:\n subsource[k] = v\n subsource['path'] = entry\n for axis in self._axesList:\n stepKey = '%sStep' % axis\n valuesKey = '%sValues' % axis\n if stepKey in source:\n if axis in source or valuesKey not in source:\n subsource[axis] = subsource.get(axis, 0) + idx * source[stepKey]\n else:\n subsource[valuesKey] = [\n val + idx * source[stepKey] for val in subsource[valuesKey]]\n del subsource['pathPattern']\n sources.append(subsource)",
"def build_path_pairs(self):\n\n if self.source_paths is None:\n\n raise ValueError(\"self.source_paths uninitialized!\")\n\n for source_path in self.source_paths:\n\n for block_data_dir in data_settings.BLOCK_DATA_DIRS:\n\n block_id = os.path.split(block_data_dir)[-1]\n\n source_data_dir, filename = os.path.split(source_path)\n containing_dir = os.path.split(source_data_dir)[-1]\n\n if not containing_dir in [block_id, data_settings.GRANULE]:\n\n continue\n\n block_data_path = os.path.join(block_data_dir, filename)\n self.path_pairs.append((source_path, block_data_path))",
"def _copy_paths(self, paths, source, destination, output_path,\n final_path=None):\n for path in paths:\n copy(path, source, os.path.join(output_path, destination),\n final_path, overwrite=True)",
"def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]",
"def _GetFilePairs(config):\n\n ret = []\n\n has_bazel_genfiles = os.path.exists(\"bazel-bin\")\n\n for filename in config.file_list:\n target = os.path.join(config.package_name, filename)\n generated = os.path.join(config.package_name, config.pattern % filename)\n if has_bazel_genfiles:\n generated = os.path.join(\"bazel-bin\", generated)\n\n # Generated files should always exist. Blaze should guarantee this before\n # we are run.\n if not os.path.isfile(generated):\n print(\"Generated file '%s' does not exist.\" % generated)\n print(\"Please run this command to generate it:\")\n print(\" bazel build %s:%s\" % (config.package_name, config.target_name))\n sys.exit(1)\n ret.append(_FilePair(target, generated))\n\n return ret",
"def resolve_target_sources(target_sources, extension):\n resolved_sources = []\n\n if target_sources:\n for target in target_sources:\n for resolved in target.resolve():\n if hasattr(resolved, 'sources'):\n resolved_sources.extend(os.path.join(resolved.target_base, source)\n for source in resolved.sources if source.endswith(extension))\n return resolved_sources",
"def _get_list_of_csv_files_and_configs_to_process(\n self,\n list_of_csv_files_and_configs):\n files_and_configs = []\n for f_c in list_of_csv_files_and_configs:\n if os.path.isdir(f_c[0]):\n for f in self._get_list_of_csv_files_in_a_directory(f_c[0]):\n files_and_configs.append(\n self._pair_file_path_name_and_config([f] + f_c[1:]))\n elif os.path.isfile(f_c[0]):\n files_and_configs.append(\n self._pair_file_path_name_and_config(f_c))\n else:\n raise transform_errors.NotAFileOrAFolder(\n f\"{f_c} is NOT a file nor a folder. \"\n f\"Please make sure to check this input.\")\n\n return files_and_configs",
"def build_destination_files(destination, requested_paths):\n pathlib.Path(destination).resolve()\n longest_common_requested_path = longest_common_path_prefix(requested_paths)\n destination_files = [destination / path.relative_to(longest_common_requested_path) for path in requested_paths]\n existing_files = [path for path in destination_files if path.exists()]\n return destination_files, existing_files",
"def compile_config_files(\n self,\n context: Context,\n ):\n for source in (\n *self.source_types[DirectoryModuleSource],\n *self.source_types[GithubModuleSource],\n ):\n source.config(context=context)",
"def _resolve_target_sources(self, target_sources, extension=None, relative_to_target_base=False):\r\n resolved_sources = []\r\n for resolved in Target.resolve_all(target_sources):\r\n if hasattr(resolved, 'sources'):\r\n resolved_sources.extend(\r\n source if relative_to_target_base else os.path.join(resolved.target_base, source)\r\n for source in resolved.sources if not extension or source.endswith(extension)\r\n )\r\n return resolved_sources",
"def convtargets(tarlist,deplist,targets,variables):\n\n finaltars = []\n deps = expand(deplist,variables)\n tars = expand(tarlist,variables) #ugh high risk of confusion because of the names...\n for target in tars:\n if \"%\" in target:\n tarsplit = target.split(\"%\")\n (l1,l2) = len(tarsplit[0]), len(tarsplit[1])\n for buildtarget in targets:\n for newtar in buildtarget[1]:\n if newtar[-l2:] == tarsplit[1] and newtar[0:l1] == tarsplit[0]:\n rulelst = [newtar,[]]\n for newdep in deps:\n if \"%\" in newdep:\n depsplit = newdep.split(\"%\")\n rulelst[1] += [depsplit[0] + newtar[l1:-l2] + depsplit[1]]\n else:\n rulelst[1] += [newdep]\n finaltars.append(rulelst)\n else:\n finaltars.append([target,deps])\n return finaltars",
"def populate_targets_from_pack(pack_list):\n if not isinstance(pack_list, (list, tuple)):\n pack_list = [pack_list]\n for pack_or_path in pack_list:\n for part, tgt in _create_targets_from_pack(pack_or_path):\n part = part.lower()\n \n # Make sure there isn't a duplicate target name.\n if part not in TARGET:\n TARGET[part] = tgt",
"def _resolveFramePaths(self, sourceList):\n # we want to work with both _basePath / <path> and\n # _basePath / .. / <path> / <name> to be compatible with Girder\n # resource layouts.\n sources = []\n for source in sourceList:\n if source.get('pathPattern'):\n self._resolvePathPatterns(sources, source)\n else:\n self._resolveSourcePath(sources, source)\n for source in sources:\n if hasattr(source.get('path'), 'resolve'):\n source['path'] = source['path'].resolve(False)\n return sources"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the paths of directories which contain files that should not be included, generally because they contain standard system libraries. | def _GetDefaultBinPathExcludes(self):
if sys.platform == "win32":
import cx_Freeze.util
systemDir = cx_Freeze.util.GetSystemDir()
windowsDir = cx_Freeze.util.GetWindowsDir()
return [windowsDir, systemDir, os.path.join(windowsDir, "WinSxS")]
elif sys.platform == "darwin":
return ["/lib", "/usr/lib", "/System/Library/Frameworks"]
else:
return ["/lib", "/lib32", "/lib64", "/usr/lib", "/usr/lib32",
"/usr/lib64"] | [
"def _GetDefaultBinPathExcludes(self):\r\n if sys.platform == \"win32\":\r\n import cx_Freeze.util\r\n systemDir = cx_Freeze.util.GetSystemDir()\r\n windowsDir = cx_Freeze.util.GetWindowsDir()\r\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\r\n elif sys.platform == \"darwin\":\r\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\r\n else:\r\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\r\n \"/usr/lib64\"]",
"def get_include_dirs():\n import numpy as np\n return [np.get_include()]",
"def _get_exclude_paths(self):\n paths = []\n for path in [self.simulation_dir, self.output_dir]:\n if not path.endswith('/'):\n paths.append(path + '/')\n return paths",
"def removeduppaths():\n # This ensures that the initial path provided by the interpreter contains\n # only absolute pathnames, even if we're running from the build directory.\n L = []\n known_paths = set()\n for dir in sys.path:\n # Filter out duplicate paths (on case-insensitive file systems also\n # if they only differ in case); turn relative paths into absolute\n # paths.\n dir, dircase = makepath(dir)\n if not dircase in known_paths:\n L.append(dir)\n known_paths.add(dircase)\n sys.path[:] = L\n return known_paths",
"def removeduppaths():\r\n # This ensures that the initial path provided by the interpreter contains\r\n # only absolute pathnames, even if we're running from the build directory.\r\n L = []\r\n known_paths = set()\r\n for dir in sys.path:\r\n # Filter out duplicate paths (on case-insensitive file systems also\r\n # if they only differ in case); turn relative paths into absolute\r\n # paths.\r\n dir, dircase = makepath(dir)\r\n if not dircase in known_paths:\r\n L.append(dir)\r\n known_paths.add(dircase)\r\n sys.path[:] = L\r\n return known_paths",
"def get_root_files_and_directories() -> List[str]:\n skip = [\n \"requirements\",\n \"migrations\",\n \"__pycache__\",\n \"fd_device.egg-info\",\n \"build\",\n ]\n root_files = glob(PROJECT_ROOT + \"/*.py\")\n root_directories = [\n os.path.join(PROJECT_ROOT, name)\n for name in next(os.walk(PROJECT_ROOT))[1]\n if not name.startswith(\".\")\n ]\n files_and_directories = [\n arg for arg in root_files + root_directories if not arg.endswith(tuple(skip))\n ]\n\n return files_and_directories",
"def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)",
"def compute_exclusions(self):\n source_folders = [self.filter_dot(s) for s in set(dirname(\n src) for src in self.resources.c_sources + self.resources.cpp_sources + self.resources.s_sources)]\n\n self.excluded_folders = set(self.resources.ignored_dirs) - set(self.resources.inc_dirs)",
"def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)",
"def paths(self):\r\n if not self.select_paths:\r\n return sys.path\r\n result = []\r\n match_any = set()\r\n for path in sys.path:\r\n path = os.path.normcase(os.path.abspath(path))\r\n for match in self.select_paths:\r\n match = os.path.normcase(os.path.abspath(match))\r\n if '*' in match:\r\n if re.search(fnmatch.translate(match+'*'), path):\r\n result.append(path)\r\n match_any.add(match)\r\n break\r\n else:\r\n if path.startswith(match):\r\n result.append(path)\r\n match_any.add(match)\r\n break\r\n else:\r\n logger.debug(\"Skipping path %s because it doesn't match %s\"\r\n % (path, ', '.join(self.select_paths)))\r\n for match in self.select_paths:\r\n if match not in match_any and '*' not in match:\r\n result.append(match)\r\n logger.debug(\"Adding path %s because it doesn't match anything already on sys.path\"\r\n % match)\r\n return result",
"def get_standard_lib_paths() -> Set[Path]:\n paths: Set[Path] = set()\n\n for is_plat_specific in [True, False]:\n\n # Get lib modules paths.\n lib_path = sysconfig.get_python_lib(\n standard_lib=True, plat_specific=is_plat_specific\n )\n\n for path in os.listdir(lib_path):\n paths.add(Path(os.path.join(lib_path, path)))\n\n # Get lib dynload modules paths, if exists.\n lib_dynload_path = os.path.join(lib_path, LIB_DYNLOAD)\n\n if os.path.isdir(lib_dynload_path):\n\n for path in os.listdir(lib_dynload_path):\n paths.add(Path(os.path.join(lib_dynload_path, path)))\n\n return paths",
"def files_missing(rootdir):\n missing = set()\n for asset in assets:\n path = joinpaths(rootdir, asset)\n if not os.path.isfile(path):\n missing.add(path)\n return missing",
"def include_dirs(self):",
"def get_include():\n import os\n lxml_path = __path__[0]\n include_path = os.path.join(lxml_path, 'includes')\n includes = [include_path, lxml_path]\n\n for name in os.listdir(include_path):\n path = os.path.join(include_path, name)\n if os.path.isdir(path):\n includes.append(path)\n\n return includes",
"def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)",
"def get_third_party_lib_paths() -> Set[Path]:\n paths: Set[Path] = set()\n\n packages_paths: Set[str] = {\n path\n for path in sys.path\n if path and Path(path).parts[-1] in [DIST_PACKAGES, SITE_PACKAGES]\n }\n\n for path in packages_paths:\n\n for name in os.listdir(path):\n if not name.startswith(\"_\") and not name.endswith(BIN_PY_EXTENSIONS):\n paths.add(Path(os.path.join(path, name)))\n\n return paths",
"def untracked(prefix, exclude_self_build=False):\r\n conda_files = conda_installed_files(prefix, exclude_self_build)\r\n return {path for path in walk_prefix(prefix) - conda_files\r\n if not (path.endswith('~') or\r\n (sys.platform=='darwin' and path.endswith('.DS_Store')) or\r\n (path.endswith('.pyc') and path[:-1] in conda_files))}",
"def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths",
"def get_ext_root_dirs(self):\n dir_list = []\n if op.exists(EXTENSIONS_DEFAULT_DIR):\n dir_list.append(EXTENSIONS_DEFAULT_DIR)\n dir_list.extend(self.get_thirdparty_ext_root_dirs())\n return list(set(dir_list))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if the file should be copied to the target machine. This is done by checking the binPathIncludes, binPathExcludes, binIncludes and binExcludes configuration variables using first the full file name, then just the base file name, then the file name without any version numbers. Files are included unless specifically excluded but inclusions take precedence over exclusions. | def _ShouldCopyFile(self, path):
# check for C runtime, if desired
path = os.path.normcase(path)
dirName, fileName = os.path.split(path)
if fileName.startswith("msvcr") and fileName.endswith(".dll"):
self.msvcRuntimeDir = dirName
return self.includeMSVCR
# check the full path
if path in self.binIncludes:
return True
if path in self.binExcludes:
return False
# check the file name by itself (with any included version numbers)
if fileName in self.binIncludes:
return True
if fileName in self.binExcludes:
return False
# check the file name by itself (version numbers removed)
name = self._RemoveVersionNumbers(fileName)
if name in self.binIncludes:
return True
if name in self.binExcludes:
return False
# check the path for inclusion/exclusion
for path in self.binPathIncludes:
if dirName.startswith(path):
return True
for path in self.binPathExcludes:
if dirName.startswith(path):
return False
return True | [
"def _ShouldCopyFile(self, path):\r\n\r\n # check for C runtime, if desired\r\n path = os.path.normcase(path)\r\n dirName, fileName = os.path.split(path)\r\n if fileName.startswith(\"msvcr\") and fileName.endswith(\".dll\"):\r\n self.msvcRuntimeDir = dirName\r\n return self.includeMSVCR\r\n\r\n # check the full path\r\n if path in self.binIncludes:\r\n return True\r\n if path in self.binExcludes:\r\n return False\r\n\r\n # check the file name by itself (with any included version numbers)\r\n if fileName in self.binIncludes:\r\n return True\r\n if fileName in self.binExcludes:\r\n return False\r\n\r\n # check the file name by itself (version numbers removed)\r\n name = self._RemoveVersionNumbers(fileName)\r\n if name in self.binIncludes:\r\n return True\r\n if name in self.binExcludes:\r\n return False\r\n\r\n # check the path for inclusion/exclusion\r\n for path in self.binPathIncludes:\r\n if dirName.startswith(path):\r\n return True\r\n for path in self.binPathExcludes:\r\n if dirName.startswith(path):\r\n return False\r\n\r\n return True",
"def file_copy_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"file_copy_enabled\")",
"def should_copy(target_file: Path, source_file: Path) -> bool:\n if not source_file.is_file():\n return False\n\n if not target_file.exists():\n return True\n\n if target_file.stat().st_mtime < source_file.stat().st_mtime:\n return True\n\n return False",
"def hasTargetSourcePath(self) -> bool:\n return self.buildTarget in self.targetInstSrc",
"def _rsync_bin_test(self):\n\n rsync_location = self.app_config['rsync_binary']\n\n if os.path.isfile(rsync_location):\n stat = os.stat(rsync_location)\n\n if os.access(rsync_location, os.X_OK):\n return True\n else:\n return False\n\n else:\n return False",
"def include_file(self, filename):\n # Only include Python files for now.\n if filename[-3:] == '.py':\n return True\n return False",
"def _can_sync(self, source_file, directory):\n if (directory.get(\"sync_on_save\", False)):\n try:\n #checks the file is in the include pattern\n regexp = directory.get(\"include_pattern\", \"\")\n if (regexp and not re.match(regexp, source_file)):\n return False\n #checks the file is in the exclude pattern\n regexp = directory.get(\"exclude_pattern\", \"\")\n if (regexp and re.match(regexp, source_file)):\n return False\n\n except Exception as err:\n self._print_status(\"Invalid pattern. Error: \" + str(err))\n\n #checks the file is in the synchronized dir\n local_dir = directory.get(\"local\", \"\")\n if (local_dir and source_file.find(local_dir) != -1):\n return True\n return False",
"def copy_file_check(self):\n pass",
"def _isBuildRequired(self):\n if self.config.getDoInSourceBuilds():\n return True\n\n foutpath = self.getOutputFilePath()\n\n if os.path.isfile(foutpath):\n mtime = os.path.getmtime(foutpath)\n\n # If the configuration file is newer than the output base ontology,\n # a new build might be needed if any imports changes were made.\n if mtime < os.path.getmtime(self.config.getConfigFilePath()):\n return True\n\n # Check the modification time of the base ontology.\n if mtime < os.path.getmtime(self.config.getBaseOntologyPath()):\n return True\n\n # Check the modification time of the top-level imports file. If\n # this file was changed, and full ontologies were added as imports,\n # the import modules would not need to be built but we would still\n # need to update the base ontology.\n if mtime < os.path.getmtime(self.config.getTopImportsFilePath()):\n return True\n\n return False\n else:\n return True",
"def include_source_files(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"include_source_files\")",
"def copy_file(self, filepath):\n copy_file = False\n try:\n copy_file = self.data[filepath]['copy']\n except KeyError:\n return False\n return copy_file",
"def get_can_handle_file(self, review_file, **kwargs):\n if not self.file_patterns:\n return True\n\n filename = review_file.dest_file.lower()\n\n for pattern in self.file_patterns:\n if fnmatchcase(filename, pattern):\n return True\n\n return False",
"def test_scriptfiles_are_abs(self):\n self.core = gc3libs.core.Core(self.cfg)\n for resource in self.core.get_resources():\n for (k, v) in resource.iteritems():\n if k not in ['prologue', 'epilogue',\n 'myapp_prologue', 'myapp_epilogue']:\n continue\n assert os.path.isfile(v)\n assert os.path.isabs(v)\n assert (os.path.abspath(v) ==\n v)",
"def check_for_arduino_file(self):\n result = False\n filename = self.filename\n filename_prj = self.filename_project\n if filename and filename_prj:\n if filename.endswith(\"ino\") or filename_prj.endswith(\"ino\"):\n result = True\n elif (filename.endswith(\"h\") or filename_prj.endswith(\"h\")) or (\n filename.endswith(\"cpp\") or filename_prj.endswith(\"cpp\")\n ):\n result = \"subfile\"\n return result",
"def wantFile(self, file, package=None):\n if self.coverInclusive:\n if file.endswith(\".py\"):\n if package and self.coverPackages:\n for want in self.coverPackages:\n if package.startswith(want):\n return True\n else:\n return True\n return None",
"def should_watch_file(self, entry):\n return not self.include_pattern.match(entry.name)",
"def _is_file_deployed(self, file_name):\n file_path = os.path.join(\n cluster.DEPLOY_ROOT_DIR,\n \"{app_id}-{deploy_id}\".format(\n app_id=self.application.name,\n deploy_id=self.app.deploy_id\n ),\n file_name\n )\n self.test_file(\n self.master,\n const.consul['container'],\n file_path\n )",
"def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False",
"def copy_file(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n if _passes_filter(src, filter):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if _should_copy(src, dst):\n #console(f'copy {src}\\n --> {dst}')\n shutil.copyfile(src, dst, follow_symlinks=True)\n shutil.copystat(src, dst, follow_symlinks=True)\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a user and a group name, and returns `True` if the user is in that group. | def is_in_group(user, group_name):
return is_in_group_user_id(user.id, group_name) | [
"def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False",
"def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()",
"def is_user_in_group(user, group):\n return find_group(user, group)",
"def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()",
"def in_group(user, groups):\r\n group_list = force_unicode(groups).split(',')\r\n return bool(user.groups.filter(name__in=group_list).values('name'))",
"def user_exists_in_group(username, group):\n global group_auth\n try:\n return group in group_auth[username]\n except:\n pass\n return False",
"def group_user_check(group, user):\n d = group_check(group)\n if d is None:\n return False\n else:\n return user in d[\"members\"]",
"def is_user_in_group(user_id, group_id):\n query = \"\"\"\n SELECT pos_id\n FROM current_position_holders NATURAL JOIN positions\n WHERE user_id = %s AND group_id = %s\n LIMIT 1\n \"\"\"\n with flask.g.pymysql_db.cursor() as cursor:\n cursor.execute(query, [user_id, group_id])\n return cursor.fetchone() is not None",
"def in_group(*groups):\n\tdef condition(client):\n\t\treturn bool([group.name for group in client.user.groups] & groups)\n\treturn condition",
"def user_is_group_member(self, group=None):\n if isinstance(group, str):\n return GroupMembership.objects.filter(\n user=self, group__name=group\n ).count() == 1\n else:\n return GroupMembership.objects.filter(\n user=self, group=group\n ).count() == 1",
"def search_in_group(username, group):\n with LdapConnector(username) as l:\n group_object = l.search_s(\n 'cn=' + group + ',ou=Gruppen,ou=Sektion Wundtstrasse,o=AG DSN,c=de',\n ldap.SCOPE_SUBTREE, '(memberuid=%s)' % username)\n\n if group_object:\n return True\n return False",
"def group_authenticated(self, user_token, group):\n if self.authenticated(user_token):\n token = self.token_storage.get(user_token)\n groups = self.get_groups(token.username)\n if group in groups:\n return True\n\n return False",
"def endpoint_in_group(request, group_name):\n user, password, err = auth_info(request)\n if err:\n return NOT_AUTHORIZED\n user_dn = USER_DN_TMPL % user\n success = authenticate(user_dn, password)\n if success:\n groups = get_groups(user_dn, password)\n required_group = GROUP_DN_TMPL % group_name\n if required_group in groups:\n return OK\n logger.debug(\"user %s is not in required group %s\" % (user_dn,\n required_group))\n return NOT_AUTHORIZED",
"def user_has_group(self, username, group):\n if not self.user_exists(username.lower()):\n raise AuthKitNoSuchUserError(\"No such user %r\"%username.lower())\n if group is not None and not self.group_exists(group.lower()):\n raise AuthKitNoSuchGroupError(\"No such group %r\"%group.lower())\n user = self.session.query(self.model.User).filter_by(username=username.lower()).one()\n if user.group is None:\n if group == None:\n return True\n else:\n if group is not None and user.group.name == group.lower():\n return True\n return False",
"def has_group(self, group_name):\n # print self.groups\n return group_name in self.groups",
"def belongs_to_group(self, group):\n return group in [x.name for x in self.groups.all()]",
"def groupExists(self, groupName):\n\t\treturn groupName in self.groups",
"def adgrp(grps,name):\n\tfor temp in grps:\n\t\t#print temp\n\t\t#print \"cn=\"+name\n\t\tif \"cn=\"+name.lower() in temp.lower():\n\t\t\tprint \"User is a member of \"+name\n\t\t\tstatus=0\n\t\t\treturn(status)\n\tstatus=1\n\tprint \"STATUS: User is not a member of Group\"\n\treturn(status)",
"def _group_in(group: list, list_groups: list):\n\n for l_group in list_groups:\n group_in = True\n # check if every item of group exists in l_group\n for item in group:\n if item not in l_group:\n group_in = False\n break\n if group_in:\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
'If you create a Lambda function that processes events from stream-based services (Amazon Kinesis Streams), the number of shards per stream is the unit of concurrency. If your stream has 100 active shards, there will be 100 Lambda functions running concurrently. Then, each Lambda function processes events on a shard in the order that they arrive.' Therefore, for checkpointing logic, we should make the primary | def handler(event, context):
debug = False
rewind = False
dry_run = False
table = _ensure_dynamo_table()
consumer_id = 'test-consumer'
if debug:
state = table.scan()
print "Active leases in Dynamo:", state["Count"]
for item in state["Items"]:
print json.dumps(item, indent=4, sort_keys=True)
lease = None
shard = None
try:
visitors = set()
last_timestamp = None
for i, record in enumerate(event.get('Records', [])):
event_id, data = (record['eventID'], record['kinesis']['data'])
shard, checkpoint = event_id.split(u':')
if rewind:
print "Rewinding to checkpoint 0"
_clear_consumer_lease(table, consumer_id, shard)
rewind = False
if lease is None:
lease = _get_consumer_lease(table, consumer_id, shard) \
or {"checkpoint": "0"}
if checkpoint <= lease["checkpoint"]:
# replayed event, we should skip it
print "Replayed event; skipping"
continue
# => decode from b64
raw_event = base64.b64decode(data)
# => parse from JSON
json_event = json.loads(raw_event)
# => extract out visitor id and timestamp if present
visitor = json_event.get("visitor_site_id", "N/A")
visitors.add(visitor)
last_timestamp = json_event.get("ts_action", "N/A")
# => do something with the data
result = process(json_event)
if result:
pass
# => checkpoint the shard
lease["checkpoint"] = checkpoint
logger.info("Saw {} unique visitors in batch ending with {}".format(
len(visitors), last_timestamp))
if not dry_run:
_put_consumer_lease(table, consumer_id, shard, lease)
except Exception as ex:
# do not save consumer checkpoints because error happened
# instead, we should probably log something about the error
# in the consumer lease, to allow the Lambda to retry a fixed
# number of times, before finally "giving up" and skipping
# the records
raise
"^ some form of error handling required"
if ex:
pass | [
"def lambda_handler(event, context):\n\n mytime, lambda_name, env_vars = lambda_init.init_lambda(context)\n stage = env_vars[\"stage\"]\n consumer_master_past_lambda = env_vars[\"consumer_master_past_name\"]\n\n apps, test_params = init_apps_from_test_params(event)\n filters = init_filters()\n\n step = generate_step_from_mytime(mytime)\n\n print(\"step:\", step)\n for app in apps:\n advance_app_timestamp(app, step)\n\n consumer_event = {}\n\n # Invoke the consumer-master lambda for each app in apps\n for app in apps:\n headers = Headers(\n shadowreader_type=\"past\", stage=stage, app=app, step=step\n ).headers\n\n consumer_event = {\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"base_url\": app.base_url,\n \"cur_timestamp\": app.cur_timestamp,\n \"rate\": app.rate,\n \"baseline\": app.baseline,\n \"parent_lambda\": lambda_name,\n \"child_lambda\": consumer_master_past_lambda,\n \"headers\": headers,\n \"filters\": filters,\n }\n invoke_func(consumer_event, func=consumer_master_past_lambda)\n\n if apps and consumer_event:\n print_to_logs(consumer_event, apps)\n\n # Collect metrics and put metrics into CW\n metrics = []\n for app in apps:\n # This is the timestamp (in epoch time) that is being replayed\n # by the load test.\n metric = {\n \"name\": \"replayed_timestamp\",\n \"stage\": stage,\n \"lambda_name\": lambda_name,\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"mytime\": mytime,\n \"val\": app.cur_timestamp,\n }\n metrics.append(metric)\n\n if sr_plugins.exists(\"metrics\"):\n metric_emitter = sr_plugins.load(\"metrics\")\n for metric in metrics:\n metric_emitter.main(metric)\n\n cur_params = {\"apps\": apps, \"filters\": filters, \"test_params\": test_params}\n\n if sr_plugins.exists(\"test_params_emitter\"):\n params_emitter = sr_plugins.load(\"test_params_emitter\")\n params_emitter.main(\n cur_params,\n lambda_name,\n mytime,\n stage,\n env_vars,\n sr_config,\n sr_plugins._sr_plugins,\n )\n\n return json.dumps(cur_params, default=str), json.dumps(consumer_event, default=str)",
"def __init__(\n self, stream_name, checkpoint_table=None, host_key=None, shard_iterator_type=None,\n iterator_timestamp=None, shard_iterators=None, recover_from_dynamo=False,\n iterator_sequence_number=None, custom_kinesis_client=None):\n\n super(AsyncKinesisConsumer, self).__init__()\n\n self.stream_name = stream_name\n self.shard_iterator_type = shard_iterator_type\n self.iterator_timestamp = iterator_timestamp\n self.iterator_sequence_number = iterator_sequence_number\n self.restricted_shard_iterators = shard_iterators\n\n if recover_from_dynamo and not checkpoint_table:\n raise RuntimeError('Can not use recover_from_dynamo without checkpoint table')\n self.recover_from_dynamodb = recover_from_dynamo\n\n # Allow a custom kinesis client to be passed in. This allows for setting of any additional parameters in\n # the client without needing to track them in this library.\n if custom_kinesis_client is not None:\n self.kinesis_client = custom_kinesis_client\n else:\n self.kinesis_client = aioboto3.client('kinesis')\n\n self.checkpoint_table = checkpoint_table\n self.checkpoint_callback = None\n self.host_key = host_key\n\n self.shard_readers = {}\n self.dynamodb_instances = {}\n self.stream_data = None\n self.force_rescan = True\n\n self.checkpoint_interval = AsyncKinesisConsumer.DEFAULT_CHECKPOINT_INTERVAL\n self.lock_holding_time = AsyncKinesisConsumer.DEFAULT_LOCK_HOLDING_TIME\n self.reader_sleep_time = AsyncKinesisConsumer.DEFAULT_SLEEP_TIME\n self.fallback_time_delta = AsyncKinesisConsumer.DEFAULT_FALLBACK_TIME_DELTA",
"def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }",
"def test_kinesis_too_large_record(sdc_builder, sdc_executor, aws, keep_data):\n record_1_content = 'Hello 1'\n record_2_content = 'Hello ' + '2' * 1024 * 1024\n record_3_content = 'Hello 3'\n file_content = f'{record_1_content}\\n{record_2_content}\\n{record_3_content}'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data=file_content,\n stop_after_first_batch=True,\n max_line_length=len(record_2_content)\n )\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n kinesis_producer = pipeline_builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> [kinesis_producer, wiretap.destination]\n pipeline = pipeline_builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n assert len(received_data) == 2\n assert received_data[0] == record_1_content\n assert received_data[1] == record_3_content\n\n error_records = wiretap.error_records\n assert len(error_records) == 1\n assert error_records[0].header['errorCode'] == 'KINESIS_08'\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)",
"def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))",
"def present(\n name,\n retention_hours=None,\n enhanced_monitoring=None,\n num_shards=None,\n do_reshard=True,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n ret = {\"name\": name, \"result\": True, \"comment\": \"\", \"changes\": {}}\n\n comments = []\n changes_old = {}\n changes_new = {}\n\n # Ensure stream exists\n exists = __salt__[\"boto_kinesis.exists\"](name, region, key, keyid, profile)\n if exists[\"result\"] is False:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n comments.append(\"Kinesis stream {} would be created\".format(name))\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n else:\n is_created = __salt__[\"boto_kinesis.create_stream\"](\n name, num_shards, region, key, keyid, profile\n )\n if \"error\" in is_created:\n ret[\"result\"] = False\n comments.append(\n \"Failed to create stream {}: {}\".format(name, is_created[\"error\"])\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n comments.append(\"Kinesis stream {} successfully created\".format(name))\n changes_new[\"name\"] = name\n changes_new[\"num_shards\"] = num_shards\n else:\n comments.append(\"Kinesis stream {} already exists\".format(name))\n\n stream_response = __salt__[\"boto_kinesis.get_stream_when_active\"](\n name, region, key, keyid, profile\n )\n if \"error\" in stream_response:\n ret[\"result\"] = False\n comments.append(\n \"Kinesis stream {}: error getting description: {}\".format(\n name, stream_response[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n stream_details = stream_response[\"result\"][\"StreamDescription\"]\n\n # Configure retention hours\n if retention_hours is not None:\n old_retention_hours = stream_details[\"RetentionPeriodHours\"]\n retention_matches = old_retention_hours == retention_hours\n if not retention_matches:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n comments.append(\n \"Kinesis stream {}: retention hours would be updated to {}\".format(\n name, retention_hours\n )\n )\n else:\n if old_retention_hours > retention_hours:\n retention_updated = __salt__[\n \"boto_kinesis.decrease_stream_retention_period\"\n ](name, retention_hours, region, key, keyid, profile)\n else:\n retention_updated = __salt__[\n \"boto_kinesis.increase_stream_retention_period\"\n ](name, retention_hours, region, key, keyid, profile)\n\n if \"error\" in retention_updated:\n ret[\"result\"] = False\n comments.append(\n \"Kinesis stream {}: failed to update retention hours: {}\".format(\n name, retention_updated[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n comments.append(\n \"Kinesis stream {}: retention hours was successfully updated\".format(\n name\n )\n )\n changes_old[\"retention_hours\"] = old_retention_hours\n changes_new[\"retention_hours\"] = retention_hours\n\n # wait until active again, otherwise it will log a lot of ResourceInUseExceptions\n # note that this isn't required below; reshard() will itself handle waiting\n stream_response = __salt__[\"boto_kinesis.get_stream_when_active\"](\n name, region, key, keyid, profile\n )\n if \"error\" in stream_response:\n ret[\"result\"] = False\n comments.append(\n \"Kinesis stream {}: error getting description: {}\".format(\n name, stream_response[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n stream_details = stream_response[\"result\"][\"StreamDescription\"]\n else:\n comments.append(\n \"Kinesis stream {}: retention hours did not require 
change, already set\"\n \" at {}\".format(name, old_retention_hours)\n )\n else:\n comments.append(\n \"Kinesis stream {}: did not configure retention hours\".format(name)\n )\n\n # Configure enhanced monitoring\n if enhanced_monitoring is not None:\n if enhanced_monitoring is True or enhanced_monitoring == [\"ALL\"]:\n # for ease of comparison; describe_stream will always return the full list of metrics, never 'ALL'\n enhanced_monitoring = [\n \"IncomingBytes\",\n \"OutgoingRecords\",\n \"IteratorAgeMilliseconds\",\n \"IncomingRecords\",\n \"ReadProvisionedThroughputExceeded\",\n \"WriteProvisionedThroughputExceeded\",\n \"OutgoingBytes\",\n ]\n elif enhanced_monitoring is False or enhanced_monitoring == \"None\":\n enhanced_monitoring = []\n\n old_enhanced_monitoring = stream_details.get(\"EnhancedMonitoring\")[0][\n \"ShardLevelMetrics\"\n ]\n\n new_monitoring_set = set(enhanced_monitoring)\n old_monitoring_set = set(old_enhanced_monitoring)\n\n matching_metrics = new_monitoring_set.intersection(old_monitoring_set)\n enable_metrics = list(new_monitoring_set.difference(matching_metrics))\n disable_metrics = list(old_monitoring_set.difference(matching_metrics))\n\n if len(enable_metrics) != 0:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n comments.append(\n \"Kinesis stream {}: would enable enhanced monitoring for {}\".format(\n name, enable_metrics\n )\n )\n else:\n\n metrics_enabled = __salt__[\"boto_kinesis.enable_enhanced_monitoring\"](\n name, enable_metrics, region, key, keyid, profile\n )\n if \"error\" in metrics_enabled:\n ret[\"result\"] = False\n comments.append(\n \"Kinesis stream {}: failed to enable enhanced monitoring: {}\".format(\n name, metrics_enabled[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n comments.append(\n \"Kinesis stream {}: enhanced monitoring was enabled for shard-level\"\n \" metrics {}\".format(name, enable_metrics)\n )\n\n if len(disable_metrics) != 0:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n comments.append(\n \"Kinesis stream {}: would disable enhanced monitoring for {}\".format(\n name, disable_metrics\n )\n )\n else:\n\n metrics_disabled = __salt__[\"boto_kinesis.disable_enhanced_monitoring\"](\n name, disable_metrics, region, key, keyid, profile\n )\n if \"error\" in metrics_disabled:\n ret[\"result\"] = False\n comments.append(\n \"Kinesis stream {}: failed to disable enhanced monitoring: {}\".format(\n name, metrics_disabled[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n comments.append(\n \"Kinesis stream {}: enhanced monitoring was disabled for\"\n \" shard-level metrics {}\".format(name, disable_metrics)\n )\n\n if len(disable_metrics) == 0 and len(enable_metrics) == 0:\n comments.append(\n \"Kinesis stream {}: enhanced monitoring did not require change, already\"\n \" set at {}\".format(\n name,\n (\n old_enhanced_monitoring\n if len(old_enhanced_monitoring) > 0\n else \"None\"\n ),\n )\n )\n elif not __opts__[\"test\"]:\n changes_old[\"enhanced_monitoring\"] = (\n old_enhanced_monitoring if len(old_enhanced_monitoring) > 0 else \"None\"\n )\n changes_new[\"enhanced_monitoring\"] = (\n enhanced_monitoring if len(enhanced_monitoring) > 0 else \"None\"\n )\n else:\n comments.append(\n \"Kinesis stream {}: did not configure enhanced monitoring\".format(name)\n )\n\n # Reshard stream if necessary\n min_hash_key, max_hash_key, full_stream_details = __salt__[\n \"boto_kinesis.get_info_for_reshard\"\n ](stream_details)\n old_num_shards = 
len(full_stream_details[\"OpenShards\"])\n\n if num_shards is not None and do_reshard:\n num_shards_matches = old_num_shards == num_shards\n if not num_shards_matches:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n comments.append(\n \"Kinesis stream {}: would be resharded from {} to {} shards\".format(\n name, old_num_shards, num_shards\n )\n )\n else:\n log.info(\n \"Resharding stream from %s to %s shards, this could take a while\",\n old_num_shards,\n num_shards,\n )\n # reshard returns True when a split/merge action is taken,\n # or False when no more actions are required\n continue_reshard = True\n while continue_reshard:\n reshard_response = __salt__[\"boto_kinesis.reshard\"](\n name, num_shards, do_reshard, region, key, keyid, profile\n )\n\n if \"error\" in reshard_response:\n ret[\"result\"] = False\n comments.append(\n \"Encountered error while resharding {}: {}\".format(\n name, reshard_response[\"error\"]\n )\n )\n _add_changes(ret, changes_old, changes_new, comments)\n return ret\n\n continue_reshard = reshard_response[\"result\"]\n\n comments.append(\n \"Kinesis stream {}: successfully resharded to {} shards\".format(\n name, num_shards\n )\n )\n changes_old[\"num_shards\"] = old_num_shards\n changes_new[\"num_shards\"] = num_shards\n else:\n comments.append(\n \"Kinesis stream {}: did not require resharding, remains at {} shards\".format(\n name, old_num_shards\n )\n )\n else:\n comments.append(\n \"Kinesis stream {}: did not reshard, remains at {} shards\".format(\n name, old_num_shards\n )\n )\n\n _add_changes(ret, changes_old, changes_new, comments)\n return ret",
"def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index",
"def redshift_lambda_handler(event, context):\n logging.debug('event: %s', event)\n\n detail = event['detail']\n event_name = detail['eventName']\n creator = get_creator(event)\n\n logger.info('Event type: %s', event_name)\n\n if is_err_detail(logger, detail):\n return False\n\n if event_name == 'CreateCluster':\n logger.debug('%s is creating cluster: %s',\n creator, detail['requestParameters']['clusterIdentifier'])\n\n # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html\n cluster_arn = 'arn:aws:redshift:' + detail['awsRegion'] + ':'\\\n + detail['userIdentity']['accountId'] + ':cluster:'\\\n + detail['requestParameters']['clusterIdentifier']\n short_msg = {\n \"EventName\": event_name,\n \"Creator\": creator,\n \"ResourceArn\": cluster_arn,\n \"TagStatus\": \"pending\",\n \"MaxRetries\": int(os.environ['SFN_MAX_RETRIES']),\n \"Retries\": 0\n }\n\n sfn = Boto3Wrapper.get_client('stepfunctions')\n response = sfn.start_execution(\n stateMachineArn=os.environ['SFN_ARN'],\n name=creator+'-'+event_name+'-'+detail['eventID'],\n input=json.dumps(short_msg)\n )\n\n logger.info('Step Functions start execution: %s', response)\n\n return True",
"def get_initial_bookmarks(config, state, table_name):\n client = dynamodb.get_client(config)\n streams_client = dynamodb.get_stream_client(config)\n\n table = client.describe_table(TableName=table_name)['Table']\n stream_arn = table['LatestStreamArn']\n\n finished_shard_bookmarks = [shard['ShardId'] for shard in get_shards(streams_client, stream_arn)]\n state = singer.write_bookmark(state, table_name, 'finished_shards', finished_shard_bookmarks)\n\n return state",
"def lambda_handler(event, context):\n return",
"def test_stream_name(sdc_builder, sdc_executor, aws, test_name, stream_generator, keep_data):\n builder = sdc_builder.get_pipeline_builder()\n\n # Create dev_raw_data_source with 10 messages\n expected_data = [f'Hello {i}' for i in range(10)]\n source = builder.add_stage('Dev Raw Data Source')\n source.set_attributes(data_format='TEXT',\n raw_data='\\n'.join(expected_data),\n stop_after_first_batch=True)\n\n pipeline = None\n\n # Create Kinesis stream and capture the ShardId\n client = aws.kinesis\n try:\n\n # Create Kinesis Stream\n stream_name = f'{aws.kinesis_stream_prefix}_{stream_generator}'\n # if stream is longer than 128 then select first 128 characters\n stream_name = stream_name[0:128]\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n desc_response = client.describe_stream(StreamName=stream_name)\n shard_id = desc_response['StreamDescription']['Shards'][0]['ShardId']\n\n # Create Kinesis Producer\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[\n {'key': 'AggregationEnabled', 'value': 'false'}])\n\n source >> kinesis_producer\n\n pipeline = builder.build().configure_for_environment(aws)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # read data from Kinesis to assert it is what got ingested into the pipeline\n shard_iterator = client.get_shard_iterator(StreamName=stream_name,\n ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')\n response = client.get_records(ShardIterator=shard_iterator['ShardIterator'])\n msgs_received = [rec['Data'].decode().strip() for rec in response['Records']]\n\n logger.debug('Number of messages received from Kinesis = %d', (len(msgs_received)))\n\n assert msgs_received == expected_data\n\n finally:\n if pipeline and sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)",
"def test_multiple_batch(sdc_builder, sdc_executor, aws, keep_data):\n builder = sdc_builder.get_pipeline_builder()\n\n # Create dev_data_generator with 10 messages\n BATCH_SIZE = 3\n BATCHES = 2\n origin = builder.add_stage('Dev Data Generator')\n origin.set_attributes(batch_size=BATCH_SIZE, delay_between_batches=0,\n fields_to_generate=[{\n \"type\": \"CODE_IMEI\",\n \"field\": \"text\"\n }])\n\n # Create Kinesis stream and capture the ShardId\n client = aws.kinesis\n\n try:\n\n # Create Kinesis Stream\n stream_name = f'{aws.kinesis_stream_prefix}_{get_random_string()}'\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n desc_response = client.describe_stream(StreamName=stream_name)\n shard_id = desc_response['StreamDescription']['Shards'][0]['ShardId']\n\n # Create Kinesis Producer - Aggregation disabled avoids records to be compressed using protobuf\n # https://github.com/awslabs/amazon-kinesis-producer/issues/80 shows how to configure Kinesis\n # to avoid compression.\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(data_format='TEXT',\n binary_field_path='/text',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[\n {'key': 'AggregationEnabled', 'value': 'false'}])\n\n wiretap = builder.add_wiretap()\n\n origin >> [kinesis_producer, wiretap.destination]\n\n pipeline = builder.build().configure_for_environment(aws)\n pipeline.configuration['rateLimit'] = 1\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', BATCH_SIZE * BATCHES, timeout_sec=120)\n sdc_executor.stop_pipeline(pipeline)\n\n # read data from Kinesis to assert it is what got ingested into the pipeline\n shard_iterator = client.get_shard_iterator(StreamName=stream_name,\n ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')[\"ShardIterator\"]\n\n out = client.get_records(ShardIterator=shard_iterator)\n\n # Records read using kinesis client are retrieved and decoded\n response = [rec['Data'].decode() for rec in out['Records']]\n\n logger.debug('Number of messages received from Kinesis = %d', (len(response)))\n assert response == [record.field['text'].value for record in wiretap.output_records]\n\n finally:\n if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)",
"def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)",
"def test_kinesis_preserve_record_order(sdc_builder, sdc_executor, aws, keep_data):\n expected_data = [f'Hello {i}' for i in range(100)]\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data='\\n'.join(expected_data),\n stop_after_first_batch=True\n )\n\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n dev_raw_data_source >> kinesis_producer\n pipeline = builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS ...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream ...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n\n logger.debug(f'Number of messages received from Kinesis = {len(received_data)}')\n assert received_data == expected_data\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)",
"def batch_lambda_handler(event, lambda_context):\n LOGGER.info('Invoked with event %s', json.dumps(event))\n\n s3_enumerator = S3BucketEnumerator(\n os.environ['S3_BUCKET_NAME'], event.get('S3ContinuationToken'))\n sqs_batcher = SQSBatcher(os.environ['SQS_QUEUE_URL'], int(os.environ['OBJECTS_PER_MESSAGE']))\n\n # As long as there are at least 10 seconds remaining, enumerate S3 objects into SQS.\n num_keys = 0\n while lambda_context.get_remaining_time_in_millis() > 10000 and not s3_enumerator.finished:\n keys = s3_enumerator.next_page()\n num_keys += len(keys)\n for key in keys:\n sqs_batcher.add_key(key)\n\n # Send the last batch of keys.\n sqs_batcher.finalize()\n\n # If the enumerator has not yet finished but we're low on time, invoke this function again.\n if not s3_enumerator.finished:\n LOGGER.info('Invoking another batcher')\n LAMBDA_CLIENT.invoke(\n FunctionName=os.environ['BATCH_LAMBDA_NAME'],\n InvocationType='Event', # Asynchronous invocation.\n Payload=json.dumps({'S3ContinuationToken': s3_enumerator.continuation_token}),\n Qualifier=os.environ['BATCH_LAMBDA_QUALIFIER']\n )\n\n return num_keys",
"def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n lambdautils.utils.send_to_kinesis_stream(search_events, \"dummy_stream\")\n boto3_client(\"kinesis\").put_records.call_count == 1",
"def parallel_execute(event):\n\n return [\n sfn_client.start_execution(stateMachineArn=STATE_MACHINE_ARN, input=str(json.dumps(sfn_input)))['executionArn']\n for sfn_input in range(len(event))]",
"def runs_on_aws_lambda():\n return 'AWS_SAM_LOCAL' not in os.environ and 'LAMBDA_TASK_ROOT' in os.environ",
"def test_kinesis_consumer(sdc_builder, sdc_executor, aws):\n # build consumer pipeline\n application_name = get_random_string(string.ascii_letters, 10)\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n kinesis_consumer = builder.add_stage('Kinesis Consumer')\n kinesis_consumer.set_attributes(application_name=application_name, data_format='TEXT',\n initial_position='TRIM_HORIZON',\n stream_name=stream_name)\n\n trash = builder.add_stage('Trash')\n\n kinesis_consumer >> trash\n\n consumer_origin_pipeline = builder.build(title='Kinesis Consumer pipeline').configure_for_environment(aws)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n # run pipeline and capture snapshot\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n\n expected_messages = set('Message {0}'.format(i) for i in range(10))\n # not using PartitionKey logic and hence assign some temp key\n put_records = [{'Data': exp_msg, 'PartitionKey': '111'} for exp_msg in expected_messages]\n client.put_records(Records=put_records, StreamName=stream_name)\n\n # messages are published, read through the pipeline and assert\n snapshot = sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n output_records = [record.field['text'].value\n for record in snapshot[kinesis_consumer.instance_name].output]\n\n assert set(output_records) == expected_messages\n finally:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name) # Stream operations are done. Delete the stream.\n logger.info('Deleting %s DynamoDB table on AWS ...', application_name)\n aws.dynamodb.delete_table(TableName=application_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if the path holder contains a shot render. | def test(cls, pathHolder, parentCrawler):
if not super(ShotRenderCrawler, cls).test(pathHolder, parentCrawler):
return False
renderType = pathHolder.baseName().split(".")[0].split("_")[-1]
return renderType == "sr" | [
"def is_screenshot(self):\n\n if os.path.basename(self.path).endswith('.png'):\n try:\n if magic.from_file(self.path, mime=True) == 'image/png':\n return True\n except:\n pass\n\n return False",
"def point_is_shot(self, point: Point):\n return point in self.shot_locations",
"def check_shot_existence(self):\n shots_with_no_task = []\n for shot in self.shot_list:\n shot_name = shot.full_shot_name",
"def test(cls, pathHolder, parentCrawler):\n if not super(TurntableCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n renderType = pathHolder.baseName().split(\".\")[0].split(\"_\")[-1]\n\n return renderType == \"tt\"",
"def test(cls, pathHolder, parentCrawler):\n if not super(Jpg, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() == 'jpg'",
"def expected_shot_folder(self, sequence_folder, shot_folder):\n\n if not shot_folder.startswith(\"sq\"):\n return False\n if os.path.exists(\n os.path.join(self.base_path, sequence_folder, shot_folder, self.camera_name)\n ):\n return False\n if not os.path.exists(\n os.path.join(self.base_path, sequence_folder, shot_folder, \"ANI\")\n ):\n return False\n else:\n return True",
"def is_shot(event):\n event_id = event['eventId']\n return event_id == 10",
"def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()",
"def player_shot(self):\n x, y = self.display.get_input()\n if self.ai_board.valid_target(x, y):\n self.ai_board.shoot(x, y)\n return True\n else:\n return False",
"def is_renderable(self):\n name = self.file.name\n return (\n name is not None\n and name.endswith(\".gz\")\n and not name.endswith(\".ps.gz\")\n and not name.endswith(\".dvi.gz\")\n )",
"def screenshot(self):\n return bool(self.media_subtypes & Photos.PHAssetMediaSubtypePhotoScreenshot)",
"def hasPileup(self):\n return self._putype is not None",
"def is_shot_related_version(self, version):\n return self.get_shot(version) is not None",
"def is_shot_valid(self, shot):\n a = self.check_position(shot.opponent)\n b = self.check_shot_direction(shot)\n c = self.check_shot_on_target(shot)\n return a and b and c",
"def _is_repeatedshot_type(cls, object_):\n return (type(object_).__name__ in ['RepeatedShot'])",
"def IsScreenshotsHooked(self) -> bool:\n return self.steam.IsScreenshotsHooked()",
"def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)",
"def test_render_exists(self):\n this_render = self.renders[0]\n this_render.save()\n self.assertTrue(self.renders[0])",
"def is_renderable(self):\n return self.source_file and self.source_file.is_renderable()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find links in JSON-compatible data. | def find_links(obj):
if isinstance(obj, dict):
for key, value in obj.iteritems():
for url in find_links(value):
yield url
elif isinstance(obj, list):
for item in obj:
for url in find_links(item):
yield url
else:
try:
if is_link(str(obj)):
yield obj
except Exception:
pass | [
"def get_data_urls(response):\n return [link['href'] for link in response.json()['links'] if link.get('rel', 'data') == 'data']",
"def extract_links(input, output):\n rspecs = json.load(input)\n links = _extract_links(rspecs)\n output.write(json.dumps(links, sort_keys=True, indent=4, default=serialize_sets_as_lists))",
"def LinksReader(data):\n data = data.replace(\"\\'\", \"\\\"\")\n\n try: \n json_data = json.loads(data)\n return json_data\n except ValueError: # includes simplejson.decoder.JSONDecodeError\n logging.info(\"Links json is invalid\")\n return None\n #json_data = ast.literal_eval(data)",
"def get_urls(self, data):\n data = json.loads(data)\n urls = []\n for article in data['articles']:\n urls.append(article['url'])\n return urls",
"def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links",
"def get_nodes_links(data):\r\n nodes = {}\r\n for node in data['nodes']:\r\n nodes[node['id']] = node['label']\r\n\r\n for link in data['links']:\r\n print('Source: {0}({1}) Target: {2}({3}) Status: {4}'.format(\r\n nodes[link['source']], link.get('startPortName', ''),\r\n nodes[link['target']], link.get('endPortName', ''),\r\n link['linkStatus']\r\n ))",
"def _all_js_links(self) -> Iterator[Set[str]]:\n return (result['js_links'] for result in self.results)",
"async def crawl(self, text):\n urls = set()\n try:\n load = json.loads(text)\n for keys in load.keys(): # Iterate through keys of dict.\n val = load.get(keys)\n if isinstance(val, int) or isinstance(val, dict) or val is None:\n continue\n if isinstance(val, list):\n if len(val) == 0: # Make sure not indexing an empty list.\n continue\n val = val[0] # First value should be dict.\n if isinstance(val, dict): # Sanity check.\n for key in val.keys():\n value = val.get(key)\n if isinstance(value, str) and value != '' and 'https://' in value or 'http://' in value:\n urls.add(value)\n if isinstance(val, str) and val != '' and 'https://' in val or 'http://' in val:\n urls.add(val)\n tmp = set()\n for url in urls:\n if '<' in url and 'href=' in url: # Format is <href=\"https://www.website.com\"/>\n equal_index = url.index('=')\n true_url = ''\n for ch in url[equal_index + 1:]:\n if ch == '\"':\n tmp.add(true_url)\n break\n true_url += ch\n else:\n if url != '':\n tmp.add(url)\n return tmp\n except Exception as e:\n print(f'Exception occurred: {e}')\n return []",
"def find(fp, links):\n find_links(fp, links)\n build_links_dicts(links)",
"def _get_urls_from_search() -> None:\n\n try:\n with open(os.path.join('output', 'urls.json')) as json_file:\n urls_json = json.load(json_file)\n\n # Loads URLs that the user wants to ignore\n try:\n with open(os.path.join('output', 'ignored_urls.txt')) as txt_file:\n urls_txt = {url.strip() for url in txt_file.readlines()}\n except FileNotFoundError:\n urls_txt = set()\n\n # Get property URLs for each Search URL and place them under their\n # respective Search URL. Coverts result list -> set -> list to remove\n # any potential duplicates. Should be none but don't want any\n # chance of making redundant get request for the analysis.\n for search_url in urls_json.setdefault('Search', dict()):\n\n # Gets all URLs from the search link. Any duplicate properties\n # compared to urls in 'Property' in urls.json is removed. This\n # way if user deletes a specific property to track,\n # the analysis of that property can be easily deleted as well.\n urls_search = set(get_all_urls(search_url))\n urls_properties = set(urls_json.get('Property', set()))\n urls_search.difference_update(urls_properties)\n urls_search.difference_update(urls_txt)\n\n # Converts urls_search back to list to add to json.\n # It does not accept sets.\n urls_json['Search'][search_url] = list(urls_search)\n\n with open(os.path.join('output', 'urls.json'), 'w') as json_file:\n json.dump(urls_json, json_file, indent=4)\n\n except (FileNotFoundError, json.JSONDecodeError):\n with open(os.path.join('output', 'urls.json'), 'w') as json_file:\n json.dump({'Search': {}, 'Property': {}}, json_file, indent=4)",
"def getDiscussionLinks(self, json_info, tag_filter=[]):\n discussion_links = []\n for t in json_info['document']['data']:\n if(t['type'] == 'discussions'):\n id = (t['id'])\n slug = t['attributes']['slug']\n tags = []\n for tag in t['relationships']['tags']['data']:\n tags.append(int(tag['id']))\n \n if(len(tag_filter) == 0 or len(list(set(tag_filter) & set(tags))) > 0):\n discussion_links.append(\"https://fbtag.net/d/{id}-{slug}\".format(id=id, slug=slug))\n else:\n logging.debug(msg=(tags, 'not in filter ', tag_filter, 'link', id, slug))\n pass\n \n return discussion_links",
"def parse_json_export(json_file):\n\n json_file.seek(0)\n links = json.load(json_file)\n json_date = lambda s: datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')\n\n for link in links:\n # example line\n # {\"href\":\"http:\\/\\/www.reddit.com\\/r\\/example\",\"description\":\"title here\",\"extended\":\"\",\"meta\":\"18a973f09c9cc0608c116967b64e0419\",\"hash\":\"910293f019c2f4bb1a749fb937ba58e3\",\"time\":\"2014-06-14T15:51:42Z\",\"shared\":\"no\",\"toread\":\"no\",\"tags\":\"reddit android\"}]\n if link:\n # Parse URL\n url = link.get('href') or link.get('url') or link.get('URL')\n if not url:\n raise Exception('JSON must contain URL in each entry [{\"url\": \"http://...\", ...}, ...]')\n\n # Parse the timestamp\n ts_str = str(datetime.now().timestamp())\n if link.get('timestamp'):\n # chrome/ff histories use a very precise timestamp\n ts_str = str(link['timestamp'] / 10000000) \n elif link.get('time'):\n ts_str = str(json_date(link['time'].split(',', 1)[0]).timestamp())\n elif link.get('created_at'):\n ts_str = str(json_date(link['created_at']).timestamp())\n elif link.get('created'):\n ts_str = str(json_date(link['created']).timestamp())\n elif link.get('date'):\n ts_str = str(json_date(link['date']).timestamp())\n elif link.get('bookmarked'):\n ts_str = str(json_date(link['bookmarked']).timestamp())\n elif link.get('saved'):\n ts_str = str(json_date(link['saved']).timestamp())\n \n # Parse the title\n title = None\n if link.get('title'):\n title = link['title'].strip() or None\n elif link.get('description'):\n title = link['description'].replace(' — Readability', '').strip() or None\n elif link.get('name'):\n title = link['name'].strip() or None\n\n yield {\n 'url': url,\n 'timestamp': ts_str,\n 'title': title,\n 'tags': link.get('tags') or '',\n 'sources': [json_file.name],\n }",
"def get_links(self, html):\r\n raise NotImplementedError()",
"def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]",
"def readLinkoJson(file):\n with open(file, 'r') as jsonFile:\n preLinko = json.load(jsonFile)\n\n linko = Linkograph([], preLinko[0])\n\n for entry in preLinko[1:]:\n linko.append((set(entry[0]), set(entry[1]), set(entry[2])))\n linko.uuids.append(entry[3])\n\n return linko",
"def _merge_links(self, links_jsons: JSONs) -> JSON:\n root, *stitched = links_jsons\n if stitched:\n merged = {\n 'links_id': root['links_id'],\n 'version': root['version']\n }\n for common_key in ('project_id', 'schema_type'):\n merged[common_key] = one({row[common_key] for row in links_jsons})\n source_contents = [row['content'] for row in links_jsons]\n # FIXME: Explicitly verify compatible schema versions for stitched subgraphs\n # https://github.com/DataBiosphere/azul/issues/3215\n schema_type = 'links'\n schema_version = '3.0.0'\n schema_url = furl(url='https://schema.humancellatlas.org',\n path=('system', schema_version, schema_type))\n merged_content = {\n 'schema_type': schema_type,\n 'schema_version': schema_version,\n 'describedBy': str(schema_url),\n 'links': sum((sc['links'] for sc in source_contents), start=[])\n }\n merged['content'] = merged_content # Keep result of parsed JSON for reuse\n merged['content_size'] = len(json.dumps(merged_content))\n assert merged.keys() == one({\n frozenset(row.keys()) for row in links_jsons\n }), merged\n assert merged_content.keys() == one({\n frozenset(sc.keys()) for sc in source_contents\n }), merged_content\n return merged\n else:\n return root",
"def get_all_links(self):\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)",
"def sdf_ref_properties(json_data, url):\r\n if \"https\" in url:\r\n ref_json_dict = load_json_schema_fromURL(url)\r\n sdf_make_reference_external(ref_json_dict, url)\r\n elif \"http\" in url:\r\n ref_json_dict = load_json_schema_fromURL(url)\r\n sdf_make_reference_external(ref_json_dict, url)\r\n else:\r\n ref_json_dict = json_data\r\n\r\n keyValue = url.split(\"/\")[-1]\r\n\r\n output = \"\"\r\n lookup = None\r\n try:\r\n lookup = ref_json_dict['definitions'][keyValue]\r\n except:\r\n print(\"!!!!sdf_ref_properties : error in finding\", keyValue, flush=True)\r\n\r\n output += sdf_properties_block(lookup)\r\n return output",
"def search_links(soup: BeautifulSoup, url: str, rel: bool = False) -> List[str]:\n links = [] # type: List[str]\n if rel:\n link_tags = soup.find_all(\"link\", rel=\"alternate\")\n else:\n link_tags = soup.find_all(\"link\")\n for link in link_tags:\n if link.get(\"type\") in [\n \"application/rss+xml\",\n \"text/xml\",\n \"application/atom+xml\",\n \"application/x.atom+xml\",\n \"application/x-atom+xml\",\n \"application/json\",\n ]:\n links.append(urljoin(url, link.get(\"href\", \"\")))\n\n return links"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the correct backend driver for data persistence. | def _load_driver(backend, **kargs):
bk_module = importlib.import_module('backend', __package__)
driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
return driver_cls(**kargs) | [
"def _load_driver(self):\r\n name = self.config.backend\r\n if not name:\r\n #Raise some exception, bail out we are done.\r\n raise RuntimeError('config item db.backend not set')\r\n if '.' in name:\r\n importname = name\r\n else:\r\n importname = 'pyaib.dbd.%s' % name\r\n basename = name.split('.').pop()\r\n driver_ns = import_module(importname)\r\n for name, cls in inspect.getmembers(driver_ns, inspect.isclass):\r\n if hasattr(cls, CLASS_MARKER):\r\n #Load up the driver\r\n self._driver = cls(self.config.driver.setdefault(basename, {}))\r\n break\r\n else:\r\n raise RuntimeError('Unable to instance db driver %r' % name)",
"def _load_backend(self, backend_type, config):\n backend_cls = self._backends[backend_type]\n return backend_cls(config, self.root_path, self.user)",
"def connectToBackend(path, backend):\n if(backend == \"sqlite\"):\n return ldb.backend.SQLiteBackend(\"sqlite:///\"+path)\n else:\n raise Exception(\"Unknown backend '%s'\" % backend)",
"def set_backend(self, backend):\n if backend not in AVAILABLE_BACKENDS:\n raise StorageError(f'Unrecognized backend {backend}; use one of {AVAILABLE_BACKENDS}')\n if backend == 'tinydb':\n LOGGER.debug(\"Using TinyDB database as requested for %s\", self.name)\n self._backend = DB_TINYDB\n elif backend == 'sqlite':\n LOGGER.debug(\"Using SQLite database as requested for %s\", self.name)\n self._backend = DB_SQLITE\n elif backend == 'auto':\n if self._sqlite_storage.database_exists():\n LOGGER.debug(\"Using SQLite database in AUTO mode because one already exists for %s\", self.name)\n self._backend = DB_SQLITE\n else:\n LOGGER.debug(\"Using TinyDB (default) in AUTO because no database already exists for %s\", self.name)\n self._backend = DB_TINYDB",
"def _load_driver_module(self):\n driver = get_dbapi_module(self.driver_module)\n exceptions.register(driver.DatabaseError)\n return driver",
"def get_storage_backend(self):\n return self.client.info()['Driver']",
"def load_backend(self, alias):\n # Prepares the settings\n self.ensure_server_defaults(alias)\n self.prepare_server_test_settings(alias)\n\n # Gets the settings for `alias`\n server = self.servers[alias]\n indices = self.get_server_indices(server)\n\n # Loads the backend\n backend_class = load_backend(server['ENGINE'])\n\n return backend_class(alias, server, indices)",
"def _switch_backend(self, model_db):\n if model_db['backend_name'] != self.backend_name:\n backend = switch_backend(model_db['backend_name'])\n self.backend_name = backend.__name__\n self.backend_version = None\n if self.backend_name == 'keras':\n from ..backend import keras_backend\n self.backend = keras_backend\n elif self.backend_name == 'sklearn':\n from ..backend import sklearn_backend\n self.backend = sklearn_backend\n if hasattr(backend, '__version__'):\n check = self.backend_version != backend.__version__\n self.backend_version = backend.__version__\n if check and self.verbose > 0: # pragma: no cover\n sys.stderr.write('Warning: the backend versions'\n 'do not match.\\n') # pragma: no cover",
"def test_load_backend(self):\n jsonpickle.load_backend('simplejson', 'dumps', 'loads', ValueError)",
"def init_backend(experiment_id=None, backend=None):\n if issubclass(type(backend), _BaseBackend):\n return backend\n\n if experiment_id is None:\n msg = ('Experiment ID is not provided. This is only allowed '\n 'in package testing (otherwise, it is a package bug)')\n logger.warning(msg)\n experiment_id = 'daskperiment_package_test'\n\n if backend == 'local':\n # LocalBackend\n dname = '{}'.format(experiment_id)\n from daskperiment.config import _CACHE_DIR\n backend = _CACHE_DIR / dname\n\n if maybe_redis(backend):\n from daskperiment.backend.redis import RedisBackend\n return RedisBackend(experiment_id, backend)\n elif maybe_mongo(backend):\n from daskperiment.backend.mongo import MongoBackend\n return MongoBackend(experiment_id, backend)\n elif isinstance(backend, pathlib.Path):\n from daskperiment.backend.local import LocalBackend\n return LocalBackend(experiment_id, backend)\n else:\n raise NotImplementedError(backend)",
"def preferred_backend_module():\n return _available_backends[_preferred_backend]",
"def get_backend(self):\n return self.analyze_db_task(constants.TRAIN_DB).backend",
"def load_storage_driver(conf, cache, control_mode=False):\n\n mode = 'control' if control_mode else 'data'\n driver_type = 'marconi.queues.{0}.storage'.format(mode)\n\n try:\n mgr = driver.DriverManager(driver_type,\n conf['drivers'].storage,\n invoke_on_load=True,\n invoke_args=[conf, cache])\n\n return mgr.driver\n\n except RuntimeError as exc:\n LOG.exception(exc)\n raise errors.InvalidDriver(exc)",
"def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver",
"def model(self):\r\n from rapidsms.models import Backend\r\n backend, _ = Backend.objects.get_or_create(name=self.name)\r\n return backend",
"def backend_b():\n diffsync = BackendB(name=\"backend-b\")\n diffsync.load()\n return diffsync",
"def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise UnknownBackend(name)\n try:\n res = _backends[name]()(**options)\n except Exception as e:\n raise LoadingError(name) from e\n return res",
"def model(self):\n from rapidsms.models import Backend\n backend, _ = Backend.objects.get_or_create(name=self.name)\n return backend",
"def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the table name to save data from the URL. | def _get_table_name(url):
try:
return urlparse(url).path.strip('/').split('/')[1]
except IndexError:
return None | [
"def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]",
"def table_name(self) -> str:\n return pulumi.get(self, \"table_name\")",
"def get_table_name(self):\n return self._config['table']",
"def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")",
"def table_name(self):\n path = os.path.splitext(self.file_name)[0]\n return os.path.split(path)[-1]",
"def _table_name(self):\n return self._query.split(\"FROM\")[-1].strip().split(\" \")[0]",
"def table_name() -> str:\n pass",
"def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")",
"def test_get_table_name(self):\n tab = SqaExtractor(\n staging_table=self.staging_table,\n history_table=self.history_table,\n logical_pk=[\"id\", \"number\"],\n load_strategy='database_table',\n con=get_engine()).get_table_name()\n self.assertEqual(tab, self._table_name())",
"def retrieve_table(self):\n if self.use_local_table:\n self.retrieve_table_local()\n else:\n self.retrieve_table_from_url()",
"def table_name(self):\n return self._table_name",
"def get_table_name(cls):\n return cls.__name__",
"def get_table_url(self, _id, table_id, data_type=\"json\"):\n coll = self._get_tables_coll(\"juchao\" if self.sources == \"finance.juchao.item\" else self.sources)\n all_items = coll.find({\"fileId\": self._id_validate(_id)})\n for item in all_items:\n if item[\"_id\"] == table_id:\n if data_type == \"json\":\n return item[\"data_file\"]\n else:\n return item[\"html_file\"]",
"def table_name(cls) -> str:\n return cls.TABLE",
"def create_table_url(self, table_id):\n return self.base_url + \"/table?table=\" + str(table_id)",
"def get_tables(in_tables):\n\n # input file\n table = in_tables[0]\n in_name = table[\"full_path\"]\n in_destination = table[\"destination\"]\n logging.info(\"Data table: \" + str(in_name))\n logging.info(\"Input table source: \" + str(in_destination))\n\n return in_name",
"def tablename(entity) -> str:\n return entity.__tablename__",
"def get_table_name(self, tier: str, tb: str) -> str:\n template = self.table_format[tier]\n fm = string.Formatter()\n parse_arr = np.array(list(fm.parse(template)))\n names = list(parse_arr[:, 1])\n if len(names) > 0:\n keyword = names[0]\n args = {keyword: tb}\n table_name = template.format(**args)\n else:\n table_name = template\n return table_name",
"def retrieve_table_from_url(self):\n table_list = pd.read_html(self.url)\n self.raw_table = table_list[0]\n self.format_table_from_url()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save data from the response to the backend persistence driver. Only save the detail item from a URL, filtering out the overall items. | def save(self, response):
url = response.url
if self.item_url(url):
table_name = self._get_table_name(url)
if table_name:
data = response.json()
self.backend.save(table_name, data) | [
"def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) .detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list",
"def save_data(self, soup, url):\n # get the web page title\n title = soup.find('title').string\n # get the h1 tag of the page\n h1 = soup.find('h1')\n # checks if there is a h1 tag in the page\n # because is possible that a product url redirects to\n # another page.\n # In this way, only a valid product will be save.\n if h1:\n product_name = h1.contents[0].string\n page_values = PageValues(product_name, title, url, self.__csv_file_name)\n page_values.save_csv()\n else:\n # Shows the web page that have some problem.\n print('It was not possible to open {}'.format(url))",
"def crawl_detail_and_save(self):\n\n async def detail_crawl(url, article_txid):\n print(f'Send request .. {url}')\n\n async with aiohttp.ClientSession() as sess:\n async with sess.get(url) as res:\n r = await res.text() # 다음 TASK 를 실행하는 지점 / 응답이 온 순서대로 작업 실행하는 지점.\n soup = BeautifulSoup(r, 'lxml')\n\n print(f'Get response .. {url}')\n\n title = soup.find('meta', {'property': 'og:title'}).get('content')\n content = soup.select_one('div.wrap_body').prettify()\n media_name = soup.find('meta', {'name': 'article:media_name'})['content']\n\n try: # 구독자 수 크롤링\n num_subscription = int(''.join(re.findall(\n r'\\w',\n soup.select_one('span.num_subscription').text\n )))\n\n # 브런치에서 구독자 수가 1만을 초과하는 경우 '4.3만' 과 같이 표기된다.\n # 이때 int 형변환 시 '.' 과 '만' 때문에 ValueError 발생하므로 적절한 예외 처리 필요\n # - '만' 을 '0000'으로 replace\n # - 소수점 밑자리 숫자는 생략 처리\n except ValueError:\n char_num_subscription = soup.select_one('span.num_subscription').text\n trunc_part = ''.join(re.findall(r'.\\d+', char_num_subscription))\n num_subscription = int(char_num_subscription.replace('만', '0000').replace(trunc_part, ''))\n\n published_time = re.findall(\n r'(\\S+)\\+',\n soup.find('meta', {'property': 'article:published_time'})['content'],\n )[0]\n user_id, text_id = re.findall(\n r'/@(\\S+)/(\\d+)',\n soup.find('meta', {'property': 'og:url'})['content']\n )[0]\n\n writer, _ = Writer.objects.update_or_create(\n user_id=user_id,\n defaults={\n 'media_name': media_name,\n 'num_subscription': num_subscription\n }\n )\n\n # article 생성\n article_without_keyword = Article.objects.create(\n title=title,\n content=content,\n article_txid=article_txid,\n published_time=published_time,\n text_id=text_id,\n writer=writer\n )\n\n # article 에 keyword 추가(ManyToMany) - keyword 검색일 경우에 한하여\n if self.keyword:\n article_without_keyword.keyword.add(self.obj_keyword)\n\n print(f'저장 완료 {url}')\n\n # futures 에 Task 할당(url, 중복검사 완료된 checked_article_txid)\n async def create_task_async():\n \"\"\"\n TASK 객체를 생성한 후, gather() 을 통해 TASK(FUTURE) 전달 및 코루틴 실행\n \"\"\"\n brunch_url = 'https://brunch.co.kr/'\n\n # article_txid 문자열 변환을 통해 url 링크 생성(detail_crawl 의 인자가 됨)\n futures = [\n asyncio.ensure_future(\n detail_crawl(\n url=brunch_url + '@@' + article_txid.replace('_', '/'),\n article_txid=article_txid\n )\n ) for article_txid in self.cleand_txid_list\n ]\n\n await asyncio.gather(*futures)\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(create_task_async())",
"def store(self, details):",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()",
"def parse_detail(self, response):\n try:\n self.logger1.info(\"start to parse detail page: {}\".format(response.url))\n request_count = response.meta.get('request_count', 1)\n keys = response.xpath('//p[@class=\"tab1-p-left\"]/text()').extract()\n assert keys, \"this page has changed, please check xpath: {]\".format(response.url)\n keys = [re.sub(r':|:', '', key) for key in keys]\n values = response.xpath('//p[@class=\"tab1-p-right\"]').xpath('string()').extract()\n result_dict = dict(zip(keys, values))\n for key, value in result_dict.items():\n if key.endswith(('期', '戳')):\n # 许可生效期、许可截止期、公示日期 等等\n _, value = convert_formal_date(value, need_time=True)\n if len(value) == 10:\n value += ' 00:00:00'\n result_dict[key] = value\n\n # 如果没有抓到有效的数据,则重新请求该详情页,至多10次\n if not clean_all_space(''.join(result_dict.values())):\n if request_count < 10:\n self.logger1.warning(\"the parsed data was empty, so request again!\")\n yield Request(response.url, callback=self.parse_detail, errback=self.err_parse_detail,\n dont_filter=True, meta={'request_count': request_count + 1})\n else:\n self.logger1.warning(\"this page has requested more than 10 times, ignore it!\")\n else:\n yield self.handle_result(response, result_dict)\n self.logger1.info(\"store data to database successfully!\")\n except:\n err_msg = traceback.format_exc()\n self.logger.warning(\"failed to parse detail page, url {url} error:{err_msg}\"\n .format(url=response.url, err_msg=err_msg))",
"def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content",
"def save(self, url):\n self.database.insert({\n 'url': url,\n 'last_crawled': None,\n 'valid': True,\n 'sub_urls': [],\n })",
"def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")",
"def process_item(self, item, spider):\n session = self.Session()\n hotel = Hotel()\n hotel.hotel_name = item[\"hotel_name\"]\n hotel.address = item[\"address\"],\n hotel.link = item[\"link\"]\n hotel.quality_star = item[\"quality_star\"]\n hotel.rating = item[\"rating\"]\n hotel.number_people_rating = item[\"number_people_rating\"]\n hotel.description = item[\"description\"]\n hotel.distance = item[\"distance\"]\n hotel.image = item[\"image\"]\n hotel.price = item[\"price\"]\n hotel.city_id = item[\"city_id\"]\n hotel.hotel_id = item[\"hotel_id\"]\n try:\n session.add(hotel)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item",
"def save(self, scraper):\n entry = HistoryEntry(scraper.url, scraper.response)\n self.load_history_entries(entry)",
"def process_item(self, item, spider):\n if item is None:\n raise DropItem(\"Something went wrong in parsing data...\")\n try:\n self.curr.execute(\n SqlStatements.insert_new_real_estate(),\n (\n item['listing_type'],\n item['property_type'], \n item['price'], \n item['location_city'], \n item['location_city_district'], \n item['area_property'],\n item['area_land'],\n item['construction_type'],\n item['num_floors_building'],\n item['apartment_floor'],\n item['registered'],\n item['heating_type'],\n item['num_rooms'],\n item['num_bathrooms'],\n item['source']\n )\n )\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n self._log_progress()\n return item",
"def process_result(rep, item):\n if item.config.option.TESTY_SAVE_URL:\n make_db(item)\n\n files, json_files = result_files(item)\n\n doc = build_report_doc(rep, json_files, item)\n\n attach_files(doc, files, item)",
"def save_to_items(self):\n Data.add_data(self.item_data())",
"def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError",
"def save_title_details_data(source_file, service, new_file, api_key1, api_key2):\n data = fetch_title_details(source_file, service, api_key1, api_key2)\n with open(new_file, 'w') as outfile:\n json.dump(data, outfile)",
"def process_item(self, item, spider):\n session = self.Session()\n article = Article()\n restaurant = Restaurant()\n\n # populate article\n article.url = item['article_url']\n article.title = item['article_title']\n article.datetime = item['article_datetime']\n \n # populate restaurant\n restaurant.name = item['restaurant_name']\n restaurant.slug = item['restaurant_slug']\n restaurant.address = item['restaurant_address']\n restaurant.googlemaps_url = item['restaurant_googlemaps']\n restaurant.googlemaps_id = parse_googlemaps_id(restaurant.googlemaps_url)\n restaurant.lat = parse_lat(restaurant.googlemaps_url)\n restaurant.lng = parse_lng(restaurant.googlemaps_url)\n\n # determine if new article\n exist_article = session.query(Article).filter_by(url = article.url).first()\n if exist_article: \n article = exist_article\n\n # determine if new restaurant\n exist_restaurant = session.query(Restaurant).filter_by(slug = restaurant.slug).first()\n if exist_restaurant: \n restaurant = exist_restaurant\n if article not in restaurant.articles: \n restaurant.articles.append(article)\n else:\n # geocode for lat lng if necessary\n if restaurant.googlemaps_id: \n restaurant.lat, restaurant.lng, restaurant.address = convert_id(restaurant.googlemaps_id)\n # add article to restaurant.articles\n restaurant.articles.append(article)\n\n try:\n session.add(restaurant)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item",
"def save_item(item):\n existing_items = get_items()\n updated_items = [item if item['id'] == existing_item['id'] else existing_item for existing_item in existing_items]\n\n session['items'] = updated_items\n\n return item"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a keyvault with 0 access policies is processed properly and doesn't raise an exception. | def test_whitelist_zero_access_policies(self):
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault2*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['account1@sample.com']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 0) | [
"def test_reject_units_when_auth_keys_is_empty(self):\n self._propose('my.config.unit', 'myvalue')\n\n self._expect_get('hashblock.units.vote.authorized_keys')\n self._expect_get('hashblock.units.vote.approval_threshold')\n\n self._expect_invalid_transaction()",
"def test_get_authz_file_empty_raises(self):\n self.env.config.set('authz_policy', 'authz_file', '')\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)",
"def test_authorized_keys_accept_no_approval_threshhold(self):\n self._propose(\"foo.bar.count\", \"1\")\n\n self._expect_get('hashblock.units.vote.authorized_keys',\n 'some_key,' + self._public_key)\n self._expect_get('hashblock.units.vote.approval_threshold')\n\n # check the old unit and set the new one\n self._expect_get('foo.bar.count')\n self._expect_set('foo.bar.count', '1')\n\n self._expect_add_event('foo.bar.count')\n\n self._expect_ok()",
"def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)",
"async def test_get_access_requests_no_envars(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n await self.inst._get_access_requests(\n \"test-container\"\n )\n self.sys_exit_mock.assert_called_once()",
"def test_excess_auth():\n # Disable validators so we can test transaction is at work, not validators\n table = CapTable(validators=[])\n pg = Person(\"Peter Gregory\")\n gb = Person(\"Gavin Belson\")\n table.record(None, CommonStock.auth(1000))\n table.record(None, CommonStock.issue(holder=pg, amount=500))\n with pytest.raises(AssertionError):\n table.record(None, CommonStock.issue(holder=gb, amount=501))",
"def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access",
"def test_invalid_access_key(self):\r\n data = {\r\n \"EdX-ID\": self.receipt_id,\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test testing:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Access key invalid', response.content)\r\n self.assertEqual(response.status_code, 400)",
"def test_set_value_empty_authorized_keys(self):\n self._propose(\"hashblock.units.vote.authorized_keys\", \"\")\n\n self._expect_get('hashblock.units.vote.authorized_keys',\n self._public_key)\n self._expect_get('hashblock.units.vote.approval_threshold')\n\n self._expect_invalid_transaction()",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"price-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_allow_set_authorized_keys_when_initially_empty(self):\n self._propose(\"hashblock.units.vote.authorized_keys\",\n self._public_key)\n\n self._expect_get('hashblock.units.vote.authorized_keys')\n self._expect_get('hashblock.units.vote.approval_threshold')\n\n # Check that it is set\n self._expect_get('hashblock.units.vote.authorized_keys')\n self._expect_set('hashblock.units.vote.authorized_keys',\n self._public_key)\n\n self._expect_add_event('hashblock.units.vote.authorized_keys')\n\n self._expect_ok()",
"def test_act_bad_policy(self) -> None:\n self.aea._skills_exception_policy = \"non exists policy\" # type: ignore\n self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method\n\n with pytest.raises(AEAException, match=r\"Unsupported exception policy.*\"):\n self.aea.start()\n\n assert not self.aea.is_running",
"def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403",
"def test_get_policies(self):\n pass",
"def test__check_policy_fail(self):\n # Disable W0212(protected-access)\n # pylint: disable=W0212\n mock_service_class = namedtuple('MockSvc', ['name'])\n mock_policy = monitor.MonitorRestartPolicy()\n mock_policy._service_exits_log = self.root\n mock_policy._service = mock_service_class('mock_service')\n mock_policy._policy_interval = 30\n mock_policy._policy_limit = 3\n\n failure_ts = [100.403, 115.871, 124, 130.35]\n for ts in failure_ts:\n exit_file = '%014.3f,001,000' % ts\n with open(os.path.join(self.root, exit_file), 'a'):\n pass\n\n res = mock_policy._check_policy()\n\n self.assertEqual(res, monitor.MonitorRestartPolicyResult.FAIL)\n self.assertEqual(mock_policy._last_timestamp, 130.35)\n self.assertEqual(mock_policy._last_rc, 1)\n self.assertEqual(mock_policy._last_signal, 0)\n self.assertEqual(os.unlink.call_count, 0)",
"def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)",
"def test_view_protected(self):\n from AccessControl import Unauthorized\n logout()\n self.assertRaises(\n Unauthorized,\n self.task.restrictedTraverse,\n 'view-protected'\n )",
"def test_keys_no_auth(client):\n\n keys_response = client.get('/NY/keys')\n\n assert(keys_response.status_code == 401)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filters a list of elements. 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'elements' is the list of elements to filter. Returns a list containing only those elements for which 'select' returns True. | def filter(self, viewer, parent, elements):
return [e for e in elements if self.select(viewer, parent, e)] | [
"def filterSelection(filters):\n result = simpleFilter(pm.selected(), filters)\n return result",
"def select(self, viewer, parent, element):\n\n return True",
"def filter_by_reviewers(reviews, selected_reviewers):\n return [x for x in reviews if x.reviewer in selected_reviewers]",
"def select_allfaces_fromobjects(self, elements):\n self.logger.glb.info(\"Selecting outer faces.\")\n\n sel = []\n\n for i in elements:\n\n oFaceIDs = self.oeditor.GetFaceIDs(i)\n\n for facce in oFaceIDs:\n sel.append(int(facce))\n return sel",
"def select(elements, val=True):\n for el in elements:\n el.select_set(val)",
"def validate(elements):\n return list(filter(lambda el: el.is_valid, elements))",
"def pick_elements(self):\n npickable = 0\n for a in self.actors:\n npickable += a.nelems()\n self.pick_parts('element',npickable,store_closest=\\\n self.selection_filter == 'single' or\\\n self.selection_filter == 'closest' or\\\n self.selection_filter == 'connected'\n )",
"def order_filter(self,elements):",
"def find_elements(self, locator, parent=None):\n return self._element_finder.find(locator, first_only=False,\n required=False, parent=parent)",
"def filterSelection(selection=mc.ls(sl=True, l=True), source='scene'):\n result = []\n if not selection and source == 'scene':\n mc.error('Nothing is selected !')\n else:\n for sel in selection or []:\n if mc.nodeType(sel) == 'transform' and filter(lambda x: mc.nodeType(x) == 'mesh', mc.listRelatives(sel, s=True, f=True) or []):\n result.append(sel)\n return result",
"def find_elements(self, elements: List[WebElement]) -> List[WebElement]:\n return elements",
"def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)",
"def query_parent(selectors, tree_item):\n return [subitem for subitem in iterate_parent(tree_item)\n if all(selectors, subitem)]",
"def select_runs(self, *selected):\n selected = set(selected)\n return self.empty(iov for iov in self\n if any(run in selected for run in iov.runs))",
"def filtered_elements(self, model):\n documents = (document for document in model.elements.values() if type(document) == Document)\n for document in documents:\n yield document",
"def filter(self, filters=None):\n self.filtered_articles = []\n if filters is None:\n filters = []\n for article in self.articles:\n if all(keep(article) for keep in filters):\n self.filtered_articles.append(article)\n return self.filtered_articles",
"def filter(self, filters):",
"def test_filter_by_env_elements(self):\r\n envs = self.F.EnvironmentFactory.create_full_set(\r\n {\"OS\": [\"Linux\", \"Windows\"]})\r\n self.factory(\r\n tester__username=\"Tester 1\", environment=envs[0])\r\n self.factory(\r\n tester__username=\"Tester 2\", environment=envs[1])\r\n\r\n res = self.get(\r\n params={\"filter-envelement\": envs[0].elements.all()[0].id})\r\n\r\n self.assertInList(res, \"Tester 1\")\r\n self.assertNotInList(res, \"Tester 2\")",
"def filter_none(elems):\n return [x for x in elems if x is not None]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the element is 'allowed' (ie. NOT filtered). 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'element' is the element to select. By default we return True. | def select(self, viewer, parent, element):
return True | [
"def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None",
"def tag_visible(element):\n \n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True",
"def filter(self, viewer, parent, elements):\n\n return [e for e in elements if self.select(viewer, parent, e)]",
"def supported_elem(cls, elemname, parent=None):\r\n if elemname==\"configurationlayer\":\r\n return True\r\n else:\r\n return False",
"def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False",
"def is_visible(element):\n # returns false for elements in non-visible sections of html\n if element.parent.name in ['head', 'title', '[document]', 'style', 'script']:\n return False\n # returns false for comments\n elif re.match('<!--.*-->', str(element.encode('utf-8'))):\n return False\n return True",
"def supported_elem(cls, elemname, parent=None):\r\n if elemname==\"configurationroot\":\r\n return True\r\n else:\r\n return False",
"def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)",
"def is_filter_trait(self, element, trait_name):\n\n return False",
"def filterAcceptsRow(self, sourceRow, sourceParentIndex):\n parent_item = self.sourceModel().treeItem(sourceParentIndex)\n tree_item = parent_item.child(sourceRow)\n\n accept = ((self._show_special_attributes or\n not tree_item.is_special_attribute) and\n (self._show_callables or\n not tree_item.is_callable_attribute))\n\n return accept",
"def check_parent_and_children_not_in_view(self, element: Element) -> None:\n for view in self.element_views:\n if view.element in element.child_elements:\n raise ValueError(f\"A child of {element.name} is already in this view.\")\n if view.element is getattr(element, \"parent\", None):\n raise ValueError(\n f\"The parent of {element.name} is already in this view.\"\n )",
"def _inline(self, element):\n if element.name in self._inline_tags:\n return True\n return False",
"def __contains__(self, element: Element):\n return element in self.get_elements()",
"def is_element_visible(self, locator):\n return self._is_visible(locator)",
"def _is_interactable(self):\n\n if self._element is None:\n self._set_element()\n\n if self._element.is_displayed() and self._element.is_enabled():\n return True\n return False",
"def has_visible_parents(self, node):\n parent = node\n if parent is None:\n return True \n else:\n style = self.get_style(parent)\n if style is not None and style.replace(\" \",\"\").find(\"display:none\") >= 0:\n return False\n return self.has_visible_parents(parent.getparent())",
"def isElement(self):\n \n pass",
"def is_enable(self,by_locator):\n element=WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(by_locator))\n return bool(element)",
"def assertVisible(self, element):\n return self.assertTrue(element.is_displayed())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Is the filter affected by changes to an element's trait? 'element' is the element. 'trait_name' is the name of the trait. Returns True if the filter would be affected by changes to the trait named 'trait_name' on the specified element. By default we return False. | def is_filter_trait(self, element, trait_name):
return False | [
"def have_traits_changed(self, name_or_uuid, traits):\n with self.lock:\n provider = self._find_with_lock(name_or_uuid)\n return provider.have_traits_changed(traits)",
"def trait_is_defined(obj, trait_name):\n return obj.has_trait(trait_name) and trait_name in obj._trait_values",
"def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None",
"def __contains__(self, element: Element):\n return element in self.get_elements()",
"def has_traits(self, name_or_uuid, traits):\n with self.lock:\n provider = self._find_with_lock(name_or_uuid)\n return provider.has_traits(traits)",
"def has_visibility(trait, visibility_name):\n\n return trait.visibility == getattr(schema.Trait.Visibility, visibility_name)",
"def filter_non_game_elements(element):\n if 'update'.upper() in element.upper():\n return False\n if 'upd'.upper() in element.upper():\n return False\n\n return True",
"def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None",
"def _IsModified(self, name):\r\n return self._columns[name].IsModified()",
"def has_traits(self, traits):\n return not bool(set(traits) - self.traits)",
"def is_calibration_tag_for_name(ins, exp, run, name='dark') :\n for attr in run_attributes(ins, exp, run) :\n if attr['class'] == 'Calibrations' and attr['name'] == name : return True\n return False",
"def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])",
"def have_traits_changed(self, new):\n return set(new) != self.traits",
"def isModifiedByCategory(self,node, queryCategory):\n pred = self.getModifiers(node )\n for p in pred:\n #if( queryCategory.lower() == p.getCategory().lower() ):\n if( p.isA(queryCategory) ):\n return True\n\n return False",
"def isModifiedByCategory(self, node, queryCategory):\n predecessors = self.getModifiers(node)\n for predecessor in predecessors:\n if predecessor.isA(queryCategory):\n return True\n\n return False",
"def isElement(self):\n \n pass",
"def is_injected(self, name):\n return name in self.__provisions",
"def supported_elem(cls, elemname, parent=None):\r\n if elemname==\"configurationlayer\":\r\n return True\r\n else:\r\n return False",
"def __eq__(self, argument) -> bool:\n if not isinstance(argument, type(self)):\n return False\n # 'hide' is excluded from equality testing:\n if (self.reference_duration == argument.reference_duration and\n self.units_per_minute == argument.units_per_minute and\n self.textual_indication == argument.textual_indication and\n self.custom_markup == argument.custom_markup):\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
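Taken together, the three entries above describe the hook points of a viewer-filter base class: `filter` keeps the elements that `select` accepts, and `is_filter_trait` tells the viewer which trait changes should trigger re-filtering. A minimal, self-contained sketch of how the pieces fit together (the class names below are illustrative, not taken from the original codebase):

```python
class ViewerFilter:
    def filter(self, viewer, parent, elements):
        # Keep only the elements that 'select' accepts.
        return [e for e in elements if self.select(viewer, parent, e)]

    def select(self, viewer, parent, element):
        # By default nothing is filtered out.
        return True

    def is_filter_trait(self, element, trait_name):
        # By default the filter is insensitive to trait changes.
        return False


class NamePrefixFilter(ViewerFilter):
    """Keeps only elements whose 'name' attribute starts with a prefix."""

    def __init__(self, prefix):
        self.prefix = prefix

    def select(self, viewer, parent, element):
        return getattr(element, 'name', '').startswith(self.prefix)

    def is_filter_trait(self, element, trait_name):
        # Renaming an element can change whether it passes the filter.
        return trait_name == 'name'
```

A concrete subclass normally overrides only `select`, and `is_filter_trait` when the decision depends on a mutable trait.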
Given a binary tree, find its minimum depth. The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node. | def minDepth(self, root: TreeNode) -> int:
return self.bfs(root) | [
"def mindepth(l):\n m = [depth(x) for x in l if len(x)]\n if not m:\n return 0\n return min(m)",
"def mindepth(l):\n m = [depth(x) for x in l if length(x)]\n if not m:\n return 0\n return min(m)",
"def max_depth(root):\n # basic case\n if root is None:\n return 0\n\n # breadth-first traversal\n queue = collections.deque([root])\n depth = 0\n while queue:\n queue_size = len(queue)\n for i in range(queue_size):\n curr = queue.popleft()\n if curr.left is not None:\n queue.append(curr.left)\n if curr.right is not None:\n queue.append(curr.right)\n depth += 1\n\n return depth",
"def min_subnet_depth(topology):\n num_subnets = len(topology)\n\n assert len(topology[0]) == num_subnets\n\n depths = []\n Q = deque()\n for subnet in range(num_subnets):\n if topology[subnet][INTERNET] == 1:\n depths.append(0)\n Q.appendleft(subnet)\n else:\n depths.append(float('inf'))\n\n while len(Q) > 0:\n parent = Q.pop()\n for child in range(num_subnets):\n if topology[parent][child] == 1:\n # child is connected to parent\n if depths[child] > depths[parent] + 1:\n depths[child] = depths[parent] + 1\n Q.appendleft(child)\n return depths",
"def depth(node):\n if node is None:\n return 0\n else:\n return 1 + max(depth(node.left_child), depth(node.right_child))",
"def calc_bidi_mindepths(tree):\n if not hasattr(tree, 'depths'):\n calc_length_metrics(tree)\n tree.mindepth = min(tree.depths)\n for node in tree.preorder(include_self=False):\n if node.is_tip():\n node.mindepth = 0.0\n continue\n if node.parent.is_root():\n node.mindepth_above = node.length + \\\n min(min(x.depths) + x.length for x in tree.children\n if x is not node)\n else:\n node.mindepth_above = node.parent.mindepth_above + node.length\n node.mindepth = min(node.mindepth_above, min(node.depths))\n for base in tree.non_tips(include_self=False):\n delattr(base, 'mindepth_above')",
"def get_max_depth(self):\n depth = 1\n for node in self.nodes:\n depth = max(depth,1+self.nodes[node].get_max_depth())\n return depth",
"def left_depth(self):\n return self.max_depth(self.get_root().left)",
"def depth(self, node: TreeNode) -> int:\n h = 0\n while node.left:\n h += 1\n node = node.left\n return h",
"def depth(self, value):\n depth = 0\n node = self.root\n while node is not None and node.value != value:\n depth += 1\n if node.value < value:\n node = node.right\n else:\n node = node.left\n\n if node is None:\n return -1\n else:\n return depth",
"def calc_bidi_minlevels(tree):\n # check if tree is unrooted (if yes, \"root\" accounts for one node)\n unrooted = int(len(tree.children) != 2)\n\n # execute calc_split_metrics if not yet\n if not hasattr(tree, 'postlevels'):\n calc_split_metrics(tree)\n\n # root's minlevel is the min of all descendants (single direction)\n tree.minlevel = min(tree.postlevels)\n\n # internal node's minlevel is the min of post- and pre-direction\n for node in tree.preorder(include_self=False):\n\n # tips are already at surface, so minlevel = 1\n if node.is_tip():\n node.minlevel = 1\n continue\n\n # basal nodes: compare siblings, consider (un)rooting\n if node.parent.is_root():\n node.minlevel_above = 1 + unrooted + \\\n min(min(x.postlevels) for x in tree.children if x is not node)\n\n # derived nodes: increment from parent\n else:\n node.minlevel_above = node.parent.minlevel_above + 1\n\n # minimum level of post- and pre-direction levels\n node.minlevel = min(node.minlevel_above, min(node.postlevels))\n\n # clean up\n for node in tree.non_tips(include_self=False):\n delattr(node, 'minlevel_above')",
"def min_layer(t: Tree) -> int:\n queue = [t]\n layer = 1\n min_val, min_layer = t.node, layer\n nq = True\n while nq:\n nq = []\n total = 0\n while queue:\n node = queue.pop()\n if isinstance(node, int):\n continue # Already been counted\n if isinstance(node.left, Tree):\n nq.append(node.left)\n total += node.left.node\n else:\n if node.left:\n total += node.left\n if isinstance(node.right, Tree):\n nq.append(node.left)\n total += node.right.node\n else:\n if node.right:\n total += node.right\n layer += 1\n queue = nq\n if total < min_val:\n min_layer, min_val = layer, total\n\n return min_layer",
"def depth(self, node):\n if node is self.root:\n return 0\n return nx.shortest_path_length(self.graph, self.root, node)",
"def get_min_path_depth(romfs_path):\n\n ## This is for relative paths *only*\n assert(not romfs_path.startswith(b\"/\"))\n\n ## The symlinks should always be unix paths\n romfs_path = posixpath.normpath(romfs_path)\n cur_depth = 0\n min_depth = 0\n\n components = []\n\n while True:\n ## get the directory on the end of the path\n romfs_path, subdir = posixpath.split(romfs_path)\n\n components += [subdir]\n if not romfs_path:\n break\n\n components.reverse()\n for subdir in components:\n if subdir == b\"..\":\n cur_depth -= 1\n elif subdir != b\".\":\n cur_depth += 1\n\n if cur_depth < min_depth:\n min_depth = cur_depth\n\n return min_depth",
"def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()",
"def depth(self, node):\n\n if not node:\n return 0\n else:\n l_depth = self.depth(node.left)\n r_depth = self.depth(node.right)\n\n if l_depth > r_depth:\n return l_depth + 1\n else:\n return r_depth + 1",
"def node_depths_recursive(root):\n depth_sums = 0\n depth_sums = sum_node_depths(root, depth_sums, 0)\n return depth_sums",
"def get_min_depth(l_k):\n return max(l_k.values())",
"def get_min_path(root):\n\n min_sum = float('inf')\n min_path = []\n\n def helper(root, curr_path=[]):\n nonlocal min_path\n if root.left:\n helper(root.left, curr_path + [root.data])\n\n if root.right:\n helper(root.right, curr_path + [root.data])\n\n if root.left is None and root.right is None:\n if sum(curr_path) < min_sum:\n min_path = curr_path + [root.data]\n # print(min_path)\n\n helper(root)\n return min_path"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
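The document above delegates to a `self.bfs` helper that is not shown. A self-contained breadth-first sketch of the described behaviour (illustrative, not the original helper):

```python
from collections import deque


class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def min_depth(root):
    """Return the number of nodes on the shortest root-to-leaf path (0 for an empty tree)."""
    if root is None:
        return 0
    queue = deque([(root, 1)])
    while queue:
        node, depth = queue.popleft()
        if node.left is None and node.right is None:
            return depth  # the first leaf reached by BFS is the shallowest one
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
```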
Converts the complex number `c` to a string in Fortran format, i.e. (Re c, Im c). If c is iterable, it returns a string of the form [(Re c_1, Im c_1), ...]. | def str_complex(c, kindstr=''):
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr) | [
"def complex_vct_str ( vct , format = '%.5g%-+.5gj' ) :\n try :\n lst = [] \n for c in vct :\n cc = complex ( c )\n item = format % ( cc.real , cc.imag )\n lst.append ( cc ) \n return '[ ' + ', '.join ( lst ) + ' ]' \n except TypeError :\n pass\n return complex_vct_str ( vct , format = '%.5g%-+.5gj' )",
"def _complex_to_string(number):\n result = ''\n\n if number.real == 0:\n if number.imag == 0:\n result += '0'\n elif number.imag == 1:\n result += 'i'\n elif number.imag == -1:\n result += '-i'\n else:\n result += str(int(number.imag)) + 'i'\n else:\n result += str(int(number.real))\n if number.imag == 1:\n result += '+i'\n elif number.imag > 0:\n result += '+' + str(int(number.imag)) + 'i'\n elif number.imag == -1:\n result += '-i'\n elif number.imag < 0:\n result += str(int(number.imag)) + 'i'\n\n return result",
"def format_cplx(z):\n return \"{0.real:.2f} + {0.imag:.2f}j\".format(z)",
"def __str__(self):\r\n\r\n # if the imaginary part is negative, the number is converted to a string of the form 'a - bi'\r\n if self.imaginary_part < 0:\r\n return str(self.real_part) + ' - ' + str(abs(self.imaginary_part)) + 'i'\r\n\r\n # if the imaginary part is 0, only the real part is converted to a string of the form 'a'\r\n elif self.imaginary_part == 0:\r\n return str(self.real_part)\r\n\r\n # string has the standard form of a complex number: a+bi\r\n else:\r\n return str(self.real_part) + ' + ' + str(self.imaginary_part) + 'i'",
"def _cell_to_string(self, c):\n if (self.max_decimals > -1) and isinstance(c, float):\n cell_format = \"%.\" + str(self.max_decimals) + \"f\"\n result = str(cell_format % c)\n else:\n result = str(c)\n return result",
"def __str__( self ) :\n\n return( ' '.join( [ \"%g\" % c_l for c_l in self.coefficients ] ) )",
"def epc_string(n, bb1=False, c_coeffs=False, ToFloat=True):\n\n try:\n\n ep_str = epc_strings[(n, bb1, c_coeffs, ToFloat)]\n\n return ep_str\n\n except KeyError:\n\n ep_n = expansion_coefficient(n)\n ep_n = ep_n.subs(N, x)\n\n if ToFloat:\n ep_n = sp.N(ep_n)\n #print ep_n\n\n if not bb1:\n ep_n = ep_n.subs(b1, 0)\n\n if not c_coeffs:\n coeff_c_subs_dict = {sp.Indexed(c, i):0 for i in range(n)}\n ep_n = ep_n.subs(coeff_c_subs_dict)\n\n\n ep_n = str(ep_n)\n\n\n # Update global dict\n if bb1:\n ep_n = re.sub(r'f\\[(\\d*)\\]', r'f_b1_\\1', ep_n)\n ep_n = re.sub(r'c\\[(\\d*)\\]', r'c_b1_\\1', ep_n)\n\n else:\n ep_n = re.sub(r'f\\[(\\d*)\\]', r'f_b0_\\1', ep_n)\n ep_n = re.sub(r'c\\[(\\d*)\\]', r'c_b0_\\1', ep_n)\n\n epc_strings[(n, bb1, c_coeffs, ToFloat)] = ep_n\n\n\n return ep_n",
"def complexinfo(a, str=None):\n\n if str:\n print \n print \"\\t\", str\n re = a.real.copy()\n im = a.imag.copy()\n _log.debug(\"\\t%.2e %.2g = re.sum im.sum\" % (re.sum(), im.sum()))\n _log.debug(\"\\t%.2e %.2g = abs(re).sum abs(im).sum\" % (abs(re).sum(), abs(im).sum()))",
"def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp",
"def __str__(self):\n return \"C{0}\".format(self._c)",
"def counter_to_str (c):\n m = \"\"\n for i, j in c.most_common():\n m += \"\\t{}: {:,}\".format(i, j)\n return m",
"def to_f90str(value):\n\n if type(value) is int:\n return str(value)\n elif type(value) is float:\n return str(value)\n elif type(value) is bool:\n return '.{}.'.format(str(value).lower())\n elif type(value) is complex:\n return '({}, {})'.format(value.real, value.imag)\n elif type(value) is str:\n return '\\'{}\\''.format(value)\n elif value is None:\n return ''\n else:\n raise ValueError('Type {} of {} cannot be converted to a Fortran type.'\n ''.format(type(value), value))",
"def coefflist2str(coefflist):\n if coefflist == []:\n s = '0'\n else:\n s = ''\n for i in range(len(coefflist) - 1, -1, -1):\n if coefflist[i] != 0:\n if coefflist[i] > 0:\n if i < len(coefflist) - 1:\n sign = '+'\n else:\n sign = ''\n else:\n sign = '-'\n if abs(coefflist[i]) != 1 or i == 0:\n coeff = ' ' + str(abs(coefflist[i]))\n else:\n coeff = ''\n if i > 0:\n x = ' x'\n else:\n x = ''\n if i > 1:\n exponent = '^%d' % i\n else:\n exponent = ''\n s += ' %s%s%s%s' % (sign, coeff, x, exponent)\n return s.strip()",
"def repr_lincomb(symbols, coeffs):\n s = \"\"\n first = True\n i = 0\n\n all_atomic = True\n for c in coeffs:\n b = latex(symbols[i])\n if c != 0:\n if c == 1:\n s += b\n else:\n coeff = coeff_repr(c)\n if not first:\n coeff = \" + %s\"%coeff\n else:\n coeff = \"%s\"%coeff\n s += \"%s%s\"%(coeff, b)\n first = False\n i += 1\n if first:\n s = \"0\"\n s = s.replace(\"+ -\",\"- \")\n return s",
"def order2string(order):\n nparray = np.array(order)\n num_x = np.sum(nparray==0)\n num_y = np.sum(nparray==1)\n num_z = np.sum(nparray==2)\n string_repr = \"$\"\n if num_x == 0 and num_y == 0 and num_z == 0:\n return \"constant\"\n if num_x > 0:\n string_repr += \"x^{{{}}}\".format(num_x)\n if num_y > 0 :\n string_repr += \"y^{{{}}}\".format(num_y)\n if num_z > 0:\n string_repr += \"z^{{{}}}\".format(num_z)\n string_repr += \"$\"\n return string_repr",
"def f90complex(s):\n assert type(s) == str\n\n if s[0] == '(' and s[-1] == ')' and len(s.split(',')) == 2:\n s_re, s_im = s[1:-1].split(',', 1)\n\n # NOTE: Failed float(str) will raise ValueError\n return complex(f90float(s_re), f90float(s_im))\n else:\n raise ValueError('{} must be in complex number form (x, y)'.format(s))",
"def z_to_seq( z ):\n z = complex(z)\n re,im = z.real,z.imag\n return (re, -im, im, re)",
"def float_vct_str ( vct , format = '%.5g' ) :\n try :\n return '[ ' + ', '.join ( [ format % v for v in vct ] ) + ' ]' \n except TypeError :\n pass\n return float_vct_str ( vct , format = '%.5g' )",
"def __repr__(self):\n \n #Trivial case for the zero polynomial\n if self.is_zero():\n return '0'\n else:\n deg = self.degree()\n #Degree zero just returns the constant coeficient\n if deg == 0:\n return str( self.coefs[0] )\n \n else:\n poly_str_lst = []\n #Special formatting for constant coef and x coef\n if self.coefs[0] != 0:\n poly_str_lst.append( str(self.coefs[0]) )\n first_coef = self.coefs[1]\n if first_coef != 0:\n first_coef_ = '' if first_coef == 1 else first_coef\n poly_str_lst.append( '%sx' % first_coef_ )\n if deg == 1:\n poly_str = ' + '.join( poly_str_lst )\n else:\n #adds powers to the rest, and hides coef if it is 1\n for power, coef in zip( range(2, deg+1), self.coefs[2:] ):\n if coef != 0:\n coef_ = '' if coef == 1 else coef\n poly_str_lst.append( '%sx^{%s}' % ( coef_, power) )\n poly_str = ' + '.join( poly_str_lst )\n return poly_str"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
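Assuming `str_complex` from the entry above is in scope, its behaviour follows directly from the format string; expected outputs are shown as comments:

```python
print(str_complex(1 + 2j))            # (1.0, 2.0)
print(str_complex(3))                 # (3.0, 0.0)
print(str_complex([1, 1j], '_dp'))    # [(1.0_dp, 0.0_dp), (0.0_dp, 1.0_dp)]
```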
Select PORT update events and notify the observers upon a port update in APPL_DB/CONFIG_DB or an XCVR insertion/removal in STATE_DB | def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler):
if not stop_event.is_set():
(state, _) = sel.select(SELECT_TIMEOUT_MSECS)
if state == swsscommon.Select.TIMEOUT:
return
if state != swsscommon.Select.OBJECT:
logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')
return
for port_tbl in asic_context.keys():
while True:
(key, op, fvp) = port_tbl.pop()
if not key:
break
if not validate_port(key):
continue
fvp = dict(fvp) if fvp is not None else {}
if 'index' not in fvp:
fvp['index'] = '-1'
port_index = int(fvp['index'])
port_change_event = None
if op == swsscommon.SET_COMMAND:
port_change_event = PortChangeEvent(key,
port_index,
asic_context[port_tbl],
PortChangeEvent.PORT_SET,
fvp)
elif op == swsscommon.DEL_COMMAND:
port_change_event = PortChangeEvent(key,
port_index,
asic_context[port_tbl],
PortChangeEvent.PORT_DEL,
fvp)
if port_change_event is not None:
port_change_event_handler(port_change_event) | [
"def on_dpport_config(self, evt):\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", evt.msg)\n\n try:\n port = fibcdbm.portmap().find_by_dp(dp_id=evt.dp_id, port_id=evt.port_id)\n if evt.enter:\n port.update_dp(evt.enter)\n self.send_port_status_if_ready(port, \"UP\")\n\n else:\n self.send_port_status_if_ready(port, \"DOWN\")\n port.update_dp(evt.enter)\n\n except KeyError as expt:\n _LOG.warn(\"dp port not registered. dpid:%d, port:%d\",\n evt.dp_id, evt.port_id)\n\n except Exception as expt:\n _LOG.exception(expt)",
"def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)",
"def process_update_port(self, plugin_context, data, result):\n self._call_on_ext_drivers(\"process_update_port\", plugin_context,\n data, result)",
"def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)",
"def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)",
"def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices",
"def port_changed(self, task, port_obj):\n pass",
"def _handle_PortStatus(self, event):\n\t\tconnection.send(ofp_features_request())",
"def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n for stat in body:\n data = {\n 'datapath': str(ev.msg.datapath.id)+\":\"+str(stat.port_no),\n 'rx_packets': stat.rx_packets,\n 'rx_bytes': stat.rx_bytes,\n 'rx_errors': stat.rx_errors,\n 'tx_packets': stat.tx_packets,\n 'tx_bytes': stat.tx_bytes,\n 'tx_error': stat.tx_packets\n }\n print \"c1 update %s port-info\" % data['datapath']\n requests.post(url=port_url, data=json.dumps(data))",
"def update_handler(self, fd, events):\n ...",
"def target(self):\n\n while True:\n new_ports = serial.tools.list_ports.comports()\n\n if self.ports is None or [p.name for p in self.ports] != [p.name for p in new_ports]:\n self.portsUpdate.emit(new_ports)\n\n time.sleep(self.interval)\n\n self.ports = new_ports",
"def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n\n read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler)",
"def port_changed():\n config = hookenv.config()\n current_port = config['port']\n previous_port = config.previous('port')\n if previous_port is not None:\n hookenv.close_port(previous_port)\n hookenv.open_port(current_port)",
"def on_vsport_config(self, evt):\n pkt = evt.msg\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", pkt)\n\n try:\n hw_addr = fibcdbm.dps().find_port(evt.vs_id, evt.port_id).hw_addr\n except:\n hw_addr = \"00:00:00:00:00:{0:02X}\".format(evt.port_id & 0xff)\n\n try:\n port = fibcdbm.portmap().find_by_name(re_id=pkt.re_id, name=pkt.ifname)\n if port.update_vs(evt.vs_id, evt.port_id, hw_addr):\n self.send_port_status_if_ready(port, \"UP\")\n\n except KeyError:\n _LOG.warn(\"vs port not registered. re_id:%s, ifname:%s\",\n pkt.re_id, pkt.ifname)\n\n except Exception as expt:\n _LOG.exception(expt)",
"def update_all_PVs():\n for PV_name in connected_PVs():\n notify_subscribers_if_changed(PV_name, PV_value(PV_name, cached=False))",
"def update_handler(self, fd, events):\r\n raise NotImplementedError()",
"def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)",
"def _manage_port(self, event, body, timestmp):\n if event in PORT_DELETE_EVENTS:\n port = body.get(\"payload\", dict()).get(\"port_id\", \"UNDEFINED\")\n self._delete_port(port, timestmp)\n else:\n # Get port details\n port_attr = body.get(\"payload\", dict()).get(\"port\", dict())\n port = port_attr.get(\"id\", \"UNDEFINED\")\n mac = port_attr.get(\"mac_address\", \"UNDEFINED\")\n fixd_ip = \"UNDEFINED\"\n for ips in port_attr.get(\"fixed_ips\", list()):\n fixd_ip = ips.get('ip_address', \"UNDEFINED\")\n net_id = port_attr.get(\"network_id\", \"UNDEFINED\")\n dev_id = port_attr.get(\"device_id\", \"UNDEFINED\")\n\n # Add/Update Event.\n if event in PORT_ADD_EVENTS:\n self._add_port(port, mac, fixd_ip, dev_id, net_id, timestmp)\n elif event in PORT_UPDATE_EVENTS:\n self._update_port(port, mac, fixd_ip, dev_id, net_id, timestmp)",
"def update_port_status_after_serving(self):\n for port in self.port_groups[self.current_serving_group]:\n if port == self.current_serving_port and self.pkt_sent[port] == True:\n self.update_status_for_granted(port)\n else:\n self.update_status_for_waiting(port)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select CONFIG_DB PORT table changes; once there is a port configuration add/remove, notify the observers | def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):
if not stop_event.is_set():
(state, _) = sel.select(SELECT_TIMEOUT_MSECS)
if state == swsscommon.Select.TIMEOUT:
return
if state != swsscommon.Select.OBJECT:
logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')
return
read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler) | [
"def on_dpport_config(self, evt):\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", evt.msg)\n\n try:\n port = fibcdbm.portmap().find_by_dp(dp_id=evt.dp_id, port_id=evt.port_id)\n if evt.enter:\n port.update_dp(evt.enter)\n self.send_port_status_if_ready(port, \"UP\")\n\n else:\n self.send_port_status_if_ready(port, \"DOWN\")\n port.update_dp(evt.enter)\n\n except KeyError as expt:\n _LOG.warn(\"dp port not registered. dpid:%d, port:%d\",\n evt.dp_id, evt.port_id)\n\n except Exception as expt:\n _LOG.exception(expt)",
"def port_changed():\n config = hookenv.config()\n current_port = config['port']\n previous_port = config.previous('port')\n if previous_port is not None:\n hookenv.close_port(previous_port)\n hookenv.open_port(current_port)",
"def port_changed(self, task, port_obj):\n pass",
"def collect_port(self, port, settings):\n pass # IMPLEMENT",
"def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()",
"def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices",
"def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)",
"def test_port_configuration(self, env):\n self.suite_logger.debug(\"Get Ports table\")\n ports = env.switch[1].ui.get_table_ports()\n\n # Print Ports table\n self.suite_logger.debug(\"Ports table: {}\".format(ports))\n\n # Get Port info\n port_1 = env.switch[1].ui.get_table_ports([1], all_params=True)\n\n # Print Port info\n self.suite_logger.debug(\"Port info: {}\".format(port_1))\n\n # Change port configuration:\n # adminMode, macAddress, autoNegotiate, speed, duplex, flowControl,\n # maxFrameSize, pvid, pvpt, ingressFiltering, discardMode, cutThrough, appError\n env.switch[1].ui.modify_ports([1], maxFrameSize=2000)\n\n # Verify Port configuration has been changed\n assert env.switch[1].ui.get_table_ports([1], all_params=True)[0]['maxFrameSize'] == 2000, \\\n \"maxFrameSize value has not been changed\"",
"def updatePortList(self):\n serialInfos = self._serialPort.scanComs()\n availablePorts = []\n for info in serialInfos:\n availablePorts.append(info.portName())\n self._serialConfigWidget.updatePortList(availablePorts)",
"def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n for port_tbl in asic_context.keys():\n while True:\n (key, op, fvp) = port_tbl.pop()\n if not key:\n break\n if not validate_port(key):\n continue\n fvp = dict(fvp) if fvp is not None else {}\n if 'index' not in fvp:\n fvp['index'] = '-1'\n port_index = int(fvp['index'])\n port_change_event = None\n if op == swsscommon.SET_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_SET,\n fvp)\n elif op == swsscommon.DEL_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_DEL,\n fvp)\n if port_change_event is not None:\n port_change_event_handler(port_change_event)",
"def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()",
"def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)",
"def on_vsport_config(self, evt):\n pkt = evt.msg\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", pkt)\n\n try:\n hw_addr = fibcdbm.dps().find_port(evt.vs_id, evt.port_id).hw_addr\n except:\n hw_addr = \"00:00:00:00:00:{0:02X}\".format(evt.port_id & 0xff)\n\n try:\n port = fibcdbm.portmap().find_by_name(re_id=pkt.re_id, name=pkt.ifname)\n if port.update_vs(evt.vs_id, evt.port_id, hw_addr):\n self.send_port_status_if_ready(port, \"UP\")\n\n except KeyError:\n _LOG.warn(\"vs port not registered. re_id:%s, ifname:%s\",\n pkt.re_id, pkt.ifname)\n\n except Exception as expt:\n _LOG.exception(expt)",
"def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)",
"def db_port(self, db_port):\n self._db_port = db_port",
"def update_com_ports(self) -> None:\n previousPort = self.port # Record the previous port before we clear the combobox\n \n self.port_combobox.clear()\n\n index = 0\n indexOfCH340 = -1\n indexOfPrevious = -1\n for desc, name, sys in gen_serial_ports():\n longname = desc + \" (\" + name + \")\"\n self.port_combobox.addItem(longname, sys)\n if(\"CH340\" in longname):\n # Select the first available CH340\n # This is likely to only work on Windows. Linux port names are different.\n if (indexOfCH340 == -1):\n indexOfCH340 = index\n # it could be too early to call\n #self.addMessage(\"CH340 found at index \" + str(indexOfCH340))\n # as the GUI might not exist yet\n if(sys == previousPort): # Previous port still exists so record it\n indexOfPrevious = index\n index = index + 1\n\n if indexOfPrevious > -1: # Restore the previous port if it still exists\n self.port_combobox.setCurrentIndex(indexOfPrevious)\n if indexOfCH340 > -1: # If we found a CH340, let that take priority\n self.port_combobox.setCurrentIndex(indexOfCH340)",
"def target(self):\n\n while True:\n new_ports = serial.tools.list_ports.comports()\n\n if self.ports is None or [p.name for p in self.ports] != [p.name for p in new_ports]:\n self.portsUpdate.emit(new_ports)\n\n time.sleep(self.interval)\n\n self.ports = new_ports",
"def on_config_changed(self, value, alarm):\n pass",
"def setup_logical_port_connectivity(self, context, port_db):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get port mapping from CONFIG_DB | def get_port_mapping(namespaces):
port_mapping = PortMapping()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
config_db = daemon_base.db_connect("CONFIG_DB", namespace=namespace)
port_table = swsscommon.Table(config_db, swsscommon.CFG_PORT_TABLE_NAME)
for key in port_table.getKeys():
if not validate_port(key):
continue
_, port_config = port_table.get(key)
port_config_dict = dict(port_config)
port_change_event = PortChangeEvent(key, port_config_dict['index'], asic_id, PortChangeEvent.PORT_ADD)
port_mapping.handle_port_change_event(port_change_event)
return port_mapping | [
"def getDbPort():\n\n if \"DB_PORT\" in controller.CONF.keys():\n return controller.CONF[\"DB_PORT\"]\n\n return basedefs.DB_PORT",
"def get_bridge_port_map(db):\n db.connect('ASIC_DB')\n if_br_oid_map = {}\n br_port_str = db.keys('ASIC_DB', \"ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:*\")\n if not br_port_str:\n return\n\n offset = len(\"ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:\")\n oid_pfx = len(\"oid:0x\")\n for br_s in br_port_str:\n # Example output: ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:oid:0x3a000000000616\n br_port_id = br_s[(offset + oid_pfx):]\n ent = db.get_all('ASIC_DB', br_s, blocking=True)\n if b\"SAI_BRIDGE_PORT_ATTR_PORT_ID\" in ent:\n port_id = ent[b\"SAI_BRIDGE_PORT_ATTR_PORT_ID\"][oid_pfx:]\n if_br_oid_map[br_port_id] = port_id\n\n return if_br_oid_map",
"def db_port(self):\n return self._db_port",
"def db_port(self) -> Optional[int]:\n return pulumi.get(self, \"db_port\")",
"def db_instance_port(self) -> int:\n return pulumi.get(self, \"db_instance_port\")",
"def port_extension_map(self):\n return usb_config.CAMBRIONIX_PORT_MAP[self.model]",
"def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))",
"def get_ports_mapping(status=psutil.CONN_LISTEN):\n ports = defaultdict(list)\n\n for process in get_processes():\n try:\n connections = process.connections()\n except psutil.Error:\n continue\n\n if connections:\n for conn in connections:\n if conn.status == status:\n ports[process].append(conn.laddr.port)\n\n return ports",
"def connection_configuration_mapping(self, value):\n if value == \"Y\":\n return \"0\"\n elif value == \"D\":\n return \"2\"\n elif value == \"Z\":\n return \"5\"\n else:\n raise ValueError(\"Unknown configuration {}\".format(value))",
"def port_list(self):\n return ['1']",
"def get_ports(self, database_name):\n databases = self.list_databases()\n for d in databases:\n if d['name'] == database_name:\n database_id = d['id']\n break\n else:\n raise ClientError('Could not find database, does not exist.')\n end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n return resp.json()",
"def _get_mapping(self, port_index, port_descr):\n\n port_id = None\n try:\n ent_alias_mapping_identifier = self.snmp.get(('ENTITY-MIB', 'entAliasMappingIdentifier', port_index, 0))\n port_id = int(ent_alias_mapping_identifier['entAliasMappingIdentifier'].split('.')[-1])\n except Exception as e:\n\n if_table_re = \"/\".join(re.findall('\\d+', port_descr))\n for interface in self.if_table.values():\n if re.search(if_table_re, interface['ifDescr']):\n port_id = int(interface['suffix'])\n break\n return port_id",
"def hana_db_sql_port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"hana_db_sql_port\")",
"def _get_broker_port(self, key):\n return int(self.defines[key])",
"def _vm_get_ports_mapping(self, vm_instance):\n pass",
"def get_port_from_file(): \n with open(PORT_INFO_PATH, \"rb\") as fp:\n return int(fp.read())",
"def ports(self) -> Sequence[str]:\n return pulumi.get(self, \"ports\")",
"def __get_torch_serve_port(self):\n config_properties = self.server_config[\"config_properties\"]\n inference_port = \"http://localhost:8080\"\n management_port = \"http://localhost:8081\"\n export_url = None\n address_strings = self.server_config[\"torchserve_address_names\"]\n if config_properties is not None and os.path.exists(config_properties):\n with open(config_properties, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n name = line.strip().split(\"=\")\n if name[0] == address_strings[0] and name[1] is not None:\n inference_port = name[1]\n if name[0] == address_strings[1] and name[1] is not None:\n management_port = name[1]\n if name[0] == address_strings[2] and name[1] is not None:\n export_url = name[1]\n return inference_port, management_port, export_url",
"def get_all_portbindings(context):\n session = context.session\n ports = session.query(ml2_models.PortBinding).all()\n return {port.port_id: _make_port_dict(port)\n for port in ports}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a class that doesn't descend from Pickleable to the pickle whitelist | def addClassToPickleWhitelist(cls):
unpickleWhitelist_.add(cls) | [
"def make_class_serializable(cls):\n global _registered_serializable_classes\n if cls not in _registered_serializable_classes:\n _registered_serializable_classes.append(cls)",
"def _check_pickleable(obj):\n def recurse(obj):\n if isinstance(obj, (list, tuple, set)):\n return [recurse(x) for x in obj]\n if isinstance(obj, dict):\n return [[recurse(x), recurse(y)] for x, y in obj.items()]\n if isinstance(obj, (str, int, float, bool, bytes, bytearray)):\n return None # Python primitive types are pickleable.\n if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:\n return None # NumPy arrays and PyTorch tensors are pickleable.\n if is_persistent(obj):\n return None # Persistent objects are pickleable, by virtue of the constructor check.\n return obj\n with io.BytesIO() as f:\n pickle.dump(recurse(obj), f)",
"def test__pickle_unpickle(self):\n pass",
"def register_class(clazz, serializer):\n _special_classes_registry[clazz] = serializer",
"def __init__(self, ignore_classes=(basestring,)):\r\n\t\tassert basestring in ignore_classes, 'basestring must be in ignore_classes'\r\n\t\tself.ignore_classes = ignore_classes",
"def test_pickleable(self):\n try:\n pickle.dumps(self.matcher)\n except TypeError:\n self.fail(\"Cannot dump matcher using pickle\")",
"def safe_classes(self, value):\n if not isinstance(value, list):\n raise ValueError(\"Argument 'value' must be a list\")\n\n new_safe_classes = copy(value)\n new_safe_classes.extend(self.BUILTIN_CLASSES)\n new_safe_classes = list(set(new_safe_classes))\n\n self._safe_classes = new_safe_classes",
"def _add_objects(self):\n modname = sys.modules[__name__]\n for name, cls in inspect.getmembers(modname, self._is_obj_class):\n self._sub_classes[name] = cls",
"def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class",
"def add_fields_pickling(klass, disable_unpickleable_fields=False):\n def state_extractor(self):\n from types import MethodType\n\n fields_dict = {}\n unpickleable_fields = []\n\n def save_field(name, method):\n try:\n retval = method()\n if disable_unpickleable_fields or _is_pickleable(retval):\n fields_dict[name] = retval # call the method\n else:\n unpickleable_fields.append(name)\n except TypeError:\n raise TypeError(\"\"\"not a \"fields\" class, problem with method '%s'\"\"\" % name)\n\n for attr_name in dir(self):\n if attr_name in (\"__init__\", \"__getstate__\", \"__setstate__\"):\n continue # skip constructor and state magic methods\n\n attr = getattr(self, attr_name)\n\n if type(attr) == MethodType:\n save_field(attr_name, attr)\n\n return (fields_dict, unpickleable_fields)\n\n def build_from_state(self, state):\n fields_dict, unpickleable_fields = state\n # saved fields\n for name in fields_dict.keys():\n # set the default name argument to prevent overwriting the name\n setattr(self, name, lambda name=name:fields_dict[name])\n\n # unpickleable fields\n for name in unpickleable_fields:\n def getter(name=name):\n raise UnpickleableFieldError(\n \"%s()'s result wasn't pickleable\" % name)\n setattr(self, name, getter)\n\n klass.__getstate__ = state_extractor\n klass.__setstate__ = build_from_state",
"def record_class_examined(self, cls):\n serialized = self.serialize_type(cls)\n if serialized is not None:\n self.classes_examined.add(serialized)",
"def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result",
"def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by_path(orig_path)\n except (AttributeError, ImportError):\n logger.debug(\"[OTel] Failed to import %s\", orig_path)\n continue\n\n original_classes[package] = original_cls\n\n return original_classes",
"def tracked(cls):\n cls.__bases__ = (TrackedMixin,) + cls.__bases__\n return cls",
"def add_unpicklable(self, statement, names):\n self.unpicklables.append(statement)\n\n for name in names:\n self.remove_global(name)\n if name not in self.unpicklable_names:\n self.unpicklable_names.append(name)",
"def test_pickling(self):\n proxy = self.model_def.model_class()\n pickled = pickle.dumps(proxy)\n self.assertEqual(pickle.loads(pickled), proxy)\n self.assertEqual(pickle.loads(pickled), proxy.model)",
"def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))",
"def add_managee(self, **saveable_classes):\n check_compliance(saveable_classes)\n for name in saveable_classes:\n if name in self.__dict__:\n logging.warning(\"Attribute of SavableCollection {} already \"\n \"exists, will be replaced\".format(name))\n\n self.__dict__.update(saveable_classes)",
"def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
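The entry above only records the class in a module-level set; a hedged sketch of how such a whitelist is typically enforced on the unpickling side (the `WhitelistUnpickler` and helper names below are illustrative, not from the original codebase):

```python
import io
import pickle

# Illustrative whitelist of class objects allowed to be resolved during unpickling.
_unpickle_whitelist = set()


def add_class_to_pickle_whitelist(cls):
    _unpickle_whitelist.add(cls)


class WhitelistUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Resolve the class, then refuse it unless it was explicitly whitelisted.
        cls = super().find_class(module, name)
        if cls not in _unpickle_whitelist:
            raise pickle.UnpicklingError(f"{module}.{name} is not whitelisted")
        return cls


def restricted_loads(data: bytes):
    return WhitelistUnpickler(io.BytesIO(data)).load()
```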
Recursively searches for 'datacube-definition.prj' in a level-2 directory and returns its parent directory. | def _get_datacubeprj_dir(directory):
prj_path = []
for path in Path(directory).rglob('datacube-definition.prj'):
prj_path.append(path)
if len(prj_path) < 1:
raise FileNotFoundError(f"'datacube-definition.prj' not found in {directory}")
elif len(prj_path) > 1:
raise RuntimeError(f"'datacube-definition.prj' multiple copies found in {directory}")
else:
return prj_path[0].parent | [
"def _find_parent(api, project, name):\n cur_folder = None\n for f in [x for x in name.split(\"/\") if x]:\n if not cur_folder:\n cur_folder = list(api.files.query(project, names=[f]).all())[0]\n else:\n cur_folder = list(api.files.query(parent=cur_folder.id, names=[f]).all())[0]\n return cur_folder",
"def find_project(self):\n dir = os.getcwd()\n prevdir = None\n prefix = \".\" + self.main_command.path\n while dir != prevdir:\n if (\n os.path.exists(dir + '/project.' + prefix)\n or os.path.exists(prefix)\n ):\n return dir\n prevdir = dir\n dir = os.path.dirname(dir)\n return None",
"def _get_parent_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\")",
"def test_find_in_grandparent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake', 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)",
"def FindVssProjectInfo(fullfname):\n path, fnameonly = os.path.split(fullfname)\n origPath = path\n project = \"\"\n retPaths = [fnameonly]\n while not project:\n iniName = os.path.join(path, g_iniName)\n database = win32api.GetProfileVal(\"Python\", \"Database\", \"\", iniName)\n project = win32api.GetProfileVal(\"Python\", \"Project\", \"\", iniName)\n if project:\n break\n # No valid INI file in this directory - look up a level.\n path, addpath = os.path.split(path)\n if not addpath: # Root?\n break\n retPaths.insert(0, addpath)\n if not project:\n win32ui.MessageBox(\n \"%s\\r\\n\\r\\nThis directory is not configured for Python/VSS\" % origPath\n )\n return\n return project, \"/\".join(retPaths), database",
"def search_parents(name, cwd):\n for pdir in parents(cwd):\n if name in os.listdir(pdir):\n return os.path.join(pdir, name)\n\n return None",
"def test_find_in_parent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)",
"def _get_project_dir():\n cwd = os.path.realpath(os.getcwd())\n while cwd is not os.path.dirname(cwd):\n if 'package.json' in os.listdir(cwd):\n return cwd\n cwd = os.path.dirname(cwd)\n raise RuntimeError(\"No 'package.json' in cwd (%s) or parent directories.\" %\n (os.getcwd(),))",
"def find_parent_directory_containing(filename, path=None):\n if not path:\n path = Path.cwd()\n else: # assure pathlib object\n path = Path(path)\n\n while True:\n if (path / filename).exists():\n return path\n if path.parent == path:\n return None\n path = path.parent # go up",
"def _find_data_folder():\n starting_folder = os.path.dirname(__file__)\n if starting_folder == '.':\n starting_folder = os.getcwd()\n found = False\n data_path = ''\n while not found:\n if 'data' in os.listdir(starting_folder):\n data_path = os.path.join(starting_folder, 'data')\n if os.path.exists(data_path):\n break\n starting_folder = os.path.dirname(starting_folder)\n if starting_folder == '':\n raise RuntimeError('Could not find \"data\" path')\n return data_path",
"def get_parent_value(self):\r\n if len(self.cur_fullpath) > BASE2_OFFSET:\r\n return self.get_subdir(self.cur_fullpath[-3][ISUBDIR], [self.cur_fullpath[-2][INAME]], value=True)\r\n elif len(self.cur_fullpath) == BASE2_OFFSET and self.get_base_subdir() == GLOBALS_DIR:\r\n return self.get_main_module()\r\n else:\r\n return None",
"def parent_dir(path):\n parent = os.path.dirname(os.path.dirname(os.path.join(path, \"\")))\n return parent",
"def get_parent_dirname(path, level = 1):\n if DEBUG:\n print(\"Finding parent level {} of path {}\".format(level, path))\n parent = path\n for i in range(level):\n parent = os.path.dirname(parent)\n return parent",
"def _loadSubprojects(self):\n logger.debug(\"Func: _loadSubprojects\")\n\n if not os.path.isfile(self._pathsDict[\"subprojectsFile\"]):\n data = [\"None\"]\n self._dumpJson(data, self._pathsDict[\"subprojectsFile\"])\n else:\n data = self._loadJson(self._pathsDict[\"subprojectsFile\"])\n if data == -2:\n return -2\n return data",
"def have_ebuild_dir(path, maxdepth=3):\n\tstack = [(normalize_path(path), 1)]\n\twhile stack:\n\t\tpath, depth = stack.pop()\n\t\tbasename = os.path.basename(path)\n\t\ttry:\n\t\t\tlistdir = os.listdir(path)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\tfor filename in listdir:\n\t\t\tabs_filename = os.path.join(path, filename)\n\t\t\ttry:\n\t\t\t\tst = os.stat(abs_filename)\n\t\t\texcept OSError:\n\t\t\t\tcontinue\n\t\t\tif stat.S_ISDIR(st.st_mode):\n\t\t\t\tif depth < maxdepth:\n\t\t\t\t\tstack.append((abs_filename, depth + 1))\n\t\t\telif stat.S_ISREG(st.st_mode):\n\t\t\t\tif filename.endswith(\".ebuild\") and \\\n\t\t\t\t\tfilename.startswith(basename + \"-\"):\n\t\t\t\t\treturn os.path.dirname(os.path.dirname(path))",
"def project_yaml():\n current = os.getcwd()\n parent = parent_dir(current)\n while current != parent:\n test_file = os.path.join(current, \"project.yaml\")\n if os.path.exists(test_file):\n return os.path.abspath(test_file)\n else:\n current = parent\n parent = parent_dir(current)\n abort(\"Could not find the project.yaml file in {0} or any parent directories\".format(os.getcwd()))",
"def __getImmediateRoot(depth=1):\n callingFileName = os.path.realpath(getCallingFileName(depth+1))\n return os.path.abspath(os.path.join(callingFileName,'..','..'))",
"def get_include_info(self, path, cmake_dir):\n path = os.path.abspath(path) if path is not None else os.path.abspath(os.getcwd())\n possible_parents = self.get_include_locations(cmake_dir)\n # Check there is some possible parent\n if not possible_parents:\n raise CMakeProjectException(cmake_dir, \"Does not define any of the cache fields: {}\"\n .format(\",\".join(CMakeHandler.CMAKE_LOCATION_FIELDS)))\n full_parents = map(os.path.abspath, possible_parents)\n # Parents *are* the common prefix for their children\n parents = filter(lambda parent: os.path.commonprefix([parent, path]) == parent, full_parents)\n\n def parent_reducer(accum, item):\n \"\"\"\n Reduces a list of parents, and the given path, to a single path who is the closes parent to the existing\n path while stilling being a parent.\n :param accum: latest best parent, or None if no valid parent found\n :param item: current parent being evaluated\n :return: next latest best parent\n \"\"\"\n # None items cannot weigh-in\n if item is None:\n return accum\n # Calculate common path\n prefix = os.path.commonprefix([item, path])\n # Return the previous parent, if new prefix is not the directory itself, or the old one was closer\n return accum if prefix != item or (accum is not None and len(accum) > len(item)) else item\n nearest_parent = functools.reduce(parent_reducer, parents, None)\n # Check that a parent is the true parent\n if nearest_parent is None:\n raise CMakeOrphanException(path)\n return os.path.relpath(path, nearest_parent), nearest_parent",
"def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
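A hypothetical usage of `_get_datacubeprj_dir` from the entry above, building a throwaway directory tree so the snippet runs on its own (the tile name is illustrative):

```python
from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    tile = Path(tmp) / "X0058_Y0056"          # illustrative tile folder
    tile.mkdir()
    (tile / "datacube-definition.prj").write_text("PROJCS[...]")
    print(_get_datacubeprj_dir(tmp))          # prints .../X0058_Y0056
```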
Create a vocabulary from the training directory and return a sorted vocabulary list | def create_vocabulary(directory, cutoff):
top_level = os.listdir(directory)
a = cutoff
vocab = {}
for d in top_level:
subdir = d if d[-1] == '/' else d+'/'
files = os.listdir(directory+subdir)
for f in files:
with open(directory+subdir+f,'r', encoding="utf-8") as doc:
for word in doc:
word = word.strip()
if not word in vocab and len(word) > 0:
vocab[word] = 1
elif len(word) > 0:
vocab[word] += 1
return sorted([word for word in vocab if vocab[word] >= cutoff]) | [
"def create_vocabulary(directory, cutoff):\n top_level = os.listdir(directory)\n vocab = {}\n for d in top_level:\n subdir = d if d[-1] == '/' else d+'/'\n files = os.listdir(directory+subdir)\n for f in files:\n with open(directory+subdir+f,'r') as doc:\n for word in doc:\n word = word.strip()\n if not word in vocab and len(word) > 0:\n vocab[word] = 1\n elif len(word) > 0:\n vocab[word] += 1\n return sorted([word for word in vocab if vocab[word] >= cutoff])",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n lower=True, tokenizer=None, normalize_digits=True):\n \n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n if tokenizer == 'nltk':\n tokens = nltk.word_tokenize(line)\n elif tokenizer == 'basic':\n tokens = basic_tokenizer(line)\n else:\n tokens = line.strip().split()\n for w in tokens:\n # add lower casing\n if lower: w = w.lower() \n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n word = _UNF if w[-1]=='-' else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")",
"def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n print(\"creating vocab from\",data_path)\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n \n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))",
"def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)",
"def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)",
"def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])",
"def getVocabularyAndCreateFile(inputFilename, outputFilename): \n\n dictionary = create_description_dictionary(inputFilename)\n dictionary = clean_descriptions(dictionary)\n vocabulary = create_vocabulary(dictionary)\n create_clean_description_file(dictionary, outputFilename)\n return vocabulary",
"def write_vocabulary(vocabulary_path, data_path, tokenizer=list):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 5000 == 0:\n print(\" processing line %d\" % counter)\n tokens = line.strip().split()\n assert len(tokens) == 1\n tokens = tokenizer(tokens[0]) #split into single characters\n for word in tokens:\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n\n vocab_list = _START_VOCAB + sorted(vocab)\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))",
"def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary",
"def build_vocab(data, vocab_dir, vocab_size=400001):\n print('Building vocabulary...')\n\n all_data = [] # group all data\n for content in data:\n all_data.extend(content.split())\n\n counter = Counter(all_data) # count and get the most common words\n count_pairs = counter.most_common(vocab_size - 1)\n words, _ = list(zip(*count_pairs))\n\n words = ['<PAD>'] + list(words) # add a padding with id 0 to pad the sentence to same length\n open_file(vocab_dir, 'w').write('\\n'.join(words) + '\\n')",
"def _build_vocab(filename, vocab_path, vocab_size):\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n words = words[:vocab_size]\n with open(vocab_path, \"w\") as f:\n f.write(\"\\n\".join(words))",
"def create_vocab(data_files, vocab_fname):\n chars = set()\n for data_fname in data_files:\n with io.open(data_fname, 'r', encoding='utf8') as fp:\n raw = fp.read().lower()\n chars.update(raw)\n\n vocab = list(chars - set(['\\t', '\\n'])) + SPECIALS\n tf.logging.info('Creating vocab file..')\n with io.open(vocab_fname, 'w', encoding='utf8') as fp:\n fp.write('\\n'.join(vocab))",
"def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]",
"def create_voc_train_lst_txt():\n voc_train_data_lst = __get_data_list(VOC_TRAIN_TXT)\n with open(VOC_TRAIN_LST_TXT, mode='w') as f:\n for id_ in voc_train_data_lst:\n id_ = id_.strip()\n img_ = os.path.join(VOC_IMG_DIR, id_ + '.jpg')\n anno_ = os.path.join(VOC_ANNO_GRAY_DIR, id_ + '.png')\n f.write(img_ + ' ' + anno_ + '\\n')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
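As a usage aside for the record above: the create_vocabulary document treats each line of a training file as a single token and keeps tokens whose corpus count reaches the cutoff. The sketch below is illustrative only; the corpus path, its class subdirectories, and the cutoff value are assumptions, not part of the record.

# Minimal sketch, assuming create_vocabulary from the record above is in scope
# and that "corpus/" holds class subdirectories (e.g. corpus/pos/, corpus/neg/)
# whose files contain one token per line.
vocab = create_vocabulary("corpus/", cutoff=2)  # keep tokens seen at least twice
print(len(vocab), vocab[:10])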
Return the class conditional probability of label over all words, with smoothing | def p_word_given_label(vocab, training_data, label):
    smooth = 1  # smoothing factor
    word_prob = {}
    total_word = 0
    word_prob[None] = 0
    for dic in training_data:
        # Ensure every word seen anywhere in the training data has an entry
        for w in dic['bow']:
            if w not in word_prob:
                word_prob[w] = 0
        # Only documents carrying the requested label contribute to the counts
        if dic["label"] == label:
            for w, count in dic["bow"].items():
                if w in vocab:
                    word_prob[w] += count
                else:
                    # Out-of-vocabulary words are pooled under the None key
                    word_prob[None] += count
                total_word += count
    # Convert raw counts to add-one smoothed log probabilities
    for h in word_prob:
        word_prob[h] = math.log(word_prob[h] + smooth) - math.log(total_word + smooth * (len(vocab) + 1))
    return word_prob | [
"def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n wordCnt = 0\n word_prob = {}\n\n for word in vocab:\n word_prob[word] = smooth\n \n word_prob[None] = smooth\n\n for data in training_data:\n if data['label'] == label:\n for word in data['bow']:\n wordCnt += data['bow'][word]\n if word in vocab:\n word_prob[word] += data['bow'][word]\n else:\n word_prob[None] += data['bow'][word]\n\n for word in word_prob:\n word_prob[word] = math.log(word_prob[word]) - math.log(wordCnt + smooth * (len(vocab) + 1))\n\n return word_prob",
"def __probWordGivenLabel(self, word, label):\n ## Get no. of occurrences of word in label\n if word not in self.db.labelToWordToFreq[label]:\n freqInLabel = 0\n else:\n freqInLabel = self.db.labelToWordToFreq[label][word]\n\n ## Get total count of words in label\n totalWordCountInLabel = sum(self.db.labelToWordToFreq[label].values())\n\n ## Find probability of word coming up in class 'label', using Laplace Smoothing\n return float(freqInLabel + self.k) / (totalWordCountInLabel + (self.k * len(self.db.wordToTotalFreq)))",
"def prior(training_data, label_list):\n smooth = 1 # smoothing factor\n logprob = {}\n labelDict = {}\n\n for label in label_list:\n if not label in labelDict:\n labelDict[label] = smooth\n \n for data in training_data:\n currLabel = data['label']\n labelDict[currLabel] += 1\n\n for label in label_list:\n logprob[label] = math.log(labelDict[label] / (len(training_data) + 2))\n\n return logprob",
"def get_conditional_prob(features, labels):\n train_instances_num = len(features)\n words_num = len(features[0])\n # abusive stores p(c_1)\n abusive = sum(labels) / len(labels)\n # we do not use zeros\n abuse_per_word, non_abuse_per_word = np.ones(words_num), np.ones(words_num)\n abuse_words_num, non_abuse_words_num = 2., 2.\n for i in range(train_instances_num):\n if labels[i] == 1:\n abuse_per_word += features[i]\n abuse_words_num += sum(features[i])\n else:\n non_abuse_per_word += features[i]\n non_abuse_words_num += sum(features[i])\n # element-wise (we use log() to avoid underflow)\n # abuse_probs stores p(w_i | c_1) for each word i (the label is abusive)\n abuse_probs = np.log(abuse_per_word / abuse_words_num)\n # non_abuse_probs stores p(w_i | c_0) for each word i (the label is non-abusive)\n non_abuse_probs = np.log(non_abuse_per_word / non_abuse_words_num)\n return non_abuse_probs, abuse_probs, abusive",
"def __probWordGivenNotLabel(self, word, label):\n ## Get no. of occurrences of word not in label\n if word not in self.db.wordToTotalFreq.keys():\n totalOccurrences = 0\n else:\n totalOccurrences = self.db.wordToTotalFreq[word]\n\n ## Get no. of occurrences of word IN label\n if word not in self.db.labelToWordToFreq[label]:\n freqInClass = 0\n else:\n freqInClass = self.db.labelToWordToFreq[label][word]\n\n occurrencesNotInClass = totalOccurrences - freqInClass\n\n ## Get total count of words not in clazz\n totalWordCountNotInClazz = self.db.totalWordCount - sum(self.db.labelToWordToFreq[label].values())\n\n ## Find probability of word coming up in clazz, using Laplace Smoothing with k = 1\n return float(occurrencesNotInClass + 1) / (totalWordCountNotInClazz + len(self.db.wordToTotalFreq))",
"def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass",
"def prob_words(context, vocab, temperature=1.0):\n dot = np.dot(vocab, context)\n return _softmax(dot / temperature)",
"def classify(self, sText):\n\n target_tokens = self.tokenize(sText)\n\n # initialize probabilities\n prob_pos = 0.0\n prob_neg = 0.0\n\n # calculate probabilities for each word\n for token in target_tokens:\n word = token.lower().strip()\n # add one smoothing\n freq_pos = 1\n freq_neg = 1\n\n if self.positive_dict.has_key(word):\n freq_pos += float(self.positive_dict[word])\n if self.negative_dict.has_key(word):\n freq_neg += float(self.negative_dict[word])\n\n prob_pos += math.log(freq_pos/float(sum(self.positive_dict.values())))\n prob_neg += math.log(freq_neg/float(sum(self.negative_dict.values())))\n\n # calculate probabilities of bigrams\n for i in range(len(target_tokens)-2):\n phrase = ''.join(target_tokens[i:i+2]).lower().strip()\n freq_pos = 1\n freq_neg = 1\n if self.two_gram_p.has_key(phrase):\n freq_pos += float(self.two_gram_p[phrase])\n if self.two_gram_n.has_key(phrase):\n freq_neg += float(self.two_gram_n[phrase])\n\n prob_pos += math.log(freq_pos/float(sum(self.two_gram_p.values())))\n prob_neg += math.log(freq_neg/float(sum(self.two_gram_n.values())))\n\n # determine positive/negative/neutral by comparing probabilities\n if prob_pos > prob_neg:\n result = \"positive\"\n else:\n result = \"negative\"\n # else:\n # result = \"neutral\"\n\n return result",
"def predict_probability(text, model=MODEL):\n return model.predict_proba([text])[:, 1][0]",
"def predict(self, corpus):\n predict_labels = []\n for index in range(len(corpus.text)):\n word_list = corpus.text[index]\n predict_label = []\n for i in range(self.features):\n p_tc_positive = []\n p_tc_negative = []\n for word in word_list.words:\n try:\n p_tc_positive.append(math.log10(self.model[i]['p_tc_positive'][word]))\n except KeyError:\n p_tc_positive.append(math.log10(1 / (self.model[i]['positive_term'] + self.model[i]['bins'])))\n try:\n p_tc_negative.append(math.log10(self.model[i]['p_tc_negative'][word]))\n except KeyError:\n p_tc_negative.append(math.log10(1 / (self.model[i]['negative_term'] + self.model[i]['bins'])))\n if i == 0:\n p_lc_positive, p_lc_negative = 0, 0\n else:\n try:\n p_lc_positive = math.log10(self.model[i]['p_lc_positive'][tuple(predict_label)])\n except KeyError:\n p_lc_positive = math.log10(1 / (self.model[i]['lc_positive_term'] + self.model[i]['lc_bins']))\n try:\n p_lc_negative = math.log10(self.model[i]['p_lc_negative'][tuple(predict_label)])\n except KeyError:\n p_lc_negative = math.log10(1 / (self.model[i]['lc_negative_term'] + self.model[i]['lc_bins']))\n\n if cmap(self.model[i]['pc_positive'], p_tc_positive, p_lc_positive) > cmap(self.model[i]['pc_negative'], p_tc_negative, p_lc_negative):\n predict_label.append(1)\n else:\n predict_label.append(0)\n # print(cmap(self.model[i]['pc_positive'], p_tc_positive, p_lc_positive))\n # print(cmap(self.model[i]['pc_negative'], p_tc_negative, p_lc_negative))\n predict_labels.append(predict_label)\n\n return predict_labels",
"def classify(self, sText):\n threshold = .1\n posCount = float(sum(self.posFreqDict.itervalues()))\n negCount = float(sum(self.negFreqDict.itervalues()))\n negProbability=0.0\n posProbability=0.0\n for word in self.tokenize(sText):\n if word in self.posFreqDict:\n posProbability+= log10(float( (1.0+float(self.posFreqDict[word]))/posCount))\n else:\n posProbability+=log10(float(1.0/posCount))\n if word in self.negFreqDict:\n negProbability+= log10(float( (1.0+float(self.negFreqDict[word]))/negCount))\n else:\n negProbability+= log10(float(1.0/negCount))\n if abs(posProbability-negProbability)< .1 :\n return \"neutral\"\n elif posProbability>negProbability:\n return \"positive\"\n else:\n return \"negative\"",
"def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p",
"def classify(self, words):\n\t# Write code here\n\tfemaleProb = 0.0\n\tmaleProb = 0.0\n\tallDocumentCount = sum(self.classCounts.values())\n\n\n\tfemaleProb += math.log(self.classCounts['female']) - math.log(allDocumentCount) #class probabilities\n\tmaleProb += math.log(self.classCounts['male']) - math.log(allDocumentCount)\n\n\tif self.BOOLEAN_NB:\n\t\t\n\t\t#do boolean naive bayes\n\t\tnoDuplicates = []\n\t\tfor i in words:\n\t\t\tif i not in noDuplicates:\n\t\t\t\tnoDuplicates.append(i)\n\t\twords = noDuplicates[:]\n\n\t\ttempfemale = math.log(self.binaryTotalCount['female'] + len(self.allWords))\n\t\ttempmale = math.log(self.binaryTotalCount['male'] + len(self.allWords))\n\t\tfor word in words:\n\t\t\tfemaleProb += math.log(self.binaryWordCounts['female'][word] + 1)\n\t\t\tfemaleProb -= tempfemale\n\n\t\t\tmaleProb += math.log(self.binaryWordCounts['male'][word] + 1)\n\t\t\tmaleProb -= tempmale\n\n\n\telif self.BEST_MODEL:\n\t\tnoDuplicates = []\n\t\tfor i in words:\n\t\t\tif i not in noDuplicates:\n\t\t\t\tnoDuplicates.append(i)\n\t\twords = noDuplicates[:]\n\t\ttempfemale = math.log(self.binaryTotalCount['female'] + 5*len(self.allWords))\n\t\ttempmale = math.log(self.binaryTotalCount['male'] + 5*len(self.allWords))\n\n\t\tfor word in words:\n\t\t\tfemaleProb += math.log(self.binaryWordCounts['female'][word] + 5)\n\t\t\tfemaleProb -= tempfemale\n\n\t\t\tmaleProb += math.log(self.binaryWordCounts['male'][word] + 5)\n\t\t\tmaleProb -= tempmale\n\n\t\t#personal heuristics\n\n\telse:\n\t\tif (self.FILTER_STOP_WORDS):\n\t\t\twords = self.filterStopWords(words)\n\t\t#regular naive bayes\n\n\t\ttempfemale = math.log(self.totalCount['female'] + len(self.allWords))\n\t\ttempmale = math.log(self.totalCount['male'] + len(self.allWords))\n\t\tfor word in words:\n\n\t\t\tfemaleProb += math.log(self.wordCounts['female'][word] + 1) #one-add smoothing\n\t\t\tfemaleProb -= tempfemale\n\n\t\t\tmaleProb += math.log(self.wordCounts['male'][word] + 1) #one-add smoothing\n\t\t\tmaleProb -= tempmale #denominator\n\n\n\tif femaleProb >= maleProb:\n\t\treturn 'female'\n\n\treturn 'male'",
"def __probLabel(self, label):\n if label not in self.db.labelToDocsCount.keys():\n print \"Error: label %s not in DB\" % label\n exit()\n\n # Use Laplacian smoothing\n return float(self.db.labelToDocsCount[label] + 1) / (self.db.allDocsCount + 2)",
"def get_predicted_label(probs):\n if probs[-1]>.6:\n return 'Track'\n else:\n return 'Background'",
"def _findClassProbabilities(training_labels):\r\n label_count_dict = Counter(training_labels)\r\n total_label_size = len(training_labels)\r\n \r\n for label, count in label_count_dict.iteritems():\r\n label_count_dict[label] = count / float(total_label_size)\r\n\r\n return label_count_dict",
"def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
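To make the record above concrete, here is a hedged sketch of calling p_word_given_label; the toy vocabulary, bag-of-words documents, and labels are invented for illustration and are not part of the record.

# Minimal sketch, assuming p_word_given_label from the record above is in scope.
import math  # the function itself relies on math.log

vocab = ["good", "bad"]
training_data = [
    {"label": "pos", "bow": {"good": 2, "excellent": 1}},
    {"label": "neg", "bow": {"bad": 3}},
]
log_probs = p_word_given_label(vocab, training_data, "pos")
# log_probs holds add-one smoothed log probabilities for the words seen in
# training plus None, which collects out-of-vocabulary mass.
print(log_probs["good"], log_probs[None])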
Find all pairs of distinct indices whose concatenated strings form a palindrome. | def palindromePairs(lst):
results = []
for i, e1 in enumerate(lst):
for j, e2 in enumerate(lst):
if i != j:
if isPalindrome(e1+e2):
results.append((i, j))
return results | [
"def palindromo(start, end):\n return [i for i in range(start, end + 1) if str(i) == str(i)[::-1]]",
"def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n",
"def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal",
"def find_palindromes(self):\n\t\tself.square_palindromes = [x for x in self.squares if self.is_palindrome(x)]",
"def revp(s):\n palindromes = []\n for k in range(12, 3, -1):\n for i in range(len(s) - k + 1):\n substr = s[i:i+k]\n if substr == revc(substr):\n palindromes.append((i, k))\n return palindromes",
"def find_palindromes(text=TEXT):\n palindromes = []\n for i in range(0, len(text)-1):\n for j in range(i+2, len(text)+1):\n chain = text[i:j]\n if chain == chain[::-1]:\n palindromes.append(chain)\n return palindromes",
"def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True",
"def palindrom(sir):\n return sir == sir[::-1]",
"def palindromes(n: int) -> int:\n # 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101\n # 101 -> 111 -> 121 -> 131 -> ... -> 191 -> 202 -> 212\n # 989 -> 999 -> 1001 -> 1111 -> 1221\n # 9889 -> 9999 -> 10001 -> 10101 -> 10201\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'\n else:\n pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'\n if prev <= pal:\n yield pal\n \n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even: n //= 10\n s = str(n)",
"def ispseduoPalindrom(string):\n c_string = Counter(string)\n odds = sum([v % 2 for v in c_string.values()])\n return odds < 2",
"def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1",
"def can_form_palindrome(s):\n n_odd = 0\n for count in collections.Counter(s).values():\n if count % 2 == 1:\n n_odd += 1\n return n_odd <= 1",
"def detect_even_palindrome(arr):\n # END OF CONTEXT\n return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']\n # END OF SOLUTION",
"def palindrome_products(n_digits=2):\n upper = int('9' * n_digits)\n lower = int('9' * (n_digits - 1))\n for a in range(upper, lower, -1):\n for b in range(upper, lower, -1):\n product = a * b\n if is_palindrom(product):\n # print(\"{} x {} = {}\".format(a, b, product))\n yield product",
"def generate_palindromes(start, stop, step = 1): \r\n for x in range(start, stop, step):\r\n if str(x) == str(x)[::-1]:\r\n yield x",
"def repeated_palindrome(palindromes_list):\n # the list is ordered in the reversed form (long to short)\n ordered_palindrome = sorted(palindromes_list)\n longest_first = ordered_palindrome[::-1]\n # initialize a new list to receive unique plaindromes data\n pal_list = [longest_first[0]]\n # the longest palindrome cannot fit in any other sequence \n # iterates over the longest_first original palindromes\n # get the start and end positions \n for data in longest_first:\n start = data[1]\n end = start + data[0]\n # iterate through the pal_list and \n # compare the start and end of the potential and palindromes \n # to check if the potential palindrome is unique.\n unique_palindrome = None\n for dat in pal_list:\n start_unique = dat[1]\n end_unique = start_unique + dat[0]\n # statement should test to check if the test palindrome fits\n # inside any of the identified 'real/unique' palindromes.\n if start >= start_unique and end <= end_unique:\n # if the palindrome tested fits inside\n unique_palindrome = False\n break\n else:\n # other wise it is unique\n unique_palindrome = True\n if unique_palindrome:\n # check if if it is not in the list\n if data not in pal_list:\n pal_list += [data]\n return pal_list",
"def check_palindrome():\n result = []\n for num in range(100000, 1000000):\n if (\n is_palindrome(str(num)[2:])\n and is_palindrome(str(num + 1)[1:])\n and is_palindrome(str(num + 2)[1:5])\n and is_palindrome(str(num + 3))\n ):\n result.append(num)\n print(result)",
"def find_palindrome_pivot(s):\n\t#initialize index\n\tindex = 0\n\tcount = 0\n\n\t#remove whitespace\n\tnoWhitespace = ''.join(s.split())\n\n\t\n\t#if length is even, divide length in 2 and subtract 1, else divide by 2\n\tif len(noWhitespace) %2 == 0:\n\t\tindex = int(len(noWhitespace)/2) - 1\n\telse:\n\t\tindex = int(len(noWhitespace)/2)\n\t\n\tif s != noWhitespace:\n\t\tfor i in range(len(s)):\n\t\t\tif s[i] != \" \":\n\t\t\t\tcount += 1\n\t\t\t\tif count == index + 1:\n\t\t\t\t\tindex = i\n\t\t\t\t\tbreak\t\t\n\treturn index",
"def palindromeIterative(string):\n string = string.lower()\n length = len(string)\n number = 0\n for i in range(length):\n for j in range(i+1, length+1):\n tmp = string[i:j]\n if tmp == tmp[::-1] and len(tmp) > 1:\n number += 1\n return number"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
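The palindromePairs document above calls an isPalindrome helper that is not included in the record, so the definition below is an assumption supplied purely to make the function runnable end to end.

# Assumed helper (not part of the record): true when a string reads the same in both directions.
def isPalindrome(s):
    return s == s[::-1]

print(palindromePairs(["bat", "tab", "cat"]))  # [(0, 1), (1, 0)]: "battab" and "tabbat" are palindromes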
Convert a dictionary to an Earth Engine feature, server side | def dict_to_feature(d):
f = ee.Feature(None,ee.Dictionary(d))
return f | [
"def _preprocess(self, feature_dict):\n return feature_dict",
"def readings_dict_to_features(dict, raw):\n final = None\n for key in sorted(dict):\n converted = reading_to_feature(dict[key], raw)\n if final is None:\n final = converted\n else:\n final = np.concatenate([final, converted])\n return final",
"def _convert_features_dict(features):\n\n result = []\n\n for io, lst in features.items():\n for feature in lst:\n feature['io'] = io\n result.append(feature)\n\n return result",
"def map_to_app_features(self, app):\n app['features'] = []\n for form_feature in self.features:\n feature = {}\n if form_feature.feature_name.data:\n feature['name'] = form_feature.feature_name.data\n if form_feature.feature_version.data:\n feature['version'] = form_feature.feature_version.data\n if form_feature.feature_provisioner.data:\n feature['provisioner'] = form_feature.feature_provisioner.data\n if form_feature.feature_parameters.data:\n json_ob = json.loads(form_feature.feature_parameters.data)\n if json_ob:\n feature['parameters'] = json_ob\n feature['version'] = ''\n else:\n feature['parameters'] = {}\n if feature:\n app['features'].append(feature)",
"def add_engineered(features):\n features[\"londiff\"] = features[\"dropofflon\"] - features[\"pickuplon\"]\n features[\"latdiff\"] = features[\"dropofflat\"] - features[\"pickuplat\"]\n features[\"euclidean\"] = tf.math.sqrt(\n features[\"londiff\"]**2 + features[\"latdiff\"]**2)\n return features",
"def dict_to_example(dictionary):\n features = {}\n for k, v in six.iteritems(dictionary):\n features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n return tf.train.Example(features=tf.train.Features(feature=features))",
"def _feature_collection_from_features(features):\n layer = {\n \"type\": \"FeatureCollection\",\n \"features\": features\n }\n return layer",
"def sites_geojson():\n\n with Config() as config:\n with db.Connection(config) as con:\n features = con.features()\n features = list(features)\n return flask.jsonify(features)",
"def process_feature_file(filename: str) -> Dict[str, Any]:\n feature = json.loads(open(filename).read())\n template = feature['query']\n name = feature['name']\n params = feature['params']\n feature_spec = {\n 'name': name,\n 'template': template,\n 'params': params\n }\n return feature_spec",
"def xyz_json_to_feature(feat_json, fields):\n\n names = set(fields.names())\n\n qattrs = list()\n\n # handle xyz id\n v = feat_json.get(XYZ_ID, \"\")\n val = QVariant(v)\n qattrs.append([QGS_XYZ_ID, val])\n\n if QGS_XYZ_ID not in names:\n fields.append(make_field(QGS_XYZ_ID, val))\n\n props = feat_json.get(\"properties\")\n if isinstance(props, dict):\n props = rename_special_props(props) # rename fid in props\n attrs = list(_attrs(props))\n for k, v in attrs:\n val = QVariant(v)\n # if not val.isValid():\n # val = QVariant(\"\")\n if not val.type() in valid_fieldTypes:\n for cast in [QVariant.Int, QVariant.String]:\n if val.canConvert(cast):\n val.convert(cast)\n break\n if not val.type() in valid_qvariant:\n print_qgis(\"Field '%s': Invalid type: %s. Value: %s\" % (k, val.typeName(), val))\n continue\n if k not in names:\n fields.append(make_field(k, val))\n qattrs.append([k, val])\n\n feat = QgsFeature(fields)\n\n for k, v in qattrs:\n feat.setAttribute(k, v)\n\n geom = feat_json.get(\"geometry\")\n if geom is not None:\n s = json.dumps(geom)\n geom_ = QgsGeometry.fromWkt(ogr.CreateGeometryFromJson(s).ExportToWkt())\n feat.setGeometry(geom_)\n\n return feat",
"def json_to_kml():",
"def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict",
"def dict_to_tfexample(mol_dict):\n example = tf.train.Example()\n feature_map = example.features.feature\n feature_map[fmap_constants.ATOM_WEIGHTS].float_list.value.extend(\n mol_dict[fmap_constants.ATOM_WEIGHTS])\n feature_map[fmap_constants.ATOM_IDS].int64_list.value.extend(\n mol_dict[fmap_constants.ATOM_IDS])\n feature_map[fmap_constants.ADJACENCY_MATRIX].int64_list.value.extend(\n mol_dict[fmap_constants.ADJACENCY_MATRIX])\n feature_map[fmap_constants.MOLECULE_WEIGHT].float_list.value.append(\n mol_dict[fmap_constants.MOLECULE_WEIGHT])\n feature_map[fmap_constants.DENSE_MASS_SPEC].float_list.value.extend(\n mol_dict[fmap_constants.DENSE_MASS_SPEC])\n feature_map[fmap_constants.INCHIKEY].bytes_list.value.append(\n mol_dict[fmap_constants.INCHIKEY].encode('utf-8'))\n feature_map[fmap_constants.MOLECULAR_FORMULA].bytes_list.value.append(\n mol_dict[fmap_constants.MOLECULAR_FORMULA].encode('utf-8'))\n feature_map[fmap_constants.NAME].bytes_list.value.append(\n mol_dict[fmap_constants.NAME].encode('utf-8'))\n feature_map[fmap_constants.SMILES].bytes_list.value.append(\n mol_dict[fmap_constants.SMILES].encode('utf-8'))\n\n if fmap_constants.INDEX_TO_GROUND_TRUTH_ARRAY in mol_dict:\n feature_map[\n fmap_constants.INDEX_TO_GROUND_TRUTH_ARRAY].int64_list.value.append(\n mol_dict[fmap_constants.INDEX_TO_GROUND_TRUTH_ARRAY])\n\n for fp_len in ms_constants.NUM_CIRCULAR_FP_BITS_LIST:\n for rad in ms_constants.CIRCULAR_FP_RADII_LIST:\n for fp_type in fmap_constants.FP_TYPE_LIST:\n fp_key = ms_constants.CircularFingerprintKey(fp_type, fp_len, rad)\n feature_map[str(fp_key)].float_list.value.extend(mol_dict[fp_key])\n\n return example",
"def save_feature(self):\n feature_dict = {\n 'name': self.name,\n 'preActionDes': self.pre_action_des,\n 'inActionDes': self.in_action_des,\n 'postActionDes': self.post_action_des,\n 'actionable': self.actionable,\n 'usable': self.usable,\n 'state': self.state,\n 'featureId': self.feature_id\n }\n return feature_dict",
"def enhance_metadata(metadata, features='all'):\n\n # available options\n ortographic_features = ['w_length','n_vowels','n_consonants']\n lexical_features = ['uni_freq', 'bi_freq', 'func_word','count']\n position_features = ['position','position_end','is_first_word','is_last_word']\n\n # make list of features\n if features == 'all': features = ortographic_features +lexical_features + position_features \n\n # use ws clean to lower case\n words = [word.lower() for word in metadata['word'].values]\n\n # itereate features and fill metadata\n for feature in features:\n # ORTOGRAPHIC ##############################\n if feature == 'w_length': \n metadata[feature] = w_length(words)\n if feature == 'n_consonants':\n metadata[feature] = n_consonants(words)\n if feature == 'n_vowels':\n metadata[feature] = n_vowels(words)\n\n # LEXICAL ###################################\n if feature == 'uni_freq':\n metadata[feature] = unigram(words)\n if feature == 'bi_freq':\n metadata[feature] = bigram(words)\n if feature == 'func_word':\n metadata[feature] = function_word(words)\n if feature == 'count':\n metadata[feature] = count(words)\n\n # POSITION ###################################\n if feature == 'position':\n metadata[feature] = position(words)\n if feature == 'position_end':\n metadata[feature] = position_end(words)\n if feature == 'is_first_word':\n metadata[feature] = first_word(words)\n if feature == 'is_last_word':\n metadata[feature] = last_word(words)\n\n return metadata",
"def encode_feature(self, dataset_structure, old_feature, new_feature, encoding_map):\n for key, value in dataset_structure.items():\n if isinstance(value, dict):\n self.encode_feature(value, old_feature, new_feature, encoding_map)\n else:\n value[new_feature] = value[old_feature].map(encoding_map)",
"def convert_series_to_feature(series: Types.SeriesObj,) -> Dict[str, tf.train.Feature]:\n try:\n image, metadata = series\n dicom_id = f\"{metadata.get('Study Instance UID', 'unknown_study')}/{metadata.get('Series Instance UID', 'unknown_series')}/\"\n\n if metadata.get(\"flags\") and metadata.get(\"time\"):\n name = f\"time{metadata.get('time')[1:]}/{'_'.join(metadata.get('flags'))}/\"\n else:\n name = dicom_id\n return dict(\n [\n (f\"{name}{k}\", v)\n for (k, v) in {\n \"image\": floatList_feature(image.flatten().tolist()),\n \"dx\": float_feature(metadata.get(\"Pixel Spacing\")[0]),\n \"dy\": float_feature(metadata.get(\"Pixel Spacing\")[1]),\n \"dz\": float_feature(metadata.get(\"Spacing Between Slices\")),\n \"is_seg\": int64_feature(int(metadata.get(\"Modality\") == \"SEG\")),\n \"right\": int64_feature(int(metadata.get(\"Laterality\") == \"R\")),\n \"shape\": int64List_feature(image.shape),\n \"dicom_id\": bytes_feature(dicom_id.encode()),\n \"Image Position (Patient)\": floatList_feature(metadata.get(\"Image Position (Patient)\")),\n \"Image Orientation (Patient)\": floatList_feature(metadata.get(\"Image Orientation (Patient)\")),\n \"z_bound\": floatList_feature(metadata.get(\"slice_z\")),\n }.items()\n ]\n )\n except Exception as e:\n _logger.error(\n f\"Error making Series Features. Series meta: {metadata}. Error: {str(e)}\"\n )\n return {}",
"def convert_study_to_feature(study: List[Types.SeriesObj]) -> List[Dict[str, tf.train.Feature]]:\n return [convert_series_to_feature(s) for s in study]",
"def feature_extractor():\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
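A hedged usage note for the record above: dict_to_feature wraps a plain Python dict in a geometry-less ee.Feature. The sketch assumes an authenticated Earth Engine session, and the property names are made up for illustration.

import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured
feature = dict_to_feature({"year": 2020, "month": 7, "units": "m"})
print(feature.getInfo()["properties"])  # {'year': 2020, 'month': 7, 'units': 'm'}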
Convert volume to flux | def volumeToFlux(volume_image):
image = ee.Image(volume_image)
flux_image = image.divide(ee.Image(AREA_PFAF6_30MIN)).multiply(1e6).copyProperties(image)
flux_image = flux_image.set("units","m")
flux_image = flux_image.set("convertedToFlux", 1)
return flux_image | [
"def flux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n x = queryFlux(source,freq,deltafreq,daysback)\n return x.flux",
"def flux(self, x):\n return self.cal_spec.get_flux(self(x))",
"def flux(self, q):\n q1, q2 = q\n if q1 > 0:\n u = q2/q1\n else:\n u = 0\n return np.array([q1*u, q1 * u*u + 0.5*9.81 * q1*q1])",
"def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume",
"def flux(self, u):\n flu = np.zeros((3,2), dtype=np.float64)\n flu[0,0] = u[1]\n flu[1,0] = u[0] * (u[1]/u[0])**2 + 0.5 * 9.81*u[0]**2\n flu[2,0] = u[1] * u[2]/u[0] #FIXME attenzione che c'è il punto controllare se sono scalari o vettori'\n flu[0,1] = u[2]\n flu[1,1] = u[2] * u[1]/u[0]\n flu[2,1] = u[0] * (u[2]/u[0])**2 + 0.5 * 9.81*u[0]**2\n return flu",
"def ns_to_flux(self, ns: float, index: float):\n\n raise NotImplementedError",
"def get_flux(self):\n return self._flux",
"def itensity_normalize_one_volume(volume):\n \n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out",
"def convert_flux(nu, flux, target_unit):\n\n curr_unit = flux.unit\n\n if curr_unit.is_equivalent(u.erg / u.s):\n flux = flux / sed.distance ** 2\n elif curr_unit.is_equivalent(u.Jy):\n flux = flux * nu\n elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert {0} to ergs/cm^2/s\" % (flux.unit))\n\n # Convert to requested unit\n\n if target_unit.is_equivalent(u.erg / u.s):\n flux = flux * sed.distance ** 2\n elif target_unit.is_equivalent(u.Jy):\n flux = flux / nu\n elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert %s to %s\" % (curr_unit, unit_flux))\n\n return flux.to(target_unit)",
"def get_converted_fluxes(self):\n return self._corrected_fluxes",
"def calculateFlux(state,wv):\n massFlux = state[1]\n momFlux = state[1]*wv[0] + wv[1]\n enFlux = (state[2] + wv[1])*wv[0]\n \n return np.array([ massFlux,momFlux,enFlux])",
"def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized",
"def normalize_flux(self):\n fmax = 0\n fmin = 1e99\n for n in self.graph:\n if n.flux > fmax:\n fmax = n.flux\n if n.flux < fmin:\n fmin = n.flux\n for n in self.graph:\n n.flux = (n.flux-fmin)/(fmax-fmin)",
"def flux():\n delta = 0.01 # film thickness, [dm]\n c = pre * 10 ** 2 / (R * tem) # total concentration calculated by ideal gas equation, in [mol/L]\n D12 = 0.001626528 / pre # HCl diffusion in Air, [dm2/s] @296K\n D13 = 3e-7 # HCl gas diffusion in water, [dm2/s] @296K\n D23 = 1.5e-7 # CH4 gas diffusion in water, [dm2/s] @296K\n N1 = ((x1_bar * x2d * D23) / (x2_bar * delta * D13) - x1_bar / delta) / \\\n (x2_bar / (D12 * c) + x3_bar / (D13 * c) + D23 * x1_bar / (D12 * D13 * c))\n # print 'Flux of HCl into water', abs(N1), [mol/(dm2*sec)]\n return N1",
"def normalisation(wav, flux):\n \n return flux / flux.max() # flux maximal = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm",
"def mag2flux(magnitude, band):\n\n fnu = 10 ** (-(magnitude + 48.6) / 2.5) * erg / s / cm ** 2 / Hz\n flux = fnu.to(erg / s / cm ** 2 / angstrom,\n equivalencies=spectral_density(band.effective_wavelength))\n\n return flux",
"def toa_incoming_shortwave_flux(srad0, srad0u):\n return srad0 - srad0u",
"def magtoflux(_mag, _id):\n return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))",
"def uvflux(file,start,step) :\n doshift = False\n uvflags = \"sdlcef\"\n handle = uvopen(file,\"old\")\n uvset(handle,\"data\",\"channel\",1,float(start),float(step),float(step))\n PolIndx = [0] * 14\n sources = [\" \"] * 14\n isrc = -1\n nsrc = 0\n npol = 0\n fluxr = []\n fluxi = []\n amp = []\n amp2 = []\n rms2 = []\n ncnt = []\n PolMin = -9\n PolMax = 4\n for i in range(0,MAXPOL) :\n temp = [0.0] * MAXSRC\n fluxr.append(temp)\n temp = [0.0] * MAXSRC\n fluxi.append(temp)\n temp = [0.0] * MAXSRC\n amp.append(temp)\n temp = [0.0] * MAXSRC\n amp2.append(temp)\n temp = [0.0] * MAXSRC\n rms2.append(temp)\n temp = [0] * MAXSRC\n ncnt.append(temp)\n preamble,data,flags = uvread(handle)\n ipol = -20\n while(len(flags) > 0) :\n ipol = uvrdvri(handle,\"pol\",1)\n if(PolIndx[ipol] == 0) :\n npol += 1\n PolIndx[ipol] = npol\n ipol = PolIndx[ipol]\n t,l,update = uvprobvr(handle,\"source\")\n if(update) :\n source = uvgetvra(handle,\"source\")\n found = False\n if(isrc >= 0) :\n found = source == sources[isrc]\n if(not found) :\n if(source in sources) :\n isrc = sources.index(source)\n found = True\n if(not found) :\n nsrc += 1\n sources[nsrc - 1] = source\n for i in range(0,MAXPOL) :\n fluxr[i][nsrc] = 0.0\n fluxi[i][nsrc] = 0.0\n amp[i][nsrc] = 0.0\n amp2[i][nsrc] = 0.0\n rms2[i][nsrc] = 0.0\n ncnt[i][nsrc] = 0\n isrc = nsrc-1\n sig2 = uvinfo(handle,\"variance\")[0]\n for i in range(0,len(flags)) :\n if(flags[i]) :\n fluxr[ipol][isrc] += data[i].real\n fluxi[ipol][isrc] += data[i].imag\n rms2[ipol][isrc] += sig2\n temp = calculations.cabs(data[i])\n amp[ipol][isrc] += temp\n amp2[ipol][isrc] += temp*temp\n ncnt[ipol][isrc] += 1\n preamble,data,flags = uvread(handle)\n uvclose(handle)\n npol = 0\n p = [0] * MAXPOL\n pp = [0] * MAXPOL\n for j in range(PolMin,PolMax+1) :\n if(PolIndx[j] > 0) :\n p[npol] = j\n pp[npol] = PolIndx[j]\n npol = npol + 1\n for i in range(npol-1,1,-1) :\n print i\n if(abs(p[i]) < abs(p[i-1])) :\n t = p[i]\n p[i] = p[i-1]\n p[i-1] = t\n t = pp[i]\n pp[i] = pp[i-1]\n pp[i-1] = t\n PolCode = \"--\"\n retVal = []\n for isrc in range(0,nsrc) :\n source = sources[isrc]\n for i in range(0,npol) :\n ipol = pp[i]\n if(ncnt[ipol][isrc] > 0) :\n PolCode = polsc2p(p[i])\n fluxr[ipol][isrc] /= ncnt[ipol][isrc]\n fluxi[ipol][isrc] /= ncnt[ipol][isrc]\n vecaver = complex(fluxr[ipol][isrc],fluxi[ipol][isrc])\n vecscat = amp2[ipol][isrc] / (2*ncnt[ipol][isrc])- 0.5*(fluxr[ipol][isrc]**2+fluxi[ipol][isrc]**2)\n vecscat = math.sqrt(abs(vecscat))\n vecamp,vecpha = amphase(vecaver)\n scalamp = amp[ipol][isrc] / ncnt[ipol][isrc]\n scalscat = amp2[ipol][isrc] / ncnt[ipol][isrc]- (amp[ipol][isrc] / ncnt[ipol][isrc])**2\n scalscat = math.sqrt(abs(scalscat))\n sig2 = math.sqrt(rms2[ipol][isrc]/ncnt[ipol][isrc])\n retVal.append([source,PolCode,sig2,complex(fluxr[ipol][isrc],fluxi[ipol][isrc]),vecscat,scalamp,scalscat,ncnt[ipol][isrc]])\n return retVal"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
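For the record above, a brief hedged sketch: the 1e6 factor and the division by the AREA_PFAF6_30MIN area image suggest that volumeToFlux converts a per-cell volume into a depth, and the function tags the result with units "m". The asset id below is hypothetical, and an authenticated Earth Engine session is assumed.

import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured
volume = ee.Image("users/example/monthly_volume_millionm3")  # hypothetical asset id
flux = ee.Image(volumeToFlux(volume))  # cast back to ee.Image; copyProperties returns an ee.Element
print(flux.get("units").getInfo())  # "m"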
Filters an ImageCollection based on year and month | def filter_ic(ic,year,month):
ic_filtered = (ic.filter(ee.Filter.eq("month",month))
.filter(ee.Filter.eq("year",year)))
image = ee.Image(ic_filtered.first())
    return image | [
"def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)",
"def filter_month(data, month, year):\n input_month = str(month).zfill(2)\n input_year = str(year)\n\n month_data = []\n\n for row in data:\n date_as_string = row['inspection_date'][:10]\n month, day, year = date_as_string.split('/')\n if input_month == month and input_year == year:\n month_data.append(row)\n\n return month_data",
"def get_images(self,\n collection,\n bounds=None,\n year=None,\n start_date=None,\n end_date=None,\n start_julian=1,\n end_julian=365,\n index_list=None,\n scale_factor=None,\n **kwargs):\n coll = ee.ImageCollection(collection)\n\n if year is not None:\n start_date = '{}-01-01'.format(str(year))\n end_date = '{}-12-31'.format(str(year))\n\n if bounds is not None:\n coll = coll.filterBounds(bounds)\n if (start_date is not None) and (end_date is not None):\n coll = coll.filterDate(start_date, end_date)\n\n coll = coll.filter(ee.Filter.calendarRange(start_julian, end_julian))\n\n if len(kwargs) > 0:\n for key, value in kwargs.items():\n if key == 'map':\n if value == 'add_indices':\n\n if index_list is not None:\n self.index_list = index_list\n\n if scale_factor is not None:\n self.scale_factor = scale_factor\n\n func = getattr(self, value, None)\n\n if func is not None:\n coll = coll.map(func)\n else:\n warnings.warn('The function {} is not implemented'.format(str(key)))\n return coll",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def test_year_filtering(self):\n # Get a valid date\n entry = Entry.objects.get(id=1)\n params = {\"year\": entry.publication_date.year}\n\n self._test_filtering(**params)",
"def filter_images(images, now, args):\n minSize = 0\n pastDate = None\n\n if args.minSize:\n minSize = (int(re.search('([0-9]*)', args.minSize)[1]) * 1000000)\n if args.minAge:\n pastDate = now - timedelta(days=args.minAge)\n\n if args.minAge and not args.minSize:\n return [image for image in images if image['created'] < pastDate]\n elif not args.minAge and args.minSize:\n return [image for image in images if image['size'] > minSize]\n elif args.minAge and args.minSize:\n return [image for image in images if image['created'] < pastDate if image['size'] > minSize]\n else:\n return images",
"def daily_images(self, day, **kwargs):\n start = datetime(day.year, day.month, day.day)\n end = datetime(day.year, day.month, day.day, 23, 59, 59, 999999)\n for img in self.iter_images(start, end, **kwargs):\n yield img",
"def test_collection_author_year_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n entry = Entry.objects.get(id=1)\n\n # Get a valid collection\n params = {\n \"collection\": collection.id,\n \"author\": entry.first_author.id,\n \"year\": entry.publication_date.year,\n }\n self._test_filtering(**params)",
"def get_S1(\n self,\n year,\n month,\n day,\n tempfilter=True,\n tempfilter_radius=7,\n applylcmask=False,\n mask_globcover=True,\n dualpol=True,\n trackflt=None,\n maskwinter=False,\n masksnow=True,\n explicit_t_mask=None,\n ascending=False,\n maskLIA=True,\n ):\n\n def computeLIA(image):\n # comput the local incidence angle (LIA) based on the srtm and the s1 viewing angle\n # get the srtm\n srtm = ee.Image(\"USGS/SRTMGL1_003\")\n srtm_slope = ee.Terrain.slope(srtm)\n srtm_aspect = ee.Terrain.aspect(srtm)\n # get the S1 incidence angle\n inc = ee.Image(image).select(\"angle\")\n # comput the LIA\n s = srtm_slope.multiply(\n ee.Image.constant(277)\n .subtract(srtm_aspect)\n .multiply(math.pi / 180)\n .cos()\n )\n lia = inc.subtract(\n ee.Image.constant(90).subtract(ee.Image.constant(90).subtract(s))\n ).abs()\n # add band to current image\n return image.addBands(\n lia.select([\"angle\"], [\"lia\"]).reproject(srtm.projection())\n )\n\n def maskterrain(image):\n # mask for terrain, local incidence angle and high and low backscatter\n tmp = ee.Image(image)\n # srtm dem\n if maskLIA == False:\n gee_srtm = ee.Image(\"USGS/SRTMGL1_003\")\n gee_srtm_slope = ee.Terrain.slope(gee_srtm)\n mask = gee_srtm_slope.lt(20)\n else:\n lia = tmp.select(\"lia\")\n mask = lia.gt(20).bitwiseAnd(lia.lt(45))\n mask2 = tmp.lt(0).bitwiseAnd(tmp.gt(-25))\n mask = mask.bitwiseAnd(mask2)\n tmp = tmp.updateMask(mask)\n\n return tmp\n\n def masklc(image):\n # load land cover info\n corine = ee.Image(\"users/felixgreifeneder/corine\")\n\n # create lc mask\n valLClist = [10, 11, 12, 13, 18, 19, 20, 21, 26, 27, 28, 29]\n\n lcmask = (\n corine.eq(valLClist[0])\n .bitwiseOr(corine.eq(valLClist[1]))\n .bitwiseOr(corine.eq(valLClist[2]))\n .bitwiseOr(corine.eq(valLClist[3]))\n .bitwiseOr(corine.eq(valLClist[4]))\n .bitwiseOr(corine.eq(valLClist[5]))\n .bitwiseOr(corine.eq(valLClist[6]))\n .bitwiseOr(corine.eq(valLClist[7]))\n .bitwiseOr(corine.eq(valLClist[8]))\n .bitwiseOr(corine.eq(valLClist[9]))\n .bitwiseOr(corine.eq(valLClist[10]))\n .bitwiseOr(corine.eq(valLClist[11]))\n )\n\n tmp = ee.Image(image)\n\n tmp = tmp.updateMask(lcmask)\n return tmp\n\n def mask_lc_globcover(image):\n\n tmp = ee.Image(image)\n\n # load lc\n glbcvr = ee.Image(\"ESA/GLOBCOVER_L4_200901_200912_V2_3\").select(\"landcover\")\n\n valLClist = [\n 11,\n 14,\n 20,\n 30,\n 40,\n 50,\n 60,\n 70,\n 90,\n 100,\n 110,\n 120,\n 130,\n 140,\n 150,\n 160,\n 170,\n 180,\n 190,\n 200,\n 210,\n 220,\n 230,\n ]\n\n lcmask = (\n glbcvr.eq(valLClist[0])\n .bitwiseOr(glbcvr.eq(valLClist[1]))\n .bitwiseOr(glbcvr.eq(valLClist[2]))\n .bitwiseOr(glbcvr.eq(valLClist[3]))\n .bitwiseOr(glbcvr.eq(valLClist[4]))\n .bitwiseOr(glbcvr.eq(valLClist[5]))\n .bitwiseOr(glbcvr.eq(valLClist[6]))\n .bitwiseOr(glbcvr.eq(valLClist[7]))\n .bitwiseOr(glbcvr.eq(valLClist[8]))\n .bitwiseOr(glbcvr.eq(valLClist[9]))\n .bitwiseOr(glbcvr.eq(valLClist[10]))\n .bitwiseOr(glbcvr.eq(valLClist[11]))\n .bitwiseOr(glbcvr.eq(valLClist[12]))\n .bitwiseOr(glbcvr.eq(valLClist[13]))\n .bitwiseOr(glbcvr.eq(valLClist[14]))\n .bitwiseOr(glbcvr.eq(valLClist[15]))\n .bitwiseOr(glbcvr.eq(valLClist[16]))\n .bitwiseOr(glbcvr.eq(valLClist[17]))\n .bitwiseOr(glbcvr.eq(valLClist[18]))\n .bitwiseOr(glbcvr.eq(valLClist[19]))\n .bitwiseOr(glbcvr.eq(valLClist[20]))\n .bitwiseOr(glbcvr.eq(valLClist[21]))\n .bitwiseOr(glbcvr.eq(valLClist[22]))\n )\n\n tmp = tmp.updateMask(lcmask)\n\n return tmp\n\n def setresample(image):\n image = image.resample()\n return image\n\n def toln(image):\n\n tmp = ee.Image(image)\n\n # 
Convert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to ln\n out = vv.log()\n if dualpol == True:\n out = out.addBands(vh.log())\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = out.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def tolin(image):\n\n tmp = ee.Image(image)\n\n # Covert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to\n if dualpol == True:\n out = vv.addBands(vh)\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = vv.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def todb(image):\n\n tmp = ee.Image(image)\n\n return (\n ee.Image(10)\n .multiply(tmp.log10())\n .set(\"system:time_start\", tmp.get(\"system:time_start\"))\n )\n\n def applysnowmask(image):\n\n tmp = ee.Image(image)\n sdiff = tmp.select(\"VH\").subtract(snowref)\n wetsnowmap = sdiff.lte(-2.6).focal_mode(100, \"square\", \"meters\", 3)\n\n return tmp.updateMask(wetsnowmap.eq(0))\n\n def projectlia(image):\n tmp = ee.Image(image)\n trgtprj = tmp.select(\"VV\").projection()\n tmp = tmp.addBands(tmp.select(\"angle\").reproject(trgtprj), [\"angle\"], True)\n return tmp\n\n def apply_explicit_t_mask(image):\n\n t_mask = ee.Image(\"users/felixgreifeneder/\" + explicit_t_mask)\n mask = t_mask.eq(0)\n return image.updateMask(mask)\n\n ee.Reset()\n ee.Initialize()\n\n # load S1 data\n gee_s1_collection = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\n\n # Filter the image collection\n gee_s1_filtered = (\n gee_s1_collection.filter(ee.Filter.eq(\"instrumentMode\", \"IW\"))\n .filterBounds(self.roi)\n .filter(ee.Filter.eq(\"platform_number\", \"A\"))\n .filter(ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VV\"))\n )\n\n if ascending == True:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"ASCENDING\")\n )\n else:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"DESCENDING\")\n )\n\n if dualpol == True:\n # Consider only dual-pol scenes\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VH\")\n )\n\n if trackflt is not None:\n # Specify track\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"relativeOrbitNumber_start\", trackflt)\n )\n\n if maskwinter == True:\n # Mask winter based on DOY\n gee_s1_filtered = gee_s1_filtered.filter(ee.Filter.dayOfYear(121, 304))\n\n # add LIA\n if maskLIA == True:\n # compute the local incidence angle if it shall be used for masking\n gee_s1_filtered = gee_s1_filtered.map(computeLIA)\n s1_lia = gee_s1_filtered.select(\"lia\")\n else:\n s1_lia = None\n\n s1_angle = gee_s1_filtered.select(\"angle\")\n\n if applylcmask == True:\n # apply land-cover mask based on Corine\n gee_s1_filtered = gee_s1_filtered.map(masklc)\n if mask_globcover == True:\n # apply land-cover mask based on globcover\n gee_s1_filtered = gee_s1_filtered.map(mask_lc_globcover)\n\n # Enable bilinear resampling (instead of NN)\n gee_s1_filtered = gee_s1_filtered.map(setresample)\n\n if explicit_t_mask == None:\n # apply masking based on the terraing (LIA)\n gee_s1_filtered = gee_s1_filtered.map(maskterrain)\n else:\n # apply specific terrain mask\n gee_s1_filtered = 
gee_s1_filtered.map(apply_explicit_t_mask)\n\n if masksnow == True:\n # automatic wet snow masking\n gee_s1_linear_vh = gee_s1_filtered.map(tolin).select(\"VH\")\n snowref = ee.Image(10).multiply(\n gee_s1_linear_vh.reduce(ee.Reducer.intervalMean(5, 100)).log10()\n )\n gee_s1_filtered = gee_s1_filtered.map(applysnowmask)\n\n #### SHOULD BE IF STATEMENT HERE\n\n # create a list of availalbel dates\n tmp = gee_s1_filtered.getInfo()\n tmp_ids = [x[\"properties\"][\"system:index\"] for x in tmp[\"features\"]]\n \n dates = np.array(\n [\n dt.date(year=int(x[17:21]), month=int(x[21:23]), day=int(x[23:25]))\n for x in tmp_ids\n ]\n )\n \n if not len(dates):\n raise Exception(\n \"There are no S1 images with the selected filters, please consider \"\n \"changing the area of interest or selecting a different orbit\"\n )\n \n # find the closest acquisitions\n doi = dt.date(year=year, month=month, day=day)\n doi_index = np.argmin(np.abs(dates - doi))\n date_selected = dates[doi_index]\n\n # filter imagecollection for respective date\n gee_s1_drange = gee_s1_filtered.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_angle_drange = s1_angle.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if maskLIA == True:\n s1_lia_drange = s1_lia.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if gee_s1_drange.size().getInfo() > 1:\n if maskLIA == True:\n s1_lia = s1_lia_drange.mosaic()\n s1_angle = s1_angle_drange.mosaic()\n s1_sig0 = gee_s1_drange.mosaic()\n s1_lia = ee.Image(s1_lia.copyProperties(s1_lia_drange.first()))\n s1_sig0 = ee.Image(s1_sig0.copyProperties(gee_s1_drange.first()))\n else:\n s1_sig0 = ee.Image(gee_s1_drange.first())\n s1_angle = ee.Image(s1_angle_drange.first())\n s1_lia = ee.Image(s1_lia_drange.first())\n\n # fetch image from image collection\n # get the track number\n s1_sig0_info = s1_sig0.getInfo()\n track_nr = s1_sig0_info[\"properties\"][\"relativeOrbitNumber_start\"]\n\n # only uses images of the same track\n gee_s1_filtered = gee_s1_filtered.filterMetadata(\n \"relativeOrbitNumber_start\", \"equals\", track_nr\n )\n\n if tempfilter == True:\n # despeckle\n radius = tempfilter_radius\n units = \"pixels\"\n gee_s1_linear = gee_s1_filtered.map(tolin)\n gee_s1_dspckld_vv = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VV\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vv = gee_s1_dspckld_vv.map(todb)\n gee_s1_fltrd_vv = gee_s1_dspckld_vv.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vv = gee_s1_fltrd_vv.mosaic()\n\n if dualpol == True:\n gee_s1_dspckld_vh = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VH\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vh = gee_s1_dspckld_vh.map(todb)\n gee_s1_fltrd_vh = gee_s1_dspckld_vh.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vh = gee_s1_fltrd_vh.mosaic()\n\n if dualpol == True:\n s1_sig0 = s1_sig0_vv.addBands(s1_sig0_vh).select(\n [\"constant\", \"constant_1\"], [\"VV\", \"VH\"]\n )\n else:\n s1_sig0 = s1_sig0_vv.select([\"constant\"], [\"VV\"])\n\n # extract information\n s1_sig0_vv = s1_sig0.select(\"VV\")\n s1_sig0_vv = 
s1_sig0_vv.clip(self.roi)\n if dualpol == True:\n s1_sig0_vh = s1_sig0.select(\"VH\")\n s1_sig0_vh = s1_sig0_vh.clip(self.roi)\n\n gee_s1_ln = gee_s1_filtered.map(toln)\n gee_s1_lin = gee_s1_filtered.map(tolin)\n k1vv = ee.Image(gee_s1_ln.select(\"VV\").mean()).clip(self.roi)\n k2vv = ee.Image(gee_s1_ln.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vv = ee.Image(gee_s1_lin.select(\"VV\").mean()).clip(self.roi)\n std_vv = ee.Image(gee_s1_lin.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n if dualpol == True:\n k1vh = ee.Image(gee_s1_ln.select(\"VH\").mean()).clip(self.roi)\n k2vh = ee.Image(gee_s1_ln.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vh = ee.Image(gee_s1_lin.select(\"VH\").mean()).clip(self.roi)\n std_vh = ee.Image(gee_s1_lin.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n # export\n if dualpol == False:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K2VV = k2vv\n self.S1_DATE = date_selected\n else:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_SIG0_VH_db = s1_sig0_vh\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K1VH = k1vh\n self.K2VV = k2vv\n self.K2VH = k2vh\n self.S1_DATE = date_selected\n self.S1MEAN_VV = mean_vv\n self.S1MEAN_VH = mean_vh\n self.S1STD_VV = std_vv\n self.S1STD_VH = std_vh\n\n if maskLIA == True:\n self.S1_LIA = s1_lia",
"def baseline_image_mask():\n baseline_months_collection = ee.ImageCollection('JRC/GSW1_1/MonthlyHistory').filterDate('2001-01', '2006-01')\n baseline_months_sum = baseline_months_collection.reduce(ee.Reducer.sum())\n baselines_sum_img = ee.Image(baseline_months_sum)\n \n return baselines_sum_img",
"def filter_data(self,year=None,category=None):\n out={}\n if year:\n year = str(year)\n if year and category:\n for file in self.data_dict:\n if (category in file) and (year in file):\n out[file]=self.data_dict[file]\n elif year and not category: #si on veut filtrer une année sans distinction de catégorie\n for file in self.data_dict:\n if year in file:\n out[file]=self.data_dict[file]\n elif category and not year: #si on veut filtrer une catégorie sans distinction d'année\n for file in self.data_dict:\n \n if category in file:\n out[file]=self.data_dict[file]\n return out",
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def time_filter(self, oid_list, opts):\n if \"y\" in opts:\n opts[\"year\"] = opts[\"y\"]\n if \"m\" in opts:\n opts[\"month\"] = opts[\"m\"]\n if \"mon\" in opts:\n opts[\"month\"] = opts[\"mon\"]\n if \"d\" in opts:\n opts[\"day\"] = opts[\"d\"]\n \n if not \"year\" in opts and not \"month\" in opts and not \"day\" in opts:\n return oid_list\n\n if \"year\" in opts and opts[\"year\"] < 100:\n now = time.localtime(time.time())\n if now.tm_year >= opts[\"year\"]+2000:\n opts[\"year\"] += 2000\n else:\n opts[\"year\"] += 1900\n \n year, mon, day = None, None, None\n if \"year\" in opts:\n year = opts[\"year\"]\n if \"month\" in opts:\n mon = opts[\"month\"]\n if \"day\" in opts:\n day = opts[\"day\"]\n \n filtered_oids = []\n for oid in oid_list:\n tags = self.oxide.get_tags(oid)\n for tag in tags:\n if \"time\" in tag and isinstance(tags[tag], (float, int)):\n t = time.localtime(tags[tag])\n if (year and mon and day and year == t.tm_year \n and mon == t.tm_mon and day == t.tm_mday):\n filtered_oids.append(oid)\n elif year and mon and year == t.tm_year and mon == t.tm_mon:\n filtered_oids.append(oid)\n elif mon and day and mon == t.tm_mon and day == t.tm_mday:\n filtered_oids.append(oid)\n elif year and day and year == t.tm_year and day == t.tm_mday:\n filtered_oids.append(oid)\n elif year and year == t.tm_year:\n filtered_oids.append(oid)\n elif mon and mon == t.tm_mon:\n filtered_oids.append(oid)\n elif day and day == t.tm_mday:\n filtered_oids.append(oid)\n return filtered_oids",
"def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1",
"def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def filter_creation_date(groups, start, end):\n results = []\n for g in groups:\n created = datetime.fromtimestamp(g['creationTime'] / 1000.0)\n if created > end:\n continue\n if created > start:\n g['exportStart'] = created\n else:\n g['exportStart'] = start\n results.append(g)\n return results",
"def subset_list_by_daterange(in_raster_path_list, startdate, enddate, months):\n\n # Sort the list and then get the indices for the start and end dates to slice the list\n in_raster_path_list.sort()\n startindex = [i for i, path in enumerate(in_raster_path_list) if startdate in path][0]\n endindex = [i for i, path in enumerate(in_raster_path_list) if enddate in path][0] + 1\n raster_path_list = in_raster_path_list[startindex:endindex]\n\n if months:\n # convert integer month list to two-digit strings\n month_strings = [str(m).zfill(2) for m in months]\n print(\"Months:\", \",\".join(month_strings))\n # Create a list of year strings by creating a range from startdate and enddate\n start = date(int(startdate[0:4]), int(startdate[5:6]), int(startdate[7:8]))\n end = date(int(enddate[0:4]), int(enddate[5:6]), int(enddate[7:8]))\n year_strings = [str(year) for year in range(start.year, end.year + 1)]\n print(\"Years:\", \",\".join(year_strings))\n # Join the cartesian product of the years and months list.\n # Remove any duplicates in case the start year and end year are the same and sort\n yearmo_strings = list(set([\"\".join(i) for i in itertools.product(year_strings, month_strings)]))\n yearmo_strings.sort()\n print(yearmo_strings)\n # Find matching rasters in specified months from the list of all rasters in that date range\n out_raster_path_list = [r for r in raster_path_list if any(ym in r for ym in yearmo_strings )]\n else:\n out_raster_path_list = raster_path_list\n\n\n # Print input dates and output list length for QC\n print(f'Date Start: {startdate} Date End: {enddate}')\n print(f\"Number of subset rasters: {len(out_raster_path_list)}\")\n\n return out_raster_path_list",
"def _month(self, items):\n return filter(\n lambda item: item.month == self.month,\n items,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
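A note on the date-selection step inside the document field above: the Sentinel-1 acquisition closest to the requested day is picked by taking the argmin of the absolute date differences. The following is a minimal, self-contained sketch of just that step, using hypothetical dates; it is an illustration, not part of the dataset row.

import datetime as dt
import numpy as np

# Hypothetical acquisition dates, as would be parsed from Sentinel-1 system:index strings
dates = np.array([dt.date(2020, 6, 1), dt.date(2020, 6, 13), dt.date(2020, 6, 25)])

# Day of interest requested by the caller
doi = dt.date(year=2020, month=6, day=10)

# Subtracting a date from an array of dates yields timedeltas; the argmin of their
# absolute values selects the closest acquisition (2020-06-13 in this example)
doi_index = np.argmin(np.abs(dates - doi))
date_selected = dates[doi_index]
print(date_selected)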
Zonal statistics with rasters as input and rasters and lists as output | def zonalStatsToRaster(image,zonesImage,geometry,maxPixels,reducerType):
    # reducerType can be mean, max, sum, first or mode. Count is always included for QA
    # the resolution of the zonesImage is used as the scale
    reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mean"), ee.Reducer.mean(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "max"), ee.Reducer.max(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "sum"), ee.Reducer.sum(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "first"), ee.Reducer.first(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mode"), ee.Reducer.mode(), "error")))))
    reducer = ee.Reducer(reducer).combine(reducer2=ee.Reducer.count(), sharedInputs=True).group(groupField=1, groupName="zones")
    scale = zonesImage.projection().nominalScale().getInfo()
    zonesImage = zonesImage.select(zonesImage.bandNames(), ["zones"])
    totalImage = ee.Image(image).addBands(zonesImage)
    resultsList = ee.List(totalImage.reduceRegion(
        geometry=geometry,
        reducer=reducer,
        scale=scale,
        maxPixels=maxPixels
    ).get("groups"))
    resultsList = resultsList.map(ensure_default_properties)
    zoneList = mapList(resultsList, 'zones')
    countList = mapList(resultsList, 'count')
    valueList = mapList(resultsList, reducerType)
    valueImage = zonesImage.remap(zoneList, valueList).select(["remapped"], [reducerType])
    countImage = zonesImage.remap(zoneList, countList).select(["remapped"], ["count"])
    newImage = zonesImage.addBands(countImage).addBands(valueImage)
    return newImage, zoneList, valueList, countList | [
"def zonal_stats(vectors, raster, layer=0, band_num=1, nodata_value=None,\n global_src_extent=False, categorical=False, stats=None,\n copy_properties=False, all_touched=False, transform=None, affine=None,\n add_stats=None, raster_out=False, category_map=None, **kwargs):\n stats, run_count = check_stats(stats, categorical)\n\n rtype, rgt, rshape, global_src_extent, nodata_value = \\\n raster_info(raster, global_src_extent, nodata_value, affine, transform)\n\n features_iter = read_features(vectors, layer)\n\n if global_src_extent and rtype == 'gdal':\n # create an in-memory numpy array of the source raster data\n extent = raster_extent_as_bounds(rgt, rshape)\n global_src_offset = bbox_to_pixel_offsets(rgt, extent, rshape)\n window = pixel_offsets_to_window(global_src_offset)\n with rasterio.drivers():\n with rasterio.open(raster, 'r') as src:\n global_src_array = src.read(\n band_num, window=window, masked=False)\n elif global_src_extent and rtype == 'ndarray':\n global_src_offset = (0, 0, raster.shape[0], raster.shape[1])\n global_src_array = raster\n\n results = []\n\n for i, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n # Point and MultiPoint don't play well with GDALRasterize\n # convert them into box polygons the size of a raster cell\n # TODO warning, suggest point_query instead\n buff = rgt[1] / 2.0\n if geom.type == \"MultiPoint\":\n geom = MultiPolygon([box(*(pt.buffer(buff).bounds))\n for pt in geom.geoms])\n elif geom.type == 'Point':\n geom = box(*(geom.buffer(buff).bounds))\n\n geom_bounds = list(geom.bounds)\n\n # calculate new pixel coordinates of the feature subset\n src_offset = bbox_to_pixel_offsets(rgt, geom_bounds, rshape)\n\n new_gt = (\n (rgt[0] + (src_offset[0] * rgt[1])),\n rgt[1],\n 0.0,\n (rgt[3] + (src_offset[1] * rgt[5])),\n 0.0,\n rgt[5]\n )\n\n if src_offset[2] <= 0 or src_offset[3] <= 0:\n # we're off the raster completely, no overlap at all\n # so there's no need to even bother trying to calculate\n feature_stats = dict([(s, None) for s in stats])\n else:\n if not global_src_extent:\n # use feature's source extent and read directly from source\n window = pixel_offsets_to_window(src_offset)\n with rasterio.drivers():\n with rasterio.open(raster, 'r') as src:\n src_array = src.read(\n band_num, window=window, masked=False)\n else:\n # subset feature array from global source extent array\n xa = src_offset[0] - global_src_offset[0]\n ya = src_offset[1] - global_src_offset[1]\n xb = xa + src_offset[2]\n yb = ya + src_offset[3]\n src_array = global_src_array[ya:yb, xa:xb]\n\n # create ndarray of rasterized geometry\n rv_array = rasterize_geom(geom, src_offset, new_gt, all_touched)\n assert rv_array.shape == src_array.shape\n\n # Mask the source data array with our current feature\n # we take the logical_not to flip 0<->1 for the correct mask effect\n # we also mask out nodata values explicitly\n masked = np.ma.MaskedArray(\n src_array,\n mask=np.logical_or(\n src_array == nodata_value,\n np.logical_not(rv_array)\n )\n )\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = 
remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n try:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n except IndexError:\n feature_stats['majority'] = None\n if 'minority' in stats:\n try:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n except IndexError:\n feature_stats['minority'] = None\n if 'unique' in stats:\n feature_stats['unique'] = len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n if pctarr.size == 0:\n feature_stats[pctile] = None\n else:\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats:\n featmasked = np.ma.MaskedArray(src_array, mask=np.logical_not(rv_array))\n keys, counts = np.unique(featmasked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n feature_stats['nodata'] = pixel_count.get(nodata_value, 0)\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n masked.fill_value = nodata_value\n masked.data[masked.mask] = nodata_value\n feature_stats['mini_raster'] = masked\n feature_stats['mini_raster_GT'] = new_gt\n feature_stats['mini_raster_NDV'] = nodata_value\n\n if 'fid' in feat:\n # Use the fid directly,\n # likely came from OGR data via .utils.feature_to_geojson\n feature_stats['__fid__'] = feat['fid']\n else:\n # Use the enumerated id\n feature_stats['__fid__'] = i\n\n if 'properties' in feat and copy_properties:\n for key, val in list(feat['properties'].items()):\n feature_stats[key] = val\n\n results.append(feature_stats)\n\n return results",
"def zonal_stats(src_poly, src_raster, operator=['mean'], features=None):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n assert isinstance(operator, list), \"operator should be a list of string. ex: ['mean']\"\n features = list(range(src_raster.bands)) if features is None else features\n assert len(features) == src_raster.bands, \"length of features should equals number of bands of the raster\"\n df_shp = src_poly.copy()\n df_shp['poly_idx'] = list(range(len(df_shp)))\n df_shp['poly_idx'] = df_shp['poly_idx'].astype('float')\n poly_rst = tgp.ShapeGrid.rasterize_layer(df_shp, src_raster.rows, src_raster.cols, src_raster.geo_transform, 'poly_idx', all_touched=True, no_data_value=np.nan)\n X_combine = np.concatenate([poly_rst.data, src_raster.data], axis=-1)\n X_combine_df = pd.DataFrame(X_combine.reshape(-1, src_raster.bands))\n X_groupby = X_combine_df.groupby(0, as_index=False)\n for op in operator:\n columns = {0:'poly_idx'}\n for f_idx, f in enumerate(features):\n columns[f_idx+1] = f'zonal_{op}_{f}'\n if op == 'mean':\n df_shp = df_shp.merge(X_groupby.mean().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'max':\n df_shp = df_shp.merge(X_groupby.max().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'min':\n df_shp = df_shp.merge(X_groupby.min().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'median':\n df_shp = df_shp.merge(X_groupby.median().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'sum':\n df_shp = df_shp.merge(X_groupby.sum().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'std':\n df_shp = df_shp.merge(X_groupby.std().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'count':\n df_shp = df_shp.merge(X_groupby.count().rename(columns=columns), on='poly_idx', how='left')\n else:\n assert False, \"no this operator\"\n return df_shp",
"def raster_stats(*args, **kwargs):\n warnings.warn(\"'raster_stats' is an alias to 'zonal_stats'\"\n \" and will disappear in 1.0\", DeprecationWarning)\n return zonal_stats(*args, **kwargs)",
"def test_sum_rasters(self):\n\n # arr = sr.sum_rasters(TestSpatialReader.BIOMASS_DIR)\n #\n # print(arr.shape)\n # print(arr.min())\n # print(arr.max())\n # print(arr.unique())\n\n self.assertEqual(2, 2)\n pass",
"def zonalStats(dataArray, stat=stat):\n stats = [] \n for i in dataArray:\n x = rs.zonal_stats(project_area, i, transform=transform_zonal, stats=stat) \n stats.append(x)\n #extract just the values from the results, and convert 'None' values to nan\n stats = [[t[stat] if t[stat] is not None else np.nan for t in feature] for feature in stats]\n stats = np.array(stats)\n return stats",
"def zonal_stats(service_raster_path, zonal_raster_path):\n service_nodata = pygeoprocessing.get_raster_info(\n service_raster_path)['nodata'][0]\n\n service_raster = gdal.OpenEx(service_raster_path)\n service_band = service_raster.GetRasterBand(1)\n\n zone_raster = gdal.OpenEx(zonal_raster_path)\n zone_band = zone_raster.GetRasterBand(1)\n\n try:\n zonal_stat_dict = {}\n last_blocksize = None\n for block_offset in pygeoprocessing.iterblocks(\n (zonal_raster_path, 1), offset_only=True):\n blocksize = (block_offset['win_ysize'], block_offset['win_xsize'])\n\n if last_blocksize != blocksize:\n service_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(service_band))\n zone_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(zone_band))\n last_blocksize = blocksize\n\n service_data = block_offset.copy()\n service_data['buf_obj'] = service_array\n service_band.ReadAsArray(**service_data)\n\n zone_data = block_offset.copy()\n zone_data['buf_obj'] = zone_array\n zone_band.ReadAsArray(**zone_data)\n\n zone_values = numpy.unique(zone_array[zone_array > 0])\n\n for zone in zone_values:\n valid_mask = (\n (service_array != service_nodata) &\n (zone_array == zone))\n # the below was necessary for 10km CV raster\n # valid_mask = (\n # (service_array > 0) &\n # (zone_array == zone))\n valid_block = service_array[valid_mask]\n zone_sum = numpy.sum(valid_block)\n if zone_sum > 0:\n zone_avg = zone_sum / valid_block.size\n else:\n zone_avg = 0\n if zone in zonal_stat_dict:\n zonal_stat_dict[zone]['sum'] += zone_sum\n zonal_stat_dict[zone]['average'] += zone_avg\n else:\n zonal_stat_dict[zone] = {\n 'sum': zone_sum,\n 'average': zone_avg,\n }\n finally:\n service_band = None\n zone_band = None\n gdal.Dataset.__swig_destroy__(service_raster)\n gdal.Dataset.__swig_destroy__(zone_raster)\n\n return zonal_stat_dict",
"def calculate_zonal_statistics(self):\n counter = 1\n for layer in self.input_elevation_rasters:\n QgsMapLayerRegistry.instance().addMapLayer(layer)\n for polygon in self.copied_groyne_cell_polygons:\n QgsMapLayerRegistry.instance().addMapLayer(polygon)\n # Arguments - (polygon, raster, attribute prefix, band, stat to calculate\n zonal_stats = QgsZonalStatistics(polygon, layer.source(), \"GR_{0!s}_\".format(counter), 1,\n QgsZonalStatistics.Mean)\n zonal_stats.calculateStatistics(None)\n counter += 1\n\n self.add_height_adjustment()",
"def zonal_timeseries(dataArray, shp_loc, results_loc, feature_name, stat='mean', csv=False, netcdf=False, plot=False):\n\n #use dask to chunk the data along the time axis in case its a very large dataset\n dataArray = dataArray.chunk(chunks = {'time':20})\n \n #create 'transform' tuple to provide ndarray with geo-referencing data. \n one = float(dataArray.x[0])\n two = float(dataArray.y[0] - dataArray.y[1])\n three = 0.0\n four = float(dataArray.y[0])\n five = 0.0\n six = float(dataArray.x[0] - dataArray.x[1])\n\n transform_zonal = (one, two, three, four, five, six)\n\n #import shapefile, make sure its in the right projection to match the dataArray\n #and set index to the feature_name\n project_area = gpd.read_file(shp_loc) #get the shapefile\n reproj=int(str(dataArray.crs)[5:]) #do a little hack to get EPSG from the dataArray \n project_area = project_area.to_crs(epsg=reproj) #reproject shapefile to match dataArray\n project_area = project_area.set_index(feature_name) #set the index\n \n #define the general function\n def zonalStats(dataArray, stat=stat): \n \"\"\"extract the zonal statistics of all\n pixel values within each polygon\"\"\"\n stats = [] \n for i in dataArray:\n x = rs.zonal_stats(project_area, i, transform=transform_zonal, stats=stat) \n stats.append(x)\n #extract just the values from the results, and convert 'None' values to nan\n stats = [[t[stat] if t[stat] is not None else np.nan for t in feature] for feature in stats]\n stats = np.array(stats)\n return stats\n\n #use the zonal_stats functions to extract the stats:\n n = len(project_area) #number of polygons in the shapefile (defines the dimesions of the output)\n statistics = dataArray.data.map_blocks(zonalStats, chunks=(-1,n), drop_axis=1, dtype=np.float64).compute()\n\n #get unique identifier and timeseries data from the inputs \n colnames = pd.Series(project_area.index.values)\n time = pd.Series(dataArray['time'].values)\n\n #define functions for cleaning up the results of the rasterstats operation\n def tidyresults(results):\n x = pd.DataFrame(results).T #transpose\n x = x.rename(colnames, axis='index') #rename the columns to the timestamp\n x = x.rename(columns = time)\n return x\n\n #place results into indexed dataframes using tidyresults function\n statistics_df = tidyresults(statistics)\n \n #convert into xarray for merging into a dataset\n stat_xr = xr.DataArray(statistics_df, dims=[feature_name, 'time'], coords={feature_name: statistics_df.index, 'time': time}, name= stat)\n \n #options for exporting results as csv, netcdf, pdf plots\n #export results as a .csv\n if csv:\n statistics_df.to_csv('{0}{1}.csv'.format(results_loc, stat))\n \n if netcdf:\n #export out results as netcdf\n stat_xr.to_netcdf('{0}zonalstats_{1}.nc'.format(results_loc, stat), mode='w',format='NETCDF4') \n\n if plot: \n #place the data from the xarray into a list\n plot_data = []\n for i in range(0,len(stat_xr[feature_name])):\n x = stat_xr.isel([stat], **{feature_name: i})\n plot_data.append(x)\n\n #extract the unique names of each polygon\n feature_names = list(stat_xr[feature_name].values)\n\n #zip the both the data and names together as a dictionary \n monthly_dict = dict(zip(feature_names,plot_data))\n\n #create a function for generating the plots\n def plotResults(dataArray, title):\n \"\"\"a function for plotting up the results of the\n fractional cover change and exporting it out as pdf \"\"\"\n x = dataArray.time.values\n y = dataArray.data \n\n plt.figure(figsize=(15,5))\n plt.plot(x, y,'k', color='#228b22', linewidth = 1)\n 
plt.grid(True, linestyle ='--')\n plt.title(title)\n plt.savefig('{0}{1}.pdf'.format(results_loc, title), bbox_inches='tight')\n\n #loop over the dictionaries and create the plots\n {key: plotResults(monthly_dict[key], key + \"_\"+ stat) for key in monthly_dict} \n \n #return the results as a dataframe\n return statistics_df",
"def zonalstatistics(inZoneData, zoneField, inRaster, outTable, outFC): \n arcpy.sa.ZonalStatisticsAsTable (inZoneData, zoneField, inRaster, outTable, ignore_nodata=\"DATA\", statistics_type=\"ALL\")\n #ras = ZonalStatistics (in_zone_data=shp, zone_field=field, in_value_raster=rw, statistics_type=statistic, ignore_nodata=\"DATA\")\n #ras.save(outras)\n path, name = os.path.split(inZoneData)\n arcpy.Copy_management(inZoneData, outFC)\n # Join the zone feature class to zonal statistics table\n arcpy.JoinField_management (in_data=outFC, in_field=zoneField, join_table=outTable, join_field=zoneField)\n try:\n df = attribute_table_to_df(outFC)\n df.index = df[zoneField]\n return df\n except:\n pass",
"def gen_zonal_stats(\n vectors, raster,\n layer=0,\n band=1,\n nodata=None,\n affine=None,\n stats=None,\n all_touched=True,\n percent_cover_selection=None,\n percent_cover_weighting=True,\n percent_cover_scale=20,\n categorical=False,\n category_map=None,\n add_stats=None,\n zone_func=None,\n raster_out=False,\n prefix=None,\n geojson_out=False, **kwargs):\n stats, run_count = check_stats(stats, categorical)\n\n # check inputs related to percent coverage\n percent_cover = False\n if percent_cover_weighting or percent_cover_selection is not None:\n percent_cover = True\n if percent_cover_scale is None:\n warnings.warn('No value for `percent_cover_scale` was given. '\n 'Using default value of 10.')\n percent_cover_scale = 10\n\n try:\n if percent_cover_scale != int(percent_cover_scale):\n warnings.warn('Value for `percent_cover_scale` given ({0}) '\n 'was converted to int ({1}) but does not '\n 'match original value'.format(\n percent_cover_scale, int(percent_cover_scale)))\n\n percent_cover_scale = int(percent_cover_scale)\n\n if percent_cover_scale <= 1:\n raise Exception('Value for `percent_cover_scale` must be '\n 'greater than one ({0})'.format(\n percent_cover_scale))\n\n except:\n raise Exception('Invalid value for `percent_cover_scale` '\n 'provided ({0}). Must be type int.'.format(\n percent_cover_scale))\n\n if percent_cover_selection is not None:\n try:\n percent_cover_selection = float(percent_cover_selection)\n except:\n raise Exception('Invalid value for `percent_cover_selection` '\n 'provided ({0}). Must be able to be converted '\n 'to a float.'.format(percent_cover_selection))\n\n # if not all_touched:\n # warnings.warn('`all_touched` was not enabled but an option requiring '\n # 'percent_cover calculations was selected. Automatically '\n # 'enabling `all_touched`.')\n # all_touched = True\n\n with Raster(raster, affine, nodata, band) as rast:\n features_iter = read_features(vectors, layer)\n for _, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n if 'Point' in geom.type:\n geom = boxify_points(geom, rast)\n percent_cover = False\n\n geom_bounds = tuple(geom.bounds)\n fsrc = rast.read(bounds=geom_bounds)\n\n if percent_cover:\n cover_weights = rasterize_pctcover_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n scale=percent_cover_scale,\n all_touched=all_touched)\n rv_array = cover_weights > (percent_cover_selection or 0)\n else:\n rv_array = rasterize_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n all_touched=all_touched)\n\n # nodata mask\n isnodata = (fsrc.array == fsrc.nodata)\n\n # add nan mask (if necessary)\n if np.issubdtype(fsrc.array.dtype, float) and \\\n np.isnan(fsrc.array.min()):\n isnodata = (isnodata | np.isnan(fsrc.array))\n\n # Mask the source data array\n # mask everything that is not a valid value or not within our geom\n masked = np.ma.MaskedArray(\n fsrc.array,\n mask=(isnodata | ~rv_array))\n\n # execute zone_func on masked zone ndarray\n if zone_func is not None:\n if not callable(zone_func):\n raise TypeError(('zone_func must be a callable '\n 'which accepts function a '\n 'single `zone_array` arg.'))\n zone_func(masked)\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c 
in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n if percent_cover:\n feature_stats['mean'] = float(\n np.sum(masked * cover_weights) /\n np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n if percent_cover:\n feature_stats['count'] = float(np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n if percent_cover:\n feature_stats['sum'] = float(np.sum(masked * cover_weights))\n else:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n if 'minority' in stats:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n if 'unique' in stats:\n feature_stats['unique'] = len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats:\n featmasked = np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))\n feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n feature_stats['mini_raster_array'] = masked\n feature_stats['mini_raster_affine'] = fsrc.affine\n feature_stats['mini_raster_nodata'] = fsrc.nodata\n\n if prefix is not None:\n prefixed_feature_stats = {}\n for key, val in feature_stats.items():\n newkey = \"{}{}\".format(prefix, key)\n prefixed_feature_stats[newkey] = val\n feature_stats = prefixed_feature_stats\n\n if geojson_out:\n for key, val in feature_stats.items():\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][key] = val\n yield feat\n else:\n yield feature_stats",
"def zonal_stats(feat, input_zone_polygon, input_value_raster, pointBuf):\n ## https://gis.stackexchange.com/questions/77993/issue-trying-to-create-zonal-statistics-using-gdal-and-python\n # Open raster data\n raster = gdal.Open(input_value_raster)\n\n # --- Check if current feature intersects with raster extent\n\n # Now open up that reprojected input_zone_polygon\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n\n # Get raster georeference info\n rasterExtent, xOrigin, yOrigin, pixelWidth, pixelHeight = get_raster_extent(raster)\n\n # Get extent of feat\n geom = feat.GetGeometryRef()\n\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n elif (geom.GetGeometryName() == 'POINT'):\n # Create 3 points:\n # center (actual xy of point) and an UR & LL based on a buffer distance of pointBuf\n pointsX = []; pointsY = []\n pointsX.append(geom.GetX())\n pointsX.append(geom.GetX() + pointBuf)\n pointsX.append(geom.GetX() - pointBuf)\n pointsY.append(geom.GetY())\n pointsY.append(geom.GetY() + pointBuf)\n pointsY.append(geom.GetY() - pointBuf)\n\n else:\n sys.exit()\n\n # Get the extent of the current feature\n xmin = min(pointsX)\n xmax = max(pointsX)\n ymin = min(pointsY)\n ymax = max(pointsY)\n ## [left, bottom, right, top]\n featExtent = [xmin,ymax,xmax,ymin]\n\n # Need to find intersection of featExtent and rasterExtent here\n\n intersection = [max(rasterExtent[0], featExtent[0]) , \\\n min(rasterExtent[1], featExtent[1]) , \\\n min(rasterExtent[2], featExtent[2]) , \\\n max(rasterExtent[3], featExtent[3]) ]\n\n if rasterExtent != featExtent:\n print '\\tLooking for overlap (intersection) b/w feature and raster...'\n # check for any overlap at all...\n if (intersection[2] < intersection[0]) or (intersection[1] < intersection[3]):\n intersection = None\n print '\\t***No overlap. 
Returning np.nan value for zonal statistics'\n return np.nan, np.nan\n else:\n print '\\tHere is the overlap (intersection):',intersection\n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth)+1\n ycount = int((ymax - ymin)/pixelWidth)+1\n\n # print '\\t Create memory target raster...'\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # print '\\t Rasterize zone polygon to raster, fill with 1's...'\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # print '\\tRead raster as arrays...'\n banddataraster = raster.GetRasterBand(1)\n try:\n dataraster = banddataraster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n except Exception, e:\n print '\\t' + str(e)\n dataraster = banddataraster.ReadAsArray(xoff, yoff, xcount, ycount)\n\n # Set up datamask that is filled with 1's\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount)##.astype(np.float)\n\n if geom.GetGeometryName() == 'POINT':\n # For points, this has to be done, otherwise you get 0s for all but the center position...\n datamask.fill(1)\n\n # Mask zone of raster\n try:\n zoneraster = np.ma.masked_array(dataraster, np.logical_not(datamask))\n zoneraster[zoneraster <= -99.] = np.nan\n\n try:\n # Get a masked array that prevents nans from interfering\n ##https://stackoverflow.com/questions/5480694/numpy-calculate-averages-with-nans-removed\n m_zoneraster = np.ma.masked_array(zoneraster,np.isnan(zoneraster))\n\n # Calculate statistics of zonal raster\n ##print '\\t std: ' + str(round(np.std(zoneraster),2))\n ##return round(np.mean(zoneraster),2), round(np.std(zoneraster),2)\n print '\\t std: ' + str(round(m_zoneraster.std(),2))\n\n zValList = dataraster.flatten()\n return round(m_zoneraster.mean(),2), round(m_zoneraster.std(),2), zValList\n\n except Exception, e:\n print '\\t' + str(e)\n\n return np.nan, np.nan, None\n\n except Exception, e:\n print '\\t No stats for features straddling edge of raster.'\n\n return np.nan, np.nan, None",
"def nested_zonal_stats(\n service_raster_path, country_raster_path, kba_raster_path,\n area_raster_path):\n service_nodata = pygeoprocessing.get_raster_info(\n service_raster_path)['nodata'][0]\n country_nodata = pygeoprocessing.get_raster_info(\n country_raster_path)['nodata'][0]\n\n service_raster = gdal.OpenEx(service_raster_path)\n service_band = service_raster.GetRasterBand(1)\n\n country_raster = gdal.OpenEx(country_raster_path)\n country_band = country_raster.GetRasterBand(1)\n\n kba_raster = gdal.OpenEx(kba_raster_path)\n kba_band = kba_raster.GetRasterBand(1)\n\n area_raster = gdal.OpenEx(area_raster_path)\n area_band = area_raster.GetRasterBand(1)\n\n try:\n zonal_stat_dict = {}\n last_blocksize = None\n for block_offset in pygeoprocessing.iterblocks(\n (country_raster_path, 1), offset_only=True):\n blocksize = (block_offset['win_ysize'], block_offset['win_xsize'])\n\n if last_blocksize != blocksize:\n service_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(service_band))\n country_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(country_band))\n kba_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(kba_band))\n area_array = numpy.zeros(\n blocksize,\n dtype=pygeoprocessing._gdal_to_numpy_type(area_band))\n last_blocksize = blocksize\n\n service_data = block_offset.copy()\n service_data['buf_obj'] = service_array\n service_band.ReadAsArray(**service_data)\n\n country_data = block_offset.copy()\n country_data['buf_obj'] = country_array\n country_band.ReadAsArray(**country_data)\n\n kba_data = block_offset.copy()\n kba_data['buf_obj'] = kba_array\n kba_band.ReadAsArray(**kba_data)\n\n area_data = block_offset.copy()\n area_data['buf_obj'] = area_array\n area_band.ReadAsArray(**area_data)\n\n country_values = numpy.unique(\n country_array[country_array != country_nodata])\n for country_id in country_values:\n # sum of service inside KBAs, inside country\n service_in_kba_country_mask = (\n (service_array != service_nodata) &\n (kba_array > 0) &\n (country_array == country_id))\n service_in_kba_in_country = numpy.sum(\n service_array[service_in_kba_country_mask])\n # area of pixels inside the country\n country_mask = (country_array == country_id)\n kba_mask = (\n (kba_array > 0) &\n (country_array == country_id))\n total_area = numpy.sum(area_array[country_mask])\n kba_area = numpy.sum(area_array[kba_mask])\n if country_id in zonal_stat_dict:\n zonal_stat_dict[country_id]['sum'] += (\n service_in_kba_in_country)\n zonal_stat_dict[country_id]['total_area_km2'] += (\n total_area)\n zonal_stat_dict[country_id]['kba_area_km2'] += (\n kba_area)\n else:\n zonal_stat_dict[country_id] = {\n 'sum': service_in_kba_in_country,\n 'total_area_km2': total_area,\n 'kba_area_km2': kba_area,\n }\n finally:\n service_band = None\n country_band = None\n kba_band = None\n gdal.Dataset.__swig_destroy__(service_raster)\n gdal.Dataset.__swig_destroy__(country_raster)\n gdal.Dataset.__swig_destroy__(kba_raster)\n\n return zonal_stat_dict",
"def zonal_stats(self, gdf, stats, all_touched=False):\n _ST = [\"count\", \"min\", \"max\", \"sum\", \"mean\", \"std\", \"median\"]\n\n def rmd(ds, stat):\n return {var: f\"{var}_{stat}\" for var in ds.raster.vars}\n\n def gen_zonal_stat(ds, geoms, stats, all_touched=False):\n dims = (ds.raster.y_dim, ds.raster.x_dim)\n for i, geom in enumerate(geoms):\n # add buffer to work with point geometries\n ds1 = ds.raster.clip_bbox(geom.bounds, buffer=2).raster.mask_nodata()\n if np.any(np.asarray(ds1.raster.shape) < 2):\n continue\n mask = full(ds1.raster.coords, nodata=0, dtype=np.uint8)\n features.rasterize(\n [(geom, 1)],\n out_shape=mask.raster.shape,\n fill=0,\n transform=mask.raster.transform,\n out=mask.data,\n all_touched=all_touched,\n )\n ds1 = ds1.where(mask == 1)\n dss = []\n for stat in stats:\n if stat in _ST:\n ds1_stat = getattr(ds1, stat)(dims)\n dss.append(ds1_stat.rename(rmd(ds1, stat)))\n elif isinstance(stat, str) and stat.startswith(\"q\"):\n qs = np.array([float(q) for q in stat.strip(\"q\").split(\",\")])\n dss.append(\n ds1.quantile(qs / 100, dims).rename(rmd(ds1, \"quantile\"))\n )\n elif callable(stat):\n dss.append(\n ds1.reduce(stat, dims).rename(rmd(ds1, stat.__name__))\n )\n else:\n raise ValueError(f\"Stat {stat} not valid.\")\n yield xr.merge(dss), i\n\n if isinstance(stats, str):\n stats = stats.split()\n elif callable(stats):\n stats = list([stats])\n\n if gdf.crs is not None and self.crs is not None and gdf.crs != self.crs:\n gdf = gdf.to_crs(self.crs)\n geoms = gdf[\"geometry\"].values\n\n ds = self._obj.copy()\n if isinstance(ds, xr.DataArray):\n if ds.name is None:\n ds.name = \"values\"\n ds = ds.to_dataset()\n\n out = list(gen_zonal_stat(ds, geoms, stats, all_touched))\n if len(out) == 0:\n raise IndexError(\"All geometries outside raster domain\")\n\n dss, idx = zip(*out)\n ds_out = xr.concat(dss, \"index\")\n ds_out[\"index\"] = xr.IndexVariable(\"index\", gdf.index.values[np.array(idx)])\n\n return ds_out",
"def get_statistics_for_year(rasters, year, mean_path, std_path, sd_mean_path, deviation_path, land_use, raster_base_path=os.path.join(base_folder, \"spatial_comparisons\",), debug=False):\n\tsummed_rasters = []\n\tfor raster in rasters:\n\t\tif type(raster) is not arcpy.Raster:\n\t\t\traster_path = os.path.join(raster_base_path, raster)\n\t\telse:\n\t\t\traster_path = raster\n\n\t\tsummed_rasters.append(make_annual(raster_path, year))\n\n\tif debug:\n\t\tfor raster in summed_rasters:\n\t\t\toutput = tempfile.mktemp(prefix=\"summed_\", suffix=os.path.split(str(raster))[1]) # add the original filename to the end\n\t\t\traster.save(output)\n\t\t\tprint(\"Composite Output at {}\".format(output))\n\n\t# create the mask of features we actually want\n\tif use_backup_mask is False:\n\t\tmask = make_mask(land_use, dsa=dsa_feature, mask_query=land_use_mask_queries[year])\n\telse:\n\t\tmask = backup_masks[year]\n\n\twith Env(\"mask\", mask):\n\n\t\tarcpy.CheckOutExtension(\"Spatial\")\n\t\ttry:\n\n\t\t\tmean_raster = arcpy.sa.CellStatistics(summed_rasters, \"MEAN\", \"NODATA\")\n\t\t\tstd_raster = arcpy.sa.CellStatistics(summed_rasters, \"STD\", \"NODATA\")\n\n\t\t\thistogram_from_raster(mean_raster, \"Histogram of mean ET for {}\".format(year), output_folder=output_folder)\n\n\t\t\tmean_raster.save(mean_path)\n\t\t\tstd_raster.save(std_path)\n\n\t\t\toverall_mean = get_overall_mean(mean_raster) # get the mean value across the whole raster\n\t\t\tdeviation_from_mean_raster = (mean_raster - overall_mean)/overall_mean\n\t\t\tdeviation_from_mean_raster.save(deviation_path)\n\n\t\t\tsd_mean = std_raster / mean_raster\n\t\t\tsd_mean.save(sd_mean_path)\n\t\tfinally:\n\t\t\tarcpy.CheckInExtension(\"Spatial\")\n\n\treturn mean_raster, std_raster",
"def test_rasters_and_arrays(self):\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n msg = 'Longitudes not as expected: %s' % str(longitudes)\n assert numpy.allclose(longitudes, [100.5, 101.5, 102.5, 103.5, 104.5,\n 105.5, 106.5, 107.5]), msg\n\n msg = 'Latitudes not as expected: %s' % str(latitudes)\n assert numpy.allclose(latitudes, [5.5, 6.5, 7.5, 8.5, 9.5]), msg\n\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, geotransform,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n assert R1.filename == out_filename\n\n # Check nodata in original layer\n assert numpy.isnan(R1.get_nodata_value())\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n # Check nodata in read layer\n assert numpy.isnan(R2.get_nodata_value())\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert 
numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2",
"def read_preprocess_rasters(can_rst_fn, dem_rst_fn, peat_type_rst_fn, peat_depth_rst_fn):\n with rasterio.open(dem_rst_fn) as dem:\n dem = dem.read(1)\n with rasterio.open(can_rst_fn) as can:\n can_arr = can.read(1)\n with rasterio.open(peat_type_rst_fn) as pt:\n peat_type_arr = pt.read(1)\n with rasterio.open(peat_depth_rst_fn) as pd:\n peat_depth_arr = pd.read(1)\n \n \n #Some small changes to get mask of canals: 1 where canals exist, 0 otherwise\n can_arr[can_arr < 0.5] = 0\n can_arr[abs(can_arr) > 0.5] = 1\n can_arr = np.array(can_arr, dtype=int)\n \n # Convert from numpy no data to -9999.0\n dem[dem <-10] = -9999.0\n dem[np.where(np.isnan(dem))] = -9999.0\n dem[dem > 1e20] = -9999.0 # just in case\n \n # control nodata values\n peat_type_arr[peat_type_arr < 0] = -1\n # fill some nodata values to get same size as dem\n peat_type_arr[(np.where(dem>0.1) and np.where(peat_type_arr <0.1))] = 1.\n \n # control nodata values\n peat_depth_arr[peat_depth_arr < 0] = -1\n \n peat_depth_arr = peat_depth_map(peat_depth_arr) # translate number keys to depths\n \n # fill some nodata values to get same size as dem\n peat_depth_arr[(np.where(dem>0.1) and np.where(peat_depth_arr <0.1))] = 1.\n \n \n # Eliminate rows and columns full of noData values.\n dem = dem[7:-7, 5:-15] #old\n can_arr = can_arr[7:-7, 5:-15]\n peat_type_arr = peat_type_arr[7:-7, 5:-15]\n peat_depth_arr = peat_depth_arr[7:-7, 5:-15]\n \n return can_arr, dem, peat_type_arr, peat_depth_arr",
"def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]",
"def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... }\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()",
"def _sum_n_rasters(\n raster_path_list, target_raster_path):\n LOGGER.info('Summing %s rasters to %s', len(raster_path_list),\n target_raster_path)\n LOGGER.debug('Attempting to open %s', raster_path_list[0])\n pygeoprocessing.new_raster_from_base(\n raster_path_list[0], target_raster_path, gdal.GDT_Float32,\n [NODATA_FLOAT32_MIN])\n\n target_raster = gdal.OpenEx(\n target_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n target_band = target_raster.GetRasterBand(1)\n\n n_pixels_to_process = (\n (target_raster.RasterXSize * target_raster.RasterYSize) *\n len(raster_path_list))\n n_pixels_processed = 0\n last_log_time = time.time()\n\n raster_tuple_list = []\n for raster_path in raster_path_list:\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n raster_tuple_list.append((raster, band, nodata))\n\n for block_info in pygeoprocessing.iterblocks(\n (raster_path_list[0], 1), offset_only=True):\n\n sum_array = numpy.empty(\n (block_info['win_ysize'], block_info['win_xsize']),\n dtype=numpy.float32)\n sum_array[:] = 0.0\n\n # Assume everything is valid until proven otherwise\n pixels_touched = numpy.zeros(sum_array.shape, dtype=bool)\n for (_, band, nodata) in raster_tuple_list:\n if time.time() - last_log_time >= 5.0:\n percent_complete = round(\n n_pixels_processed / n_pixels_to_process, 4)*100\n LOGGER.info(f'Summation {percent_complete:.2f}% complete')\n last_log_time = time.time()\n\n array = band.ReadAsArray(**block_info)\n valid_pixels = slice(None)\n if nodata is not None:\n valid_pixels = ~utils.array_equals_nodata(array, nodata)\n\n sum_array[valid_pixels] += array[valid_pixels]\n pixels_touched[valid_pixels] = 1\n n_pixels_processed += sum_array.size # for logging\n\n sum_array[~pixels_touched] = NODATA_FLOAT32_MIN\n\n target_band.WriteArray(\n sum_array, block_info['xoff'], block_info['yoff'])\n\n LOGGER.info('Summation 100.00% complete')\n raster_tuple_list = None\n\n target_band.ComputeStatistics(0)\n target_band = None\n target_raster = None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
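A minimal usage sketch for the zonalStatsToRaster document above. Assumptions: an authenticated Earth Engine session, the module's ensure_default_properties and mapList helpers being importable alongside the function, and a hypothetical user asset for the zone raster; the SRTM asset ID is a standard public Earth Engine dataset used here only as an example value image.

import ee
ee.Initialize()

elevation = ee.Image("USGS/SRTMGL1_003")                   # public 30 m DEM, used as the value image
zones = ee.Image("users/example/basin_zones")              # hypothetical integer zone raster
geometry = ee.Geometry.Rectangle([5.0, 45.0, 6.0, 46.0])   # area of interest (lon/lat)

# Returns a 3-band image (zones, count, mean) plus the per-zone lists
zonal_img, zone_ids, mean_vals, pixel_counts = zonalStatsToRaster(
    elevation, zones, geometry, maxPixels=1e10, reducerType="mean"
)
print(zone_ids.getInfo(), mean_vals.getInfo(), pixel_counts.getInfo())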
Instantiate the daily profile class.. | def __init__(self, profile: Dict[datetime.time, float] = None) -> None:
    if profile is None:
        profile = dict()
    if not isinstance(profile, dict):
        raise ProgrammerJudgementFault(
            "The input daily profile provided is not a mapping of the correct type."
        )
    self._profile = profile | [
"def __init__(self, dt=60*60*24):\n pass",
"def __init__(self, start_date=None, subusers=None): \n self._subusers = None\n super(SubuserStats, self).__init__()\n\n # Minimum required for subusers stats\n if start_date and subusers:\n self.start_date = start_date\n for subuser_name in subusers:\n self.add_subuser(Subuser(subuser_name))",
"def __init__(self):\n self.cron = CronTab(user=True)\n self.update_cron_data()",
"def __init__(self, *args, **kwargs):\n #print(kwargs)\n self.data = []\n self.date = kwargs.get('date', date.today() - timedelta(days=1))\n #print(self.date)\n self.period = kwargs.get('period', 1)\n self.unitid = kwargs.get('unitid', '')\n self.unittype = kwargs.get('unittype', '')\n self.leadparty = kwargs.get('leadparty', '')\n self.ngcunitname = kwargs.get('ngcunitname', '')\n self.historic = kwargs.get('historic', True)\n self.latest = kwargs.get('latest', False)\n\n self.set_type(kwargs.get('type', 'Derived'))",
"def create_instance(self, date):\n raise NotImplementedError",
"def __init__(self):\n super(FATDateTimeEpoch, self).__init__(1980, 1, 1)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.creation_date = datetime.now()",
"def __init__(self, security_identifier, profile_path):\n super(UserProfile, self).__init__()\n self.profile_path = profile_path\n self.security_identifier = security_identifier",
"def __init__(self, date_time, diastolic):\n Encounter.__init__(self, date_time)\n self.__diastolic = diastolic",
"def my_init(self):\n self.temp_files = {}\n\n from datetime import timedelta\n self.max_allowed_age = timedelta(hours=6)",
"def __init__(self, settings) -> None:\n if not (\"keyData\" in settings[0]):\n settings = [{\"keyData\": {}}]\n self.settings = settings\n self.profileMap = mapProfiles(\n self.settings) # Profile ID mapped to index in the list\n # current_profile[0] = evdevId that was used to choose this profile if exists\n # current_profile[1]= list location of the profile in settings\n self.current_profile = (\n -1, 0)\n self.old_profiles = OrderedDict([self.current_profile])\n self.forget = []",
"def __init__(self, x, y, date):\n super().__init__(x, y)\n self.date = date",
"def __init__(self, day=0, month=0, year=0):\n\n self.day = day\n self.month = month\n self.year = year",
"def __init__(self, students, end_date, section, start_date=None,\n num_weeks=2):\n self.students = students\n self.section = section\n self.end_date = end_date\n self.start_date = start_date\n self.num_weeks = num_weeks\n self.display_end_date = \\\n AttendanceTableCreator.compute_display_end_date(self.end_date)\n if not self.start_date:\n self.start_date = \\\n AttendanceTableCreator.compute_default_start_date(\n self.display_end_date, self.num_weeks)\n self.days_count = self.compute_real_days()\n self.total_days_count = SchoolDB.models.get_num_days_in_period(\n self.start_date, self.display_end_date)\n self.dayperiod_type = []\n self.date_list = []\n self.day_description = []\n self.html_table = '<table id=\"headerTable\" class=\"simple\">'\n self.html_pretty = True\n self._load_days_lists()",
"def __init__(self, data=None, **kw):\n def _get_class_by_id(profile_id):\n from solariat_bottle.db.user_profiles.social_profile import DELIMITER, TwitterProfile, FacebookProfile\n pos = unicode(profile_id).rfind(DELIMITER) + 1\n if pos == 0:\n return self.__class__\n platform = None\n try:\n index = int(profile_id[pos:])\n except ValueError:\n logger.info(u\"Could not obtain platform from profile id: {}\".format(profile_id))\n else:\n platform = PLATFORM_BY_INDEX.get(index)\n class_ = {\n TwitterProfile.platform: TwitterProfile,\n FacebookProfile.platform: FacebookProfile\n }.get(platform, self.__class__)\n return class_\n\n if data:\n profile_id = data.get('_id')\n else:\n profile_id = kw.get('id')\n if isinstance(profile_id, basestring):\n self.__class__ = _get_class_by_id(profile_id)\n super(UserProfile, self).__init__(data, **kw)",
"def __init__(self, start_date=None, categories=None): \n self._categories = None\n super(CategoryStats, self).__init__()\n\n # Minimum required for category stats\n if start_date and categories:\n self.start_date = start_date\n for cat_name in categories:\n self.add_category(Category(cat_name))",
"def __init__(self, first_name, last_name, birthday, username):\n self.first_name = first_name\n self.last_name = last_name\n self.birthday = birthday\n self.username = username\n self.login_attempts = 0\n self.age = self.set_birthday()",
"def test_profile_initialization(self):\n profileObj = profile.Profile()\n self.assertIsInstance(profileObj, profile.Profile)",
"def __init__(self):\r\n self.postgres = PostgreSQL()\r\n self.couch_query = Queries()\r\n self.unit_conversion = UnitConversion()\r\n self.calc = calculate_alarm_trigger.CalculateAlarmTrigger()\r\n super(DeviceOverview, self).__init__()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the internal profile with the mapping provided. | def update(self, profile: Dict[datetime.time, float]) -> None:
if self._profile is None:
self._profile = profile
else:
self._profile.update(profile) | [
"def update_profile(self):\n self.update()",
"def UpdateProfile(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)",
"def profile(self, profile):\n\n self._profile = profile",
"def update_mapping(self, key, val):\n self.table[key] = val",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def update_profile(ProfileArn=None, ProfileName=None, IsDefault=None, Timezone=None, Address=None, DistanceUnit=None, TemperatureUnit=None, WakeWord=None, Locale=None, SetupModeDisabled=None, MaxVolumeLimit=None, PSTNEnabled=None, MeetingRoomConfiguration=None):\n pass",
"def assign_profile(self, objProf):\n self.Profile = objProf\n pass",
"def update_metadata_mapping(self, metadata_mapping):\n self.config.metadata_mapping.update(metadata_mapping)",
"def update_profile(self, channels=None): # pragma: no cover\n pass",
"def _update_profile_only(self, data, guild):\n try:\n profile = self.guild_profiles[guild.id]\n except KeyError:\n self.guild_profiles[guild.id] = GuildProfile.from_data(data)\n guild.users[self.id] = self\n else:\n profile._update_attributes(data)",
"def apply_profile(self, profile=None):\n raise NotImplementedError(\n 'operation apply_profile(...) not yet implemented')",
"def update(self, mapItem: MapItem):\n pass",
"def _put_profile(cls, profile):\n if not profile:\n return\n profile.put()\n models.MemcacheManager.delete(\n cls._memcache_key(profile.user_id),\n namespace=cls.TARGET_NAMESPACE)",
"def update(\n self,\n mapping: Mapping | Iterable[tuple[str, Any]] | None = None,\n **kwargs: Any,\n ) -> None:\n with self.changed.blocked():\n if mapping:\n items = mapping.items() if isinstance(mapping, Mapping) else mapping\n for key, value in items:\n getattr(self, key).value = value\n for key, value in kwargs.items():\n getattr(self, key).value = value\n self.changed.emit()",
"def update(self):\n uri = common.genuri('security-profile', self.uuid)\n return super(SecurityProfile, self)._action('PUT', uri)",
"def setMappedInfo(self, mapped_info):\n \n self.mapped_info = mapped_info",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The density of air varies as a function of temperature. | def density_of_air(self) -> float:
return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature) | [
"def air_density(alt, temp): \n return air_density_pressure(temp, pressure_from_altitude(alt))",
"def air_density(self):\n return self.flow_field.air_density",
"def density(self):\n return self.fluid.density(self.T_C)",
"def air_density_pressure(temp, pressure_hpa): \n R_air = 287\n temp_K = temp + 273\n pressure_pa = 100*pressure_hpa\n air_density = pressure_pa / temp_K / R_air\n return air_density",
"def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3",
"def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho",
"def density(self, x):",
"def density():\n\tpass",
"def _calc_density(self):\n self.df[\"density\"] = self.df.apply(\n lambda r: gsw.density.rho(\n r.salinity_00,\n r.temperature_00,\n r.seapressure_00),\n axis=1)",
"def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)",
"def fWaterDensity(Salinity, GasWaterRatio, Temperature, Pressure):\n\tTemp = Temperature\n\tPress = Pressure / 145.038\n\tSal = Salinity / 1000\n\tA = (-80 * Temp) + (-3.3 * (Temp**2)) + (0.00175 * (Temp**3))\n\tB = (489 * Press) + (-2 * Temp * Press) + (0.016 * (Temp**2) * Press)\n\tC = (-0.000013 * (Temp**3) * Press) + (-0.333 * (Press**2)) + (0.002 * Temp * (Press ** 2))\n\tPureWaterDensity = 1 + ((A + B + C) * 1e-6)\n\tA = 80 + (3 * Temp) + (-3300 * Sal) + (-13 * Press) + (47 * Press * Sal)\n\tB = (300 * Press) + (-2400 * Press * Sal)\n\tC = 0.000001 * (B + (Temp * A))\n\tD = 0.668 + (0.44 * Sal)\n\treturn PureWaterDensity + (Sal * (D + C))",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def density(self):\n constant = AMU_TO_KG * 1000 / 1e-24\n return self.composition.weight / self.volume * constant",
"def density(self):\n return self.get_density()",
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq",
"def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)",
"def energy_density(tau):\n return ((pi**2 * tau**4) /\n (15 * hbar**3 * c**3))",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The dynamic viscosity of air varies as a function of temperature. | def dynamic_viscosity_of_air(self) -> float:
return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (
self.ambient_temperature + 110.4
) | [
"def dynamic_viscosity(self):\n return self.fluid.viscosity(self.T_C)",
"def kinematic_viscosity_of_air(self) -> float:\n\n return self.dynamic_viscosity_of_air / self.density_of_air",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def air_density(self):\n return self.flow_field.air_density",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def dynamic_pressure(self):\n return 0.5 * self.density * self.velocity ** 2",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis",
"def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)",
"def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)",
"def thermal_voxel_respiration_estimation():\n pass",
"def air_quality_value(self):\n pass",
"def calculate_average_viscosity_at_40(self):\n if self.viscosity_at_40.low == None or self.viscosity_at_40.low == '':\n return 0\n elif self.viscosity_at_40.high == None or self.viscosity_at_40.high == '':\n return 0\n else:\n # assuming here that the values are valid\n try:\n return (int(self.viscosity_at_40.high) - int(self.viscosity_at_40.low))/2\n except ValueError:\n print(\"Invalid viscosity values for product {0}\".format(self.material_code))\n print(\"Viscosity at 100 low = \" +str(self.viscosity_at_100.low))\n print(\"Viscosity at 100 high = \" +str(self.viscosity_at_100.high))\n return 0",
"def get_specific_heat() -> float:\n return 1006.0",
"def kinematic_viscosity(self):\n return self.mu / self.rho",
"def specific_kinetic_energy(particles):\n\n return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2)",
"def variational(self):\n return self._variational",
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the heat capacity of air in Joules per kilogram Kelvin. The heat capacity of air varies as a function of temperature and is given by an empirically derived formula. | def heat_capacity_of_air(self) -> float:
return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2 | [
"def vibrational_heat_capacity(self, temperature, volume):\n y = self.debye_temperature(volume) / temperature\n factor = 3. / y ** 3\n if y < 155:\n integral = quadrature(lambda x: x ** 4 *np.exp(x)/ (np.exp(x) - 1.)**2, 0, y)\n return 3*self.kb * self.natoms * list(integral)[0] * factor\n else:\n return self.kb * self.natoms * 4./5.*scipy_constants.pi**4 * factor",
"def heatCapacity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"heat capacity\", Tk)\n return (\n sum(\n [\n +1.38642e-13 * Tk**4,\n -6.47481e-10 * Tk**3,\n +1.02345e-06 * Tk**2,\n -4.32829e-04 * Tk,\n +1.06133e00,\n ]\n )\n * 1000.0\n ) # kJ / kg K to J / kg K",
"def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density",
"def volumetric_heat_capacity(self):\n return self.rho * self.cp",
"def heatCapacity(planet, T):\n c = planet.cpCoeff\n return np.polyval(c, T)",
"def heatCapacity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"heat capacity\", Tk)\n\n return 159 - 2.72e-2 * Tk + 7.12e-6 * Tk**2",
"def heat_capacity(r, phi, q, kT):\n pot = q*(phi - phi[0])\n a = np.trapz(pot**2 * np.exp(-pot/kT) * r, r)\n b = np.trapz(pot * np.exp(-pot/kT) * r, r)\n c = np.trapz(np.exp(-pot/kT) * r, r)\n return 3/2 + 1/kT**2 * (a/c - b**2/c**2)",
"def specific_heat_capacity(self):\n return self.fluid.specific_heat(self.T_C)",
"def constant_pressure_heat_capacity(partition, temperature,\n scale_factor=const.N_A, *, d_t=1E-4):\n return (enthalpy(partition, temperature + d_t, scale_factor) -\n enthalpy(partition, temperature - d_t, scale_factor)) / (2 * d_t)",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def helmholtz_free_energy(partition, temperature, scale_factor=const.N_A):\n formula = -np.log(partition(temperature)) * temperature * const.KB_J_PER_K\n return scale_factor * formula",
"def constant_volume_heat_capacity(partition, temperature,\n scale_factor=const.N_A, *, d_t=1E-4):\n return (\n (thermodynamic_energy(partition, temperature + d_t, scale_factor) -\n thermodynamic_energy(partition, temperature - d_t, scale_factor))\n / (2 * d_t))",
"def capacity(self):\n\n capacity = None\n if self.power_density is not None:\n capacity = self.area * self.power_density\n\n return capacity",
"def get_specific_heat() -> float:\n return 1006.0",
"def enthalpy(partition, temperature, scale_factor=const.N_A):\n # Note, scaling in the formula is 1.0 and not the given scale factor,\n # as the user defined scale_factor will be applied below.\n formula = (thermodynamic_energy(partition, temperature, 1.0) +\n temperature * const.KB_J_PER_K)\n return scale_factor * formula",
"def icing_weight(self, tech_factor):\n\n mass_deicing = self.mass['engine']*k_air_engine\n self.mass['anti_icing'] = mass_deicing\n return mass_deicing",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f",
"def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The kinematic viscosity of air varies as a function of temperature. | def kinematic_viscosity_of_air(self) -> float:
return self.dynamic_viscosity_of_air / self.density_of_air | [
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def kinematic_viscosity(self):\n return self.mu / self.rho",
"def dynamic_viscosity(self):\n return self.fluid.viscosity(self.T_C)",
"def kinematic_viscosity(mu, rho):\n\n nu = mu / rho\n\n return nu",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def kts(self):\n return CAL_TO_J * 0.0077 * (self.rho/1000.0) * (self.rho/1000.0)",
"def specific_kinetic_energy(particles):\n\n return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2)",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def kinetic_energy(self):\n return .5*self.m*self.speed**2",
"def ice_viscosity(self, T, Tm):\n\t\treturn self.constants.visc0i * np.exp(self.constants.Qs * (Tm / T - 1) / self.constants.Rg / Tm)",
"def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)",
"def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def von_klitzing_constant(self):\n return self._von_klitzing_constant",
"def Nu_constantsurfacetemp_laminar(self):\n return 3.66",
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)",
"def kineticEngergy(self):\r\n \r\n kineticEnergy = (1/2) * self.mass * (norm(self.vel)**2)\r\n \r\n return kineticEnergy",
"def softness_coefficient_viscosity(self):\n a = self.alpha\n return 6 * a / ((a + 1) * (a+2))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines the radiative temperature of the sky. The "sky," as a black body, has a radiative temperature different to that of the surrounding air, or the ambient temperature. This function converts between them and outputs the sky's radiative temperature. | def sky_temperature(self) -> float:
return 0.0552 * (self.ambient_temperature**1.5) | [
"def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def ambient_temp(self):\n if self.temp_scale == CELSIUS:\n return self.ambient_temp_celsius\n return self.ambient_temp_fahrenheit",
"def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)",
"def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT",
"def get_temperature(self):\n temp_cpu = get_cpu_temp()\n # Calculates the real temperature compensating CPU heating.\n temp_avg = (self.get_temperature_from_humidity() + self.get_temperature_from_humidity()) / 2\n calibrated = temp_avg - ((temp_cpu - temp_avg) / 1.2)\n calibrated = get_smooth(calibrated)\n return calibrated",
"def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K",
"def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)",
"def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def calculate_windchill_temperature(amb_temp, windspeed):\n\ttwc = 33 + (amb_temp - 33) * (.474 + .454 * sqrt(windspeed) - 0.0454 * windspeed )\n\treturn(twc)",
"def read_temperature(self):\n raw = self.analog_read(THERM_PIN)\n return self._therm_value_to_temp(raw)",
"def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0",
"def antenna_temperature(flux,effective_area):\n return flux*Jy*effective_area/Physics.k/2",
"def black_body_flux_to_temperature(flux: float) -> float:\n return (flux / _STEFAN_BOLTZMAN)**0.25",
"def _calculate_temperature(self):\n\n # make changes in base temperature according to tendency\n change_tendency_probability = 0.001\n base_change_probability = 0.05\n\n tendency_changing = random.uniform(0, 1) < change_tendency_probability\n base_changing = random.uniform(0, 1) < base_change_probability\n\n if tendency_changing:\n self.tendency = -self.tendency\n if base_changing:\n old_factor = 0.9\n new_factor = 1 - old_factor\n\n new_base_temperature = random.randrange(5, 10) * self.tendency\n self.base_temperature = truncate(\n old_factor * self.base_temperature\n + new_factor * new_base_temperature,\n -20, 40\n )\n\n # Calculate all temperature factors with their weights.\n # there will be max 8 degrees warmer in a day\n day_heat = 8 * (sin(self.basetime / 230 + 300) / 2 + 0.5)\n sun_heat = 2 * self.weather['light']\n wind_chill = 5 * self.weather['wind']\n rain_chill = 3 * self.weather['rain']\n\n new_temp = (self.base_temperature\n + day_heat\n + sun_heat\n - wind_chill\n - rain_chill)\n\n old_factor = 0.3\n new_factor = 1 - old_factor\n self.weather['temperature'] = truncate(\n old_factor * self.weather['temperature']\n + new_factor * new_temp,\n -20, 40\n )",
"def target_temperature(self):\n return self._thermostat_temperature",
"def get_ambient_temperature(self) -> float:\n return self.query(WeatherCommand.GET_SENSOR_TEMP) / 100.",
"def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The thermal conductivity of air varies as a function of temperature. | def thermal_conductivity_of_air(self) -> float:
# This more accurate equation is not used by the paper.
# return (0.02646 * self.ambient_temperature ** 1.5) / (
# self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))
# )
# The reference suggests this equation is accurate to 1%.
return 0.02646 * (self.ambient_temperature / 300) ** 0.8646 | [
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def thermal_conductivity(self):\n return self._thermal_conductivity",
"def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity",
"def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3",
"def _compute_temperature(self):\n return 4.0 / np.log(1.0 + 4.0 / (self.demon_energy_accum / (self.mcs * self.N ** 2)))",
"def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n\n return 2.45 * Tk / (86.334 + 0.0511 * Tk)",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)",
"def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)",
"def getTemperature(self):\n print (\"*** Station.getTemperature()\")\n return self.averageProbes(self.temperatureProbes)",
"def temperature(self):\n return float(self._current_observation['temp_c'])",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2",
"def get_temperature_factor(self):\n return self.temp_factor",
"def get_current_temperature(self) -> float:",
"async def _get_temperature_internal(self) -> float:\n\n self._device._update_temperature()\n return self._device._temperature[\"CCD\"]",
"def antenna_temperature(flux,effective_area):\n return flux*Jy*effective_area/Physics.k/2"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The thermal expansion coefficient of air varies as a function of temperature. | def thermal_expansivity_of_air(self) -> float:
return 1 / self.ambient_temperature | [
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)",
"def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity",
"def _compute_temperature(self):\n return 4.0 / np.log(1.0 + 4.0 / (self.demon_energy_accum / (self.mcs * self.N ** 2)))",
"def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial",
"def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def antenna_temperature(flux,effective_area):\n return flux*Jy*effective_area/Physics.k/2",
"def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3",
"def get_phonopy_thermal_expansion(energies, volumes, force_constants, structure, t_min, t_step,\n t_max, mesh, eos, pressure=0):\n\n # quasi-harmonic approx\n phonopy_qha = get_phonopy_qha(energies, volumes, force_constants, structure, t_min, t_step,\n t_max, mesh, eos, pressure=pressure)\n\n # thermal expansion coefficient and temperature\n max_t_index = phonopy_qha._qha._max_t_index\n alpha = phonopy_qha.get_thermal_expansion()[:max_t_index]\n T = phonopy_qha._qha._temperatures[:max_t_index]\n return alpha, T",
"def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)",
"def formation_temperature(self) -> u.K:\n return self.temperature[np.argmax(self.ioneq)]",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed",
"def get_temperature_factor(self):\n return self.temp_factor",
"def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)",
"def temperature(self):\n return float(self._current_observation['temp_c'])",
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def get_concentration(self, e_fermi: float, temperature: float) -> float:\n if self.fixed_concentration is None:\n expfac = -self.get_formation_energy(e_fermi) / (kboltz * temperature)\n concentration = self.degeneracy * np.exp(expfac)\n else:\n concentration = self.fixed_concentration\n return concentration"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |