query
stringlengths 9
9.05k
| document
stringlengths 10
222k
| negatives
listlengths 19
20
| metadata
dict |
---|---|---|---|
Updates meta information in meta file.
|
def update_meta_file(meta: Dict):
print("Info: Updating meta file.")
try:
with open(meta_file_name, "w") as meta_file:
json.dump(meta, meta_file)
except OSError:
sys.exit("Could not open/write meta file: meta.json.")
|
[
"def update_metadata(self):\n for element in self.elements:\n self.meta_data[element.name] = element.meta_data()",
"def update_metadata(self, metadata):\n if metadata:\n self._metadata.update(metadata)\n self.to_swap_dir()",
"def update_metadata(self, new_control):\n self.meta.update(new_control.meta)",
"def _update_metadata(self, headers, meta_data):\r\n for key, value in list(meta_data.items()):\r\n key = 'x-ms-meta-%s' % (key)\r\n headers[key] = value",
"def __update_meta_data(self):\n\n # Extracts metadata from final image.\n self.img_meta_data.type = str(self.img_pillow.format)\n self.img_meta_data.width, self.img_meta_data.height = self.img_pillow.size\n\n # Updates EXIF data if available.\n self.img_meta_data.exif = eu.get_exif_data(self.img_pillow)\n\n # Logs acquired meta data\n self.log(self.rsc.RECOGNITION_ACQUIRED_META_DATA.format(str(self.img_meta_data.__dict__)))",
"def meta_update(client, server, metadata):\r\n metadata = meta_serialize(metadata)\r\n current_md = server.metadata\r\n to_del = [key for key in current_md.keys() if key not in metadata]\r\n if len(to_del) > 0:\r\n client.servers.delete_meta(server, to_del)\r\n\r\n client.servers.set_meta(server, metadata)",
"def update_exif(self):\n self.file_update_metadata()\n self.save()",
"def meta_load_socrata(self):\n import json\n\n meta = self.filesystem.download('meta')\n\n with open(meta) as f:\n d = json.load(f)\n\n md = self.metadata\n md.about.title = d['name']\n md.about.summary = d['description']\n\n md.write_to_dir()",
"def update_metadata(self, metadata):\r\n return self.manager.update_metadata(self, metadata, node=self)",
"def file_update_metadata(self):\n if splitext(self.name)[1].lower() in IMAGE_TYPES:\n assert self.deleted is None, \\\n u\"Can't update deleted file: {0}\".format(self.abspath)\n # The file must be accessible\n assert exists(self.abspath), \\\n u\"File not accessible: {0}\".format(self.abspath)\n\n img_exiv2 = self.file_exiv2()\n if img_exiv2 is None:\n logger.warn(\"Unable to read metadata from: {0}\".format(self.abspath))\n return\n\n #\n # Keywords\n #\n keywords = self.file_keywords()\n old_keywords = self.keyword_set.all()\n # Minimise db lookups by caching in a dictionary\n oldkwdict = {}\n for okw in old_keywords:\n oldkwdict[okw.name] = okw\n old_keywords = set(oldkwdict.keys())\n removed = old_keywords - keywords\n if len(removed) > 0:\n logger.debug(\"Removing keywords from {0}: {1}\".format(self.name, removed))\n added = keywords - old_keywords\n if len(added) > 0:\n logger.debug(\"Adding keywords from {0}: {1}\".format(self.name, added))\n for kw in removed:\n self.keyword_set.filter(name=kw)[0].files.remove(self)\n for kw in added:\n Keyword.get_or_add(kw).files.add(self)\n\n #\n # Date / Times\n #\n dt = img_exiv2.get('Exif.Photo.DateTimeDigitized', None)\n if dt is not None:\n try:\n # Side affect of adding to FileDate is to convert to datetime\n dt, field = FileDate.add(self, 'Exif.Photo.DateTimeDigitized', dt)\n self.date = dt\n self.date_field = field\n except ValueError as ve:\n logger.warn(\"{0} no date: {1}\".format(self.abspath, str(ve)))\n dt = img_exiv2.get('Exif.Photo.DateTimeOriginal', None)\n if dt is not None:\n try:\n # Side affect of adding to FileDate is to convert to datetime\n dt, field = FileDate.add(self, 'Exif.Photo.DateTimeOriginal', dt)\n self.date = dt\n self.date_field = field\n except ValueError as ve:\n logger.warn(\"{0} no date: {1}\".format(self.abspath, str(ve)))\n dt = img_exiv2.get('Exif.Image.DateTime', None)\n if dt is not None:\n try:\n # Side affect of adding to FileDate is to convert to datetime\n dt, field = 
FileDate.add(self, 'Exif.Image.DateTime', dt)\n self.date = dt\n self.date_field = field\n except ValueError as ve:\n logger.warn(\"{0} no date: {1}\".format(self.abspath, str(ve)))\n return",
"def test_v2_setting_metas_update(self):\n pass",
"def add_meta_info(self, meta_info):\r\n self.meta_info = {**self.meta_info, **meta_info}",
"def _updateinfos(self, filename='', extrainfos=None):\n self['infos'].updateinfos(filename, extrainfos)",
"def write_meta(self):\n\t\t#raise NotImplementedError\n\t\tpath = os.path.join(self.get_private_dir(create=True), \"meta.yaml\")\n\t\tunits = {key:str(value) for key, value in self.units.items()}\n\t\tmeta_info = dict(description=self.description,\n\t\t\t\t\t\t ucds=self.ucds, units=units, descriptions=self.descriptions,\n\t\t\t\t\t\t )\n\t\tvaex.utils.write_json_or_yaml(path, meta_info)",
"def update_account_meta(self, user, account, domain, meta, replace=False):\n return",
"def upsert_meta(self, pk, meta):\n pass",
"def RegenerateMetaData():\r\n\r\n # Get the posts.\r\n posts = os.listdir(Settings.Settings.webRoot + \"/posts/\") \r\n \r\n # Create meta data dictionary.\r\n metaInfo = {}\r\n \r\n # Grouped by tag. Key is tag, value is list of post md files with tag.\r\n metaInfo[\"byTag\"] = {}\r\n \r\n # Tags per post. Key is post file, value is list of tags.\r\n metaInfo[\"perPostTags\"] = {}\r\n \r\n # Title/filename map. Key is filename, value is post title.\r\n metaInfo[\"byTitle\"] = {}\r\n \r\n # Sorted by date. Value is list of all articles sorted by date.\r\n metaInfo[\"byDate\"] = {}\r\n \r\n # Dictionary of summaries. Key is post filename, value is summary.\r\n metaInfo[\"summaries\"] = {}\r\n \r\n # Collect the data.\r\n for postFile in posts:\r\n \r\n # Open the selected file. \r\n with open(Settings.Settings.webRoot + \"/posts/\" + postFile, 'r') as myfile:\r\n\r\n # Create markdown.\r\n markedDownText = markdown2.markdown(myfile.read(), extras=[\"fenced-code-blocks\", \"metadata\"])\r\n\r\n # Get meta info.\r\n meta = markedDownText.metadata\r\n \r\n pprint.pprint(meta)\r\n \r\n # Add title map entry.\r\n metaInfo[\"byTitle\"][postFile] = meta[\"title\"]\r\n \r\n # Add summary entry.\r\n metaInfo[\"summaries\"][postFile] = meta[\"summary\"]\r\n \r\n # Get list of tags.\r\n tags = [x.strip() for x in meta[\"tags\"].split(',')]\r\n \r\n # Set the per post tags.\r\n metaInfo[\"perPostTags\"][postFile] = [x.strip() for x in meta[\"tags\"].split(',')]\r\n \r\n # Add to tag lists.\r\n for tag in tags:\r\n metaInfo[\"byTag\"].setdefault(tag, [])\r\n metaInfo[\"byTag\"][tag].append(postFile)\r\n \r\n # The date is . separated in Y.M.D format.\r\n dt = datetime.datetime.strptime(meta[\"date\"], '%Y.%m.%d')\r\n \r\n # Pretty severe limitation since we use dates as keys, we can't do two posts\r\n # created on the same day. Warn about it for now.\r\n if dt in metaInfo[\"byDate\"]:\r\n print \"WARNING: already have a post with this date. 
The old one will not be in the by-date meta dictionary.\"\r\n \r\n # Add it.\r\n metaInfo[\"byDate\"][datetime.datetime.strptime(meta[\"date\"], '%Y.%m.%d')] = postFile\r\n \r\n \r\n # Store the by-date information as a stored dictionary.\r\n #metaInfo[\"byDate\"] = collections.OrderedDict(sorted(metaInfo[\"byDate\"].items()))\r\n # Can't pickle an ordered dict? We will have to sort when we retrieve.\r\n \r\n # Print the meta data for use inspection. \r\n pprint.pprint(metaInfo) \r\n \r\n # Create the pickle.\r\n with open(Settings.Settings.webRoot + \"/meta/meta.pickle\", 'wb') as handle:\r\n pickle.dump(metaInfo, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n \r\n # Test the pickle.\r\n with open(Settings.Settings.webRoot + \"/meta/meta.pickle\", 'rb') as handle:\r\n b = pickle.load(handle)\r\n \r\n # Print the meta data for use inspection. \r\n pprint.pprint(b)",
"async def test_updating_metadata(self):\n # TODO: implement test_updating_metadata",
"def set_metadata(self, dict_):\n for key, value in dict_.items():\n self.config.set('metadata', key, value) \n if self.status == STATE_NEEDS_METADATA and \\\n self.config.get('metadata', 'author') and \\\n self.config.get('metadata', 'title'):\n\n self.config.set('process', STATE_NEED_PDFS)\n\n self.write_config()\n\n self._set_current_status()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the server software version string.
|
def version_string(self):
return self.server_version + ' ' + self.sys_version
|
[
"def get_version() -> str:\n return VERSION",
"def get_software_version(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 0F\"))\n temp = self.board_socket.recv(1024)\n return(temp[3:10])",
"def server_version(self):\n ret = getattr(self, \"_SERVER_VERSION\", \"\")\n return ret",
"def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version",
"def _get_version_string() -> str:\n return \" GDM Version: {}. Registered extension packages: {}.\".format(\n _version.version, extensions.get_registered_package_info())",
"def get_version():\n return 'PyS2OPC v' + VERSION + ' on ' + ffi.string(libsub.SOPC_LibSub_GetVersion()).decode()",
"def firmware_version(self) -> str:\n self._logger.info(\"Retrieving current firmware version\")\n return self._device_info().get(\"firmware\")",
"def firmware_update_version(self) -> str:\n self._logger.info(\"Retrieving firmware update version...\")\n return self._device_info().get(\"NewVer\")",
"def show_version():\n response = \"App Version: \" + str(version)\n return response",
"def _get_software_version(self):\n return self.__software_version",
"def cmd_version(self):\n import re\n return \\\n re.sub(r'FPE[0-9]>', '',\n self.connection.send_command(\n \"version\",\n reply_pattern=\"Observatory Simulator Version .*\"))",
"def get_version(self, fingerprint):\n\n try:\n desc = self.control.get_server_descriptor(fingerprint)\n return str(desc.tor_version)\n except stem.ControllerError:\n return ''",
"def get_version_string():\n version = ffi.string(C.blosc_get_version_string())\n if not isinstance(version, str):\n version = version.decode()\n return version",
"def product_version_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_version_name\")",
"def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]",
"def get_version(self):\n url = self.url + '/api/sys/firmware/version/mgmt'\n resp = self._req(url)\n if len(resp):\n self.version = resp['firmwareRunning'][0]['packageVersion']\n logger.info('FXOS Software {} Version is {}'.format(self.url, self.version))",
"def get_system_version_info() -> str:\n output_template = '{:<12} {}'\n line_separator = '-' * 60\n not_found_str = '[Not Found]'\n out_lines = []\n\n # System (Python, OS)\n out_lines += ['System Version Info', line_separator]\n out_lines += [\n output_template.format(name, version) for name, version in (\n ('OS', '%s' % platform.platform()),\n ('Python', '%d.%d.%d' % sys.version_info[0:3]),\n )\n ]\n\n # Third-Party Packages\n out_lines += ['', 'Package Version Info', line_separator]\n backend_modules = (\n 'appdirs',\n 'av',\n 'click',\n 'cv2',\n 'moviepy',\n 'numpy',\n 'tqdm',\n )\n for module_name in backend_modules:\n try:\n module = importlib.import_module(module_name)\n out_lines.append(output_template.format(module_name, module.__version__))\n except ModuleNotFoundError:\n out_lines.append(output_template.format(module_name, not_found_str))\n\n # External Tools\n out_lines += ['', 'Tool Version Info', line_separator]\n\n tool_version_info = (\n ('ffmpeg', get_ffmpeg_version()),\n ('mkvmerge', get_mkvmerge_version()),\n )\n\n for (tool_name, tool_version) in tool_version_info:\n out_lines.append(\n output_template.format(tool_name, tool_version if tool_version else not_found_str))\n\n return '\\n'.join(out_lines)",
"def version():\n return 'v%s' % ninecms.__version__",
"def versionString(version):\n ver = list(map(str, version))\n numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]\n return '.'.join(numbers) + '-'.join(rest)",
"def version(self):\n stdout, stderr = self.ctx.execute([self.exe, '-version'], quieter=1)\n return stdout.decode().strip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Insert string s at index i.
|
def insert(self, index, s):
raise NotImplementedError
|
[
"def insert_string(string: str, index: int, insert: str):\n return string[:index] + insert + string[index:]",
"def insert(self, text, i):\n self.fqn[i] = text;\n self.len = i + 1\n for j in range(self.len, 256): # wipe out stuff at higher indices\n self.fqn[j] = ''",
"def insert(a_string, sub_string, index):\n return a_string[:index] + sub_string + a_string[index:]",
"def insert_at(index, my_chr, my_str):\n if index < 0 or index > len(my_str):\n print \"invalid index\"\n return my_str\n # use substring and add given index\n return my_str[:index] + my_chr + my_str[index:]",
"def _insert(self, str_to_insert='', pre_str=None, pre_ind=None):\n\t\tself._replace(str_to_insert=str_to_insert, pre_str=pre_str, post_str='', pre_ind=pre_ind)",
"def insert(old_string, new_string, index, index_offset):\n # in log, index starts at 1\n index += index_offset\n return old_string[:index] + new_string + old_string[index:]",
"def insert(self, string: 'SbString', insertbefore: 'int') -> \"void\":\n return _coin.SbStringList_insert(self, string, insertbefore)",
"def insert(self, index, text):",
"def insert_char(string, char, position):\n string_ = list(string)\n string_[position] = char\n return ''.join(string_)",
"def insert(self, i, x):",
"def add_substring_first(self, index, s):\n assert (index < len(self.rhs_list))\n\n # Add all symbols\n for symbol in self.substring_first_set_list[index]:\n s.add(symbol)\n\n return",
"def insert_character(a_word, c, position=0):\n n = len(a_word)\n if (position >= n + 1) or (position < 0):\n print('Warning out of range, cannot insert ')\n return \"\"\n return a_word[:position] + c + a_word[position:]",
"def insert(self, string):\n # make sure the string not already in the tree\n if self.contains(string) is False:\n # find the node to start adding new letters from\n current_node, index = self._find_node(string)\n # for each index position in the string greater than index\n for i in range(index, len(string)):\n # returned, add a new child node of the node returned\n next_char = string[i]\n new_node = PrefixTreeNode(next_char)\n current_node.add_child(next_char, new_node)\n # then move the current node to its child\n current_node = new_node\n # mark the last node to be added as terminal\n current_node.terminal = True\n # increment size of the tree\n self.size += 1",
"def insertSymbol(password, symbol, index):\n return password[:index] + symbol + password[index:]",
"def insertUnknowns(self, index, unknowns):\n for i in xrange(unknowns):\n self.data.insert(index, \"X\")",
"def insert_at_position(DL, n, data):\n DL.insert(data, n - 1)",
"def inject_string(string_input, index, injection):\n return string_input[:index] + str(injection) + string_input[index:]",
"def __setitem__(self, i: 'int', value: 'SbString') -> \"void\":\n return _coin.SoMFString___setitem__(self, i, value)",
"def insert(self, i, item):\n if item != None and item not in self:\n list.insert(self, i, item)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a single bug to the simulation.
|
def add_bug(self, index=None, bug=None, cell_iter=None):
bug = bug or Bug()
if bug in self.bugs:
LOG.warn("Bug %s already added" % bug)
return False
if index:
cell = self.grid[index]
else:
cell_iter = cell_iter or self.available_cell_iter
index, cell = cell_iter.next()
if self._can_add(index, cell):
cell.bugs.add(bug)
self.bugs.add(bug)
bug.idx = index
return bug
else:
LOG.debug("No more bug vacancies at cell (%s,%s)" % index)
return False
|
[
"def addIssue(self, issue):\r\n # type: (Issue) -> ()\r\n # let's worry about manual indexing later?\r\n self.issues.append(issue)\r\n self.fireTableDataChanged()",
"def foundBug(self):\n pass",
"def bug(*_):\n\n return REPLY(content=None, attachments=[\n ISSUE_NEW,\n ISSUE_BUG,\n ])",
"def _add(self, issue: Issue) -> None:\n self._issueList.append(issue)\n if isinstance(issue, LocalizedSourceIssue):\n index = issue.line\n else:\n index = 0\n if index not in self._issuesAtLine:\n self._issuesAtLine[index] = []\n self._issuesAtLine[index].append(issue)",
"def add_problem(self, problem):\n if self.patient != problem.patient:\n raise dbexception.AutoMODatabaseError(\"The Problem and Encounter should be from the same patient\")\n\n if problem in self.problems:\n raise dbexception.AutoMODatabaseError(\"This problem already exists in this encounter\")\n\n self.problems.append(problem)",
"def add_item(self, col_id, issue_name, issue_descript):\n # get col with same id as col_id\n for col in self.board:\n if col.id == col_id:\n col.append(Item(issue_name,issue_descript, self.__curr_issue_id))\n self.__curr_issue_id += 1\n return\n print(col_id, \"Does not exist\")",
"def add_zombie(self, row, col):\n self._zombie_list.append((row, col))\n print \"Added a zombie to the list\"",
"def add_bug_comment(request, pk):\n bug = get_object_or_404(Bug, pk=pk)\n if request.method == \"POST\":\n form = AddBugCommentForm(request.POST)\n\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.bug = bug\n comment.save()\n return redirect('bug_description', pk=bug.pk)\n else:\n form = AddBugCommentForm()\n return render(request, \"bugs/addbugcomment.html\", {\"form\": form})",
"def setBugs(self, bugs):\n self.bugs = bugs",
"async def bug(self, ctx, message):\r\n channel = self.bot.get_channel(762306208580370464)\r\n embed = discord.Embed(title='Bug report!', description='Your bug report, along with your discord name, discriminator, and id were sent to the bot owner. He will look into your report and hopefully fix the issue soon!', colour=discord.Color.green())\r\n if channel is None:\r\n await ctx.send('Unable to send request.')\r\n else:\r\n await channel.send(f'{ctx.message.content} | User: {ctx.message.author} ID: {ctx.message.author.id}')\r\n await ctx.send(embed=embed)",
"def _add_problem(cls, _):\n prob_adder = ProblemAdder(db_gateway=DjangoGateway(),\n presenter=CliPresenter())\n user_input = cls._record_problem_data()\n try:\n prob_adder.add_problem(\n difficulty=user_input['difficulty'],\n url=user_input['url'],\n name=user_input['name'],\n tags=cls._get_tags_from_user())\n except ValueError as err:\n print(err)\n return",
"def add_tile(self, tile):\n x = tile.x\n y = tile.y\n if self.tile_exists(x, y):\n raise ValueError('A tile already exists at ({}, {})'.format(x, y))\n\n self.world[(x, y)] = tile",
"def _openBug( self, sType, bSerial, sCompany, sName, bCamera=None, sSummary=None, sDescription=None ):\n\n\t\ttry:\n\t\t\ttime.sleep( 10 )\n\n\t\t\tif bCamera is None:\n\t\t\t\tdbgMsg( 'Opening Bug [%s] serial-[%d] company-[%s] name-[%s]' % \\\n\t\t\t\t\t( sType, bSerial, sCompany, sName ) )\n\t\t\telse:\n\t\t\t\tdbgMsg( 'Opening Bug [%s] serial-[%d] company-[%s] name-[%s] camera-[%d]' % \\\n\t\t\t\t\t( sType, bSerial, sCompany, sName, bCamera ) )\n\n\t\t\tif self._fOfflineDebug:\n\t\t\t\tdbgMsg( 'Skipping since we are in offline debug mode' )\n\t\t\t\treturn True\n\n\t\t\tsFileIn = ''\n\t\t\tsFileOut = ''\n\t\t\tsAssignee = ''\n\t\t\tif sType == \"Offline\":\n\t\t\t\tsFileIn = BASE + 'templates/dvs-offline.bug'\n\t\t\t\tsFileOut = '/tmp/dvs-offline.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telif sType == \"Camera Down\":\n\t\t\t\tsFileIn = BASE + 'templates/camera-down.bug'\n\t\t\t\tsFileOut = '/tmp/camera-down.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telif sType == \"Maintenance Reminder\":\n\t\t\t\tsFileIn = BASE + 'templates/maintenance-reminder.bug'\n\t\t\t\tsFileOut = '/tmp/maintenance-reminder.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telif sType == \"Weekly Check\":\n\t\t\t\tsFileIn = BASE + 'templates/weekly-check.bug'\n\t\t\t\tsFileOut = '/tmp/weekly-check.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telif sType == \"On-Site Maintenance\":\n\t\t\t\tsFileIn = BASE + 'templates/onsite-maintenance.bug'\n\t\t\t\tsFileOut = '/tmp/onsite-maintenance.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telif sType == \"DVS Log\":\n\t\t\t\tsFileIn = BASE + 'templates/dvs-log.bug'\n\t\t\t\tsFileOut = '/tmp/dvs-log.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telse:\n\t\t\t\traise Exception, 'unknown bug type [%s]' % sType\n\n\t\t\t# Sanitize input for perl replacements\n\t\t\tsCompany = sCompany.replace( '\"', \"_\" )\n\t\t\tsName = sName.replace( '\"', \"_\" )\n\t\t\tsAssignee = 
sAssignee.replace( '\"', \"_\" )\n\n\t\t\tos.system( 'cp %s %s' % ( sFileIn, sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@SERIAL\\@\\@|%03d|g\\\" %s\" % ( bSerial, sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@COMPANY\\@\\@|%s|g\\\" %s\" % ( re.escape( sCompany ), sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@NAME\\@\\@|%s|g\\\" %s\" % ( re.escape( sName ), sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@ASSIGNEE\\@\\@|%s|g\\\" %s\" % ( re.escape( sAssignee ), sFileOut ) )\n\t\t\tif bCamera is not None:\n\t\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@CAMERA\\@\\@|%03d|g\\\" %s\" % ( bCamera, sFileOut ) )\n\t\t\tif sSummary is not None:\n\t\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@SUMMARY\\@\\@|%s|g\\\" %s\" % ( sSummary, sFileOut ) )\n\t\t\tif sDescription is not None:\n\t\t\t\tos.system( \"perl -pi -e \\\"s|\\@\\@DESCRIPTION\\@\\@|%s|g\\\" %s\" % ( sDescription, sFileOut ) )\n\n\t\t\tos.system( \"perl -pi -e \\\"s|@|\\\\\\\\\\\\@|g\\\" %s\" % sFileOut )\n\n\t\t\tbStatus = os.system( '/usr/local/bin/bz_webservice_demo.pl --uri http://tickets.dividia.net/xmlrpc.cgi --rememberlogin --login bugzilla --password \\'dt!8734\\' --create %s 1>/dev/null 2>/dev/null' % sFileOut )\n\t\t\tos.unlink( sFileOut )\n\n\t\t\tif not os.WIFEXITED( bStatus ):\n\t\t\t\treturn False\n\n\t\t\tif os.WEXITSTATUS( bStatus ) != 0:\n\t\t\t\treturn False\n\n\t\t\treturn True\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error opening new bug in bugzilla' )\n\t\t\terrMsg( e )\n\t\t\treturn False",
"def __iadd__(self, other):\n for bug in other:\n self.add(bug)\n return self",
"def marker_single_add(self, x):\n pass",
"def add_cell(self, cell):\n\n if not isinstance(cell, openmc.Cell):\n msg = 'Unable to add a Cell to Universe ID=\"{0}\" since \"{1}\" is not ' \\\n 'a Cell'.format(self._id, cell)\n raise TypeError(msg)\n\n cell_id = cell.id\n\n if cell_id not in self._cells:\n self._cells[cell_id] = cell",
"def post(self, args):\n\n\t\tparams={}\n\t\tparams['product'] = args['product']\n\t\tparams['component'] = args['component']\n\t\tparams['summary'] = args['summary']\n\t\tparams['description'] = args['description']\n\t\t#params['assigned_to'] = args['assigned_to']\n\t\tparams['version'] = args['version']\n\t\t\n\t\tresult = self.call_bz(self.bz.Bug.create, params)\n\t\tlog_info('Bug %d submitted' % result['id'])\n\t\treturn result",
"def post_comment(comment, bug_id):\n success = bz.notify_bug(comment, bug_id)\n if success:\n log_msg('Posted comment: \"%s\" to %s' % (comment, bug_id))\n else:\n log_msg('Could not post comment to bug %s. Adding to comments table'\n % (bug_id))\n cmnt = Comment(comment=comment, bug=bug_id)\n db.CommentInsert(cmnt)",
"def add_bucell(self, bucell):\n\t\tsystem = self.system\n\t\tsystem.add_bucell(bucell)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Rudimentary heat diffusion. Suppose half the lost heat radiates and the other half transmits.
|
def diffuse(self):
transmission_coeff = 0.3
# allow the grid to cool down
sink_coeff = 0.1
for idx, cell in self.grid.cells():
# how much total heat the cell radiates
emission_loss = cell.heat * transmission_coeff
neighbors = self.grid.neighbors(idx)
for nidx,n in neighbors:
# Only colder cells (positive delta) will absorb the heat.
# Sum of transmissions cannot be greater that the total emission.
delta = cell.heat - n.heat
n.heat += emission_loss / len(neighbors)
cell.heat -= emission_loss + (cell.heat * sink_coeff)
|
[
"def heat(self):\r\n return self.m_act * (self.outlet.h - self.cond.h)",
"def _momentum_diffusion(self):\n\n # copy grid ids and other variables\n wet_pwet_h_links = self.wet_pwet_horizontal_links\n wet_pwet_v_links = self.wet_pwet_vertical_links\n link_east = self.link_east\n link_west = self.link_west\n link_north = self.link_north\n link_south = self.link_south\n\n dx = self.grid.dx\n dy = self.grid.dy\n dt = self.dt_local\n dx2 = dx * dx\n dy2 = dy * dy\n\n # diffusion of momentum\n self.calc_nu_t(self.u_temp, self.v_temp,\n self.h_link_temp, out=self.nu_t)\n self.u_temp[wet_pwet_h_links] += (\n self.nu_t[wet_pwet_h_links]\n * dt\n * (\n (\n self.u_temp[link_east[wet_pwet_h_links]]\n - 2 * self.u_temp[wet_pwet_h_links]\n + self.u_temp[link_west[wet_pwet_h_links]]\n )\n + (\n self.u_temp[link_north[wet_pwet_h_links]]\n - 2 * self.u_temp[wet_pwet_h_links]\n + self.u_temp[link_south[wet_pwet_h_links]]\n )\n )\n / dx2\n )\n self.v_temp[wet_pwet_v_links] += (\n self.nu_t[wet_pwet_v_links]\n * dt\n * (\n (\n self.v_temp[link_east[wet_pwet_v_links]]\n - 2 * self.v_temp[wet_pwet_v_links]\n + self.v_temp[link_west[wet_pwet_v_links]]\n )\n + (\n self.v_temp[link_north[wet_pwet_v_links]]\n - 2 * self.v_temp[wet_pwet_v_links]\n + self.v_temp[link_south[wet_pwet_v_links]]\n )\n )\n / dy2\n )\n self.update_boundary_conditions(\n u=self.u_temp,\n v=self.v_temp,\n u_node=self.u_node_temp,\n v_node=self.v_node_temp,\n )\n\n # map values\n map_links_to_nodes(\n self,\n u=self.u_temp,\n dudx=self.dudx_temp,\n dudy=self.dudy_temp,\n v=self.v_temp,\n dvdx=self.dvdx_temp,\n dvdy=self.dvdy_temp,\n u_node=self.u_node_temp,\n v_node=self.v_node_temp,\n U=self.U_temp,\n U_node=self.U_node_temp,\n )\n self.update_boundary_conditions(\n u_node=self.u_node_temp, v_node=self.v_node_temp,\n )",
"def heat_loss(self):\n return self._heat_loss",
"def _thermal_diffusion(self):\r\n if self.mineral == 'apatite': # Farley et al. (2000)\r\n Do = 50\r\n Ea = 137.522\r\n if self.mineral == 'zircon': # Reiners et al. (2004)\r\n Do = 0.46\r\n Ea = 169.0336\r\n \r\n R = 0.00831447\r\n T = self.T + 273.15\r\n \r\n d = (Do * np.exp(-Ea / (R * T))) * 1e8\r\n d = (d[:-1] + d[1:]) / 2 #average for dt\r\n \r\n self.diffusivities = d",
"def heat_func(self):\n return self.Q.val + self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)",
"def diffusion():\n return 5.1412512431",
"def get_heat_flux_by_conduction(\n temperature_from: Numeric,\n temperature_to: Numeric,\n thickness: Numeric,\n thermal_conductivity: Numeric,\n) -> Numeric:\n return thermal_conductivity * (temperature_from - temperature_to) / thickness",
"def updateHeatTemp(self):\r\n currTemp = self.getTemp()\r\n newTemp = currTemp + self.HEATDELTAT\r\n self.setACTemp(newTemp)\r\n self.setHeatTemp(newTemp)",
"def _rdaam_diffusivity(self):\r\n self._damage()\r\n \r\n R = 0.008314472\r\n \r\n product = lambda lmbd : np.exp(lmbd * self.t1) - np.exp(lmbd * self.t2)\r\n \r\n if self.mineral =='apatite':\r\n rhov = (8/8 * self.U238 * 3.19 * product(self.lmbd238)\r\n + 7/8 * self.U235 * 3.19 * product(self.lmbd235)\r\n + 6/8 * self.Th232 * 3.19 * product(self.lmbd232)\r\n )\r\n \r\n # Parameters: Flowers et al. (2009)\r\n lambdaf = 8.46e-17\r\n lambdaD = 1.55125e-10\r\n eta = 0.91\r\n L = 8.15e-4\r\n \r\n anneal_d = self.damage * (lambdaf / lambdaD) * rhov * eta * L\r\n anneal_d = np.sum(anneal_d, axis=0)\r\n \r\n omega = 1e-22\r\n psi = 1e-13\r\n Do = 0.6071\r\n Ea = 122.3\r\n Et = 34\r\n \r\n trap_diff = psi * anneal_d + omega * anneal_d**3\r\n \r\n diffusivities = 1e8 * ((Do * np.exp(-Ea / (R * self.T_mean)))\r\n / (trap_diff * np.exp(Et / (R * self.T_mean)) + 1))\r\n \r\n self.diffusivities = diffusivities\r\n \r\n if self.mineral =='zircon':\r\n alphai = (8 * self.U238 * product(self.lmbd238)\r\n + 7 * self.U235 * product(self.lmbd235)\r\n + 6 * self.Th232 * product(self.lmbd232)\r\n )\r\n \r\n anneal_d = alphai * np.flip(self.damage)\r\n anneal_d = np.sum(anneal_d, axis=0)\r\n \r\n # Parameters: Guenthner et al. (2013)\r\n Ba = 5.48E-19\r\n SV = 1.669\r\n D0l = 193188\r\n El = 165\r\n D0N17 = 0.0034\r\n EaN17 = 71\r\n Lint_lattice = 45920\r\n \r\n a = self.r * 1e-4\r\n fa = 1 - np.exp(-Ba * anneal_d)\r\n DI = 1 - np.exp(-Ba * anneal_d * 3)\r\n Lint = (4.2 / (fa * SV) - 2.5)\r\n Tau = (Lint_lattice / Lint)**2\r\n DTaua2 = (1 / Tau) * D0l * np.exp(-El / (R * self.T_mean)) / (a * (1 - DI))**2\r\n DN17a2 = D0N17 * np.exp(-EaN17 / (R * self.T_mean)) / (a * DI)**2\r\n \r\n diffusivities = self.r**2 * (DI / DN17a2 + (1 - DI) / DTaua2)**-1\r\n \r\n self.diffusivities = diffusivities",
"def thermal_conductivity(self):\n return 70.0 * units.watt / (units.meter * units.kelvin)",
"def compute_heatindex(t, hum): \n a = -42.379\n b = 2.04901523\n c = 10.14333127\n d = -0.22475541\n e = -0.00683783\n f = -0.05481717\n g = 0.00122874\n h = 0.00085282\n i = -0.00000199\n\n\n rh = hum/100\n\n hi = (a + (b*t) + (c * rh) + (d *t*rh) + (e * t ** 2)+\n (f * rh **2) + (g * t**2 * rh) + (h * t * rh **2)+\n (i * t**2 * rh **2))\n\n return hi",
"def _calculate_turbulent_kinetic_energy(self):\n\n map_values(\n self,\n h=self.h_temp,\n h_link=self.h_link_temp,\n u=self.u_temp,\n v=self.v_temp,\n U=self.U_temp,\n )\n self.Kh_temp[self.wet_pwet_links[self.Kh_temp[self.wet_pwet_links] < 0]] = 0.0\n\n # development of turbulent kinetic energy\n alpha = 0.1\n # Ri = self.R * self.g * self.Ch_link[self.wet_pwet_links] \\\n # / self.U[self.wet_pwet_links] / self.U[self.wet_pwet_links]\n # beta = (0.5 * self.ew_link[self.wet_pwet_links] *\n # (1 - Ri - 2.0 * self.Cf / alpha) + self.Cf) / (self.Cf /\n # alpha)**1.5\n\n beta = alpha ** 1.5 / self.Cf ** 0.5\n # K = self.Kh[self.wet_pwet_links] / \\\n # self.h_link_temp[self.wet_pwet_links]\n\n # self.Kh_temp[self.wet_pwet_links] += self.dt_local * (\n # alpha * K * self.U_temp[self.wet_pwet_links]\n # + 0.5\n # * self.ew_link[self.wet_pwet_links]\n # * self.U_temp[self.wet_pwet_links] ** 3\n # - beta * K ** 1.5\n\n self.Kh_temp[self.wet_pwet_links] += self.dt_local * (\n (self.Cf + 0.5 * self.ew_link[self.wet_pwet_links])\n * self.U_temp[self.wet_pwet_links]\n * self.U_temp[self.wet_pwet_links]\n * self.U_temp[self.wet_pwet_links]\n - beta\n * (self.Kh[self.wet_pwet_links] / self.h_link[self.wet_pwet_links]) ** 1.5\n - self.R\n * self.g\n * (\n np.sum(\n self.Ch_link_i_temp[:, self.wet_pwet_links] * self.ws, axis=0)\n + 0.5\n * self.U_temp[self.wet_pwet_links]\n * self.ew_link[self.wet_pwet_links]\n * self.Ch_link_temp[self.wet_pwet_links]\n )\n )\n\n # remove negative values\n self.Kh_temp[self.wet_pwet_links[self.Kh_temp[self.wet_pwet_links] < 0.0]] = 0.0\n\n # adjust_negative_values(self.Kh,\n # self.wet_pwet_links,\n # self.link_east,\n # self.link_west,\n # self.link_north,\n # self.link_south,\n # out_f=self.Kh_temp)\n\n # update friction coefficient Cf_link and Cf_nodes\n U_exist = self.U_temp[self.wet_pwet_links] > 1.0e-10\n self.Cf_link[self.wet_pwet_links[U_exist]] = (\n alpha\n * self.Kh_temp[self.wet_pwet_links[U_exist]]\n / 
self.U_temp[self.wet_pwet_links[U_exist]]\n / self.U_temp[self.wet_pwet_links[U_exist]]\n )\n # self.Cf_link[self.Cf_link > 0.1] = 0.1\n self.Cf_link[self.wet_pwet_links[~U_exist]] = 0.0\n map_values(self, Cf_link=self.Cf_link, Cf_node=self.Cf_node)",
"def pe_heat_loss(self):\n return self._pe_heat_loss",
"def photometric_Teff(apogee_cluster_data): ###Function to compute photometric effective temperatures. Requires AK_TARG and FE_H from apogee_cluster_data\n \n aktarg = apogee_cluster_data['AK_TARG'] ###Get extinction values for each star from allStar data\n #Exception for unlikely AK_TARG numbers\n for i in range(len(aktarg)): ###For number in array of extinction values\n if aktarg[i] <= -50.: ###If the value is very small\n aktarg[i] = np.nan ###Set it to NaN to be ignored later\n \n #Correct J and K for median extinction\n med_aktarg = np.nanmedian(aktarg) ###Compute the median of all of the individual extinction values (nanmedian in case values get masked out above)\n aj = med_aktarg*2.5 ###Compute the extinction factor for J (from the apogee package)\n J0 = apogee_cluster_data['J'] - aj ###Compute extinction-corrected J\n K0 = apogee_cluster_data['K'] - med_aktarg ###Compute extinction-corrected K\n \n #Get numbers needed for Teff calculation\n colour = J0 - K0 ###Get the colour you want to use to compute the temperatures (J0 - Ks0 in this case)\n metallicity = np.nanmedian(apogee_cluster_data['FE_H']) ###Compute the median of all individual metallicities (for consistency with median AK_TARG)\n b = np.array((0.6517, 0.6312, 0.0168, -0.0381, 0.0256, 0.0013)) #Coefficients from Hernandez and Bonifacio (2009)\n \n #Calculate photometric Teff\n Teff = 5040/(b[0] + b[1]*colour + b[2]*colour**2 + b[3]*colour*metallicity + b[4]*metallicity\n + b[5]*metallicity**2) ###This should be equation 10 from Hernandez 2009, isolated for Teff\n \n return Teff",
"def _damage(self):\r\n if self.mineral =='apatite': # Flowers et al. (2009)\r\n C0 = 0.39528\r\n C1 = 0.01073\r\n C2 = -65.12969\r\n C3 = -7.91715\r\n alpha = 0.04672\r\n rmr0 = 0.83\r\n kappa = 1.04 - rmr0\r\n elif self.mineral =='zircon': # Guenthner et al. (2013)\r\n C0=6.24534\r\n C1=-0.11977\r\n C2=-314.937\r\n C3=-14.2868\r\n alpha=-0.05721\r\n\r\n dt = (self.t1 - self.t2) * 365.25 * 24 * 60 * 60\r\n dam = np.zeros((len(dt),len(dt)))\r\n\r\n def _d_anneal(dt,T_mean):\r\n \"\"\"Calculate change in track length at each timestep\"\"\"\r\n d = ((C0 + C1 * ((np.log(dt) - C2) /\r\n (np.log(1/T_mean) - C3)))**(1/alpha) + 1)**-1\r\n return d\r\n \r\n def _t_eqv(T_mean,d):\r\n \"\"\"Calculate equivalent time\"\"\"\r\n t_eqv = (np.exp(C2 + (np.log(1 / T_mean) - C3) *\r\n (((1 / d) - 1)**alpha - C0) / C1))\r\n return t_eqv\r\n\r\n #calculate track length for newly generated tracks\r\n new_tracks = np.diag_indices_from(dam)\r\n dam[new_tracks] = _d_anneal(dt,self.T_mean)\r\n \r\n #calculate annealing using 'equivalent time'\r\n for i in range(1,len(self.t)-1):\r\n teqv = _t_eqv(self.T_mean[i],dam[i-1,:i])\r\n teqv = teqv + dt[i]\r\n dam[i,:i] = _d_anneal(teqv,self.T_mean[i])\r\n \r\n #volume conversion\r\n if self.mineral =='apatite':\r\n dam[(dam>=rmr0)] = ((dam[(dam>=rmr0)] - rmr0) / (1 - rmr0))**kappa\r\n dam[(dam>=0.765)] = 1.6 * dam[(dam>=0.765)] - 0.6\r\n\r\n df = ((dam!=0.0) & (dam<0.765))\r\n dam[df] = 9.205 * dam[df] * dam[df] - 9.157 * dam[df] + 2.269\r\n \r\n self.damage = dam\r\n \r\n elif self.mineral =='zircon':\r\n dam = 1.25 * (dam - 0.2)\r\n dam[dam<(0.36 / 1.25 + 0.2)] = 0\r\n \r\n self.damage = dam",
"def compute_dewpoint(t, h):\n tempC = (t-32)*5/9 # Convert temperatyre from deg F to deg C\n rh = h / 100\n\n b = 18.678\n c = 257.14 # deg C\n \n gamma = math.log(rh) + (b*tempC)/(c + tempC)\n\n tdp = c * gamma / (b - gamma)\n\n tdp_F = (9/5) * tdp + 32 \n\n return tdp_F",
"def hindered_rotor_heat_capacity(T, freq, barr):\n x = constants.h * constants.c * 100. * freq / constants.kB / T\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n z = 0.5 * constants.h * constants.c * 100. * barr / constants.kB / T\n bb = scipy.special.i1(z) / scipy.special.i0(z)\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x - 0.5 + z * (z - bb - z * bb * bb)",
"def integrate_heat(self):\n\n # Integrate heat produced by each injection.\n for injection in self.injections:\n # determine initial and final samples for injection i\n first_index = injection['first_index'] # index of timepoint for first filtered differential power measurement\n last_index = injection['last_index'] # index of timepoint for last filtered differential power measurement\n \n # Determine excess energy input into sample cell (with respect to reference cell) throughout this injection and measurement period.\n #excess_energy_input = injection['filter_period'] * (self.differential_power[first_index:(last_index+1)] - self.reference_power + self.baseline_power[first_index:(last_index+1)]).sum()\n excess_energy_input = injection['filter_period'] * (self.differential_power[first_index:(last_index+1)] - self.baseline_power[first_index:(last_index+1)]).sum() \n\n # DEBUG\n print \"injection %d, filter period %f s, integrating sample %d to %d\" % (injection['number'], injection['filter_period'] / Units.s, first_index, last_index)\n \n # Determine total heat evolved.\n evolved_heat = - excess_energy_input\n \n # Store heat evolved from this injection.\n injection['evolved_heat'] = evolved_heat\n \n return",
"def improve_humidity_measurement(raw_humidity, dig_h, t_fine):\n base_value = t_fine - 76800.0\n term1 = raw_humidity - (dig_h[3] * 64.0 + dig_h[4] / 16384.0 * base_value)\n term2a = base_value * (1.0 + dig_h[2] / 67108864.0 * base_value)\n term2 = dig_h[1] / 65536.0 * (1.0 + dig_h[5] / 67108864.0 * term2a)\n humidity = term1 * term2\n humidity = humidity * (1.0 - dig_h[0] * humidity / 524288.0)\n humidity = max(0, min(humidity, 100))\n return humidity"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
run pyuic5 on a single module
|
def build(module_name):
    """Run pyuic5 on a single .ui module and report success.

    Reads ``{UI_PATH}{module_name}.ui`` and writes the generated
    ``{PY_PATH}{module_name}.py``.  On failure pyuic5/CPython prints
    its own error message, so nothing extra is emitted here.
    """
    source = f"{UI_PATH}{module_name}.ui"
    target = f"{PY_PATH}{module_name}.py"
    status = os.system("pyuic5 {} -o {}".format(source, target))
    # in the case of failure CPython will print its own error message
    if status == 0:
        print(f"made Ui_{module_name}.py")
|
[
"def convert_ui(*args):\r\n directory_files = [file for file in listdir(getcwd()) if isfile(file)]\r\n uifiles = [file for file in directory_files if file[-3:] == '.ui']\r\n if len(args) == 0:\r\n for file in uifiles:\r\n system(f'pyuic5 {file} -o {file[:-3] + \".py\"}')\r\n else:\r\n for file in args:\r\n if file in uifiles:\r\n system(f'pyuic5 {file} -o {file[:-3] + \".py\"}')\r\n else:\r\n print(f\"Can't fine {file} in the current working directory.\", file=stderr)",
"def test_load_full_uic():\n QT_API = os.environ.get('QT_API', '').lower()\n if QT_API.startswith('pyside'):\n assert hasattr(uic, 'loadUi')\n assert hasattr(uic, 'loadUiType')\n else:\n objects = ['compileUi', 'compileUiDir', 'loadUi', 'loadUiType',\n 'widgetPluginPath']\n assert all(hasattr(uic, o) for o in objects)",
"def qt2py(uifile, outfile):\n if sys.platform != 'linux':\n path = r\"D:\\Miniconda3\\envs\\build\\Lib\\site-packages\\PySide\\scripts\\uic.py\"\n ex = 'D:\\Miniconda3\\python.exe ' + path + ' ' + '-o' + ' ' + outfile + ' ' + uifile\n print(ex)\n os.system(ex)\n # ex = r\"D:\\Miniconda3\\envs\\build\\Scripts\\pyside-uic.exe\"\n # cmd = [ex, '-o', outfile, uifile]\n # print(cmd)\n # subprocess.call(cmd)\n\n else:\n cmd = ['/home/victor/miniconda3/envs/pyside/bin/pyside-uic', '-o', outfile, uifile]\n subprocess.call(cmd)",
"def build_uat():\n build_app(\"uat\")",
"def loadFromModule(file):\n \n myf=str(file)\n exec(\"from MuonID import \"+myf)\n exec(\"outp=\"+myf+\".\"+myf)\n return outp",
"def gen_core_app():\n\n doc = '''\n\"\"\"\nCore app.\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\nimport core.urls\nfrom core.urls import URLS\n\n__all__ = ['URLS']\n\nif __name__ == '__main__':\n pass\n'''\n\n return doc",
"def ui_v1(self) -> UIV1Subsystem:\n # pylint: disable=cyclic-import\n\n from bauiv1 import UIV1Subsystem\n\n return UIV1Subsystem()",
"def runUI(specific_port, share):\n\timport ui\n\tui.start(share=share, debug=False, use_reloader=False, specific_port=specific_port)",
"def createPyUI(input_ui_file_dir, output_py_file_dir, application_update_ui=False):\n if developer_mode or application_update_ui:\n print(\"Updating %s...\" % os.path.basename(output_py_file_dir))\n\n if sys.hexversion >= 0x03000000:\n from PyQt4.uic.port_v3.invoke import invoke\n else:\n from PyQt4.uic.port_v2.invoke import invoke\n\n opts = PyOutputObject(output_py_file_dir)\n args = [input_ui_file_dir]\n\n if len(args) != 1:\n sys.stderr.write(\"Error: one input ui-file must be specified\\n\")\n sys.exit(1)\n\n invoke(Driver(opts, args[0]))\n\n # Former code\n # def createPyUI(input_ui_file_dir, output_py_file_dir):\n # \"\"\"\n # Function to translate a .ui file created in Qt Designer to a .py file that is readable by PyQt.\n #\n # @param input_ui_file_dir [.ui file] A file created in Qt Designer\n # @param output_py_file_dir [.py file] A file that will be the translation of the file created in Qt Designer\n # @return None\n # \"\"\"\n #\n # # Finds the absolute path for the PyQt .ui translator module\n # pyui_creator_dir = os.path.join(os.path.dirname(__file__), \"pyuic.py\")\n #\n # # Creates a variable that is \"python absolute_path\\pyuic.py\" to be run afterwards\n # command_starter = \"python %s\" % pyui_creator_dir\n #\n # # Display a message that the translated .py files are being updated (or created)\n # print(\"Updating %s...\" % os.path.basename(output_py_file_dir))\n #\n # # Executes the command in a Terminal\n # os.system(\"%s -x %s -o %s\" % (command_starter, input_ui_file_dir, output_py_file_dir))\n #\n # return\n # parser = optparse.OptionParser(usage=\"pyuic4 [options] <ui-file>\",\n # version=Version)\n # parser.add_option(\"-p\", \"--preview\", dest=\"preview\", action=\"store_true\",\n # default=False,\n # help=\"show a preview of the UI instead of generating code\")\n # parser.add_option(\"-o\", \"--output\", dest=\"output\", default=\"-\", metavar=\"FILE\",\n # help=\"write generated code to FILE instead of stdout\")\n # 
parser.add_option(\"-x\", \"--execute\", dest=\"execute\", action=\"store_true\",\n # default=False,\n # help=\"generate extra code to test and display the class\")\n # parser.add_option(\"-d\", \"--debug\", dest=\"debug\", action=\"store_true\",\n # default=False, help=\"show debug output\")\n # parser.add_option(\"-i\", \"--indent\", dest=\"indent\", action=\"store\", type=\"int\",\n # default=4, metavar=\"N\",\n # help=\"set indent width to N spaces, tab if N is 0 [default: 4]\")\n # parser.add_option(\"-w\", \"--pyqt3-wrapper\", dest=\"pyqt3_wrapper\",\n # action=\"store_true\", default=False,\n # help=\"generate a PyQt v3 style wrapper\")\n #\n # g = optparse.OptionGroup(parser, title=\"Code generation options\")\n # g.add_option(\"--from-imports\", dest=\"from_imports\", action=\"store_true\",\n # default=False, help=\"generate imports relative to '.'\")\n # g.add_option(\"--resource-suffix\", dest=\"resource_suffix\", action=\"store\",\n # type=\"string\", default=\"_rc\", metavar=\"SUFFIX\",\n # help=\"append SUFFIX to the basename of resource files [default: _rc]\")\n # parser.add_option_group(g)\n #\n # opts, args = parser.parse_args()",
"def setup_module(module):\n print(\"\\nsetting up module\")",
"def test_module_add():\n os.chdir(test_solution_dir)\n cli = __import__(\"iotedgedev.cli\", fromlist=['main'])\n runner = CliRunner()\n\n add_module_and_verify(cli.main, runner, \"csharp\")\n # add_module_and_verify(cli.main, runner, \"nodejs\")\n add_module_and_verify(cli.main, runner, \"python\")\n add_module_and_verify(cli.main, runner, \"csharpfunction\")",
"def uci_load(self, ucistr):\n self.execute(\"echo \" + ucistr + \" | uci import \")",
"def gen_user_app(user_app_name):\n\n doc = '''\n\"\"\"\n%s app.\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\nimport %s.urls\nfrom %s.urls import URLS\n\n__all__ = ['URLS']\n\nif __name__ == '__main__':\n pass\n''' % (user_app_name, user_app_name, user_app_name)\n\n return doc",
"def main():\n obj = UnityNFS()\n obj.perform_module_operation()",
"def check_ui_generation(uifname):\r\n pyfname = get_uifnames_from(uifname)\r\n if ( os.path.exists(uifname) and \r\n not os.path.exists(pyfname) or\r\n (os.access(pyfname,os.F_OK|os.W_OK) and\r\n os.stat(pyfname).st_mtime < os.stat(uifname).st_mtime )) :\r\n print 'Generate Ui'\r\n compile_ui(uifname)",
"def set_interface(module_name: str) -> None:\n global ui\n\n ui_module = __import__('pywikibot.userinterfaces.{}_interface'\n .format(module_name), fromlist=['UI'])\n ui = ui_module.UI()\n assert ui is not None\n atexit.register(ui.flush)\n pywikibot.argvu = ui.argvu()\n\n # re-initialize\n if _handlers_initialized:\n _handlers_initialized.clear()\n init_handlers()",
"def main():\n obj = UnitySnapshotSchedule()\n obj.perform_module_operation()",
"def setup_module(module):\n logger.info('SETUP MODULE')",
"def main():\n import missionbio\n\n # Imports cli tools from missionbio.*.cli to allow the\n # @click.groups to register themselves\n for loader, module_name, is_pkg in walk_packages(missionbio.__path__,\n prefix=\"missionbio.\"):\n if module_name.endswith(\".cli\"):\n import_module(module_name)\n\n tapestri_command()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assembles the residuals vector and the Jacobian matrix.
|
def assemble_residuals_and_jacobian(number_of_nodes, element_lengths, elasticity_tensor,
                                    centerline, rotation,
                                    increments, second_strain_invariant):
    """Assemble the global residuals vector and tangent (Jacobian) matrix
    for a rod/beam discretized into two-node elements between consecutive
    nodes.

    Parameters
    ----------
    number_of_nodes : int
        Number of nodes; each node carries 6 degrees of freedom
        (3 translational, 3 rotational).
    element_lengths : sequence of float
        Length of each of the ``number_of_nodes - 1`` elements.
    elasticity_tensor : ndarray, shape (6, 6)
        Material stiffness; rows 0:3 map strains to forces, rows 3:6
        map strains to moments.
    centerline : ndarray, shape (3, number_of_nodes)
        Nodal centerline positions (one column per node).
    rotation : ndarray, shape (3, number_of_nodes)
        Nodal rotation parameters (Euler vectors) — passed to
        ``rotations.interpolate_euler``; exact parametrization is
        defined by that helper (TODO confirm against its docs).
    increments : ndarray, shape (6 * number_of_nodes,)
        Incremental displacements/rotations, 6 entries per node; the
        rotational part of node ``i`` is ``increments[6*i+3:6*i+6]``.
    second_strain_invariant : ndarray, shape (3, number_of_nodes - 1)
        Per-element curvature strain measure.  NOTE: updated IN PLACE
        by this function (accumulated via Simo's update formula).

    Returns
    -------
    (residuals, jacobian) : tuple of ndarray
        ``residuals`` has shape ``(6*number_of_nodes,)`` and
        ``jacobian`` has shape ``(6*number_of_nodes, 6*number_of_nodes)``.

    Notes
    -----
    Relies on module-level ``np``, ``float64``, ``rotations`` and
    ``auxiliary`` (cross product / skew-matrix helpers) imported
    elsewhere in the file.
    """
    # start with a blank residuals vector
    residuals = np.zeros((6*number_of_nodes), dtype=float64)
    # start with a blank Jacobian matrix
    jacobian = np.zeros((6*number_of_nodes, 6*number_of_nodes), dtype=float64)
    # add contributions of each element
    for i in range(1, number_of_nodes):
        # length of the element
        element_length = element_lengths[i-1]
        # approximate rotation matrix at the midpoint of the element
        rotation_matrix = rotations.interpolate_euler(rotation[:, i-1], rotation[:, i])
        # compute first invariant strain measure in the element ...
        # compute translational displacement tangent vector
        centerline_tangent = (centerline[:, i] - centerline[:, i-1]) / element_length
        # first invariant strain measure
        first_strain_invariant = np.dot(rotation_matrix.T, centerline_tangent)
        # no axial strain <=> lambda_3 = 1
        first_strain_invariant[2] -= 1
        # compute second invariant strain measure in the element using Simo's formula ...
        # incremental rotation tangent vector
        incremental_euler_tangent = \
            (increments[6*i+3:6*i+6] - increments[6*(i-1)+3:6*(i-1)+6]) / element_length
        # incremental Euler vector at midpoint of element
        mid_incremental_euler = (increments[6*i+3:6*i+6] + increments[6*(i-1)+3:6*(i-1)+6]) / 2
        mid_incremental_euler_norm = np.linalg.norm(mid_incremental_euler)
        # compute beta
        if mid_incremental_euler_norm < 1e-6:
            # use asymptotic approximation of Simo's formula to save computational cost
            # (avoids the 0/0 in sin(x)/x for vanishing incremental rotation)
            beta = incremental_euler_tangent + \
                0.5*auxiliary.cross(mid_incremental_euler, incremental_euler_tangent)
        else:
            # fooo = sinc of the midpoint rotation angle; delu = its unit axis
            fooo = np.sin(mid_incremental_euler_norm) / mid_incremental_euler_norm
            delu = mid_incremental_euler / mid_incremental_euler_norm
            beta = fooo*incremental_euler_tangent + \
                (1-fooo) * np.dot(delu.T, incremental_euler_tangent) * delu + \
                2*(np.sin(0.5*mid_incremental_euler_norm) / mid_incremental_euler_norm)**2 \
                * auxiliary.cross(mid_incremental_euler, incremental_euler_tangent)
        # updating the second strain invariant (in place — persists across calls)
        second_strain_invariant[:, i-1] += np.dot(rotation_matrix.T, beta)
        #-----------------
        # compute internal reactions in inertial frame of the element ...
        strain_invariants = np.hstack((first_strain_invariant, second_strain_invariant[:, i-1]))
        forces = np.dot(rotation_matrix, np.dot(elasticity_tensor[0:3, :], strain_invariants))
        moments = np.dot(rotation_matrix, np.dot(elasticity_tensor[3:6, :], strain_invariants))
        #-----------------
        # add contributions of the element to residual vector ...
        # contributions from internal forces and moments
        crossphin = 0.5 * element_length * auxiliary.cross(centerline_tangent, forces)
        residuals[6*(i-1):6*i] += np.hstack((-forces, -crossphin - moments))
        residuals[6*i:6*(i+1)] += np.hstack((+forces, -crossphin + moments))
        # add contributions of the element to Jacobian matrix ...
        # rotate the material stiffness sub-blocks into the inertial frame;
        # symmetrize (because of roundoff error ?)
        C11 = np.dot(np.dot(rotation_matrix, elasticity_tensor[0:3, 0:3]), rotation_matrix.T)
        C11 = (C11 + C11.T) / 2
        C12 = np.dot(np.dot(rotation_matrix, elasticity_tensor[0:3, 3:6]), rotation_matrix.T)
        C21 = C12.T
        C22 = np.dot(np.dot(rotation_matrix, elasticity_tensor[3:6, 3:6]), rotation_matrix.T)
        C22 = (C22 + C22.T) / 2
        # skew-symmetric (cross-product) matrices used in the tangent blocks
        centerline_tangent_cross = auxiliary.skew_matrix_from_vector(centerline_tangent)
        forces_cross = auxiliary.skew_matrix_from_vector(forces)
        moments_cross = auxiliary.skew_matrix_from_vector(moments)
        # material tangent stiffness (symmetric part)
        # four 6x6 blocks: (i-1,i-1), (i,i), (i,i-1), (i-1,i)
        jacobian[6*(i-1):6*i, 6*(i-1):6*i] += np.vstack((np.hstack((+C11 / element_length,
            -0.5*np.dot(C11, centerline_tangent_cross) + C12 / element_length)),
            np.hstack((-0.5*np.dot(centerline_tangent_cross.T, C11) + C21 / element_length,
            np.dot(np.dot(centerline_tangent_cross.T, C11), centerline_tangent_cross)*(element_length / 3) - 0.5*np.dot(centerline_tangent_cross.T, C12) + np.dot(C21, centerline_tangent_cross) + C22 / element_length))))
        jacobian[6*i:6*(i+1), 6*i:6*(i+1)] += np.vstack((np.hstack((+C11 / element_length,
            +0.5*np.dot(C11, centerline_tangent_cross) + C12 / element_length)),
            np.hstack((+0.5*np.dot(centerline_tangent_cross.T, C11) + C21 / element_length,
            np.dot(np.dot(centerline_tangent_cross.T, C11), centerline_tangent_cross)*(element_length / 3) + 0.5*np.dot(centerline_tangent_cross.T, C12) + np.dot(C21, centerline_tangent_cross) + C22 / element_length))))
        jacobian[6*i:6*(i+1), 6*(i-1):6*i] += np.vstack((np.hstack((-C11 / element_length,
            +0.5*np.dot(C11, centerline_tangent_cross) - C12 / element_length)),
            np.hstack((-0.5*np.dot(centerline_tangent_cross.T, C11) - C21 / element_length,
            np.dot(np.dot(centerline_tangent_cross.T, C11), centerline_tangent_cross)*(element_length / 6) - 0.5*np.dot(centerline_tangent_cross.T, C12) - np.dot(C21, centerline_tangent_cross) - C22 / element_length))))
        jacobian[6*(i-1):6*i, 6*i:6*(i+1)] += np.vstack((np.hstack((-C11 / element_length,
            -0.5*np.dot(C11, centerline_tangent_cross) - C12 / element_length)),
            np.hstack((+0.5*np.dot(centerline_tangent_cross.T, C11) - C21 / element_length,
            np.dot(np.dot(centerline_tangent_cross.T, C11), centerline_tangent_cross)*(element_length / 6) + 0.5*np.dot(centerline_tangent_cross.T, C12) - np.dot(C21, centerline_tangent_cross) - C22 / element_length))))
        # geometric tangent stiffness (non-symmetric)
        jacobian[6*(i-1):6*i, 6*(i-1):6*i] += np.vstack((np.hstack((np.zeros((3, 3)), +0.5*forces_cross)),
            np.hstack((-0.5*forces_cross, +0.5*moments_cross - np.dot(centerline_tangent_cross.T, forces_cross)*(element_length / 3)))))
        jacobian[6*i:6*(i+1), 6*i:6*(i+1)] += np.vstack((np.hstack((np.zeros((3, 3)), -0.5*forces_cross)),
            np.hstack((+0.5*forces_cross, -0.5*moments_cross - np.dot(centerline_tangent_cross.T, forces_cross)*(element_length / 3)))))
        jacobian[6*i:6*(i+1), 6*(i-1):6*i] += np.vstack((np.hstack((np.zeros((3, 3)), -0.5*forces_cross)),
            np.hstack((-0.5*forces_cross, -0.5*moments_cross - np.dot(centerline_tangent_cross.T, forces_cross)*(element_length / 6)))))
        jacobian[6*(i-1):6*i, 6*i:6*(i+1)] += np.vstack((np.hstack((np.zeros((3, 3)), +0.5*forces_cross)),
            np.hstack((+0.5*forces_cross, +0.5*moments_cross - np.dot(centerline_tangent_cross.T, forces_cross)*(element_length / 6)))))
    # tangent due to distributive load
    # tangent due to boundary loads
    # (the two contributions above are not implemented in this function)
    return residuals, jacobian
|
[
"def residual_jacobian(self, x):\n sres = np.zeros((len(self.prior_list), len(x)))\n for iprior, prior in enumerate(self.prior_list):\n sres[iprior, prior['index']] = prior['residual_dx'](\n x[prior['index']]\n )\n\n return sres",
"def prepareJacobian(self):\n self.jac.clear()\n self.nm = self.regionManager().parameterCount()\n self.nf = len(self.fops)\n print(self.nm, \"model cells\")\n nd = 0\n for i, fop in enumerate(self.fops):\n self.jac.addMatrix(fop.jacobian(), nd, i*self.nm)\n nd += fop.data.size()\n\n self.jac.recalcMatrixSize()\n self.setJacobian(self.jac)",
"def jacobian(A,aparams):\n l1 = aparams['l1']\n l2 = aparams['l2']\n dHxdA1 = -l1*sin(A[0]) - l2*sin(A[0]+A[1])\n dHxdA2 = -l2*sin(A[0]+A[1])\n dHydA1 = l1*cos(A[0]) + l2*cos(A[0]+A[1])\n dHydA2 = l2*cos(A[0]+A[1])\n J = matrix([[dHxdA1,dHxdA2],[dHydA1,dHydA2]])\n return J",
"def jacobian(self, x):\n return self.jnz",
"def get_jacobian(functions_array, var_list, var_values):\n\n #input is a numpy array of rAd_Var function\n functions_dim = len(functions_array)\n vars_dim = len(var_values)\n\n jacobian = np.zeros((functions_dim, vars_dim))\n list_partial_ders = []\n\n # Raise error if the number of input variables does not match the value numbers\n if len(var_list) != len(var_values):\n raise ValueError(f\"Number of input variables does not match the number of input values.\")\n\n\n # Create dictionary of variables to their input values\n variable_value_dict = {}\n for var, value in zip(var_list, var_values):\n variable_value_dict[var] = value\n\n # For the list of functions, create rAd_Var instances for variables used in the function\n for i, function in enumerate(functions_array):\n func_variable = {}\n func_variable_list = list(function.__code__.co_varnames)\n\n for var in func_variable_list:\n if var not in variable_value_dict:\n raise ValueError(\"The variable required as input for your function is not defined in the constructor.\")\n func_variable[var] = rAd_Var(variable_value_dict[var])\n\n partial_der = function(**func_variable).get_ders()\n\n dict_partial_der = {}\n for variable, der in zip(func_variable_list, partial_der):\n dict_partial_der[variable] = der\n\n list_partial_ders.append(dict_partial_der)\n\n #Get a full list of all variables from the dictionary\n #Map the variable names to column number in the Jacobian\n col_dict = {}\n for index, var in enumerate(var_list):\n col_dict[index] = var\n\n #For each row in the jacobian matrix, assign values based on variable names; if it does not exist, assign 0\n for i in range(jacobian.shape[0]):\n partial_der = list_partial_ders[i]\n\n for j in range(jacobian.shape[1]):\n var_name = col_dict[j]\n jacobian[i][j] = 0 if var_name not in partial_der else partial_der[var_name]\n\n return jacobian",
"def BuildJacobianMatrix(self): \r\n hf=self.hf\r\n ha=self.ha\r\n \r\n TMP_NumProb=copy.deepcopy(self)\r\n \r\n \r\n self.Ytmp[:]=self.Ynp1[:]\r\n for i in range(self.NbVariables):\r\n # Construction du dY\r\n dh=(2.0*hf)*self.Ytmp[i]+2.0*ha\r\n \r\n self.Ytmp[i]=((1.0-hf)*self.Ytmp[i]-ha)\r\n self.BuildFluxFunction(self.Ytmp) \r\n self.Flux_m1[:]=self.Flux_TMP[:]\r\n \r\n self.Ytmp[i]=self.Ytmp[i]+dh\r\n self.BuildFluxFunction(self.Ytmp)\r\n self.Flux_p1[:]=self.Flux_TMP[:]\r\n inv_dY=1.0/dh\r\n self.JacobianMatrix[:,i]=(self.Flux_p1[:]-self.Flux_m1[:])*inv_dY\r\n self.Ytmp[i]=self.Ynp1[i]",
"def jacobian(Q, d):\n return zeros([n, n])",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def jacobian(self, flux):\n if not self._fields:\n raise ValueError(\"No fields.\")\n return Matrix(flux).jacobian(self._fields)",
"def jacobian_world(self,\n q: Optional[Sequence[float]] = None) -> np.ndarray:\n q = self.joints if q is None else q\n j_fl = self.jacobian_flange(q)\n pose = self.fk(q)\n rotation = pose[:3, :3]\n j_tr = np.zeros(\n (ROTATION_VECTOR_LENGTH * 2, ROTATION_VECTOR_LENGTH * 2),\n dtype=float\n )\n j_tr[:ROTATION_VECTOR_LENGTH, :ROTATION_VECTOR_LENGTH] = \\\n rotation\n j_tr[ROTATION_VECTOR_LENGTH:, ROTATION_VECTOR_LENGTH:] = \\\n rotation\n j_w = np.dot(j_tr, j_fl)\n\n return j_w",
"def left_jacobian_Q_matrix(cls, xi):\n phi = xi[:, :3] # rotation part\n mu = xi[:, 3:6] # velocity part\n rho = xi[:, 6:9] # translation part\n\n px = SO3.wedge(phi)\n mx = SO3.wedge(mu)\n rx = SO3.wedge(rho)\n\n ph = phi.norm(p=2, dim=1)\n ph2 = ph * ph\n ph3 = ph2 * ph\n ph4 = ph3 * ph\n ph5 = ph4 * ph\n\n cph = ph.cos()\n sph = ph.sin()\n\n m1 = 0.5\n m2 = (ph - sph) / ph3\n m3 = (0.5 * ph2 + cph - 1.) / ph4\n m4 = (ph - 1.5 * sph + 0.5 * ph * cph) / ph5\n\n m2 = m2.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n m3 = m3.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n m4 = m4.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n\n v1 = mx\n v2 = px.bmm(mx) + mx.bmm(px) + px.bmm(mx).bmm(px)\n v3 = px.bmm(px).bmm(mx) + mx.bmm(px).bmm(px) - 3. * px.bmm(mx).bmm(px)\n v4 = px.bmm(mx).bmm(px).bmm(px) + px.bmm(px).bmm(mx).bmm(px)\n\n t1 = rx\n t2 = px.bmm(rx) + rx.bmm(px) + px.bmm(rx).bmm(px)\n t3 = px.bmm(px).bmm(rx) + rx.bmm(px).bmm(px) - 3. * px.bmm(rx).bmm(px)\n t4 = px.bmm(rx).bmm(px).bmm(px) + px.bmm(px).bmm(rx).bmm(px)\n\n Q_v = m1 * v1 + m2 * v2 + m3 * v3 + m4 * v4\n Q_p = m1 * t1 + m2 * t2 + m3 * t3 + m4 * t4\n\n return Q_v, Q_p",
"def jacobian_information(self):\n has_jacobian = True\n jacobian_free_solvers = [\"lm-scipy-no-jac\"]\n return has_jacobian, jacobian_free_solvers",
"def jacobian(self, xs, argdict=None, eps_f=5e-11):\n jac = []\n xs = np.asarray(xs)\n for i, x in enumerate(xs):\n # Determine the separation to use\n # Optimal one-pt separation is (eps_f*f/f'')^(1/2) ~ sqrt(eps_f)*x\n # Optimal two-pt separation is (eps_f*f/f''')^(1/3) ~ cbrt(eps_f)*x\n h = np.zeros(len(xs))\n h[i] = (eps_f**(1./3.))*x\n\n # Evaluate the function\n # One-pt\n #f1 = rebound_2d_earth_res(xs...)\n # Two-pt\n f1 = self.residuals(xs-h, argdict)\n f2 = self.residuals(xs+h, argdict)\n\n # Difference\n # One-pt\n #(f2-f1)/h\n # Two-pt\n jac.append((f2-f1)*0.5/h[i])\n\n # put them together\n jac = np.asarray(jac)\n return jac",
"def Jacvec(y, x, v):\n return torch.autograd.grad(y, x, v, retain_graph=True)",
"def calculate_residual_field(self):\n delta_c_j = np.matrix(self.constraint_values - self.c_unconstrained).T\n xi_times_c = np.matrix(self.correlations.xi_ij_inverse) * delta_c_j\n PH = self.correlations.PH\n rhoR_f = np.sum(PH * np.array(xi_times_c)[:, np.newaxis, np.newaxis], axis=0)\n self.rhoR = Field(fourier = rhoR_f)",
"def JacobianMatrix(Angle1,Angle2,Link1,Link2):\n\tTheta1, Theta2 = sp.symbols('Theta1 Theta2', real = True)\n\tG = sp.Matrix([\tLink1*sp.cos(Theta1)+Link2*sp.cos(Theta1+Theta2),\\\n\t\t\t\t\tLink1*sp.sin(Theta1)+Link2*sp.sin(Theta1+Theta2)\t])\n\tJ = G.jacobian([Theta1,Theta2])\n\tJ_inv_trans = (J**-1).T\n\tJ = J.subs([(Theta1,Angle1),(Theta2,Angle2)])\n\tJ_inv_trans = J_inv_trans.subs([(Theta1,Angle1),(Theta2,Angle2)])\n\treturn(J,J_inv_trans)",
"def assemble_matrices(self):\n\n self.a_t_q = self.d_x(factors=(self.t.increment / self.x.increment\n / self.material_vector('density')\n / self.material_vector('heat_capacity')))\n self.a_q_t = self.d_x(factors=(1 / self.x.increment\n * self.material_vector('thermal_conductivity_x')),\n variant='backward')\n self.matrices_assembled = True",
"def get_resid_wave_eq_first_order(u):\n #See autograd docs for jacobian documentation. \n #This code treats u as a vector valued function of the arguments x,t\n #So have to compute two jacobians, one for each. Consider changing depending on efficiency. \n #Is one jacobian w.r.t single vector [x,t] much faster than two jacobians w.r.t. (x,t)?\n\n #Jx is vector valued function of params,x,t\n #Jx(params,x,t) is d([u,ut,ux])/dx(params,x,t)\n Jx=jacobian(u, 1)\n Jt=jacobian(u, 2)\n\n\n elementwise_error=lambda params,x,t: np.array([\\\n Jx(params,x,t)[0]-Jt(params,x,t)[0]-u(params,x,t)[2]+u(params,x,t)[1], \\\n Jx(params,x,t)[1]-Jt(params,x,t)[2], \\\n Jx(params,x,t)[2]-Jt(params,x,t)[1]\n ])\n\n #elementwise_error=lambda params,x,t: np.array([\\\n # Jx(params,x,t)[0], 0., 0.])\n\n resid=lambda params,x,t: np.linalg.norm((elementwise_error(params,x,t)), ord=2)\n return resid",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=np.object)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n return np.vstack(\n [y.derivatives() for y in y_ad.flat]).reshape(y_ad.shape + (-1,))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the corresponding keras layer from configurations
|
def layer_from_config(layer_conf, model_conf, data_conf):
# context = {"class_count": data_conf["class_count"]}
return object_from_conf(layer_conf, scope="layer", context=None)
|
[
"def get_layer(keras_tensor):\n layer = keras_tensor._keras_history[0]\n return layer",
"def get_layer_by_name(name):\n if name == ConvLayer.__name__:\n return ConvLayer\n elif name == DepthConvLayer.__name__:\n return DepthConvLayer\n elif name == PoolingLayer.__name__:\n return PoolingLayer\n elif name == IdentityLayer.__name__:\n return IdentityLayer\n elif name == LinearLayer.__name__:\n return LinearLayer\n else:\n raise ValueError('unrecognized layer: %s' % name)",
"def get_layer(self, name=None, index=None):\n if not self.built:\n self.build()\n return self.model.get_layer(name, index)",
"def _get_layer(self, name: str) -> layer.Layer:\n self._validate_layer_name(name)\n return self.layers[name]",
"def set_layer_from_config(layer_config):\n layer_name = layer_config.pop('name')\n layer = get_layer_by_name(layer_name)\n layer = layer(**layer_config)\n return layer",
"def get(self, name):\n if not name:\n raise ValueError(\"Layer name is required\")\n\n for layer in self._layers:\n if layer.name == name:\n return layer\n return None",
"def get_config(self):\n\n config = super(DecoderLayer, self).get_config()\n return config",
"def get_layer_output(self, layer_name):\r\n\r\n if layer_name in self._ops_output:\r\n return self._ops_output[layer_name]\r\n else:\r\n return self._ops_output[layer_name.rsplit(':')[0]]",
"def get_supported_keras_layers():\n return list(layer_handlers.keys())",
"def get_model_config(self):\n return self.keras_model.to_json()",
"def load_conv_layer(input_x, w, b, name='conv_layer'):\n with tf.name_scope(name):\n conv = tf.nn.conv2d(input_x, w, strides=[1, 1, 1, 1], padding=\"SAME\")\n act = tf.nn.relu(conv + b)\n \n return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")",
"def net_from_config(model_conf, data_conf):\r\n # Get network conf\r\n net_conf = model_conf[\"net\"]\r\n\r\n # Input layer\r\n transform_confs = model_conf[\"dataset\"].get(\"train_transforms\", [])\r\n # Get the shape of the dataset, first check whether we have clip-feature layer in the dataset, if not, we\r\n # use the feature size in the dataset configuration\r\n feature_size = None\r\n \"\"\"\r\n for transform_conf in transform_confs[::-1]:\r\n if type(transform_conf) is dict and transform_conf.get(\"name\") == \"clip-feature\":\r\n feature_size = transform_conf[\"c\"]\r\n logger.log(\"Get feature_size={} from model configuration\".format(feature_size))\r\n \"\"\"\r\n if feature_size is None:\r\n feature_size = data_conf.get(\"feature_size\")\r\n logger.log(\r\n \"Get feature_size={} from dataset configuration\".format(feature_size))\r\n assert feature_size is not None, \"Cannot determine the feature_size\"\r\n # Get the point size, if possible\r\n point_count = data_conf.get(\"point_count\")\r\n \"\"\"\r\n for transform_conf in transform_confs[::-1]:\r\n if type(transform_conf) is dict and transform_conf.get(\"name\") == \"sampling\":\r\n point_count = None\r\n logger.log(\"Ignore point_count since we have transform sampling from dataset\")\r\n \"\"\"\r\n # input_layer = tf.keras.layers.InputLayer(input_shape=(point_count, feature_size))\r\n\r\n # Extend feature layer\r\n if \"extend_feature\" in net_conf:\r\n logger.log(\r\n \"\\\"extend_feature\\\" is deprecated, use \\\"input-feature-extend\\\" layer instead\", color=\"yellow\")\r\n inputs = tf.keras.Input(shape=(point_count, feature_size), batch_size=16)\r\n if net_conf[\"structure\"] == \"sequence\":\r\n x = inputs # Input layer\r\n\r\n for layer_conf in net_conf[\"layers\"]:\r\n logger.log(f\"In constructing: {layer_conf}\")\r\n layer = layer_from_config(layer_conf, model_conf, data_conf)\r\n logger.log(f\"Input={x}\")\r\n x = layer(x)\r\n logger.log(f\"Output={x}\")\r\n\r\n outputs = x\r\n return 
tf.keras.Model(inputs=inputs, outputs=outputs)\r\n elif net_conf[\"structure\"] == \"graph\":\r\n inputs = [inputs, tf.keras.Input(shape=())]\r\n layer_confs = net_conf[\"layers\"]\r\n graph_confs = net_conf[\"graph\"]\r\n # Generate all the intermediate nodes and use labels to map them\r\n \r\n for conf in layer_confs:\r\n # Use label to denote the layer\r\n node_name = conf.get(\"label\", None)\r\n node = IntermediateLayerGraphNode(layer_from_config(conf, model_conf, data_conf))\r\n nodes.append(node)\r\n if node_name is not None:\r\n assert node_name not in name_to_nodes, f\"Layer name \\\"{node_name}\\\" conflict, check your labels\"\r\n name_to_nodes[node_name] = node\r\n \r\n # Create the input graph node and output graph node\r\n input_node = InputGraphNode(input=inputs)\r\n output_node = OutputGraphNode()\r\n assert \"input\" not in name_to_nodes and \"output\" not in name_to_nodes, \\\r\n f\"Cannot name label of a layer to \\\"input\\\" or \\\"output\\\", check your layer labels\"\r\n name_to_nodes[\"input\"] = input_node\r\n name_to_nodes[\"output\"] = output_node\r\n # Create the graph\r\n for conf in graph_confs:\r\n node_name = conf.get(\"label\", None)\r\n param = conf.get(\"param\", [])\r\n name_to_nodes[node_name].set_param(param)\r\n model = tf.keras.Model(inputs=inputs, outputs=output_node.value())\r\n return model\r\n else:\r\n assert False, \"\\\"{}\\\" is currently not supported\".format(\r\n net_conf[\"structure\"])",
"def get_activation(self, layer=0):\n return self.layers[layer]['activation'][:-1]",
"def import_keras_module(self):\n try:\n keras_module = importlib.import_module(\"tensorflow.keras.applications.\" + self.cnn_base)\n except ModuleNotFoundError as err:\n print(\"ERROR: Model not found in Keras application\")\n sys.exit(1)\n return keras_module",
"def get_last_conv_layer(self, model_name: str):\n return self.models_[model_name][\"last_conv_layer\"]",
"def get_layer(self, index):\n return self._layers[index]",
"def get_config(self):\n\n config = super(EncoderLayer, self).get_config()\n return config",
"def dict_to_berry_layer(self, layer_info):\n if layer_info.has_key('input'):\n layer_inputs = layer_info['input']\n if (isinstance(layer_inputs, list) and len(layer_inputs) == 1):\n layer_inputs = layer_inputs[0]\n if isinstance(layer_inputs, list):\n input_layer = []\n for l in layer_inputs:\n input_layer.append(self._model.get(l))\n else:\n input_layer = self._model.get(layer_inputs)\n else:\n input_layer = self._model.last\n params = layer_info.get('params', {})\n if layer_info.has_key('name'):\n params['name'] = layer_info['name']\n layer_type = layer_info.pop('type')\n cls = self.get_berry_layer(layer_type)\n layer = cls(input_layer, **params)\n return layer",
"def take_layer(token_embed, layer_num):\n return token_embed[layer_num]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the value of this node, if it is None, this means the output of this node has not been computed yet, so it requires all the parameters needed from corresponding dependency nodes
|
def value(self):
if self._value is not None:
return self._value
# Compute the dependencies
inputs = []
for name, idx in self.param:
if not name == 'None':
output = name_to_nodes[name].value()[idx]
inputs.append(output)
else:
inputs.append(None)
self._value = self._compute(inputs)
return self._value
|
[
"def get_value(self):\n return self.node.value()",
"def _get_its_own_value_from_input(self, input_values, reevaluate):\n if self in input_values:\n value = input_values[self]\n elif self._type == \"Deterministic node\":\n value = self._get_sample(1, input_values=input_values)[self]\n else:\n value = self.value\n return value",
"def solve_get_value(self) -> Any:\n self.apply_node_changes()\n\n if self.path_and_name is None:\n raise ValueError(\"path_and_name should not be None\")\n\n return getattr(self.parent.object_ref, self.path_and_name.rsplit(\".\")[-1])",
"def getValue(self) -> \"SoNode *\":\n return _coin.SoSFNode_getValue(self)",
"def get_value(self, node1, label): \n for _, _, node2, _ in self.get_edges(node1, label):\n return node2\n return None",
"def retrieve_value(node):\n\n return node.value",
"def get_value(self):\n return self._variable_value",
"def get_value(self):\r\n try:\r\n return self.get_value()\r\n except AttributeError:\r\n try:\r\n return self._Semaphore__value\r\n except AttributeError:\r\n try:\r\n return self._value\r\n except AttributeError:\r\n raise NotImplementedError",
"def get_pdl_value(self):\n self._raise_not_implemented()",
"def get_target_value(self):\n return self.target_value",
"def get_value(self, **kwargs):\n return self.source_from(self, **kwargs)",
"def _get_node_value(self, quant):\n # get node definition\n node = self._get_node(quant)\n dtype = self._get_node_datatype(node)\n # read data from ZI\n d = self.daq.get(node, True)\n if len(d) == 0:\n assert True, 'No value defined at node %s.' % node\n # extract and return data\n data = next(iter(d.values()))\n # if returning dict, strip timing information (API level 6)\n if isinstance(data, dict) and 'value' in data:\n data = data['value']\n value = dtype(data[0])\n return value",
"def _pyforaComputedValueArg(self):\n return self.computedValue",
"def get_objective_value(self):\n raise NotImplementedError()",
"def visit_value_node(self, value_node):\n if value_node not in self._cached_value_nodes_values:\n self._visit_operation(value_node.parent_operation)\n return self._cached_value_nodes_values[value_node]",
"def GetVal(self, *args):\n return _snap.TFltPr_GetVal(self, *args)",
"def get_parm_value(self, node, parm_name):\n\n #parm\n parm = node.parm(parm_name)\n #check\n if not (parm):\n #log\n self.logger.debug('Node {0} does not have a parm with name {1}. Returning None'.format(node.name(), parm_name))\n return None\n\n #return\n return parm.eval()",
"def value(self):\n return super(CompositeOutputDevice, self).value",
"def node_value(self):\n value = 0\n if self.child_nodes == []:\n return sum(self.metadata_entries)\n\n for child_number in self.metadata_entries:\n child_number = child_number-1\n if child_number < len(self.child_nodes):\n child_value = self.child_nodes[child_number].node_value()\n value += child_value\n \n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a keras network from configuration dict
|
def net_from_config(model_conf, data_conf):
# Get network conf
net_conf = model_conf["net"]
# Input layer
transform_confs = model_conf["dataset"].get("train_transforms", [])
# Get the shape of the dataset, first check whether we have clip-feature layer in the dataset, if not, we
# use the feature size in the dataset configuration
feature_size = None
"""
for transform_conf in transform_confs[::-1]:
if type(transform_conf) is dict and transform_conf.get("name") == "clip-feature":
feature_size = transform_conf["c"]
logger.log("Get feature_size={} from model configuration".format(feature_size))
"""
if feature_size is None:
feature_size = data_conf.get("feature_size")
logger.log(
"Get feature_size={} from dataset configuration".format(feature_size))
assert feature_size is not None, "Cannot determine the feature_size"
# Get the point size, if possible
point_count = data_conf.get("point_count")
"""
for transform_conf in transform_confs[::-1]:
if type(transform_conf) is dict and transform_conf.get("name") == "sampling":
point_count = None
logger.log("Ignore point_count since we have transform sampling from dataset")
"""
# input_layer = tf.keras.layers.InputLayer(input_shape=(point_count, feature_size))
# Extend feature layer
if "extend_feature" in net_conf:
logger.log(
"\"extend_feature\" is deprecated, use \"input-feature-extend\" layer instead", color="yellow")
inputs = tf.keras.Input(shape=(point_count, feature_size), batch_size=16)
if net_conf["structure"] == "sequence":
x = inputs # Input layer
for layer_conf in net_conf["layers"]:
logger.log(f"In constructing: {layer_conf}")
layer = layer_from_config(layer_conf, model_conf, data_conf)
logger.log(f"Input={x}")
x = layer(x)
logger.log(f"Output={x}")
outputs = x
return tf.keras.Model(inputs=inputs, outputs=outputs)
elif net_conf["structure"] == "graph":
inputs = [inputs, tf.keras.Input(shape=())]
layer_confs = net_conf["layers"]
graph_confs = net_conf["graph"]
# Generate all the intermediate nodes and use labels to map them
for conf in layer_confs:
# Use label to denote the layer
node_name = conf.get("label", None)
node = IntermediateLayerGraphNode(layer_from_config(conf, model_conf, data_conf))
nodes.append(node)
if node_name is not None:
assert node_name not in name_to_nodes, f"Layer name \"{node_name}\" conflict, check your labels"
name_to_nodes[node_name] = node
# Create the input graph node and output graph node
input_node = InputGraphNode(input=inputs)
output_node = OutputGraphNode()
assert "input" not in name_to_nodes and "output" not in name_to_nodes, \
f"Cannot name label of a layer to \"input\" or \"output\", check your layer labels"
name_to_nodes["input"] = input_node
name_to_nodes["output"] = output_node
# Create the graph
for conf in graph_confs:
node_name = conf.get("label", None)
param = conf.get("param", [])
name_to_nodes[node_name].set_param(param)
model = tf.keras.Model(inputs=inputs, outputs=output_node.value())
return model
else:
assert False, "\"{}\" is currently not supported".format(
net_conf["structure"])
|
[
"def generate_model_configuration(args):\n\n model_config = {\n\n \"dataset_path\": args.dataset_config.output_folder, # Input dataset folder path.\n \"reaction_classes\": args.dataset_config.final_classes, # Final list of reaction classes.\n \"input_configs\": args.descriptor_config.model_training, # List of input configurations to train the model on.\n\n \"logs_folder\": args.model_config.logs_folder, # Path to the designated log folder.\n \"use_oversampling\": eval(args.model_config.use_oversampling), # Use SMOTE oversampling.\n \"random_seed\": args.model_config.random_seed, # Random seed used for reproducibility purposes.\n \"learning_rate\": args.model_config.learning_rate, # ADAM optimizer learning rate.\n \"max_epochs\": args.model_config.max_epochs, # Maximum number of epochs.\n \"batch_size\": args.model_config.batch_size, # Batch size.\n \"early_stopping\": args.model_config.early_stopping, # Number of epochs for early stopping detection.\n\n \"input_size\": args.model_config.input_layer[\"size\"], # Input layer size.\n \"output_size\": args.model_config.output_layer[\"size\"], # Output layer size.\n \"output_act_fcn\": args.model_config.output_layer[\"activation_fcn\"], # Output layer activation.\n\n \"hidden_types\": args.model_config.hidden_layers[args.model_config.fixed_model][\"types\"], # Hidden layer types.\n \"hidden_sizes\": args.model_config.hidden_layers[args.model_config.fixed_model][\"sizes\"], # Hidden layer sizes.\n # Hidden layer activation functions.\n \"hidden_act_fcns\": args.model_config.hidden_layers[args.model_config.fixed_model][\"activation_fcns\"],\n # Hidden layer dropout values.\n \"hidden_dropouts\": args.model_config.hidden_layers[args.model_config.fixed_model][\"dropouts\"]\n }\n\n return model_config",
"def build_model(classes, height, width):\n print(\"> Building Keras neural network...\")\n network_model = model.simple_3(classes=classes, height=height, width=width)\n return network_model",
"def create_network_from_dictionary(params_input):\n params = fill_out_dictionary(params_input)\n validify_dictionary(params)\n # Then make the Network object\n number_of_classes = params['number_of_classes']\n number_of_nodes = params['number_of_nodes']\n arrivals = [params['arrival_distributions']['Class ' + str(clss)]\n for clss in range(len(params['arrival_distributions']))]\n services = [params['service_distributions']['Class ' + str(clss)]\n for clss in range(len(params['service_distributions']))]\n if all(isinstance(f, types.FunctionType) for f in params['routing']):\n routing = params['routing']\n else:\n routing = [params['routing']['Class ' + str(clss)]\n for clss in range(len(params['routing']))]\n if isinstance(params['priority_classes'], dict):\n priorities = [params['priority_classes']['Class ' + str(clss)]\n for clss in range(len(params['priority_classes']))]\n preempt_priorities = [False for _ in range(number_of_nodes)]\n if isinstance(params['priority_classes'], tuple):\n priorities = [params['priority_classes'][0]['Class ' + str(clss)]\n for clss in range(len(params['priority_classes'][0]))]\n preempt_priorities = params['priority_classes'][1]\n baulking_functions = [params['baulking_functions']['Class ' + str(clss)]\n for clss in range(len(params['baulking_functions']))]\n batches = [params['batching_distributions']['Class ' + str(clss)]\n for clss in range(len(params['batching_distributions']))]\n queueing_capacities = [float(i) if i == \"Inf\" else i for i in params['queue_capacities']]\n class_change_matrices = params.get('class_change_matrices',\n {'Node ' + str(nd + 1): None for nd in range(number_of_nodes)})\n class_change_time_distributions = params.get('class_change_time_distributions',\n [[None for clss1 in range(number_of_classes)] for clss2 in range(number_of_classes)])\n number_of_servers, schedules, nodes, classes, preempts = [], [], [], [], []\n for c in params['number_of_servers']:\n if isinstance(c, (tuple, list)):\n if 
isinstance(c, tuple):\n s = c[0]\n p = c[1]\n if isinstance(c, list):\n s = c\n p = False\n number_of_servers.append('schedule')\n schedules.append(s)\n preempts.append(p)\n elif c == 'Inf':\n number_of_servers.append(float(c))\n schedules.append(None) \n preempts.append(False)\n else:\n number_of_servers.append(c)\n schedules.append(None) \n preempts.append(False) \n for nd in range(number_of_nodes):\n nodes.append(ServiceCentre(\n number_of_servers[nd],\n queueing_capacities[nd],\n class_change_matrices['Node ' + str(nd + 1)],\n schedules[nd],\n preempts[nd],\n preempt_priorities[nd],\n params['ps_thresholds'][nd],\n params['server_priority_functions'][nd]))\n for clss in range(number_of_classes):\n if all(isinstance(f, types.FunctionType) for f in params['routing']):\n classes.append(CustomerClass(\n arrivals[clss],\n services[clss],\n routing,\n priorities[clss],\n baulking_functions[clss],\n batches[clss],\n params['reneging_time_distributions']['Class ' + str(clss)],\n params['reneging_destinations']['Class ' + str(clss)],\n class_change_time_distributions[clss]))\n else:\n classes.append(CustomerClass(\n arrivals[clss],\n services[clss],\n routing[clss],\n priorities[clss],\n baulking_functions[clss],\n batches[clss],\n params['reneging_time_distributions']['Class ' + str(clss)],\n params['reneging_destinations']['Class ' + str(clss)],\n class_change_time_distributions[clss]))\n\n n = Network(nodes, classes)\n if all(isinstance(f, types.FunctionType) for f in params['routing']):\n n.process_based = True\n else:\n n.process_based = False\n return n",
"def gen_network(self):\n '''Generating layers'''\n for spec in self.layer_specs:\n new_layer = self._gen_layer(spec)\n self.layers.append(new_layer)\n\n '''Initializing weights and biases'''\n for i in range(1, len(self.layers)):\n # Input does not have associated weights nor bias--> skipping first layer\n self.layers[i].gen_weights(self.layers[i-1].size) # Passes size of previous layer as argument\n self.layers[i].gen_bias()",
"def generate_config(args):\n\n kernel_size = args.kernel_size\n lr = args.lr\n random_seed = args.random_seed\n\n if args.hyperpara_search:\n kernel_size = np.random.choice([1, 3, 9, 15, 21, 27])\n # lr = np.random.choice([1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6])\n random_seed = np.random.randint(0, 1000, 1)\n\n config = {\n 'lr': lr,\n 'batch_size': args.batch_size,\n 'kernel_size': int(kernel_size),\n 'in_channels': args.in_channels,\n 'channels': args.channels,\n 'augment': args.augment,\n 'n_points': args.n_points,\n 'bias': args.bias,\n 'num_classes': 0,\n 'category': args.category,\n 'max_epochs': 300,\n 'min_epochs': 100,\n 'lr_decay': 0.9,\n 'lr_patience': 1,\n 'early_stopping': 20,\n 'gpu_index': args.gpu,\n 'multi_gpu': args.multi_gpu,\n 'root_dir': args.root_dir,\n 'model_dir': args.model_dir,\n 'hilbert_level': args.hilbert_level,\n 'architecture': args.architecture,\n 'use_tnet': args.use_tnet,\n 'random_seed': random_seed,\n 'do_not_dump_in_tensorboard': ['do_not_dump_in_tensorboard', 'model', 'order',\n 'category', 'dataset', 'data_loading_function',\n 'backbone', 'root_dir', 'model_dir', 'architecture'],\n }\n\n return config",
"def _build_and_run_keras_network(mod, params, inputs, device, tvm_ops, acl_partitions):\n data = {}\n np.random.seed(0)\n for name, shape in inputs.items():\n data[name] = np.random.uniform(-128, 127, shape).astype(\"float32\")\n\n outputs = []\n for acl in [False, True]:\n outputs.append(build_and_run(mod, data, 1, params,\n device, enable_acl=acl,\n tvm_ops=tvm_ops,\n acl_partitions=acl_partitions)[0])\n verify(outputs, atol=0.002, rtol=0.01)",
"def buildNetwork(self):\n if self.loaded_checkpoint:\n self.model = keras.models.load_model(self.loaded_checkpoint, compile=False)\n self.model.load_weights(self.loaded_checkpoint)\n else:\n self.model = self.cnn() # CNN\n\n self.model.compile(\n loss= keras.losses.sparse_categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=self.learning_rate),\n metrics=[\"accuracy\"])",
"def _create_generator_network(self):\n model = Sequential()\n # Inital Image dimension\n initial_row_dim = self.image_dimensions[0]/4\n initial_column_dim = self.image_dimensions[1]/4\n initial_channel_dim = 128\n\n model.add(Dense(initial_row_dim*initial_column_dim*initial_channel_dim, activation=\"relu\"))\n # Dim = (2048)\n model.add(Reshape((initial_row_dim, initial_column_dim, initial_channel_dim)))\n # Dim = (7x7x128)\n model.add(UpSampling2D())\n # Dim = (14x14x128)\n model.add(Conv2D(filters=128, kernel_size=4, padding=\"same\"))\n # Dim = (14x14x128)\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(UpSampling2D())\n # Dim = (28x28x128)\n model.add(Conv2D(filters=64, kernel_size=4, padding=\"same\"))\n # Dim = (28x28x64)\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(Conv2D(filters=self.image_dimensions[2], kernel_size=4, padding=\"same\"))\n # Dim = (28x28x1)\n # Since the images are normalized to the scale -1 to +1, tanh Activation\n #is used since the activation has the same range.\n model.add(Activation(\"tanh\"))\n\n noise = Input(shape=(self.latent_dimension, ))\n image = model(noise)\n\n model.summary()\n return Model(noise, image)",
"def parse_network_from_config(args, input_shape):\n\n # parse standard cases\n if isinstance(args, dict):\n if args['net'] in ['resnet18', 'resnet34', 'resnet50']:\n from torchvision.models import resnet18, resnet34, resnet50\n\n resnet_fn = None\n if args['net'] == 'resnet18':\n resnet_fn = resnet18\n if args['net'] == 'resnet34':\n resnet_fn = resnet34\n if args['net'] == 'resnet50':\n resnet_fn = resnet50\n\n norm_layer = torch.nn.BatchNorm2d\n if args.get('norm_layer', '') == 'GroupNorm':\n norm_layer = group_norm_partial_apply_fn(num_groups=32)\n if args.get('norm_layer', '') == 'none':\n norm_layer = (lambda num_channels: Identity())\n\n num_classes = args.get('num_classes', 1000)\n pretrained = args.get('pretrained', False)\n\n # if pretraining is enabled but number of classes is not 1000 replace the last layer\n if pretrained and num_classes != 1000:\n net = resnet_fn(norm_layer=norm_layer, num_classes=1000, pretrained=pretrained)\n net.fc = nn.Linear(net.fc.in_features, num_classes)\n else:\n net = resnet_fn(norm_layer=norm_layer, num_classes=num_classes, pretrained=pretrained)\n output_shape = infer_shape([net], input_shape)\n print(\"output.shape:\", output_shape)\n return net, output_shape\n\n if args['net'] in ['resnet18-cifar', 'resnet34-cifar']:\n from .networks.resnet_cifar import resnet18, resnet34\n\n resnet_fn = None\n if args['net'] == 'resnet18-cifar':\n resnet_fn = resnet18\n if args['net'] == 'resnet34-cifar':\n resnet_fn = resnet34\n\n norm_layer = torch.nn.BatchNorm2d\n if args.get('norm_layer', '') == 'GroupNorm':\n norm_layer = group_norm_partial_apply_fn(num_groups=32)\n if args.get('norm_layer', '') == 'none':\n norm_layer = (lambda num_channels: Identity())\n net = resnet_fn(num_classes=args['num_classes'], norm_layer=norm_layer)\n output_shape = infer_shape([net], input_shape)\n print(\"output.shape:\", output_shape)\n return net, output_shape\n\n # parse feed forward\n return parse_feed_forward(args, input_shape)",
"def build_model():\n model_weights = np.load('models/sound8.npy').item()\n\n filter_parameters = [{'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8', 'num_filters': 1000, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n inputs = Input(shape=(None, 1)) # define inputs\n\n x = inputs\n for layer in filter_parameters:\n if 'conv8' not in layer['name']:\n x = ZeroPadding1D(padding=layer['padding'])(x)\n else:\n x = ZeroPadding1D(padding=layer['padding'])(conv7_layer_output)\n\n conv_layer = Conv1D(layer['num_filters'],\n kernel_size=layer['kernel_size'],\n strides=layer['conv_strides'],\n padding='valid', name=layer['name'])\n\n weights = model_weights[layer['name']]['weights'].reshape(conv_layer.get_weights()[0].shape)\n biases = model_weights[layer['name']]['biases']\n conv_layer.set_weights([weights, biases])\n\n x = conv_layer(x)\n\n if 'conv8' not in layer['name']: # except the last layers\n gamma = model_weights[layer['name']]['gamma']\n beta = model_weights[layer['name']]['beta']\n mean = model_weights[layer['name']]['mean']\n var = model_weights[layer['name']]['var']\n\n batch_norm = BatchNormalization()\n 
batch_norm.set_weights([gamma, beta, mean, var])\n x = batch_norm(x)\n x = Activation('relu')(x)\n if 'pool_size' in layer:\n x = MaxPooling1D(pool_size=layer['pool_size'],\n strides=layer['pool_strides'],\n padding='valid')(x)\n if layer['name'] == 'conv7':\n conv7_layer_output = x\n elif layer['name'] == 'conv8':\n imagenet_output = x\n elif layer['name'] == 'conv8_2':\n places_output = x\n\n model = Model(inputs=inputs,outputs=[imagenet_output, places_output])\n return model",
"def generate_network(config):\n with open(\".tmp/network.tf\", mode=\"w\", encoding=\"utf-8\") as f:\n f.write(MAIN_NETWORK)\n\n if config[\"infrastructure\"][\"cloud_nodes\"] > 0:\n f.write(CLOUD_NETWORK)\n\n if config[\"infrastructure\"][\"edge_nodes\"] > 0:\n f.write(EDGE_NETWORK)\n\n if config[\"infrastructure\"][\"endpoint_nodes\"] > 0:\n f.write(ENDPOINT_NETWORK)\n\n f.write(INGRESS)\n f.write(EGRESS)",
"def build_network(self): \r\n self.network = input_data(shape = [None, 48, 48, 1])\r\n print(\"Input data \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv1 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool1 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv2 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool2 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 128, 4, activation = 'relu')\r\n print(\"Conv3 \",self.network.shape[1:])\r\n self.network = dropout(self.network, 0.3)\r\n print(\"Dropout \",self.network.shape[1:])\r\n self.network = fully_connected(self.network, 3072, activation = 'relu')\r\n print(\"Fully connected\",self.network.shape[1:])\r\n self.network = fully_connected(self.network, len(self.target_classes), activation = 'softmax')\r\n print(\"Output \",self.network.shape[1:])\r\n print(\"\\n\")\r\n # Generates a TrainOp which contains the information about optimization process - optimizer, loss function, etc\r\n self.network = regression(self.network,optimizer = 'momentum',metric = 'accuracy',loss = 'categorical_crossentropy')\r\n # Creates a model instance.\r\n self.model = tflearn.DNN(self.network,checkpoint_path = 'model_1_atul',max_checkpoints = 1,tensorboard_verbose = 2)\r\n # Loads the model weights from the checkpoint\r\n self.load_model()",
"def make_generator():\n\n model = Sequential()\n model.add(Dense(256 * D, input_dim=LATENT_DIM))\n model.add(Reshape((4, 4, 16 * D)))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2))) # 8\n model.add(Conv2D(8 * D, (5, 5), padding='same'))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2))) # 16\n model.add(Conv2D(4 * D, (5, 5), padding='same'))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2))) # 32\n model.add(Conv2D(2 * D, (5, 5), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(D, (5, 5), padding='same'))\n model.add(UpSampling2D(size=(2, 2))) # 64\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2))) # 128\n model.add(Conv2D(D/2, (5, 5), padding='same'))\n model.add(Activation('relu'))\n model.add(UpSampling2D(size=(2, 2))) # 256\n model.add(Conv2D(1, (5, 5), padding='same'))\n model.add(Activation('tanh'))\n\n return model",
"def _build_graph(self):\n\n # build simple architecture to multiply two numbers\n w1 = keras.layers.Input(shape=(1,), name=\"w1\")\n w2 = keras.layers.Input(shape=(1,), name=\"w2\")\n\n add = keras.layers.add([w1, w2])\n mult = keras.layers.multiply([w1, w2])\n out = keras.layers.concatenate([add, mult])\n\n return keras.models.Model(inputs=[w1, w2], outputs=out)",
"def train_from_cfg_file(config: Dict[str, Dict[str, Any]]) -> None:\n # Print configuration file title\n print(\"[\" + (\"=\" * (len(config[\"title\"]) + 18)) + \"]\")\n print(f\"[======== {config['title']} ========]\")\n\n # Get date\n d = date.today().isoformat()\n\n # Create results directory\n for dn in [\"./results\", \"./results/models\", \"./results/logs\"]:\n if not os.path.exists(dn):\n os.makedirs(dn)\n\n # Deserialize configuration file\n parameters = config[\"params\"]\n epochs = config[\"run\"][\"epochs\"]\n ensemble = config[\"run\"][\"ensemble\"]\n datasets = config[\"datasets\"]\n\n # Iterate through datasets\n for dataset in datasets:\n\n if datasets[dataset]:\n print(f\"[==> {dataset}\")\n\n # Prepare output directory\n if not os.path.exists(f\"results/models/{dataset}\"):\n os.makedirs(f\"results/models/{dataset}\")\n\n if dataset == \"families\":\n\n for famid, fn in CATALOG[dataset].items():\n\n # Create basename for files\n basename = f\"model-famid-{famid}-{parameters['hidden-neurons']}-{parameters['optimizer']}-{parameters['lc-layer']}-{d}\"\n\n # Prepare prefix\n if not os.path.exists(f\"results/models/{dataset}/famid{famid}\"):\n os.makedirs(f\"results/models/{dataset}/famid{famid}\")\n prefix = f\"results/models/{dataset}/famid{famid}/{basename}\"\n\n # Prepare log file\n log = f\"results/logs/{basename}.log\"\n\n train_ensemble(fn, prefix, log, parameters, epochs, ensemble)\n\n else:\n # Create basename for files\n basename = f\"model-{dataset}-{parameters['hidden-neurons']}-{parameters['optimizer']}-{parameters['lc-layer']}-{d}\"\n if 'without' in config['title']:\n t = config['title']\n basename += f\"-{t[t.find('without'):]}\"\n\n # Prepare output prefix\n prefix = f\"results/models/{dataset}/{basename}\"\n\n # Prepare log file\n log = f\"results/logs/{basename}.log\"\n\n # Training DiabNet for dataset\n train_ensemble(\n CATALOG[dataset], prefix, log, parameters, epochs, ensemble\n )\n\n print(\"[\" + (\"=\" * 
(len(config[\"title\"]) + 18)) + \"]\")",
"def compile_model(self,):\n # Get our network parameters.\n nb_layers = 2 #network['nb_layers']\n nb_neurons = 768 #network['nb_neurons']\n activation = 'tanh' #network['activation']\n optimizer = 'adagrad' #network['optimizer']\n nb_classes = 1\n model = Sequential()\n \n # Add each layer.\n for i in range(nb_layers):\n \n # Need input shape for first layer.\n if i == 0:\n model.add(Dense(nb_neurons, activation=activation,input_dim=5)) # input_shape=input_shape))\n else:\n model.add(Dense(nb_neurons, activation=activation))\n \n model.add(Dropout(0.2)) # hard-coded dropout\n \n # Output layer.\n model.add(Dense(nb_classes, activation= activation))\n \n model.compile(loss='mse', optimizer=optimizer,\n metrics=['mae'])\n \n return model",
"def create_network(self, neurons_input=1, neurons_hidden=0):\n\t\t\n\t\tself.rate = 0.01\t#Learning rate\n\t\tself.weights_input = []\n\t\tself.weights_hidden = []\n\t\tself.weights_output = []\n\t\tself.neurons_input = neurons_input\n\t\tself.neurons_hidden = neurons_hidden\n\n\t\tif neurons_input > 1:\n\t\t\tneurons_output = 1\n\t\telse:\n\t\t\tneurons_output = 0\n\t\tself.neurons_output = neurons_output\n\n\t\t# set random starting weights\n\t\tfor i in range(neurons_input):\n\t\t\tself.weights_input.append(randint(-1,1))\n\t\tfor i in range(neurons_hidden):\n\t\t\tfor j in range(neurons_input*neurons_hidden):\n\t\t\t\tself.weights_hidden.append(randint(-1,1))\n\t\tfor i in range(neurons_output):\n\t\t\tfor j in range(neurons_hidden):\n\t\t\t\tself.weights_output.append(randint(-1,1))",
"def __init__(self, network_model_filepath=DEFAULT_MODEL_FILEPATH,\n batch_size=32, use_gpu=False, gpu_device_id=0,\n network_is_greyscale=False, load_truncated_images=False,\n pixel_rescale=None, input_scale=None,\n pre_initialize_network=True):\n super(KWCNNDescriptorGenerator, self).__init__()\n\n self.batch_size = int(batch_size)\n\n self.use_gpu = bool(use_gpu)\n self.gpu_device_id = int(gpu_device_id)\n self.gpu_device_tag = 'gpu%d' % (self.gpu_device_id, )\n\n self.network_is_greyscale = bool(network_is_greyscale)\n # assert self.network_is_greyscale is False, 'Only color model supported' # NOQA\n self.load_truncated_images = bool(load_truncated_images)\n self.pixel_rescale = pixel_rescale\n self.input_scale = input_scale\n\n self.network_model_filepath = str(network_model_filepath)\n\n assert self.batch_size > 0, \\\n \"Batch size must be greater than 0 (got %d)\" % self.batch_size\n assert self.gpu_device_id >= 0, \\\n \"GPU Device ID must be greater than 0 (got %d)\" % self.gpu_device_id\n\n # Network setup variables\n self.data = None\n self.model = None\n self.network = None\n\n self._setup_network(pre_initialize_network=pre_initialize_network)",
"def new_network(config_path: str):\n with open(config_path, \"r\") as f:\n config_data = f.read()\n\n config = json.loads(config_data)\n\n return Network(\n layers=NetworkFactory._construct_layers(config),\n loss_function=config[\"loss\"],\n **NetworkFactory._parse_regularization_config(config),\n learning_rate=config[\"learning_rate\"],\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the available machines that jobs can run on
|
def get_machines(request):
    """Return the machines that jobs can run on.

    Builds a mapping of machine name -> jobmanagers for every entry in
    ``slurmutil.GRID_RESOURCE_TABLE`` whose 'jobmanagers' mapping is
    non-empty.  The ``request`` argument is part of the view signature
    but is not used here.
    """
    # keep only resources that actually expose job managers
    return {
        name: info['jobmanagers']
        for name, info in slurmutil.GRID_RESOURCE_TABLE.items()
        if info['jobmanagers'] != {}
    }
|
[
"def machines(self):\n ret = self._get_attr(\"machines\")\n return ret",
"def available_jobs():\n return run_rvt2('base.help.AvailableJobs', background=False)",
"def machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]]]:\n return pulumi.get(self, \"machines\")",
"def list_available_servers(self):\t\t\n\t\treturn self.swarm_manager.list_available_servers()",
"def machines(self):\n ret = self._get_attr(\"machines\")\n return [IMachine(a) for a in ret]",
"def getWorkers(cpus=None):\n cpu_count = os.cpu_count()\n if cpus:\n if isinstance(cpus, float):\n return round(cpus * cpu_count)\n elif isinstance(cpus, int):\n return cpus\n else:\n raise TypeError('Unrecogzied input type %s' % str(cpus.__name__))\n limit_usage_machines = ['brain2', 'pinky']\n if socket.gethostname().lower() in limit_usage_machines:\n return cpu_count // 8\n else:\n # Use all cpus if not on SAIL servers\n return cpu_count",
"def get_alloc_nodes(self):\n if self.is_slurm_enabled:\n # Use scontrol instead of sinfo -t alloc since sinfo will show all nodes currently allocated, not just\n # the nodes associated with the current job context.\n return list(set(subprocess.check_output(['scontrol', 'show', 'hostname']).decode().splitlines()))\n else:\n raise NotImplementedError('<geopm> geopmpy.launcher: Allocated nodes feature requires SLURM')",
"def num_machines(self):\n return self.n",
"def calculate_ready_machines(self):\n for machine in self.machines:\n if (machine['status'] == self.Pending and\n machine['machine'].is_initialized()):\n if self.add_machine_tasks(machine['machine']):\n machine['status'] = self.Active",
"def _get_online_cpus():\n def parse_int_list(list_string):\n \"\"\"Returns an int list from a string of comma separated integers and\n integer ranges.\"\"\"\n integers = []\n members = list_string.split(',')\n\n for member in members:\n if '-' not in member:\n integers.append(int(member))\n else:\n int_range = member.split('-')\n integers.extend(range(int(int_range[0]),\n int(int_range[1]) + 1))\n\n return integers\n\n with open('/sys/devices/system/cpu/online') as cpu_list:\n cpu_string = cpu_list.readline()\n return parse_int_list(cpu_string)",
"def get_the_available_boxes():\n with get_db_cursor() as cursor:\n cursor.execute(\"SELECT * FROM box WHERE usage = 'available'\")\n return cursor.fetchall()",
"def machine_info():\n BYTES_IN_GIG = 1073741824\n free_bytes = psutil.virtual_memory().available\n return [{\"memory\": int(free_bytes / BYTES_IN_GIG), \"cores\": multiprocessing.cpu_count(),\n \"name\": socket.gethostname()}]",
"def getRemoteMasterCoresForDemand(self):\n return self.session.request('replicationcomms/slave/cores/masters')",
"def matchmaking(job, compute_nodes, running_jobs):\n\n candidates = []\n best_candidate = None\n\n # Find the set of probable candidates\n for node_id, status in compute_nodes.items():\n if status['memory'] >= job.min_memory and \\\n status['cpu'] > MIN_CPU_AVAILABILITY:\n candidates.append(node_id)\n\n # Try to assign a node for the job from the probable candidates\n if len(candidates) > 0:\n # Find the set of idle machines\n idle_machines = []\n for candidate in candidates:\n if len(running_jobs[candidate]) <= IDLE_MACHINE_JOB_COUNT:\n idle_machines.append(candidate)\n\n # Choose one of the idle machines based on max memory preference\n if len(idle_machines) > 0:\n # Order by max memory\n diff_from_max = float('inf')\n for idle_machine in idle_machines:\n memory_diff = abs(\n job.max_memory - compute_nodes[idle_machine]['memory'])\n if memory_diff < diff_from_max:\n diff_from_max = memory_diff\n best_candidate = idle_machine\n\n running_jobs[best_candidate].append(job)\n return best_candidate, None\n\n else:\n # Order by cpu usage in case no machine is idle\n max_cpu_available = 0\n for node_id, status in compute_nodes.items():\n if compute_nodes[node_id]['cpu'] > max_cpu_available:\n max_cpu_available = compute_nodes[node_id]['cpu']\n best_candidate = node_id\n\n try:\n running_jobs[best_candidate].append(job)\n except KeyError:\n # All nodes have crashed. 
The job can not be scheduled.\n best_candidate = None\n return best_candidate, None\n\n else:\n # Preemption is needed, so first first the lowest priority job on each\n # machine\n lowest_priority_jobs = {}\n for node_id, job_list in running_jobs.items():\n try:\n lowest_priority_jobs[node_id] = min(\n job_list, key=lambda j: j.priority)\n except ValueError:\n continue\n\n # Out of all the low priority jobs on each machine,\n # Preempt the lowest priority one which satisfies memory constraints\n job_to_preempt = None\n for node_id, lowest_priority_job in lowest_priority_jobs.items():\n if (job.min_memory <\n compute_nodes[node_id]['memory'] +\n lowest_priority_job.min_memory):\n if (job_to_preempt is None) or (\n job_to_preempt.priority > lowest_priority_job.priority):\n job_to_preempt = lowest_priority_job\n best_candidate = node_id\n\n if best_candidate is not None:\n running_jobs[best_candidate].append(job)\n return best_candidate, job_to_preempt",
"def getJobs(self):\n if (self.firstJobCheck):\n o = self.getString(\"JOBS\", \"\", \"CPU\")\n if ((o == \"CPU\") or (o == \"\")):\n o =str(multiprocessing.cpu_count())\n self.addHistory(\"JOBS\", \"Number of make jobs for build?\", o)\n self.jobs = o\n else:\n o = self.jobs\n return o",
"def workers(self):\n return self._wrap_get('/workers')",
"def available_processes():\n server_response = requests.get(_event_generator_api_url + 'processes')\n if server_response.status_code != 200:\n raise RuntimeError(\"The event generator server did not answer with a \"\n \"valid message.\")\n return server_response.json()['processes']",
"def get_vms(self):\n logger.info(\"Getting list of VMs from NetBox API\")\n try:\n result = self.netboxapi.virtualization.get_virtual_machines()\n logger.info(f\"Retrieved {len(result)} virtual machines\")\n return result\n except ConnectionError as e:\n logger.exception(f\"{e.args}\")\n exit(1)",
"def get_division_machines(division):\n return machine_dict(division.machines)",
"def get_free_monitors():\n monitors = len(Team.get_monitors())\n free_monitors = 10 - monitors\n return free_monitors"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets up the command line parser for the config subprogram and adds it to sub_parsers.
|
def setup_parser(sub_parsers):
    """Register the "config" subprogram on *sub_parsers*.

    Adds the positional "name" and "value" arguments plus the --remove,
    --expand and --location flags.  The parser description embeds the
    path of the law configuration file.
    """
    parser = sub_parsers.add_parser(
        "config",
        prog="law config",
        description="Configuration helper to get, set or remove a value from the law configuration "
        "file ({}).".format(_cfg.config_file),
    )

    # positional arguments
    parser.add_argument(
        "name",
        nargs="?",
        help="the name of the config in the format <section>[.<option>]; when empty, all section "
        "names are printed; when no option is given, all section options are printed",
    )
    parser.add_argument(
        "value",
        nargs="?",
        help="the value to set; when empty, the current value is printed",
    )

    # boolean flags, registered uniformly
    flag_specs = [
        ("--remove", "-r", "remove the config"),
        ("--expand", "-e", "expand variables when getting a value"),
        ("--location", "-l", "print the location of the configuration file and exit"),
    ]
    for long_opt, short_opt, help_text in flag_specs:
        parser.add_argument(long_opt, short_opt, action="store_true", help=help_text)
|
[
"def _add_subparsers(self):\n runner = self.subparsers.add_parser(\"run\", help=\"Run a Test\")\n runner.add_argument(\"glob\", help=\"A file glob to match config files (default='%(default)s').\",\n metavar=\"<config-file glob>\",\n default=\"*.ini\",\n nargs=\"?\")\n runner.set_defaults(function=self.strategerizer.run)\n\n fetcher = self.subparsers.add_parser(\"fetch\", help=\"Fetch a sample config file.\")\n fetcher.set_defaults(function=self.strategerizer.fetch)\n\n tester = self.subparsers.add_parser('test', help='Test your setup.')\n tester.add_argument(\"glob\", help=\"A file glob to match config files (e.g. *.ini - default='%(default)s').\",\n metavar=\"<config-file glob>\",\n default=\"*.ini\",\n nargs=\"?\")\n\n tester.set_defaults(function=self.strategerizer.test)\n\n #helper = self.subparsers.add_parser(\"help\", help=\"Show more help\")\n #helper.add_argument('topic', help=\"A specific subject to inquire about.\", nargs=\"?\")\n #helper.set_defaults(function=self.strategerizer.handle_help)\n return",
"def setup(cls, subparser):\n # creates the parser for options\n parser = subparser.add_parser(cls.__command__, help=cls.__help__)\n\n # adds the arguments\n cls.args(parser)\n\n # sets the default function to invoke\n parser.set_defaults(func=cls.run)\n cls._parser = parser",
"def add_sub_parser(cls, sub_parsers):\n \n assert cls.name, \"command_name must be set.\"\n\n parser = sub_parsers.add_parser(cls.name,\n help=cls.help or \"No help\", \n description=cls.description or \"No help\", \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.set_defaults(func=cls)\n return parser",
"def setup_parser(parser):\n subparsers = parser.add_subparsers(title='Commands')\n for command in COMMANDS:\n command.setup_parser(subparsers)",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def root_config_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a', argument_type=bool, is_flag=True, action=store_true)\n parser_manager.add_subgroup(name='sub')\n\n return parser_manager",
"def __init__(self):\n self.CLI_COMMAND = os.path.basename(sys.argv[0])\n\n self.ctrl_parser = argparse.ArgumentParser(prog=self.CLI_COMMAND,\n description='Control Component Parser')\n\n self.ctrl_subparser = self.ctrl_parser.add_subparsers(\n title='Sub Commands',\n description='List of Valid Sub Commands', dest='subparser_name')\n\n self.add_simple_args()\n\n \"\"\"Sub Parser for all Cli Commands\"\"\"\n self.add_subparser('power', 'Power on/off/reset a device.',\n ['on', 'off', 'cycle', 'bios', 'efi', 'hdd', 'pxe', 'cdrom', 'removable'],\n 'Select an option: on/off/cycle/bios/efi/hdd/pxe/cdrom/removable.'\n ' Ex: {} power on node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': '-f',\n 'name2': '--force',\n 'action': 'store_true',\n 'help': 'This option will allow user to force the Power On/Off/Reboot'\n },\n {\n 'name': '-o',\n 'name2': '--outlet',\n 'type': int,\n 'nargs': '?',\n 'help': 'Specify the outlet to edit (PDUs only)'\n }\n ])\n\n self.add_subparser('resource', 'Resource add/remove from a resource pool.', ['add', 'remove', 'check'],\n 'Select one of the following options: add/remove/check'\n ' Ex: {} resource add node001'.format(self.CLI_COMMAND))\n\n self.add_subparser('process', 'Process list/kill on a node in a cluster.', ['list', 'kill'],\n 'Select one of two options: list/kill.'\n ' Ex: {} process kill 1232 node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': 'process_id',\n 'help': 'Please provide process id to list or kill a process'\n }\n ])\n\n self.add_subparser('get', 'Get powercap/freq value of a node.', ['freq', 'powercap'])\n\n self.add_subparser('set', 'Set powercap/freq value of a node.', ['freq', 'powercap'], 'Select an option to set',\n [\n {\n 'name': 'value',\n 'help': 'Please provide the value to be set'\n }\n ])\n\n self.add_subparser('service', 'Check, start or stop services specified in the configuration file',\n ['status', 'start', 'stop'], 'Select an action to perform')\n\n self.ctrl_subparser.add_parser('datastore', 
help=\"Raw access to the database and its contects\", add_help=False)\n self.ctrl_subparser.add_parser('cmm', help=\"Configuration Manifest Management (CMM) is a user friendly way to update your configuration.\", add_help=False)\n self.ctrl_subparser.add_parser('provision', help=\"Adding, setting and removing provisioning \"\n \"options for devices\", add_help=False)\n self.ctrl_subparser.add_parser('diag', help=\"Launching diagnostic tests on devices\", add_help=False)\n\n self.add_subparser('bios', 'Update or get version of bios on specified nodes/group of nodes',\n ['update', 'get-version'], 'Select an action to perform',\n [\n {\n 'name': '-i',\n 'name2': '--image',\n 'nargs': '?',\n 'help': 'Specify the bios image'\n }\n ])\n\n self.add_subparser('sensor', 'Get specified sensor value on specified nodes/group of nodes',\n ['get'], 'Select option to get sensor values'\n 'Ex: 1. {0} sensor-name temp 2. {1} sensor-name temp --get-overtime 2 3'.\n format(self.CLI_COMMAND, self.CLI_COMMAND),\n [\n {\n 'name': 'sensor_name',\n 'nargs': '?',\n 'help': 'Provide a specific sensor, a comma seperated list of multiple sensors '\n 'or \"*\" for all sensors'\n },\n {\n 'name': '--get-overtime',\n 'nargs': 2,\n 'type': int,\n 'metavar': ('<sample-rate>', '<duration>'),\n 'help': 'Provide a sample rate(per second) and a duration of time(seconds) to sample'\n ' over, both values must be integers greater than 1'\n }\n ])\n self.ctrl_subparser.add_parser('job', help='Launching, checking, '\n 'retrieving and canceling job', add_help=False)",
"def prepare_argparser():\n description = \"%(prog)s -- Gene Set Enrichment Analysis in Python\"\n epilog = \"For command line options of each command, type: %(prog)s COMMAND -h\"\n\n # top-level parser\n argparser = ap.ArgumentParser(description=description, epilog=epilog)\n argparser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n subparsers = argparser.add_subparsers(\n dest=\"subcommand_name\"\n ) # help=\"sub-command help\")\n\n # command for 'gsea'\n add_gsea_parser(subparsers)\n # command for 'prerank'\n add_prerank_parser(subparsers)\n # command for 'ssgsea'\n add_singlesample_parser(subparsers)\n # command for 'plot'\n add_plot_parser(subparsers)\n # command for 'enrichr'\n add_enrichr_parser(subparsers)\n # command for 'biomart'\n add_biomart_parser(subparsers)\n\n return argparser",
"def load_subparsers(config_file_dict, parser):\n\n # Initialize the subparsers\n subparsers_object = parser.add_subparsers(help='sub-command-help')\n\n # Get a list of the shorthand names of all deployment tools, and make the subparsers\n for deployment_tool in config_file_dict['deployment_tools']:\n try:\n meta_info = deployment_tool['meta']\n except KeyError:\n print \"ERROR! Could not load deployment tool's meta file!\"\n exit()\n\n # Add a parser for each deployment tool using the shorthand name\n subparser = subparsers_object.add_parser(meta_info['shorthand_name'],\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=meta_info['description'])\n\n # Adding an attribute to the parser, so we know what subcommand is being run\n # and what plugins to load post-invocation\n subparser.set_defaults(subcommand=meta_info['shorthand_name'])\n subparser.set_defaults(deployment_tool_name=meta_info['name'])\n\n # Adding the arguments for each subparser.\n for option in deployment_tool['options']:\n subparser.add_argument(option[0], default=option[1], help=option[2])\n\n return parser",
"def __init__(self):\n self.parser = argparse.ArgumentParser(add_help=True)\n self.subparsers = self.parser.add_subparsers()\n\n self.add_download_arguments()\n self.add_process_arguments()\n self.add_display_arguments()",
"def __init__(self, sub_parser, cmd_validate=None):\n self.cmd_validate = cmd_validate\n\n super(CmdExtPackage, self).__init__(sub_parser)",
"def add_parser(subparsers):\n parser = subparsers.add_parser(\n \"train\",\n help=\"Train a hydronn retrieval model.\",\n description=(\n \"\"\"\n Train the hydronn retrieval model.\n \"\"\"\n )\n )\n\n parser.add_argument(\n 'training_data',\n metavar='training_data',\n type=str,\n help='Path to training data.'\n )\n parser.add_argument(\n 'validation_data',\n metavar='validation_data',\n type=str,\n help='Path to validation data.'\n )\n parser.add_argument(\n 'output',\n metavar='output',\n type=str,\n nargs=1,\n help='Where to store the model.'\n )\n\n parser.add_argument(\n \"--resolution\",\n metavar='n',\n type=int,\n default=2,\n help=(\"The resolution in km at which to perform the retrieval.\")\n )\n parser.add_argument(\n \"--n_features_body\",\n metavar='n',\n type=int,\n default=256,\n help=(\"The number of features in the body of the network.\")\n )\n parser.add_argument(\n '--n_layers_head',\n metavar='n',\n type=int,\n default=4,\n help='The number of layers in the head of the network.'\n )\n parser.add_argument(\n '--n_features_head',\n metavar='n',\n type=int,\n default=128,\n help=\"The number of features in the head of the network.\"\n )\n parser.add_argument(\n '--n_blocks',\n metavar='N',\n type=int,\n nargs=\"*\",\n default=[2],\n help=\"The number of block in the stages of the encoder.\"\n )\n parser.add_argument(\n '--learning_rate',\n metavar='lr',\n type=float,\n nargs=\"*\",\n default=[0.0005, 0.0005, 0.0001],\n help='The learning rates to use during training.'\n )\n parser.add_argument(\n '--n_epochs',\n metavar='lr',\n type=int,\n nargs=\"*\",\n default=[10, 10, 10],\n help='The learning rates to use during training.'\n )\n parser.add_argument(\n '--no_lr_schedule',\n action=\"store_true\",\n help='Disable learning rate schedule.'\n )\n parser.add_argument(\n \"--ir\",\n action=\"store_true\",\n help=(\"Whether to train an IR only retrieval.\")\n )\n\n # Other\n parser.add_argument(\n '--device', metavar=\"device\", type=str, nargs=1,\n 
help=\"The name of the device on which to run the training\",\n default=\"cpu\"\n )\n parser.add_argument(\n '--batch_size', metavar=\"n\", type=int, nargs=1,\n help=\"The batch size to use for training.\"\n )\n parser.set_defaults(func=run)",
"def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=f'{os.path.basename(prog_name)} {subcommand}',\n description=self.help or None)\n\n # Add any arguments that all commands should accept here\n self.add_arguments(parser)\n\n return parser",
"def init_subparsers(self) -> _SubParsersAction:\r\n subparsers = self.parser.add_subparsers(dest=\"which\")\r\n\r\n \"\"\"Parser for list command.\"\"\"\r\n parser_list = subparsers.add_parser(\"list\")\r\n parser_list.set_defaults(which=\"list\",\r\n help=\"Display a list of all projects.\")\r\n\r\n \"\"\"Parser for view command.\"\"\"\r\n parser_view = subparsers.add_parser(\"view\")\r\n parser_view.set_defaults(which=\"view\",\r\n help=\"Displays details of project.\")\r\n parser_view.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to view.\")\r\n\r\n \"\"\"Parser for create command.\"\"\"\r\n parser_create = subparsers.add_parser(\"create\")\r\n parser_create.set_defaults(which=\"create\",\r\n help=\"(Team Lead and Admin only) Create \"\r\n \"a new project from a given repo.\")\r\n parser_create.add_argument(\"gh_repo\", metavar=\"gh-repo\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify link to \"\r\n \"GitHub repository.\")\r\n parser_create.add_argument(\"github_team_name\",\r\n metavar=\"github-team-name\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify GitHub team to \"\r\n \"assign project to.\")\r\n parser_create.add_argument(\"--name\", metavar=\"DISPLAY-NAME\",\r\n type=str, action=\"store\",\r\n help=\"Add to set the displayed \"\r\n \"name of the project.\")\r\n\r\n \"\"\"Parser for unassign command.\"\"\"\r\n parser_unassign = subparsers.add_parser(\"unassign\")\r\n parser_unassign.set_defaults(which=\"unassign\",\r\n help=\"Unassign a given project.\")\r\n parser_unassign.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project \"\r\n \"to unassign.\")\r\n\r\n \"\"\"Parser for edit command.\"\"\"\r\n parser_edit = subparsers.add_parser(\"edit\")\r\n parser_edit.set_defaults(which=\"edit\",\r\n help=\"Edit the given project.\")\r\n parser_edit.add_argument(\"project_id\", 
metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to edit.\")\r\n parser_edit.add_argument(\"--name\", metavar=\"DISPLAY-NAME\",\r\n type=str, action=\"store\",\r\n help=\"Add to change the displayed \"\r\n \"name of the project.\")\r\n\r\n \"\"\"Parser for assign command.\"\"\"\r\n parser_assign = subparsers.add_parser(\"assign\")\r\n parser_assign.set_defaults(which=\"assign\",\r\n help=\"Assigns a project to a team.\")\r\n parser_assign.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to assign.\")\r\n parser_assign.add_argument(\"github_team_name\",\r\n metavar=\"github-team-name\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify GitHub team to \"\r\n \"assign project to.\")\r\n parser_assign.add_argument(\"-f\", \"--force\", action=\"store_true\",\r\n help=\"Set to assign project even if \"\r\n \"another team is already \"\r\n \"assigned to it.\")\r\n\r\n \"\"\"Parser for delete command.\"\"\"\r\n parser_delete = subparsers.add_parser(\"delete\")\r\n parser_delete.set_defaults(which=\"delete\",\r\n help=\"Delete the project from database.\")\r\n parser_delete.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to delete.\")\r\n parser_delete.add_argument(\"-f\", \"--force\", action=\"store_true\",\r\n help=\"Set to delete project even if \"\r\n \"a team is already assigned to it.\")\r\n\r\n return subparsers",
"def register_subparser(\n subparser: argparse._SubParsersAction, # pylint: disable=protected-access\n) -> argparse.ArgumentParser:\n\n subcommand = subparser.add_parser(\n \"inputs\", help=\"Find all reported inputs for a given workflow.\"\n )\n subcommand.add_argument(\"workflow-id\", help=\"Cromwell workflow ID.\")\n subcommand.add_argument(\n \"--grid-style\",\n help=\"Any valid `tablefmt` for python-tabulate.\",\n default=\"fancy_grid\",\n )\n subcommand.set_defaults(func=call)\n return subcommand",
"def add_to_parser(cls, subparsers):\n gene_assignment_group = subparsers.add_parser(\"gene_assignment\")\n gene_assignment_group.add_argument(\"--coordinates-geojson\", type=FsExistsType(), required=True)\n gene_assignment_group.add_argument(\"--spots-json\", type=FsExistsType(), required=True)\n gene_assignment_group.add_argument(\"-o\", \"--output\", required=True)\n gene_assignment_group.set_defaults(starfish_command=GeneAssignment._cli)\n gene_assignment_subparsers = gene_assignment_group.add_subparsers(dest=\"gene_assignment_algorithm_class\")\n\n for algorithm_cls in cls.algorithm_to_class_map().values():\n group_parser = gene_assignment_subparsers.add_parser(algorithm_cls.get_algorithm_name())\n group_parser.set_defaults(gene_assignment_algorithm_class=algorithm_cls)\n algorithm_cls.add_arguments(group_parser)\n\n cls.gene_assignment_group = gene_assignment_group",
"def add_parser(subparsers):\n parser = subparsers.add_parser(\n \"payoffs\",\n aliases=[\"pay\"],\n help=\"\"\"Compute payoffs\"\"\",\n description=\"\"\"Compute payoff relative information in input game of\n specified profiles.\"\"\",\n )\n parser.add_argument(\n \"--input\",\n \"-i\",\n metavar=\"<input-file>\",\n default=sys.stdin,\n type=argparse.FileType(\"r\"),\n help=\"\"\"Input file for script. (default:\n stdin)\"\"\",\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n metavar=\"<output-file>\",\n default=sys.stdout,\n type=argparse.FileType(\"w\"),\n help=\"\"\"Output file for script. (default:\n stdout)\"\"\",\n )\n parser.add_argument(\n \"profiles\",\n metavar=\"<profile>\",\n nargs=\"+\",\n help=\"\"\"File or string with\n json profiles from input games for which payoffs should be calculated.\n This file can be to be a list or a single profile\"\"\",\n )\n parser.add_argument(\n \"-t\",\n \"--type\",\n metavar=\"type\",\n default=\"payoffs\",\n choices=TYPE,\n help=\"\"\"What to return: {} (default: %(default)s)\"\"\".format(TYPE_HELP),\n )\n return parser",
"def define_sub_options(self):\n self.plugin_parser = self.parser.add_argument_group(\"Plugin Options\",\n \"Options for all plugins.\")\n self.plugin_parser.add_argument(\"-H\", \"--host\",\n default='127.0.0.1',\n required=True,\n help=\"Host IP address or DNS\",\n dest=\"host\")\n self.plugin_parser.add_argument(\"-u\", \"--user\",\n default=None,\n required=False,\n help=\"User name\",\n dest=\"user\")\n self.plugin_parser.add_argument(\"-p\", \"--password\",\n default=None,\n required=False,\n help=\"User password\",\n dest=\"password\")",
"def getparser():\n prs = ap.ArgumentParser(\n description=\"Format conversion for \"\n \"and introspection of \"\n \"intersphinx \"\n \"'objects.inv' files.\"\n )\n prs.add_argument(\n \"-\" + PrsConst.VERSION[0],\n \"--\" + PrsConst.VERSION,\n help=\"Print package version & other info\",\n action=\"store_true\",\n )\n\n sprs = prs.add_subparsers(\n title=\"Subcommands\",\n dest=PrsConst.SUBPARSER_NAME,\n metavar=f\"{{{PrsConst.CONVERT},{PrsConst.SUGGEST}}}\",\n help=\"Execution mode. Type \"\n \"'sphobjinv [mode] -h' \"\n \"for more information \"\n \"on available options. \"\n \"Mode names can be abbreviated \"\n \"to their first two letters.\",\n )\n\n # Enforce subparser as optional. No effect for 3.4 to 3.7;\n # briefly required a/o 3.7.0b4 due to change in default behavior, per:\n # https://bugs.python.org/issue33109. 3.6 behavior restored for\n # 3.7 release.\n sprs.required = False\n\n spr_convert = sprs.add_parser(\n PrsConst.CONVERT,\n aliases=[PrsConst.CONVERT[:2]],\n help=PrsConst.HELP_CO_PARSER,\n description=PrsConst.HELP_CO_PARSER,\n )\n spr_suggest = sprs.add_parser(\n PrsConst.SUGGEST,\n aliases=[PrsConst.SUGGEST[:2]],\n help=PrsConst.HELP_SU_PARSER,\n description=PrsConst.HELP_SU_PARSER,\n )\n\n # ### Args for conversion subparser\n spr_convert.add_argument(\n PrsConst.MODE,\n help=\"Conversion output format\",\n choices=(PrsConst.ZLIB, PrsConst.PLAIN, PrsConst.JSON),\n )\n\n spr_convert.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to file to be converted. Passing '-' indicates to read from stdin \"\n \"(plaintext/JSON only).\"\n ),\n )\n\n spr_convert.add_argument(\n PrsConst.OUTFILE,\n help=(\n \"Path to desired output file. \"\n \"Defaults to same directory and main \"\n \"file name as input file but with extension \"\n + PrsConst.HELP_CONV_EXTS\n + \", as appropriate for the output format. \"\n \"A path to a directory is accepted here, \"\n \"in which case the default output file name will be used. 
\"\n \"Passing '-' indicates to write to stdout. If \"\n + PrsConst.INFILE\n + \" is passed as '-', \"\n + PrsConst.OUTFILE\n + \" can be omitted and both stdin and stdout will be used.\"\n ),\n nargs=\"?\",\n default=None,\n )\n\n # Mutually exclusive group for --expand/--contract\n gp_expcont = spr_convert.add_argument_group(title=\"URI/display name conversions\")\n meg_expcont = gp_expcont.add_mutually_exclusive_group()\n meg_expcont.add_argument(\n \"-\" + PrsConst.EXPAND[0],\n \"--\" + PrsConst.EXPAND,\n help=\"Expand all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n meg_expcont.add_argument(\n \"-\" + PrsConst.CONTRACT[0],\n \"--\" + PrsConst.CONTRACT,\n help=\"Contract all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n # Clobber argument\n spr_convert.add_argument(\n \"-\" + PrsConst.OVERWRITE[0],\n \"--\" + PrsConst.OVERWRITE,\n help=\"Overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # stdout suppressor option (e.g., for scripting)\n spr_convert.add_argument(\n \"-\" + PrsConst.QUIET[0],\n \"--\" + PrsConst.QUIET,\n help=\"Suppress printing of status messages and \"\n \"overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # Flag to treat infile as a URL\n spr_convert.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. \"\n \"Cannot be used with an infile of '-'.\"\n ),\n action=\"store_true\",\n )\n\n # ### Args for suggest subparser\n spr_suggest.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to inventory file to be searched. 
\"\n \"Passing '-' indicates to read from stdin (plaintext/JSON only).\"\n ),\n )\n spr_suggest.add_argument(PrsConst.SEARCH, help=\"Search term for object suggestions\")\n spr_suggest.add_argument(\n \"-\" + PrsConst.ALL[0],\n \"--\" + PrsConst.ALL,\n help=\"Display all results \"\n \"regardless of the number returned \"\n \"without prompting for confirmation.\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.PAGINATE[0],\n \"--\" + PrsConst.PAGINATE,\n help=\"Paginate long search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.INDEX[0],\n \"--\" + PrsConst.INDEX,\n help=\"Include Inventory.objects list indices with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.SCORE[0],\n \"--\" + PrsConst.SCORE,\n help=\"Include fuzzywuzzy scores with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.THRESH[0],\n \"--\" + PrsConst.THRESH,\n help=\"Match quality threshold, integer 0-100, \"\n \"default 75. Default is suitable when \"\n \"'search' is exactly a known object name. \"\n \"A value of 30-50 gives better results \"\n \"for approximate matches.\",\n default=PrsConst.DEF_THRESH,\n type=int,\n choices=range(101),\n metavar=\"{0-100}\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. \"\n f\"Cannot be used with --{PrsConst.URL}.\"\n ),\n action=\"store_true\",\n )\n\n return prs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Executes the config subprogram with parsed command-line args.
|
def execute(args):
    """Run the config subcommand using the parsed command-line arguments."""
    cfg = Config.instance()

    # Only the config file's location was requested: print it and stop.
    if args.location:
        print(cfg.config_file)
        return

    # No name given at all: list every available section.
    if not args.name:
        print("\n".join(cfg.sections()))
        return

    # Split "<section>.<option>"; the option part may be missing.
    if "." in args.name:
        section, option = args.name.split(".", 1)
    else:
        section, option = args.name, None

    # Section given without an option: list that section's options.
    if not option:
        print("\n".join(cfg.options(section)))
        return

    # Removal of a value is not supported yet.
    if args.remove:
        abort("config removal not yet implemented")

    # Setting a value is not supported yet either.
    if args.value:
        abort("config setting not yet implemented")

    # Default action: print the option's value, optionally expanding
    # environment variables and "~" according to the --expand flag.
    print(cfg.get_default(section, option, expand_vars=args.expand, expand_user=args.expand))
|
[
"def config_main(args):\n command = args.get(\"command\")\n if command == \"set\":\n set_config(args)\n if command == \"unset\":\n unset_config(args)",
"def main():\n # set up the program to take in arguments from the command line",
"def main():\n args = docopt(__doc__, version='recipy v%s' % __version__)\n\n if args['--debug']:\n print('Command-line arguments: ')\n print(args)\n print('DB path: ', config.get_db_path())\n print('')\n print('Full config file (as interpreted):')\n print('----------------------------------')\n conf = config.read_config_file()\n s = six.StringIO()\n conf.write(s)\n print(s.getvalue())\n print('----------------------------------')\n\n\n if args['search']:\n search(args)\n elif args['latest']:\n latest(args)\n elif args['gui']:\n gui(args)\n elif args['annotate']:\n annotate(args)",
"def main():\n config.version = __version__\n noArgs = len(sys.argv)\n if noArgs == 1:\n guiLaunch()\n else:\n cliLaunch()",
"def run():\n args = get_args()\n try:\n conf = main(args)\n except: # noqa\n if not logger.handlers:\n # Add a logging handler if main failed to do so.\n logging.basicConfig()\n logger.exception(\n \"Program terminated abnormally, see stack trace \"\n \"below for more information\",\n exc_info=True)\n logger.info(\n \"If you suspect this is a bug or need help, please open an issue \"\n \"on https://github.com/ESMValGroup/ESMValTool/issues and attach \"\n \"the run/recipe_*.yml and run/main_log_debug.txt files from the \"\n \"output directory.\")\n sys.exit(1)\n else:\n if conf[\"remove_preproc_dir\"]:\n logger.info(\"Removing preproc containing preprocessed data\")\n logger.info(\"If this data is further needed, then\")\n logger.info(\"set remove_preproc_dir to false in config\")\n shutil.rmtree(conf[\"preproc_dir\"])\n logger.info(\"Run was successful\")",
"def run(self):\n config = load_config(confpath)\n # Either add or remove hooks (not allowing both)\n if self.options[\"--add\"]:\n url = self.options[\"<url>\"]\n hook = self.options[\"<hook>\"][0]\n self.add_hook(config, hook, url)\n elif self.options[\"--remove\"]:\n hook = self.options[\"<hook>\"]\n self.remove_hook(config, hook)\n else:\n # Write the configuration to the console\n print(\"\\nConfigurations\\n{0}\".format(\"=\" * len(\"Configurations\")))\n for key, val in config.items():\n print(\"\\n{0}\\n{1}\".format(key, \"-\" * len(key)))\n for k, v in val.items():\n print(\"{0} : {1}\".format(k, v))\n print(\"\")",
"def configure(self, command='./configure', args=None, cwd=None, env=None):\n pass",
"def process_cli_args():\n args = parse_cli_args()\n\n # delete empty args\n if not args[\"debug\"]:\n del args[\"debug\"]\n for arg_name in list(args.keys()):\n if args[arg_name] in [None, tuple()]:\n del args[arg_name]\n\n # validate\n validate_cli_args(args)\n\n # --write-config\n if args.pop(\"write_config\"):\n config_values = {}\n if args.get(\"command\"):\n config_values[\"command\"] = \" \".join(\n shlex.quote(subval) for subval in args[\"command\"]\n )\n if args.get(\"watch\"):\n config_values[\"watch\"] = \"\\n\".join(args[\"watch\"])\n if args.get(\"output\"):\n config_values[\"output\"] = \", \".join(args[\"output\"])\n for arg_name in [\"delay\", \"max_execs\", \"name\", \"start\", \"watcher\"]:\n if arg_name in args:\n config_values[arg_name] = args[arg_name]\n\n write_config_file(args, config_values)\n sys.exit(0)\n\n return args",
"def init(args):\n Configuration.load_config(vars(args).get(\"config\"))",
"def run(self, argv):\n ack = self.cfg.parse(argv)\n\n if ack:\n self.init()\n else:\n print(f\"App {self.__class__.__name__} finished\")",
"def processCommandLineOptions(self, args):\n del args\n msg = (\"processCommandLineOptions() not implemented, Config must be \"\n \"subclassed.\")\n raise NotImplementedError(msg)",
"def run_configuration_wizard():\n\n import argparse # import here because it is usually not needed by this module\n\n parser = argparse.ArgumentParser(description=\"Configure the T2K Data Manager\")\n parser.add_argument(\n \"-l\",\n \"--local\",\n action=\"store_true\",\n help=\"save the configuration file in the current diractory as '.%s.conf'\"\n % (_branding,),\n )\n\n args = parser.parse_args()\n\n # Load current configuration\n conf = load_config()\n\n # Go through all items and ask user what should be used\n for key in default_values:\n current_value = getattr(conf, key)\n default_value = default_values[key]\n help_text = descriptions.pop(key, \"-\")\n text = (\n \"\\nConfiguration parameter: %s\\n\"\n \"\\n\"\n \"%s\\n\"\n \"\\n\"\n \"Current value: %s\\n\"\n \"Default value: %s\\n\" % (key, help_text, current_value, default_value)\n )\n print_(text)\n\n new_value = input(\"Enter new value [keep current]: \").strip()\n if new_value != \"\":\n setattr(conf, key, new_value)\n\n if args.local:\n outf = path.join(os.getcwd(), \".%s.conf\" % (_branding,))\n else:\n outf = path.join(app_dirs.user_config_dir, \"%s.conf\" % (_branding,))\n\n print_(\"Saving configuration in %s\" % (outf,))\n try:\n os.makedirs(path.dirname(outf))\n except OSError:\n pass\n conf.save_config(outf)",
"def _configure(self):\n # Setup command line parser.\n argparser = argparse.ArgumentParser(description = self._description)\n argparser.add_argument('--config-file', help = 'name of the config file')\n argparser.add_argument('--inventory', help = 'name of the inventory file')\n argparser.add_argument('--group', help = 'name of the Ansible host group')\n argparser.add_argument('--fact-dir', help = 'name of the fact cache directory')\n argparser.add_argument('--ascii', help = 'print only ASCII characters (flag)', action = 'store_true', default = None)\n argparser.add_argument('--refresh', help = 'force host fact refresh (flag)', action = 'store_true', default = None)\n\n # Process command line arguments.\n self._config_cli = vars(argparser.parse_args())\n\n # IMPORTANT! Immediatelly rewrite the default value for configuration file\n # name, if the new value was received as command line argument.\n if not self._config_cli['config_file'] == None:\n self.config['config_file'] = self._config_cli['config_file']\n\n # Load configurations from external file.\n self._config_file = self.json_load(self.config.get('config_file'))\n\n # Merge all configurations together.\n self.config.update((k, v) for k, v in self._config_file.items() if v is not None)\n self.config.update((k, v) for k, v in self._config_cli.items() if v is not None)",
"def run_config(\n *, cfg: Union[pathlib.Path, str], overrides: Sequence[str], tag: Optional[str] = None\n) -> None:\n cmd = [\"python\", \"run.py\", \"--cfg\", str(cfg), \"--mode\", \"restart\", \"--force\"]\n if tag is not None:\n cmd.extend(\"--exp_id_pattern_override\", tag)\n cmd.extend(overrides)\n\n print(\"Cmd:\")\n print(f\"HH_EXP_DIR={OUTPUT_ROOT}\", *cmd)\n\n env = os.environ.copy()\n env[\"HH_EXP_DIR\"] = str(OUTPUT_ROOT)\n if \"USER\" not in env:\n # Weird CI stuff.\n env[\"USER\"] = \"root\"\n\n return subprocess.check_call(cmd, env=env)",
"def run_tool(\n descr,\n arguments,\n processes,\n config_id=None,\n config_dir_id=None):\n\n # Set up the primary parser.\n parser = argparse.ArgumentParser(description=descr)\n\n # If we have a configuration file id, then we want to try loading\n # that configuration file as defaults (or allow them to ignore\n # it).\n if config_id != None:\n # Add in the configuration file arguments.\n parser.add_argument(\n \"--config\", \n metavar=\"FILE\",\n help=(\"Uses the configuration file instead of searching \"\n + \"the default locations\"))\n parser.add_argument(\n \"--no-config\", \n action=\"store_true\",\n help=(\"Uses the configuration file instead of searching \"\n + \"the default locations\"))\n\n # Parse the initial arguments to get the configuration\n # file. Using parse_known_args will only parse the arguments\n # we are aware of (e.g., the configuration arguments).\n config_args, remaining_args = parser.parse_known_args()\n args = remaining_args\n\n # Load in the configuration file, optionally setting the\n # location or ignoring it entirely.\n load_config(config_id, config_dir_id, parser, config_args)\n\n # Go through the processes and add each one's subparser to the\n # current arguments.\n subparsers = parser.add_subparsers()\n\n for process_name, process in processes.iteritems():\n # Add this subparser to the primary parser.\n process_parser = subparsers.add_parser(\n process_name,\n help=process.get_help())\n process_parser.set_defaults(name=process_name)\n\n # Add the process-specific arguments to the parser.\n process.setup_arguments(process_parser)\n\n # Process the arguments given on the command line.\n args = parser.parse_args(arguments)\n\n # Use the default to figure out the process name which is then\n # used to call the process() method in that Process class.\n selected_name = args.name\n\n if not selected_name in processes:\n print \"Unknown tool name: \" + format(selected_name)\n exit(1)\n\n selected_process = processes[selected_name]\n 
selected_process.process(args)",
"def run(argv=None):\n import os\n import docopt\n import textwrap\n args = argv or sys.argv[1:]\n args = docopt.docopt(textwrap.dedent(run.__doc__), args)\n cfg = utils.parse_config(*args['<config>'])\n cfg.update(\n verbose=args['--verbose'],\n debug=args['--debug'],\n )\n pythonpath = cfg.get('pythonpath', [])\n pythonpath.append(cfg['here'])\n for path in pythonpath:\n sys.path.append(os.path.expanduser(path))\n if args['--logdir'] or 'logdir' in cfg:\n logdir = os.path.expanduser(args['--logdir'] or cfg.get('logdir'))\n IrcBot.logging_config = config.get_file_config(logdir)\n if args['--help-page']: # pragma: no cover\n for v in IrcBot.logging_config['handlers'].values():\n v['level'] = 'ERROR'\n if args['--debug']:\n IrcBot.venusian_categories.append('irc3.debug')\n bot = IrcBot(**cfg)\n if args['--raw']:\n bot.include('irc3.plugins.log', venusian_categories=['irc3.debug'])\n if args['--help-page']: # pragma: no cover\n bot.print_help_page()\n else:\n bot.run()\n if argv:\n return bot",
"def parseAndRun(cls):\n config = cls.ConfigClass()\n parser = cls.ArgumentParser(name=cls._DefaultName)\n args = parser.parse_args(config)\n task = cls(config=args.config)\n task.run(args)",
"def main(args=None):\n parsed_args = _parse_args(args)\n _initialize_configuration(parsed_args.pop('subcommand'), parsed_args.pop('config_file'))\n subcommand_class = parsed_args.pop('subcommand_class') # defined in _parse_args() by subparser.set_defaults()\n\n try:\n unhandled_exception_handler = UnhandledExceptionHandler.singleton()\n with unhandled_exception_handler:\n subcommand_class().run(**parsed_args)\n\n finally:\n # The force kill countdown is not an UnhandledExceptionHandler teardown callback because we want it to execute\n # in all situations (not only when there is an unhandled exception).\n _start_app_force_kill_countdown(seconds=10)",
"def main(self): # just put into if __name__ ...\n parser = self.get_parser()\n args = parser.parse_args()\n self.run(args)",
"def do(self, argin):\n\n device_data = self.target\n command_name = \"Configure\"\n\n try:\n this_server = TangoServerHelper.get_instance()\n self.dish_master_fqdn = \"\"\n property_value = this_server.read_property(\"DishMasterFQDN\")\n self.dish_master_fqdn = self.dish_master_fqdn.join(property_value)\n json_argument = device_data._load_config_string(argin)\n receiver_band = json_argument[\"dish\"][\"receiver_band\"]\n self._configure_band(receiver_band)\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_message = f\"Exception occured while executing the '{command_name}' command.\"\n this_server.write_attr(\"activityMessage\", log_message, False)\n tango.Except.re_throw_exception(\n dev_failed,\n f\"Exception in '{command_name}' command.\",\n log_message,\n f\"DishLeafNode.{command_name}Command\",\n tango.ErrSeverity.ERR,\n )\n except KeyError as key_error:\n raise Exception(\n f\"JSON key not found.'{key_error}'in Configure.do().\"\n )\n self.logger.info(\"'%s' command executed successfully.\", command_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts that buckets exist.
|
def test_bucket_exists(self):
    """Verify bucket_exists tracks creation and removal of a bucket."""
    bucket = self.temp_bucket_name
    # Before creation the bucket must not be reported as existing.
    self.assertFalse(self.storage.bucket_exists(bucket))
    # After creation it must be reported as existing.
    self.storage.make_bucket(bucket)
    self.assertTrue(self.storage.bucket_exists(bucket))
    # Clean up the temporary bucket.
    self.storage.remove_bucket(bucket)
|
[
"def test_buckets_empty(self):\n User.objects.create_user(username='empty', email='user@example.com', password='userexample')\n self.client.login(username='empty', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, 'No bucket yet.')",
"def test_bucket_does_not_exist(self):\n self.assertEqual(0, self.engine.create_bucket_calls)\n self.assertEqual(0, self.ui.asked_create)\n\n self.engine.configure()\n\n self.assertEqual(1, self.engine.get_bucket_calls)\n self.assertEqual(1, self.engine.create_bucket_calls)\n self.assertEqual(1, self.ui.asked_create)",
"def test_bucket_creation(self):\n res = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(res.status_code, 201)",
"def test_buckets_create_bucket(self):\n self.client.login(username='user', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, reverse('buckets:create'))",
"def test_asset_saintsxctf_s3_bucket_exists(self) -> None:\n bucket_name = 'asset.saintsxctf.com'\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n self.assertTrue(s3_bucket.get('Name') == bucket_name)",
"def test_crud_bucket(self):\n name = \"cbtestcreatebucket-{0}\".format(uuid.uuid4())\n test_bucket = self.provider.object_store.create(name)\n with helpers.cleanup_action(lambda: test_bucket.delete()):\n self.assertTrue(\n test_bucket.id in repr(test_bucket),\n \"repr(obj) should contain the object id so that the object\"\n \" can be reconstructed, but does not. eval(repr(obj)) == obj\")\n\n buckets = self.provider.object_store.list()\n\n list_buckets = [c for c in buckets if c.name == name]\n self.assertTrue(\n len(list_buckets) == 1,\n \"List buckets does not return the expected bucket %s\" %\n name)\n\n # check iteration\n iter_buckets = [c for c in self.provider.object_store\n if c.name == name]\n self.assertTrue(\n len(iter_buckets) == 1,\n \"Iter buckets does not return the expected bucket %s\" %\n name)\n\n # check find\n find_buckets = self.provider.object_store.find(name=name)\n self.assertTrue(\n len(find_buckets) == 1,\n \"Find buckets does not return the expected bucket %s\" %\n name)\n\n get_bucket = self.provider.object_store.get(\n test_bucket.id)\n self.assertTrue(\n list_buckets[0] ==\n get_bucket == test_bucket,\n \"Objects returned by list: {0} and get: {1} are not as \"\n \" expected: {2}\" .format(list_buckets[0].id,\n get_bucket.id,\n test_bucket.name))\n\n buckets = self.provider.object_store.list()\n found_buckets = [c for c in buckets if c.name == name]\n self.assertTrue(\n len(found_buckets) == 0,\n \"Bucket %s should have been deleted but still exists.\" %\n name)",
"def test_bucket_not_exists(make_stubber, make_unique_name, make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n stubber.stub_head_bucket_error(bucket_name, 'NoSuchBucket')\n\n assert not bucket_wrapper.bucket_exists(bucket_name)",
"def test_buckets_access_authorized(self):\n self.client.login(username='user', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, 'bucket start')",
"def test_delete_inexistent_bucketlist(self):\n bucketlist = {\n \"name\": \"Crack Game theory.\"\n }\n # create bucketlist\n response = self.client.post('/api/v1/bucketlists', data=bucketlist,\n headers=self.headers\n )\n self.assertEqual(json.loads(response.data)[\"msg\"],\n \"Bucketlist created successfully.\"\n )\n self.assertEqual(response.status_code, 201)\n # attempt delete\n response = self.client.delete('/api/v1/bucketlists/1234',\n data=bucketlist, headers=self.headers\n )\n self.assertEqual(json.loads(response.data)[\"error\"],\n \"Bucket does not exist.\"\n )\n self.assertEqual(response.status_code, 404)",
"def test_api_can_get_all_bucket(self):\n res = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(res.status_code, 201)\n res = self.client.get('/buckets/')\n self.assertEqual(res.status_code, 200)\n self.assertEqual(1, res.json['total'])",
"def test_add_bucket_acl_bucket_not_found_error(self):\n with self.assertRaises(NotFoundError):\n self.cm.add_bucket_acl(\"NonExistent\", \"ResponseSuccess\", \"read-storage\")",
"def test_create_bucketlist_returns_True_for_unique_bucketlist(self):\n self.bucket_list_app.load_user(\"james@gmail.com\", \"pass\")\n response = self.bucket_list_app.create_bucketlist(\"Mountain Climbing\", \n \"Climb all mountains in Kenya\")\n self.assertTrue(response, \n \"should return True for a unique bucketlist entry\")",
"def __ensure_bucket_availability(self):\n storage_client = storage.Client()\n if storage_client.lookup_bucket(self.__bucket_name) is None:\n # Create the new bucket\n storage_client.create_bucket(self.__bucket_name)",
"def test_buckets_anonymous(self):\n response = self.client.get(reverse('buckets:list'))\n self.assertRedirects(response, '/accounts/login/?next=/buckets/')",
"def test_s3_bucket_creation(self, noobaa_obj, created_buckets):\n\n bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')\n logger.info(f'Creating new bucket - {bucketname}')\n created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))",
"def test_get_nonexisting_bucketlist(self):\n\n response = self.client().get(\"/api/v1.0/bucketlists/66686768\",\n headers=self.my_header)\n self.assertEqual(response.status_code, 404)\n self.assertIn(\"404 Not Found\", str(response.data))",
"def test_get_bucket_success(self):\n bucket = self.cm.get_bucket(\"testVaultName\")\n self.assertEqual(bucket.name, \"testVaultName\")\n self.assertEqual(bucket.id, 274)",
"def test_bucket_deletion(self):\n rv = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(rv.status_code, 201)\n res = self.client.delete('/buckets/1')\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 404\n res = self.client.get('/buckets/1')\n self.assertEqual(res.status_code, 404)",
"def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.failUnlessEqual(100, fit)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts presigned urls can be generated for put requests.
|
def test_put_object_presigned_url(self):
    """A presigned URL for a PUT request should reference the object name."""
    presigned = self.storage.get_presigned_url(
        self.bucket_name,
        self.object_name,
        method="PUT",
    )
    # The generated URL must embed the object's name.
    self.assertIn(self.object_name, presigned)
|
[
"def test_presigned_url(self):\n # Test we can generate presigned urls for GET and POST requests\n for action in ['GET', 'POST']:\n url = DataManager.generate_presigned_s3_url(bucket='hemlock-highway-test',\n key='customer1/data.csv',\n action=action)\n self.assertTrue(isinstance(url, str) and url.startswith('https://') and 'hemlock-highway-test' in url)\n\n # Raise ValueError on unavailable action\n with self.assertRaises(ValueError):\n DataManager.generate_presigned_s3_url(bucket='test', key='something.csv', action='FAIL')",
"def test_get_presigned_url(self):\n self.assertRaises(\n StorageException,\n self.storage.get_presigned_url,\n self.bucket_name,\n self.object_name,\n method=HttpMethod.GET,\n )\n data, size = str_buffer(self.object_data)\n self.storage.put_object(self.bucket_name, self.object_name, data, size)\n hostname = random_str()\n url = self.storage.get_presigned_url(\n self.bucket_name,\n self.object_name,\n method=HttpMethod.GET,\n use_hostname=hostname,\n )\n self.assertIn(hostname, url)\n self.assertIn(self.object_name, url)",
"def test_that_generate_signed_url_is_called(self):\n bucket = \"fake\"\n _get_gcs_presigned_put_url(self.client, bucket, \"/object.jpg\", \"aBc\", 0, 0)\n self.generate_signed_url_method.assert_called_once()",
"def test_s3upload_get_presigned_url(self):\n responses.add(responses.POST, \"https://30yinsv8k6.execute-api.us-east-1.amazonaws.com/prod/get-signed-url\",\n body=\"http://test/\", status=200)\n\n resp = ec2rlcore.s3upload.get_presigned_url(\"https://aws-support-uploader.s3.amazonaws.com/uploader?\"\n \"account-id=9999999999&case-id=99999999&expiration=1486577795&\"\n \"key=92e1ab350e7f5302551e0b05a89616381bb6c66\"\n \"9c9492d9acfbf63701e455ef6\", \"test\")\n\n self.assertEqual(resp, \"http://test/\")",
"def test_can_upload_file_to_presigned_url(self):\n file_contents = b\"blahfilecontents\"\n file = BytesIO(file_contents)\n # S3 expects a base64-encoded MD5 checksum\n md5 = hashlib.md5(file_contents)\n md5_checksum = md5.hexdigest()\n md5_checksum_base64 = codecs.encode(codecs.decode(md5_checksum, \"hex\"), \"base64\").decode()\n\n filename = \"blahfile.jpg\"\n filepath = generate_object_storage_name(md5_checksum, filename)\n\n ret = get_presigned_upload_url(filepath, md5_checksum_base64, 1000, len(file_contents))\n url = ret[\"uploadURL\"]\n content_type = ret[\"mimetype\"]\n\n resp = requests.put(\n url,\n data=file,\n headers={\n \"Content-Type\": content_type,\n }\n )\n resp.raise_for_status()",
"def test_s3upload_get_presigned_url_bad_url(self, mock_urllib):\n with self.assertRaises(ec2rlcore.s3upload.S3UploadUrlParsingFailure):\n ec2rlcore.s3upload.get_presigned_url(\"http://fakeurl.asdf123\", \"test\")",
"def testGenerateUrl(self):\n self._RunAsync(self.object_store.Put, self.key, 'foo')\n\n url = self.object_store.GenerateUrl(self.key,\n cache_control='private,max-age=31536000',\n expires_in=100)\n response = httpclient.HTTPClient().fetch(url, method='GET')\n self.assertEqual(response.body, 'foo')\n self.assertEqual(response.headers['Cache-Control'], 'private,max-age=31536000')",
"def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):\n # S3's PUT Object parameters:\n # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html\n method = \"put_object\"\n fields = {\n \"Bucket\": bucket,\n \"Key\": filepath,\n }\n\n response = s3_client.generate_presigned_url(\n ClientMethod=method,\n Params=fields,\n ExpiresIn=lifetime_sec,\n )\n\n return response",
"def test_put_get_delete_bucket_policy(make_stubber, make_unique_name, make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n make_bucket(stubber, bucket_wrapper, bucket_name, stubber.region_name)\n\n policy_id = uuid.uuid1()\n\n put_policy = {\n 'Version': '2012-10-17',\n 'Id': str(policy_id),\n 'Statement': [{\n 'Effect': 'Allow',\n 'Principal': {'AWS': 'arn:aws:iam::111122223333:user/Martha'},\n 'Action': [\n 's3:GetObject',\n 's3:ListBucket'\n ],\n 'Resource': [\n f'arn:aws:s3:::{bucket_name}/*',\n f'arn:aws:s3:::{bucket_name}'\n ]\n }]\n }\n\n stubber.stub_put_bucket_policy(bucket_name, put_policy)\n stubber.stub_get_bucket_policy(bucket_name, put_policy)\n stubber.stub_delete_bucket_policy(bucket_name)\n stubber.stub_get_bucket_policy_error(bucket_name, 'NoSuchBucketPolicy')\n\n bucket_wrapper.put_policy(bucket_name, put_policy)\n policy = bucket_wrapper.get_policy(bucket_name)\n assert put_policy == policy\n bucket_wrapper.delete_policy(bucket_name)\n with pytest.raises(ClientError) as exc_info:\n _ = bucket_wrapper.get_policy(bucket_name)\n assert exc_info.value.response['Error']['Code'] == 'NoSuchBucketPolicy'",
"def test_that_we_return_a_string(self):\n bucket = \"fake\"\n ret = _get_gcs_presigned_put_url(\n self.client, bucket, \"/object.jpg\", \"aBc\", 0, 0\n )\n assert isinstance(ret, str)",
"def test_request_put(self):\n r = self.base._request('/put', 'PUT', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/put')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')\n self.assertEqual(r['form']['foo'], 'bar')",
"def test_api_v3_linked_files_linked_file_public_id_put(self):\n pass",
"def presigned_url(\n self,\n url,\n expiration=10,\n force_download=False,\n ):\n raise NotImplementedError",
"def test_sign_quest_upload(self):\n resp = self.app.get(\n self.url_for(\n backend.quest_views.QuestStaticAsset,\n quest_id='4',\n file_name='b.png',\n mime_type='image/png'))\n self.assertEqual(\n json.loads(resp.data)['s3_url'],\n \"https://bucket.s3.amazonaws.com/quests/4/b.png\")",
"def create_s3_put_url(key, content_type):\n url = boto3.client('s3').generate_presigned_url(\n ## TODO\n )\n\n return url",
"def test_api_v3_epics_epic_public_id_put(self):\n pass",
"def test_resource_properties(self):\n res = S3Resource(\"s3://localhost:8069/test_bucket/test_key\")\n assert res._endpoint == 'http://localhost:8069', res._endpoint\n assert res._bucket == 'test_bucket', res._bucket\n assert res._key == 'test_key', res._key",
"def get_presigned_put_url(filename, config, secrets):\n from minio import Minio\n try:\n from minio.error import ResponseError\n except ImportError:\n from minio.error import S3Error as ResponseError\n\n config_startd_logging = config['StartdLogging']\n secrets_startd_logging = secrets['StartdLogging']\n\n client = Minio(config_startd_logging['url'],\n access_key=secrets_startd_logging['access_key'],\n secret_key=secrets_startd_logging['secret_key'],\n secure=True\n )\n\n try:\n return client.presigned_put_object(config_startd_logging['bucket'],\n filename,\n datetime.timedelta(days=1))\n except ResponseError as err:\n print(err)",
"def test_api_v3_files_file_public_id_put(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts presigned urls can be generated for get requests.
|
def test_get_presigned_url(self):
    """Presigned GET URLs require a stored object and honor a custom hostname."""
    # Requesting a URL before the object is uploaded must raise.
    with self.assertRaises(StorageException):
        self.storage.get_presigned_url(
            self.bucket_name, self.object_name, method=HttpMethod.GET
        )

    # Upload the object, then ask for a URL served from a custom hostname.
    payload, length = str_buffer(self.object_data)
    self.storage.put_object(self.bucket_name, self.object_name, payload, length)
    custom_host = random_str()
    presigned = self.storage.get_presigned_url(
        self.bucket_name,
        self.object_name,
        method=HttpMethod.GET,
        use_hostname=custom_host,
    )

    # The URL must embed both the requested hostname and the object name.
    self.assertIn(custom_host, presigned)
    self.assertIn(self.object_name, presigned)
|
[
"def test_presigned_url(self):\n # Test we can generate presigned urls for GET and POST requests\n for action in ['GET', 'POST']:\n url = DataManager.generate_presigned_s3_url(bucket='hemlock-highway-test',\n key='customer1/data.csv',\n action=action)\n self.assertTrue(isinstance(url, str) and url.startswith('https://') and 'hemlock-highway-test' in url)\n\n # Raise ValueError on unavailable action\n with self.assertRaises(ValueError):\n DataManager.generate_presigned_s3_url(bucket='test', key='something.csv', action='FAIL')",
"def test_s3upload_get_presigned_url(self):\n responses.add(responses.POST, \"https://30yinsv8k6.execute-api.us-east-1.amazonaws.com/prod/get-signed-url\",\n body=\"http://test/\", status=200)\n\n resp = ec2rlcore.s3upload.get_presigned_url(\"https://aws-support-uploader.s3.amazonaws.com/uploader?\"\n \"account-id=9999999999&case-id=99999999&expiration=1486577795&\"\n \"key=92e1ab350e7f5302551e0b05a89616381bb6c66\"\n \"9c9492d9acfbf63701e455ef6\", \"test\")\n\n self.assertEqual(resp, \"http://test/\")",
"def test_put_object_presigned_url(self):\n url = self.storage.get_presigned_url(\n self.bucket_name, self.object_name, method=\"PUT\"\n )\n self.assertIn(self.object_name, url)",
"def test_that_generate_signed_url_is_called(self):\n bucket = \"fake\"\n _get_gcs_presigned_put_url(self.client, bucket, \"/object.jpg\", \"aBc\", 0, 0)\n self.generate_signed_url_method.assert_called_once()",
"def test_fetch_public_keys(mocked_requests_get):\n assert fetch_public_keys(APP) is not None",
"def test_get_url(self):\n package = make_package()\n url = self.storage.get_url(package)\n self.assertEqual(package.url, url)\n self.assertIsNotNone(package.expire)\n\n parts = urlparse(url)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + package.path)\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n actual_expire = (time.mktime(package.expire.timetuple()) +\n self.storage.buffer_time)\n self.assertEqual(int(query['Expires'][0]), actual_expire)\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['aws.access_key'])",
"def test_s3upload_get_presigned_url_bad_url(self, mock_urllib):\n with self.assertRaises(ec2rlcore.s3upload.S3UploadUrlParsingFailure):\n ec2rlcore.s3upload.get_presigned_url(\"http://fakeurl.asdf123\", \"test\")",
"def testGenerateUrl(self):\n self._RunAsync(self.object_store.Put, self.key, 'foo')\n\n url = self.object_store.GenerateUrl(self.key,\n cache_control='private,max-age=31536000',\n expires_in=100)\n response = httpclient.HTTPClient().fetch(url, method='GET')\n self.assertEqual(response.body, 'foo')\n self.assertEqual(response.headers['Cache-Control'], 'private,max-age=31536000')",
"def test_get_url(self):\n package = make_package()\n self.request.app_url.side_effect = lambda *x: '/'.join(x)\n url = self.storage.get_url(package)\n expected = 'api/package/%s/%s/download/%s' % (package.name,\n package.version,\n package.filename)\n self.assertEqual(url, expected)",
"def _test_url_for_GET_request(client, url):\n\n response = client.get(url)\n\n assert response.status_code == 200, f\"GET {url} should return 200 response\"",
"def presigned_url(\n self,\n url,\n expiration=10,\n force_download=False,\n ):\n raise NotImplementedError",
"def test_returns_string_if_inputs_are_valid(self):\n\n # use a real connection here as a sanity check\n ret = get_presigned_upload_url(\n \"a/b/abc.jpg\", \"aBc\", 10, 1, storage=self.STORAGE, client=None\n )\n url = ret[\"uploadURL\"]\n\n assert isinstance(url, str)",
"def test_urls_are_valid():\n for key in eio.DATA_URLS:\n dataset = eio.DATA_URLS[key]\n if not isinstance(dataset, list):\n dataset = [dataset]\n for url, name, kind in dataset:\n r = requests.get(\"http://www.example.com\")\n assert r.status_code == 200",
"def test_get_promotion_urls(self):\n pass",
"def test_wrong_endpoint_url(self):\n # Try to get a book from wrong url\n response = self.client.get('/api/v2/booooks')\n self.assertIn(\"http://localhost/api/v2/booooks is not a valid url\",\n str(response.data), msg=\"Handles invalid url\")",
"def test_price_endpoint_available(self):\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def VerifyURLs(urls):\n print 'Verifying that images exist for URLs...'\n missing = []\n for url in urls.iterkeys():\n if not VerifyURL(url):\n print 'Missing: %s, referenced by: \\n %s' % (url, '\\n '.join(urls[url]))\n missing.append(url)\n return missing",
"def test_url2src_using_get(self):\n pass",
"def test_item_endpoint_available(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts an object can be retrieved from the storage implementation.
|
def test_get_object(self):
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
data = self.storage.get_object(self.bucket_name, self.object_name)
self.assertEqual(self.object_data, loads(data.read().decode("utf-8")))
|
[
"def test_a_storage_type(self):\n self.assertIsNotNone(self.storage.all())",
"def test_storage_2(self):\n with self.assertRaises(TypeError):\n all_objs = storage.all(None)",
"def test_entity_storage_initialization():\n storage = EntityStorage()\n assert isinstance(storage, EntityStorage)",
"def testinstance(self):\n self.assertIsInstance(self.object, FileStorage)",
"def test_storage(self):\n storage = FileStorage()\n self.assertIsInstance(storage, FileStorage)",
"def test_type_objects(self):\n self.assertEqual(type(storage.all()), dict)",
"def requested_object_storage_access(self):\n return bool(self._unit.received[\"enable-object-storage-access\"])",
"def has_storage(self, cls):\r\n return True",
"def testBasicStorage(self):\n ifs = self.CreateFieldStorage('key=value')\n self.assertTrue(ifs)\n self.assertEqual(ifs.getfirst('key'), 'value')\n self.assertEqual(ifs.getlist('key'), ['value'])",
"def _attempt_storage_read(self):\n self._bound_target = getattr(self._storage_object, self.storage_type)[self._target]\n # Ensure that the target we bind to matches the type of driver\n try:\n if self._bound_target.getncattr('IODriver_Type') != self.dtype_string():\n raise TypeError(\"Storage target on NetCDF file is of type {} but this driver is designed to handle \"\n \"type {}!\".format(self._bound_target.getncattr('IODriver_Type'), self.dtype_string()))\n except AttributeError:\n warnings.warn(\"This Codec cannot detect storage type from on-disk variable. .write() and .append() \"\n \"operations will not work and .read() operations may work\", RuntimeWarning)",
"def test_entity_storage_is_in_storage_asset(\n create_test_db, create_project, prepare_entity_storage\n):\n from stalker import Asset, Task, Version\n\n project = create_project\n char1 = (\n Asset.query.filter(Asset.project == project)\n .filter(Asset.name == \"Char1\")\n .first()\n )\n model = Task.query.filter(Task.parent == char1).filter(Task.name == \"Model\").first()\n assert model is not None\n v1 = model.versions[0]\n v2 = model.versions[1]\n v3 = model.versions[2]\n assert v1 is not None\n assert v2 is not None\n assert v3 is not None\n\n storage = EntityStorage()\n storage.add_entity(v1)\n\n assert storage.is_in_storage(char1) is True\n assert storage.is_in_storage(model) is True\n assert storage.is_in_storage(v1) is True\n assert storage.is_in_storage(v2) is False\n assert storage.is_in_storage(v3) is False",
"def test_file_storage_attributes(self):\n storage = FileStorage()\n self.assertIsInstance(storage._FileStorage__objects, dict)\n self.assertIsInstance(storage._FileStorage__file_path, str)",
"def test_exists(self, _blob):\n storage.exists('key')\n\n _blob.assert_called_once_with('key')\n _blob().exists.assert_called_once_with()",
"def test_storage_has_property_question_list(self):\n self.assertTrue(hasattr(self.storage, 'question_list'))",
"def test_amenitymodel(self):\n all_objects = storage.all()\n my_model = Amenity()\n storage.new(my_model)\n key = \"{}.{}\".format(my_model.__class__.__name__, my_model.id)\n self.assertIn(key, all_objects.keys())",
"def test_getNonexistant(self):\n failure = self.failureResultOf(self.storage.get([\"BOGUS\"]))\n failure.trap(exceptions.NoSuchStoreException)",
"def testGetObjectMetadata(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object4 = {\n \"id\": \"test_object_id4\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome4\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta4 = impl.save_object(test_object4)\n\n obj = impl.get_objectmeta({\"workspace\":ws_name,\"id\": \"test_object_id4\", \"type\": \"Genome\",\"auth\": self.__class__.token})\n\n self.assertIn({\"origin\":\"shreyas\"}, obj)",
"def isManaged(self, object):\n return self.retrieving.supportsRetrieving(object)",
"def test_storage_has_property_rsvp_list(self):\n self.assertTrue(hasattr(self.storage, 'rsvp_list'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts an object can be renamed.
|
def test_rename_object(self):
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
new_object_name = random_str()
self.storage.rename_object(
self.bucket_name, self.object_name, new_object_name
)
self.assertFalse(
self.storage.object_exists(self.bucket_name, self.object_name)
)
self.assertTrue(
self.storage.object_exists(self.bucket_name, new_object_name)
)
|
[
"def test_version_rename_error_bad_new_name(self):\n rv, output = self.execute('version rename 1.0 2.0')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_used_as_name_reifier (self):\n self._test_reifiable(self.create_name())",
"def testFileRename(self):\n self.assertBackwardCompatible([\n Change('foo/foo.mojom', old='[Stable] struct S {};', new=None),\n Change('bar/bar.mojom',\n old=None,\n new='[Stable] struct S { [MinVersion=1] int32 x; };')\n ])\n self.assertNotBackwardCompatible([\n Change('foo/foo.mojom', old='[Stable] struct S {};', new=None),\n Change('bar/bar.mojom', old=None, new='[Stable] struct S { int32 x; };')\n ])",
"def test_rename(caplog):\n alphabet = [\"A\", \"B\", \"C\", \"D\"]\n\n # Test mapping\n aahCluster_ = aah_cluster.copy()\n mapping = {\n old: alphabet[k] for k, old in enumerate(aah_cluster._cluster_names)\n }\n for key, value in mapping.items():\n assert isinstance(key, str)\n assert isinstance(value, str)\n assert key != value\n aahCluster_.rename_clusters(mapping=mapping)\n assert aahCluster_._cluster_names == alphabet\n assert aahCluster_._cluster_names != aah_cluster._cluster_names\n\n # Test new_names\n aahCluster_ = aah_cluster.copy()\n aahCluster_.rename_clusters(new_names=alphabet)\n assert aahCluster_._cluster_names == alphabet\n assert aahCluster_._cluster_names != aah_cluster._cluster_names\n\n # Test invalid arguments\n aahCluster_ = aah_cluster.copy()\n with pytest.raises(TypeError, match=\"'mapping' must be an instance of \"):\n aahCluster_.rename_clusters(mapping=101)\n with pytest.raises(ValueError, match=\"Invalid value for the 'old name'\"):\n mapping = {\n old + \"101\": alphabet[k]\n for k, old in enumerate(aah_cluster._cluster_names)\n }\n aahCluster_.rename_clusters(mapping=mapping)\n with pytest.raises(TypeError, match=\"'new name' must be an instance of \"):\n mapping = {old: k for k, old in enumerate(aah_cluster._cluster_names)}\n aahCluster_.rename_clusters(mapping=mapping)\n with pytest.raises(\n ValueError, match=\"Argument 'new_names' should contain\"\n ):\n aahCluster_.rename_clusters(new_names=alphabet + [\"E\"])\n\n aahCluster_.rename_clusters()\n assert \"Either 'mapping' or 'new_names' should not be\" in caplog.text\n\n with pytest.raises(\n ValueError, match=\"Only one of 'mapping' or 'new_names'\"\n ):\n mapping = {\n old: alphabet[k]\n for k, old in enumerate(aah_cluster._cluster_names)\n }\n aahCluster_.rename_clusters(mapping=mapping, new_names=alphabet)\n\n # Test unfitted\n aahCluster_ = aah_cluster.copy()\n aahCluster_.fitted = False\n _check_unfitted(aahCluster_)\n with pytest.raises(RuntimeError, match=\"must be fitted 
before\"):\n mapping = {\n old: alphabet[k]\n for k, old in enumerate(aah_cluster._cluster_names)\n }\n aahCluster_.rename_clusters(mapping=mapping)\n with pytest.raises(RuntimeError, match=\"must be fitted before\"):\n aahCluster_.rename_clusters(new_names=alphabet)",
"def test_instance_rename(self):\n # create the instance\n ret_val = self.run_cloud(\n \"-p ec2-test {} --no-deploy\".format(self.instance_name), timeout=TIMEOUT\n )\n # check if instance returned\n self.assertInstanceExists(ret_val)\n\n changed_name = self.instance_name + \"-changed\"\n\n rename_result = self.run_cloud(\n \"-a rename {} newname={} --assume-yes\".format(\n self.instance_name, changed_name\n ),\n timeout=TIMEOUT,\n )\n self.assertFalse(\n self._instance_exists(),\n \"Instance wasn't renamed: |\\n{}\".format(rename_result),\n )\n self.assertInstanceExists(instance_name=changed_name)\n\n self.assertDestroyInstance(changed_name)",
"def test_component_rename_error_bad_new_name(self):\n rv, output = self.execute('component rename component1 component2')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_milestone_rename_ok(self):\n self.execute('milestone rename milestone1 changed_milestone')\n rv, output = self.execute('milestone list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def test_change_name(self):\n player1 = player.Player(1, \"Kalle\")\n player1.change_name(\"Kalele\")\n self.assertEqual(player1.name, \"Kalele\")",
"def test_component_rename_ok(self):\n self.execute('component rename component1 changed_name')\n rv, output = self.execute('component list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def _check_object_name(object_name):\r\n if \",\" in object_name:\r\n raise ValueError(\r\n \"Please, rename object '{}' without commas\".format(object_name)\r\n )\r\n return object_name.strip()",
"def test_milestone_rename_error_bad_milestone(self):\n rv, output = self.execute(\n 'milestone rename bad_milestone changed_name')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_broken_legacy_rename(self):\n\n paths = self.make_misc_files(self.lint_mf)\n paths.extend(self.make_misc_files(self.ref_mf))\n rcfile = os.path.join(self.test_root, \"pkglintrc\")\n\n legacy = os.path.join(self.test_root,\n \"legacy-uses-renamed-ancestor.mf\")\n renamed_new = os.path.join(self.test_root,\n \"broken-renamed-ancestor-new.mf\")\n renamed_old = os.path.join(self.test_root,\n \"renamed-ancestor-old.mf\")\n renamed_self_depend = os.path.join(self.test_root,\n \"self-depend-renamed-ancestor-new.mf\")\n compat_legacy = os.path.join(self.test_root,\n \"compat-renamed-ancestor-old.mf\")\n\n # look for a rename that didn't ultimately resolve to the\n # package that contained the legacy action\n lint_logger = TestLogFormatter()\n manifests = read_manifests([legacy, renamed_new], lint_logger)\n\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n ref_uris=[self.ref_uri], lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n if \"pkglint.action005.1\" not in msg:\n lint_msgs.append(msg)\n\n self.assert_(len(lint_msgs) == 2, \"Unexpected lint messages \"\n \"{0} produced when linting broken renaming with legacy \"\n \"pkgs\".format(lint_msgs))\n\n seen_2_3 = False\n seen_3_4 = False\n for i in lint_logger.ids:\n if i == \"pkglint.manifest002.3\":\n seen_2_3 = True\n if i == \"pkglint.action003.4\":\n seen_3_4 = True\n\n self.assert_(seen_2_3 and seen_3_4,\n \"Missing expected broken renaming legacy errors, \"\n \"got {0}\".format(lint_msgs))\n\n # make sure we spot renames that depend upon themselves\n lint_logger = TestLogFormatter()\n manifests = read_manifests([legacy, renamed_self_depend],\n lint_logger)\n\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n ref_uris=[self.ref_uri], 
lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n lint_msgs.append(msg)\n\n self.assert_(len(lint_msgs) == 2, \"Unexpected lint messages \"\n \"produced when linting broken self-dependent renaming with \"\n \"legacy pkgs\")\n seen_2_4 = False\n seen_3_5 = False\n for i in lint_logger.ids:\n if i == \"pkglint.manifest002.4\":\n seen_2_4 = True\n if i == \"pkglint.action003.5\":\n seen_3_5 = True\n self.assert_(seen_2_3 and seen_3_4,\n \"Missing expected broken renaming self-dependent errors \"\n \"with legacy pkgs. Got {0}\".format(lint_msgs))\n\n # make sure we can deal with compatibility packages. We include\n # the 'renamed_old' package as well as the 'compat_legacy'\n # to ensure that pkglint is satisfied by the compatability\n # package, rather that trying to follow renames from the\n # 'renamed_old' package. (otherwise, if a package pointed to by\n # the legacy 'pkg' attribute doesn't exist, pkglint wouldn't\n # complain)\n lint_logger = TestLogFormatter()\n manifests = read_manifests([renamed_old, compat_legacy],\n lint_logger)\n\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n ref_uris=[self.ref_uri], lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n lint_msgs.append(msg)\n\n self.debug(lint_msgs)\n self.assert_(len(lint_msgs) == 0, \"Unexpected lint messages \"\n \"produced when linting a compatibility legacy package\")\n\n # the 'legacy' package includes a legacy action which should\n # also be satisfied by the compat_legacy being installed.\n lint_logger = TestLogFormatter()\n manifests = read_manifests([legacy, compat_legacy],\n lint_logger)\n\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n 
ref_uris=[self.ref_uri], lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n lint_msgs.append(msg)\n\n self.assert_(len(lint_msgs) == 0, \"Unexpected lint messages \"\n \"produced when linting a compatibility legacy package\")",
"def test_version_rename_ok(self):\n self.execute('version rename 1.0 9.9')\n rv, output = self.execute('version list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def test_name(self):\n\n # Default initialized name is empty.\n self.assertEqual(self.named_tr.getName(), '')\n\n self.named_tr.setName(self.TEST_NAME)\n self.assertEqual(self.named_tr.getName(), self.TEST_NAME)\n\n # Wrong type tests.\n for invalid in self.TEST_INVALIDS:\n with self.assertRaises(TypeError):\n self.named_tr.setName(invalid)",
"def test_rename_project(self):\n rename_project(new_name=\"new name for test project\", pid=self.pid)\n self.assertEqual(get_project_by_id(pid=self.pid).name, \"new name for test project\")",
"def test_names() -> None:\n prop = Keyvalues('Test1', 'value')\n\n # Property.name casefolds the argument.\n assert prop.name == 'test1'\n assert prop.real_name == 'Test1'\n\n # Editing name modifies both values\n prop.name = 'SECOND_test'\n assert prop.name == 'second_test'\n assert prop.real_name == 'SECOND_test'\n\n # It can also be set to None - deprecated\n with pytest.deprecated_call(match='[r|R]oot [p|P]ropert[y|ies]'):\n prop.name = None # type: ignore\n with pytest.deprecated_call(match='[r|R]oot [p|P]ropert[y|ies]'):\n assert prop.name is prop.real_name is None",
"def test_renamed(self):\n renamer = bioformats.seqname.FastaSeqRenamer()\n renamer.read_renaming_dict(self.__renaming_dict)\n with open(self.__output, 'w') as output_fasta:\n for line in renamer.renamed(self.__fasta):\n output_fasta.write(line)\n\n # perform the reverse renaming\n rev_renamer = bioformats.seqname.FastaSeqRenamer()\n rev_renamer.read_renaming_dict(self.__renaming_dict)\n with open(self.__rev_output, 'w') as rev_output_fasta:\n for line in renamer.renamed(self.__output, reverse=True):\n rev_output_fasta.write(line)\n\n # compare the original and reverse-renamed FASTA files\n original_fasta = Fasta(self.__fasta)\n rev_renamed_fasta = Fasta(self.__rev_output)\n for x, y in zip(\n original_fasta.keys(),\n rev_renamed_fasta.keys()):\n self.assertEqual(x, y)\n\n # check if the missing sequence exception is raised\n del renamer.renaming_dict['seq2']\n with self.assertRaises(MissingSeqNameError):\n for _ in renamer.renamed(self.__fasta):\n pass",
"def rename(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def test_nuke_name_correct():\r\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Asserts it is possible to concat objects.
|
def test_concat_objects(self):
second_object_name = random_str()
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
data.seek(0)
self.storage.put_object(
self.bucket_name, second_object_name, data, size
)
self.storage.concat_objects(
self.bucket_name,
self.object_name,
[self.object_name, second_object_name],
)
self.storage.delete_object(self.bucket_name, second_object_name)
self.storage.get_object(self.bucket_name, self.object_name)
data = self.storage.get_object(self.bucket_name, self.object_name)
self.assertEqual(
data.read().decode("utf-8"),
dumps(self.object_data) + dumps(self.object_data),
)
|
[
"def test_concat_fails_empty():\n match = \"No objects to concatenate\"\n with pytest.raises(ValueError, match=match):\n concat([])",
"def test_merge(self):\n ply1 = copy.deepcopy(self.instance)\n ply2 = ply1.merge(self.instance)\n ply1.merge([self.instance], no_copy=True)\n self.assert_equal(ply1, ply2)",
"def test_add_objects(self):\n pass",
"def test_concat_fails_iterable(arg, msg):\n match = f\"'{msg}' object is not iterable\"\n with pytest.raises(TypeError, match=match):\n concat(arg)",
"def test_list_add():\n a = List([1, 2, 3])\n b = List([3, 4, 5])\n c = a + b\n assert c == List([1, 2, 3, 3, 4, 5])\n assert type(c) is List",
"def test_concat_single_item(test_df):\n obs = concat([test_df])\n assert_iamframe_equal(obs, test_df)",
"def testconcatenate(self, *args, **kwargs):\n return _ms.ms_testconcatenate(self, *args, **kwargs)",
"def test_concatenation(self):\n a = NFA()\n a.end = State()\n a.start = State()\n a.start.left = Transition(\"a\", a.end)\n\n b = NFA()\n b.end = State()\n b.start = State()\n b.start.left = Transition(\"b\", b.end)\n\n a += b\n\n self.assertEqual(a.end, b.end)",
"def test_extend(self):\n # 1 - create catalog and extend it with list of events\n catalog = Catalog()\n event1 = Event()\n event2 = Event()\n self.assertEquals(len(catalog), 0)\n catalog.extend([event1, event2])\n self.assertEquals(len(catalog), 2)\n self.assertEquals(catalog.events, [event1, event2])\n # 2 - extend it with other catalog\n event3 = Event()\n event4 = Event()\n catalog2 = Catalog([event3, event4])\n self.assertEquals(len(catalog), 2)\n catalog.extend(catalog2)\n self.assertEquals(len(catalog), 4)\n self.assertEquals(catalog.events, [event1, event2, event3, event4])\n # adding objects other as Catalog or list should fails\n self.assertRaises(TypeError, catalog.extend, str)\n self.assertRaises(TypeError, catalog.extend, event1)\n self.assertRaises(TypeError, catalog.extend, (event1, event2))",
"def test_append(self):\n # 1 - create catalog and add a few events\n catalog = Catalog()\n event1 = Event()\n event2 = Event()\n self.assertEquals(len(catalog), 0)\n catalog.append(event1)\n self.assertEquals(len(catalog), 1)\n self.assertEquals(catalog.events, [event1])\n catalog.append(event2)\n self.assertEquals(len(catalog), 2)\n self.assertEquals(catalog.events, [event1, event2])\n # 2 - adding objects other as Event should fails\n self.assertRaises(TypeError, catalog.append, str)\n self.assertRaises(TypeError, catalog.append, Catalog)\n self.assertRaises(TypeError, catalog.append, [event1])",
"def test_add_private(data1: ActionObject, data2: ActionObject) -> None:\n result1 = data1 + data2\n result2 = data1 + data2\n result3 = data2 + data1\n\n assert result1.syft_history_hash == result2.syft_history_hash\n assert result3.syft_history_hash == result2.syft_history_hash",
"def test_add_atoms_and_bonds(self, molecule):\n molecule_copy = Molecule()\n for atom in molecule.atoms:\n molecule_copy.add_atom(atom.atomic_number, atom.formal_charge, atom.is_aromatic, stereochemistry=atom.stereochemistry)\n for bond in molecule.bonds:\n molecule_copy.add_bond(bond.atom1_index, bond.atom2_index, bond.bond_order, bond.is_aromatic,\n stereochemistry=bond.stereochemistry,\n fractional_bond_order=bond.fractional_bond_order)\n # Try to add the final bond twice, which should raise an Exception\n with pytest.raises(Exception) as excinfo:\n molecule_copy.add_bond(bond.atom1_index, bond.atom2_index, bond.bond_order, bond.is_aromatic,\n stereochemistry=bond.stereochemistry,\n fractional_bond_order=bond.fractional_bond_order)\n\n assert molecule == molecule_copy",
"def test_deep_equal_msg(self):\n v = os.urandom(10)\n o1 = [1, 2, {'foo': 'bar', 'bar': v, 'baz': [9, 3, 2], 'goo': {'moo': [1, 2, 3]}}, v]\n o2 = [1, 2, {'goo': {'moo': [1, 2, 3]}, 'bar': v, 'baz': [9, 3, 2], 'foo': 'bar'}, v]\n self.assertEqual(o1, o2)",
"def test_add_object(self):\n pass",
"def _add(obj: SeqLike, other: MutationSet):\n mutations = deepcopy(other.mutations)\n while mutations:\n mutation = mutations.pop(0)\n obj = obj + mutation\n if isinstance(mutation, Insertion):\n mutations = [m + 1 for m in mutations]\n return obj",
"def test_iadd(self):\n # 1 - create catalog and add it with another catalog\n event1 = Event()\n event2 = Event()\n event3 = Event()\n catalog = Catalog([event1])\n catalog2 = Catalog([event2, event3])\n self.assertEquals(len(catalog), 1)\n catalog += catalog2\n self.assertEquals(len(catalog), 3)\n self.assertEquals(catalog.events, [event1, event2, event3])\n # 3 - extend it with another Event\n event4 = Event()\n self.assertEquals(len(catalog), 3)\n catalog += event4\n self.assertEquals(len(catalog), 4)\n self.assertEquals(catalog.events, [event1, event2, event3, event4])\n # adding objects other as Catalog or Event should fails\n self.assertRaises(TypeError, catalog.__iadd__, str)\n self.assertRaises(TypeError, catalog.__iadd__, (event1, event2))\n self.assertRaises(TypeError, catalog.__iadd__, [event1, event2])",
"def test_append_onesided(self):\n self.merge_changed_A([\"name\"], [\"name\", \"value\"], [\"name\", \"value\"])\n self.merge_changed_B([\"name\"], [\"name\", \"value\"], [\"name\", \"value\"])",
"def test_aggregates_list(self):\n pass",
"def test_copy_add_one():\n print('Testing copy_add_one')\n # List of one element\n x = [1]\n result = accum2.copy_add_one(x)\n introcs.assert_equals([2],result)\n # Make sure x is NOT modified\n introcs.assert_equals([1],x)\n\n # More than one element\n x = [2,5,-1]\n result = accum2.copy_add_one(x)\n introcs.assert_equals([3,6,0],result)\n # Make sure x is NOT modified\n introcs.assert_equals([2,5,-1],x)\n\n # Empty List\n x = []\n result = accum2.copy_add_one(x)\n introcs.assert_equals([],result)\n # Make sure x is NOT modified\n introcs.assert_equals([],x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the proportion of matches between predicted and actual labels.
|
def score(predicted: pd.Series, actual: pd.Series) -> float:
return sum(predicted == actual) / len(predicted)
|
[
"def percentage_pt(y_pred, y_true):\n y_pred_soft = y_pred.exp() / (y_pred.exp().sum(-1)).unsqueeze(-1)\n\n perc = (y_pred_soft.max(dim=1)[1] == y_true.max(dim=1)[1]).sum()\n return perc",
"def percentage_labelled(self,labels):\n\t\tphrases = self.compute_phrases()\n\t\ttotal = len(phrases)\n\t\tlabelled = 0\n\t\tfor phrase in phrases:\n\t\t\tif phrase in labels:\n\t\t\t\tlabelled += 1\n\t\treturn total, labelled",
"def compute_score(y, tx, w):\n # Predict labels\n y_pred = predict_labels(w, tx)\n # Calculate the percentage of correct predictions\n score = np.sum(y_pred == y) / len(y)\n return score",
"def evaluate(labels, predictions):\n identifiedTruePositives = 0\n identifiedTrueNegatives = 0\n totalPositives = 0\n totalNegatives = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n totalPositives += 1\n if labels[i] == predictions[i]:\n identifiedTruePositives += 1\n else:\n totalNegatives += 1\n if labels[i] == predictions[i]:\n identifiedTrueNegatives += 1\n sensitivity = identifiedTruePositives / totalPositives\n specificity = identifiedTrueNegatives / totalNegatives\n\n return (sensitivity, specificity)",
"def calc_accuracy(labels, predicted_labels):\n num_obs = len(labels)\n accuracy = sum(predicted_labels==labels)/num_obs\n return accuracy",
"def accuracy(labels, labels_true):\r\n # YOUR CODE HERE\r\n\r\n total_label = len(labels)\r\n correct_label = 0\r\n\r\n for i in range(total_label):\r\n if labels[i] == labels_true[i]:\r\n correct_label += 1\r\n\r\n return correct_label/total_label\r\n pass",
"def label_probability(self,label):\n total = self.n\n quantity = 0\n for ex in self.data:\n if ex.label == label:\n quantity+=1\n prob = quantity/total\n return prob",
"def evaluate(labels, predictions):\n labels = np.array(labels)\n predictions = np.array(predictions)\n\n total_positives = len(labels[labels == 1])\n correctly_predicted = np.sum(np.logical_and(labels == 1, predictions == 1))\n\n sensitivity = correctly_predicted/total_positives\n\n total_negatives = len(labels[labels == 0])\n correctly_predicted = np.sum(np.logical_and(labels == 0, predictions == 0))\n\n specificty = correctly_predicted/total_negatives\n\n precision, recall, fscore, _ = precision_recall_fscore_support(y_pred=predictions, y_true=labels, average='binary')\n print(f'Precision: {precision:.4f}\\nRecall: {recall:.4f}\\nfscore: {fscore:.4f}')\n\n return (sensitivity, specificty)",
"def compute_accuracy(predictions, labels):\n predicted_labels = torch.argmax(predictions, dim=1)\n n_correct = torch.sum(predicted_labels == labels).item()\n batch_size = torch.numel(labels)\n acc = float(n_correct) / float(batch_size)\n return acc * 100",
"def success_rate(predicted_labels,true_labels):\n success_rate = 1 - (np.count_nonzero(predicted_labels - true_labels)/len(predicted_labels))\n return success_rate",
"def evaluate(labels, predictions):\n i=0\n j=0\n total_true = 0\n total_wrong = 0\n for label,prediction in zip(labels,predictions):\n if label==1:\n total_true = total_true + 1\n if prediction == 1:\n i = i + 1\n else:\n total_wrong = total_wrong + 1\n if prediction == 0:\n j = j + 1\n sensitivity = float(i/total_true)\n specificity = float(j/total_wrong)\n return(sensitivity, specificity)\n\n\n\n\n raise NotImplementedError",
"def predict_label_probability(texts, labels, text_new):\r\n\r\n train_twitter = texts\r\n test_twitter = text_new\r\n\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n from sklearn.feature_extraction.text import TfidfTransformer\r\n from sklearn.linear_model import LogisticRegression\r\n\r\n count_vect = CountVectorizer()\r\n twitter_train_counts = count_vect.fit_transform(train_twitter)\r\n\r\n tf_transformer = TfidfTransformer(use_idf=False).fit(twitter_train_counts)\r\n twitter_train_tf = tf_transformer.transform(twitter_train_counts)\r\n\r\n\r\n tfidf_transformer = TfidfTransformer()\r\n twitter_train_tfidf = tfidf_transformer.fit_transform(twitter_train_counts)\r\n\r\n twitter_clf = LogisticRegression().fit(twitter_train_tfidf,labels)\r\n\r\n twitter_test_data = count_vect.transform(test_twitter)\r\n twitter_tfidf = tfidf_transformer.transform(twitter_test_data)\r\n\r\n\r\n twitter_predicted = twitter_clf.predict(twitter_tfidf)\r\n\r\n for text, class_label in zip(test_twitter, twitter_predicted):\r\n print('%r => %s' % (text, class_label))\r\n\r\n\r\n class_prob = list(twitter_clf.predict_proba(twitter_tfidf)[:,1])\r\n\r\n return class_prob\r\n pass",
"def accuracy(predictions, labels):\n return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]",
"def precision(gold_labels, classified_labels, pos_label='1', neg_label='0'):\n # precision = tp/(tp + fp)\n true_positives = 0\n false_positives = 0\n \n for i in range(len(gold_labels)):\n if gold_labels[i] == pos_label and classified_labels[i] == pos_label:\n true_positives += 1\n elif gold_labels[i] == neg_label and classified_labels[i] == pos_label:\n false_positives += 1\n \n if true_positives + false_positives == 0:\n return 0\n \n return true_positives / (true_positives + false_positives)",
"def accuracy(predicted,annotated):\n\n if (type(predicted) != np.ndarray) or (type(annotated) != np.ndarray):\n raise TypeError(\"Arguments 'predicted' and 'annotated' must be numpy ndarrays.\")\n\n if predicted.shape[0] != 24:\n raise ValueError(\"Invalid shape of 'predicted'.\")\n\n if annotated.shape[0] != 24:\n raise ValueError(\"Invalid shape of 'annotated'.\")\n\n if predicted.shape != annotated.shape:\n raise ValueError(\"Arguments 'predicted' and 'annotated' must have the same shape.\")\n\n nframes = predicted.shape[1]\n\n annotated_frames = nframes\n equal_frames = 0\n\n for pred_frame,true_frame in zip(np.transpose(predicted),np.transpose(annotated)):\n if 25 in true_frame:\n annotated_frames -= 1\n\n if np.array_equal(pred_frame,true_frame):\n equal_frames += 1\n\n return equal_frames/annotated_frames",
"def evaluate(labels, predictions):\n positive_total, positive_rate = 0, 0\n negative_total, negative_rate = 0, 0\n for i in range(len(labels)):\n \n # sensitivity\n if labels[i] == 1:\n positive_total += 1\n if labels[i] == predictions[i]:\n positive_rate += 1\n \n # specificity\n else:\n negative_total += 1\n if labels[i] == predictions[i]:\n negative_rate += 1\n\n sensitivity = float(positive_rate / positive_total)\n specificity = float(negative_rate / negative_total)\n \n return (sensitivity, specificity)",
"def compute_classification_accuracy(labels: List[int], predictions: List[int], num_classes: int = -1) -> float:\n assert len(labels) == len(predictions)\n\n correct = 0\n for a, b in zip(labels, predictions):\n if a == b:\n correct += 1\n\n return correct / len(labels)",
"def completeness_score(labels_true, labels_pred):\n return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]",
"def _calc_matching_prob(self):\n if not self.professional:\n return 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert string None to NoneType.
|
def none_or_str(value):
if value.lower() == 'none':
return None
return value
|
[
"def _cast_none(x):\n if isinstance(x, six.string_types) and x == data.NONE_MAGIC_VALUE:\n return None\n\n return x",
"def convert_str_or_none(val: Optional[str]) -> Optional[str]:\n return str(val) if val is not None else val",
"def nullify(value: Optional[Any]) -> str:\n return 'null' if value is None else value",
"def none_to_empty(val):\n if val is None:\n return \"\"\n return val",
"def nullstring(arg_string):\n if arg_string == GP_NULL:\n arg_string = None\n return arg_string",
"def cvt(thing, type=None):\n if thing == \"None\":\n return None\n elif thing is not None and type is not None:\n return type(thing)\n return thing",
"def force_unicode(s):\n if s is None:\n return None\n else:\n return unicod(s)",
"def get_nulls(val):\n return val if val != \"NULL\" else None",
"def convert_to_nullable(input_val, cast_function):\n if input_val in ['.', None, '', 'NULL']:\n result = None\n else:\n result = cast_function(input_val)\n return result",
"def dot_to_none(v):\n if isinstance(v, basestring) and v.strip() == '.':\n return None\n return v",
"def int_or_none(s):\n return None if not s else int(s)",
"def str_if_not_none(value):\n ...",
"def extractNone(b):\n if b == NONE:\n return None\n return b",
"def decode_nullable(self, data_type, obj):\n if obj is not None:\n return self.json_compat_obj_decode_helper(data_type.validator, obj)\n else:\n return None",
"def null():\n return (\"NULL\",None)",
"def transform_null_stmt(self, node):\n return none",
"def test_builtins_cast_return_none():\n assert m.return_none_string() is None\n assert m.return_none_char() is None\n assert m.return_none_bool() is None\n assert m.return_none_int() is None\n assert m.return_none_float() is None\n assert m.return_none_pair() is None",
"def test_valid_none():\n returned_value = object_._convert_relationship(value=None)\n\n assert returned_value is None",
"def _replace_null(value, fallback):\n if value is None:\n return fallback\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build the social structure
|
def buildSocialStructure(self):
self.groupNum = self.groupBase ** (self.groupLength-1)
self.indPos = [0 for x in range(self.totalNum)]
self.posInd = [[] for x in range(self.groupNum)]
for i in range(self.groupNum):
groupCount = 0;
for j in range(i*self.groupSize, (i+1)*self.groupSize):
self.indPos[j] = i
self.posInd[i].append(j)
return np.array(self.indPos), np.array(self.posInd)
|
[
"def build_cfg(self):\n for block in self.basic_blocks:\n if not block.ends_unconditional():\n if block.next:\n block.next.parents.append(block)\n block.children.append(block.next)\n targets = block.get_targets()\n if len(targets) > 0:\n for b in self.basic_blocks:\n starters = b.get_start_markers()\n for t in targets:\n if t in starters:\n b.parents.append(block)\n block.children.append(b)\n break\n if 'onOptionsItemSelected' in self.signature and 'MainActivity' in self.file.name:\n from graphviz import Digraph\n dot = Digraph()\n self.basic_blocks[0].graph(dot, done=[])\n # dot.render('OUT.png', view=True)\n with open('cfg.dot', 'w+') as f:\n f.write(dot.source)",
"def __build_physical_stuff( self, data_dict ):\n physical_description = etree.SubElement( self.mods, self.MODS+'physicalDescription' )\n physical_description_form_material = etree.SubElement( physical_description, self.MODS+'form', type='material' )\n physical_description_form_material.text = data_dict[ 'object_medium' ]\n for entry in data_dict['MEDIA_SUB::sub_media_name']:\n physical_description_form_technique = etree.SubElement( physical_description, self.MODS+'form', type='technique' )\n physical_description_form_technique.text = entry",
"def build(self):\n self.create_dir()\n self.create_init()\n self.create_config()\n self.build_code()\n self.build_xml()",
"def make_conc(conc, data, debug=False):\n\n i = 0\n pubs = {}\n authors = {}\n for item in data:\n pub_uri = str(item['pub_uri']['value'])\n pub_name = item['pub_name']['value']\n concept_uri = str(item['concept_uri']['value'])\n concept_name = item['concept_name']['value']\n author_uri = str(item['author_uri']['value'])\n author_name = item['author_name']['value']\n \n if pub_uri not in pubs:\n pubs[pub_uri] = {'pub_name': pub_name,\n 'author_uris' : [author_uri],\n 'concept_uris' : [concept_uri]}\n if concept_uri not in pubs[pub_uri]['concept_uris']:\n pubs[pub_uri]['concept_uris'].append(concept_uri)\n if author_uri not in pubs[pub_uri]['author_uris']:\n pubs[pub_uri]['author_uris'].append(author_uri)\n\n if author_uri not in authors:\n authors[author_uri] = {'author_name': author_name,\n 'pub_uris' : [pub_uri],\n 'concept_uris' : [concept_uri]}\n if concept_uri not in authors[author_uri]['concept_uris']:\n authors[author_uri]['concept_uris'].append(concept_uri)\n if pub_uri not in authors[author_uri]['pub_uris']:\n authors[author_uri]['pub_uris'].append(pub_uri)\n \n if concept_uri in conc:\n entry = conc[concept_uri]\n if author_uri not in entry['author_uris']:\n entry['author_uris'].append(author_uri)\n if pub_uri not in entry['pub_uris']:\n entry['pub_uris'].append(pub_uri)\n else:\n entry = {\n 'concept_name': concept_name,\n 'author_uris' : [author_uri],\n 'pub_uris': [pub_uri],\n 'pairs' : {}\n }\n conc[concept_uri] = entry\n\n for pub_uri, pub in pubs.items():\n for concept1 in pub['concept_uris']:\n entry = conc[concept1]\n for concept2 in pub['concept_uris']:\n if concept1 != concept2:\n if concept2 in entry['pairs']:\n if pub_uri not in \\\n entry['pairs'][concept2]['pub_uris']:\n entry['pairs'][concept2]['pub_uris'].append(pub_uri)\n for author_uri in pub['author_uris']:\n if author_uri not in \\\n entry['pairs'][concept2]['author_uris']:\n entry['pairs'][concept2]\\\n ['author_uris'].append(author_uri)\n \n else:\n entry['pairs'][concept2] = {\n 
'concept_uris' : [concept2],\n 'pub_uris': [pub_uri],\n 'author_uris': pub['author_uris']\n }\n conc[concept1] = entry\n \n return conc",
"def build_graph(self):\r\n self._create_placeholders()\r\n self._create_network()\r\n self._create_loss()\r\n self._create_optimizer()\r\n self._create_summaries()\r\n self._show_current_model()",
"def company_skeleton():\n\n E, P, B, D, F = EPBDF()\n entities = ['Paul', 'Roger', 'Quinn', 'Sally', 'Thomas',\n 'Case', 'Adapter', 'Laptop', 'Tablet', 'Smartphone',\n 'Accessories', 'Devices']\n\n entity_types = {'Paul': E, 'Roger': E, 'Quinn': E, 'Sally': E, 'Thomas': E,\n 'Case': P, 'Adapter': P, 'Laptop': P, 'Tablet': P, 'Smartphone': P,\n 'Accessories': B, 'Devices': B}\n skeleton = RelationalSkeleton(company_schema(), True)\n p, r, q, s, t, c, a, l, ta, sm, ac, d = ents = tuple([SkItem(e, entity_types[e]) for e in entities])\n skeleton.add_entities(*ents)\n for emp, prods in ((p, {c}), (q, {c, a, l}), (s, {l, ta}), (t, {sm, ta}), (r, {l})):\n for prod in prods:\n skeleton.add_relationship(SkItem(emp.name + '-' + prod.name, D), {emp, prod})\n for biz, prods in ((ac, {c, a}), (d, {l, ta, sm})):\n for prod in prods:\n skeleton.add_relationship(SkItem(biz.name + '-' + prod.name, F), {biz, prod})\n\n return skeleton",
"def _create(links):\n nested = dict()\n for link in links:\n nested = Webpages._nest(link.split(os.sep), nested)\n return nested",
"def create_structures(dont_load_entities: bool = False) -> object:\n\n if not dont_load_entities:\n load_entities()\n\n default_webhooks = Webhook.objects.filter(is_default=True)\n for corporation in EveCorporationInfo.objects.all():\n EveEntity.objects.get_or_create(\n id=corporation.corporation_id,\n defaults={\n \"category\": EveEntity.Category.CORPORATION,\n \"name\": corporation.corporation_name,\n },\n )\n my_owner = Owner.objects.create(corporation=corporation)\n for x in default_webhooks:\n my_owner.webhooks.add(x)\n\n if int(corporation.corporation_id) in [2001, 2002]:\n alliance = EveAllianceInfo.objects.get(alliance_id=3001)\n corporation.alliance = alliance\n corporation.save()\n\n for character in EveCharacter.objects.all():\n EveEntity.objects.get_or_create(\n id=character.character_id,\n defaults={\n \"category\": EveEntity.Category.CHARACTER,\n \"name\": character.character_name,\n },\n )\n corporation = EveCorporationInfo.objects.get(\n corporation_id=character.corporation_id\n )\n if corporation.alliance:\n character.alliance_id = corporation.alliance.alliance_id\n character.alliance_name = corporation.alliance.alliance_name\n character.save()\n\n StructureTag.objects.get(name=\"tag_a\")\n tag_b = StructureTag.objects.get(name=\"tag_b\")\n tag_c = StructureTag.objects.get(name=\"tag_c\")\n Structure.objects.all().delete()\n for structure in entities_testdata[\"Structure\"]:\n x = structure.copy()\n x[\"last_updated_at\"] = now()\n x[\"owner\"] = Owner.objects.get(\n corporation__corporation_id=x[\"owner_corporation_id\"]\n )\n del x[\"owner_corporation_id\"]\n\n if \"services\" in x:\n del x[\"services\"]\n\n obj = Structure.objects.create(**x)\n if obj.state != 11:\n obj.state_timer_start = now() - timedelta(days=randrange(3) + 1)\n obj.state_timer_start = obj.state_timer_start + timedelta(\n days=randrange(4) + 1\n )\n\n if obj.id in [1000000000002, 1000000000003]:\n obj.tags.add(tag_c)\n\n if obj.id in [1000000000003]:\n 
obj.tags.add(tag_b)\n\n if \"services\" in structure:\n for service in structure[\"services\"]:\n StructureService.objects.create(\n structure=obj,\n name=service[\"name\"],\n state=StructureService.State.from_esi_name(service[\"state\"]),\n )\n obj.save()",
"def create_org_payload(self):\n organizations = ET.Element(\"organizations\")\n organization = ET.Element(\"organization\")\n organizations.append(organization)\n name = ET.SubElement(organization, \"name\")\n name.text = self._module.paramgram[\"org_name\"]\n fullName = ET.SubElement(organization, \"fullName\")\n fullName.text = self._module.paramgram[\"org_display_name\"]\n description = ET.SubElement(organization, \"description\")\n description.text = self._module.paramgram[\"org_description\"]\n if self._module.paramgram[\"uri\"] == \"/phoenix/rest/organization/add\":\n adminUser = ET.SubElement(organization, \"adminUser\")\n adminUser.text = self._module.paramgram[\"org_admin_username\"]\n adminPwd = ET.SubElement(organization, \"adminPwd\")\n adminPwd.text = self._module.paramgram[\"org_admin_password\"]\n adminEmail = ET.SubElement(organization, \"adminEmail\")\n adminEmail.text = self._module.paramgram[\"org_admin_email\"]\n includeRange = ET.SubElement(organization, \"includeRange\")\n includeRange.text = self._module.paramgram[\"org_include_ip_range\"]\n excludeRange = ET.SubElement(organization, \"excludeRange\")\n excludeRange.text = self._module.paramgram[\"org_exclude_ip_range\"]\n\n if self._module.paramgram[\"uri\"] == \"/phoenix/rest/organization/add\":\n custResource = ET.Element(\"custResource\")\n organization.append(custResource)\n eps = ET.SubElement(custResource, \"eps\")\n eps.text = self._module.paramgram[\"org_eps\"]\n max_devices = ET.SubElement(custResource, \"configItem\")\n max_devices.text = str(self._module.paramgram[\"org_max_devices\"])\n\n # CONCAT COLLECTORS BEFORE APPENDING IF SPECIFIED\n if self._module.paramgram[\"org_collectors\"]:\n # EXPECTS A LIST\n collector_data = self._module.paramgram[\"org_collectors\"]\n if isinstance(collector_data, list):\n # collector_xml = \"<collectors>\"\n collectors = ET.Element(\"collectors\")\n organization.append(collectors)\n for col in collector_data:\n collector = 
ET.SubElement(collectors, \"collector\")\n col_eps = ET.SubElement(collector, \"eps\")\n col_eps.text = col[\"eps\"]\n col_name = ET.SubElement(collector, \"name\")\n col_name.text = col[\"name\"]\n\n # OR IF A SINGLE COLLECTOR VIA PARAMETERS IS DEFINED\n elif self._module.paramgram[\"org_collector_name\"] and self._module.paramgram[\"org_collector_eps\"]:\n collectors = ET.Element(\"collectors\")\n organization.append(collectors)\n collector = ET.SubElement(collectors, \"collector\")\n col_eps = ET.SubElement(collector, \"eps\")\n col_eps.text = self._module.paramgram[\"org_collector_eps\"]\n col_name = ET.SubElement(collector, \"name\")\n col_name.text = self._module.paramgram[\"org_collector_name\"]\n\n xmlstr = ET.tostring(organizations, 'utf-8')\n return xmlstr",
"def _build_profile(self):\n self.setDriver('GV7', 4)\n # This writes all the profile data files and returns our config info.\n wrote_profile = False\n try:\n config_data = write_profile(LOGGER,self.hubs)\n wrote_profile = True\n except (Exception) as err:\n self.l_error('build_profile','write_profile failed: {}'.format(err), exc_info=True)\n self.setDriver('GV7', 7)\n cdata = deepcopy(self.polyConfig['customData'])\n if wrote_profile:\n cdata['profile_version'] = self.serverdata['profile_version']\n self.saveCustomData(cdata)\n # Reload the config we just generated.\n self.load_config()\n #\n # Upload the profile\n #\n st = self.install_profile()\n if not self.first_run:\n self.restart_hubs()\n return st",
"def generateSocialNetwork(self, node_num):\r\n m = np.zeros((node_num, self.node_size))\r\n me = np.zeros((node_num, self.neighbor_num))\r\n mc = np.zeros((node_num, self.neighbor_num))\r\n for i in xrange(node_num):\r\n m[i, :], me[i,:], mc[i, :] = self.generateSocialInteractionProfile()\r\n return m, me, mc",
"def create_graph_schema(self):\n\n ##Classes\n # Person Class\n g.add((RDFnamespace.FOAF.Person, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n # Organization Class\n g.add((RDFnamespace.FOAF.Organization, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n\n # Student Class\n g.add((focu.Student, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Student, RDFnamespace.RDFS.subClassOf, RDFnamespace.FOAF.Person))\n g.add((focu.Student, RDFnamespace.RDFS.label, Literal(\"StudentClass\")))\n g.add((focu.Student, RDFnamespace.RDFS.comment, Literal(\"This is a Student Class\")))\n\n # Course Class\n g.add((focu.Course, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Course, RDFnamespace.RDFS.label, Literal(\"University Courses\")))\n g.add((focu.Course, RDFnamespace.RDFS.comment, Literal(\"This is a Course Class\")))\n\n # Topic Class\n g.add((focu.Topic, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Topic, RDFnamespace.RDFS.label, Literal(\"Course Topic\")))\n g.add((focu.Topic, RDFnamespace.RDFS.comment, Literal(\"Topic extracted for a given course\")))\n\n # University Class\n g.add((focu.University, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.University, RDFnamespace.RDFS.subClassOf, RDFnamespace.FOAF.Organization))\n g.add((focu.University, RDFnamespace.RDFS.label, Literal(\"Univeristy\")))\n\n ##Properties\n\n # Course Name\n g.add((focu.course_name, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_name, RDFnamespace.RDFS.label, Literal(\"Course Name\")))\n g.add((focu.course_name, RDFnamespace.RDFS.comment, Literal(\"Course Name\")))\n g.add((focu.course_name, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_name, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Subject\n g.add((focu.course_subject, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_subject, RDFnamespace.RDFS.label, Literal(\"Course Subject\")))\n 
g.add((focu.course_subject, RDFnamespace.RDFS.comment, Literal(\"Course Subject\")))\n g.add((focu.course_subject, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_subject, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Number\n g.add((focu.course_number, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_number, RDFnamespace.RDFS.label, Literal(\"Course Number\")))\n g.add((focu.course_number, RDFnamespace.RDFS.comment, Literal(\"Course Number\")))\n g.add((focu.course_number, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_number, RDFnamespace.RDFS.range, RDFnamespace.XSD.integer))\n\n # Course Description\n g.add((focu.course_description, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_description, RDFnamespace.RDFS.label, Literal(\"Course Description\")))\n g.add((focu.course_description, RDFnamespace.RDFS.comment, Literal(\"Course Description\")))\n g.add((focu.course_description, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_description, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Student ID\n g.add((focu.student_id, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.student_id, RDFnamespace.RDFS.label, Literal(\"Student ID\")))\n g.add((focu.student_id, RDFnamespace.RDFS.comment, Literal(\"Student ID\")))\n g.add((focu.student_id, RDFnamespace.RDFS.domain, focu.Student))\n g.add((focu.student_id, RDFnamespace.RDFS.range, RDFnamespace.XSD.integer))\n\n # Graded Courses\n g.add((focu.graded_courses, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.graded_courses, RDFnamespace.RDFS.label, Literal(\"grade for course\", lang=\"en\")))\n g.add((focu.graded_courses, RDFnamespace.RDFS.comment, Literal(\"Course graded for a student\")))\n g.add((focu.graded_courses, RDFnamespace.RDFS.domain, focu.Student))\n g.add((focu.graded_courses, RDFnamespace.RDFS.range, focu.Course))\n\n # Subject Contains Topics\n 
g.add((focu.contains, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.contains, RDFnamespace.RDFS.label, Literal(\"extracted topics\", lang=\"en\")))\n g.add((focu.contains, RDFnamespace.RDFS.comment, Literal(\"Topics extracted from course description\")))\n g.add((focu.contains, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.contains, RDFnamespace.RDFS.range, focu.Topic))\n\n # Topics from Courses\n g.add((focu.containInverse, RDFnamespace.OWL.inverseOf, focu.contains))\n g.add((focu.containInverse, RDFnamespace.RDFS.label, Literal(\"extracted courses\", lang=\"en\")))\n g.add((focu.containInverse, RDFnamespace.RDFS.comment, Literal(\"Courses extracted from Topics\")))\n g.add((focu.containInverse, RDFnamespace.RDFS.domain, focu.Topic))\n g.add((focu.containInverse, RDFnamespace.RDFS.range, focu.Course))\n\n # Course Term\n g.add((focu.course_term, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_term, RDFnamespace.RDFS.label, Literal(\"course term\", lang=\"en\")))\n g.add((focu.course_term, RDFnamespace.RDFS.comment, Literal(\"The term in which the given course was taken\")))\n g.add((focu.course_term, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_term, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Grade\n g.add((focu.course_grade, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_grade, RDFnamespace.RDFS.label, Literal(\"course grade\", lang=\"en\")))\n g.add((focu.course_grade, RDFnamespace.RDFS.comment, Literal(\"The grade received for a course\")))\n g.add((focu.course_grade, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_grade, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n g.serialize(format='turtle', destination='knowledge_base/schema.ttl')",
"def make_relation_processors(settings):\n shapes = settings['shape_templates']\n fixed_floor = settings['fixed_floor']\n floor_height = settings['floor_height']\n height = settings['height']\n\n # Dictionary of the form\n # relation: [arity, add_constraint_function, kwargs]\n relation_processors = {\n 'pushable': [1, add_in_set, {'key': 'pushable', 'vals': [True]}],\n 'target': [1, add_in_set, {'key':'is_target', 'vals':[True]}],\n 'whiteblob': [1, add_in_sets, {'keys':['shape','color'], 'vals':[['small_blob'],[COLORS['white']]]}],\n \n 'same_color': [None, add_all_equal, {'keys': ['color']}],\n 'diff_color': [None, add_all_different, {'keys': ['color']}],\n 'same_kind': [None, add_all_equal, {'keys': ['shape']}],\n 'diff_kind': [None, add_all_different, {'keys': ['shape']}], \n 'same_shape': [None, add_all_equal, {'keys': ['shape','shape_idx']}],\n 'h_between': [3, add_between, {'orientation': 'horizontal'}],\n 'v_between': [3, add_between, {'orientation': 'vertical'}],\n # diff_shape: TODO - this is a bit trickier\n 'on_top': [2, add_on, {'relation': 'above'}],\n 'on_bottom': [2, add_on, {'relation':'below'}], \n 'on_left': [2, add_on, {'relation':'left_of'}],\n 'on_right': [2, add_on, {'relation':'right_of'}],\n }\n\n for shape in shapes.keys():\n relation_processors[shape] = [1, add_in_set, {'key': 'shape', 'vals': [shape]}]\n\n for color in COLORS.keys():\n relation_processors[color] = [1, add_in_set, {'key': 'color', 'vals': [COLORS[color]]}]\n\n for rel in SPATIAL_RELATIONS:\n relation_processors[rel] = [2, add_pairwise_spatial, {'relation': rel}]\n\n for rel in LOCATIONS:\n relation_processors[rel] = [1, add_logpolar_unary_spatial, {'relation': rel}]\n\n if fixed_floor:\n relation_processors['floor'] = [1, add_in_sets, {'keys':['shape', 'y'], 'vals':[['floor_shape'],[height - floor_height]]}]\n else:\n relation_processors['floor'] = [1, add_in_set, {'key': 'shape', 'vals': ['floor_shape']}]\n\n # an object is the union over various shapes\n 
relation_processors['object'] = \\\n [1, add_in_set, {'key': 'shape', 'vals': ['blob', 'small_blob', 'convex_blob', 'container', 'noncontainer', 'enclosure', 'nonenclosure']}]\n\n # a potential container is either a container, noncontainer, or lower_corner (supports \"inside\" relation)\n relation_processors['potential_container'] = \\\n [1, add_in_set, {'key': 'shape', 'vals': ['container','noncontainer','lower_left_corner','lower_right_corner']}]\n\n relation_processors['corner'] = \\\n [1, add_in_set, {'key': 'shape', 'vals': ['lower_left_corner','lower_right_corner','upper_left_corner','upper_right_corner']}]\n\n relation_processors['lower_corner'] = \\\n [1, add_in_set, {'key': 'shape', 'vals': ['lower_left_corner','lower_right_corner']}]\n\n return relation_processors",
"def generate(self):\n download(\"https://node0.static.jsonx.ml/socialiqa/socialiqa.jsonl\")\n download(\"https://node0.static.jsonx.ml/socialiqa/socialiqa_label.txt\")\n socialiqa_corpus = jsonlines.open(\"./download/socialiqa.jsonl\", mode='r') # unlabelled data\n socialiqa_label = open(\"./download/socialiqa_label.txt\", mode=\"r\") # label\n samples = []\n for item in socialiqa_corpus.iter():\n label = int(socialiqa_label.readline().strip())\n samples.append({\"text\": item, \"label\": label})\n jsonlines.open(self.get_data(),'w').write_all(self.postprocess(samples))",
"def buildNetworks(self, netBuilder):\n #None is default, so old scripts can still run. self not defined in signature\n for index in self.environments:\n environment = self.environments[index]\n environment.network(netBuilder)\n\n self.graph = nx.Graph()\n self.graph.add_nodes_from(list(range(len(self.populace))))\n #add the edges of each environment to a single networkx graph\n for environment in self.environments: self.graph.add_weighted_edges_from(self.environments[environment].edges, weight = \"transmission_weight\")\n self.isBuilt = True",
"def buildColumn(self, b):\n s = self.style\n if not self.C.useOnline():\n b.text('No Social Media links when off-line.')\n elif s.twitterAccount or s.facebookAccount:\n b.div(class_=self.C.CLASS_SOCIALMEDIA, display=self.C.BLOCK, float=s.float or self.C.LEFT,\n width=s.width or Perc(100),\n media=Media(max=self.C.M_MOBILE_MAX, display=s.mobileDisplay)\n )\n # getUrl does not seem to work with twitter. Script only tests http or https. \n if s.twitterAccount:\n b.div(id=\"twitter\", float=self.C.LEFT)\n b.a(href=\"https://twitter.com/share\", data_href=b.getUrl(), \n class_=\"twitter-share-button\", data_lang=\"en\", data_via=\"doingbydesign\", \n data_count=\"none\", data_related=\"anywhere\")\n b.text(s.twitterLabel)\n b._a()\n b.script()\n b.text(\"\"\"!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');\"\"\")\n b._script()\n b._div()\n if s.facebookAccount:\n b.div(id=\"fb-root\", float=self.C.LEFT)\n b._div()\n b.script()\n b.text(\"\"\"(function(d, s, id) {\n var js, fjs = d.getElementsByTagName(s)[0];\n if (d.getElementById(id)) return;\n js = d.createElement(s); js.id = id;\n js.src = \"//connect.facebook.net/en_US/all.js#xfbml=1\";\n fjs.parentNode.insertBefore(js, fjs);\n }(document, 'script', 'facebook-jssdk'));\n \"\"\")\n b._script()\n b.div(class_=\"fb-share-button\", float=self.C.LEFT, \n data_href=b.getUrl(), data_type=\"button_count\")\n b._div()\n b._div()",
"def __build_origin_stuff( self, data_dict ):\n origin_info = etree.SubElement( self.mods, self.MODS+'originInfo' )\n origin_info_dt_created_start = etree.SubElement( origin_info, self.MODS+'dateCreated', encoding='w3cdtf', point='start' )\n origin_info_dt_created_start.text = data_dict[ 'object_year_start' ]\n origin_info_dt_created_end = etree.SubElement( origin_info, self.MODS+'dateCreated', encoding='w3cdtf', point='end' )\n origin_info_dt_created_end.text = data_dict[ 'object_year_end' ]",
"def social_parser(x, type=1, lang='en', yaml=False):\n if type == 1:\n val = x.loc[6][lang] # email\n line = val.strip() if isinstance(val, str) else ''\n icon = 'envelope'\n icon_pack = 'fas'\n link = 'mailto:{}'.format(line)\n elif type == 2:\n val = x.loc[7][lang] # github\n line = val.strip() if isinstance(val, str) else ''\n icon = 'github'\n icon_pack = 'fab'\n link = 'https://github.com/{}'.format(line)\n elif type == 3:\n val = x.loc[8][lang] # twitter\n line = val.strip() if isinstance(val, str) else ''\n icon = 'twitter'\n icon_pack = 'fab'\n link = 'https://twitter.com/{}'.format(line)\n else:\n line = icon = icon_pack = link = \"\"\n\n # YAML ourput\n if line:\n if yaml:\n lines = []\n lines.append('- icon: {}'.format(icon))\n lines.append(' icon_pack: {}'.format(icon_pack))\n lines.append(' link: {}'.format(link))\n out = '\\n'.join(lines)\n else:\n out = [val, icon, icon_pack, link]\n else:\n out = \"\"\n\n return out",
"def make_config(self):\n if not self.search_terms:\n self.make_search_terms()\n if not self.stmts:\n self.make_gene_statements()\n config = dict()\n config['name'] = self.name\n config['human_readable_name'] = self.human_readable_name\n config['search_terms'] = [st.to_json() for st in self.search_terms]\n config['assembly'] = {\n 'belief_cutoff': 0.8,\n 'filter_ungrounded': True\n }\n if self.description:\n config['description'] = self.description\n return config"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the potential position based on the current position and distance probability
|
def getPosition(groupLength, nowPosition, distanceProb):
potentialPos = []
# Here, the distance is a np.array, like [1].
distance = np.random.choice(groupLength, 1, p=distanceProb)[0] + 1
# print ("distance: ", distance)
if distance == 1:
potentialPos.append(nowPosition)
else:
posTemp = 2**(distance - 1)
if nowPosition % posTemp < posTemp // 2:
for k in range(posTemp//2, posTemp):
potentialPos.append((nowPosition//posTemp)*posTemp + k)
else:
for k in range(0, posTemp//2):
potentialPos.append((nowPosition//posTemp)*posTemp + k)
return np.array(potentialPos)
|
[
"def get_closest(self, pos):\n if self.empty():\n return\n\n min_idx = 0\n min_dist = euclidean(pos, self.npcs[0].get_pos())\n\n for i in range(1, len(self.npcs)):\n dist = euclidean(pos, self.npcs[i].get_pos())\n if dist < min_dist:\n min_dist = dist\n min_idx = i\n\n return self.npcs[min_idx], min_dist",
"def get_distance(self):\n \n\n # Specify -1 to retrieve the absolute position.\n return self.vr.simxReadProximitySensor(self.handle, vrep.simx_opmode_buffer)",
"def pointPotential(x,y,q,posx,posy):\n k = 8.987e9 #N m^2/C^2\n Vxy = (k*q)/(((x-posx)**2 + (y-posy)**2)**(1/2.)) \n return Vxy",
"def proximityTest(self, pos, radius):\n x, y = pos\n radiusSq = (radius + 0.51) ** 2\n for i in range(len(self)):\n rhoSq = (self.px[i] - x) ** 2 + (self.py[i] - y) ** 2\n if rhoSq <= radiusSq:\n return i\n return -1",
"def prob_dist_prior(distance):\n return prob_dist(distance, dist_prior_mean, dist_prior_variance)",
"def get_position(self):\n pos = self.guess_pos\n\n for iteration in range(self._max_iterations):\n transformation_matrix = ls.transformation_matrix(\n self.stations, pos)\n\n old_pos = pos\n pos, sd, rms = self._correct_position(pos, transformation_matrix)\n\n if all(np.isclose(pos, old_pos, atol=0.001)):\n return pos, sd, rms\n else:\n print('No solution, old_pos : pos = ', old_pos, ':', pos)\n return None",
"def get_pos(self):\r\n if self.pos is None:\r\n x = random.uniform(32.001, 32.999)\r\n y = random.uniform(35.001, 35.999)\r\n self.pos = (x, y, 0)\r\n return self.pos",
"def find_legal_pos(self, robot_id: int, position=None,\n perpendicular=False) -> Tuple[float, float, float]:\n if position is not None and perpendicular:\n position = position[:2]\n path = position - self.gs.get_robot_position(self._team,\n robot_id)[:2]\n norm_path = path / np.linalg.norm(path)\n STEP_SIZE = self.gs.ROBOT_RADIUS\n direction = np.array([norm_path[1], -norm_path[0]])\n for i in range(0, 2000, int(STEP_SIZE)):\n if self.gs.is_pos_legal(position + i * direction, self._team,\n robot_id) and \\\n self.gs.is_position_open(position + i * direction,\n self._team, robot_id):\n return position + i * direction\n if self.gs.is_pos_legal(position - i * direction, self._team,\n robot_id) and \\\n self.gs.is_position_open(position - i * direction,\n self._team, robot_id):\n return position - i * direction\n self.logger.debug(\"No legal perpeudicular position found\")\n if position is None:\n position = self.gs.get_robot_position(self._team, robot_id)\n if len(position) == 2:\n position = (position[0], position[1], None)\n x, y, w = position\n delta = 0\n for delta in range(0, 1000, 10):\n positions_to_try = [\n np.array([x, y + delta, w]),\n np.array([x, y - delta, w]),\n np.array([x + delta, y, w]),\n np.array([x - delta, y, w]),\n np.array([x + delta, y + delta, w]),\n np.array([x - delta, y + delta, w]),\n np.array([x + delta, y - delta, w]),\n np.array([x - delta, y - delta, w])\n ]\n for pos in positions_to_try:\n if self.gs.is_pos_legal(pos, self._team, robot_id) and \\\n self.gs.is_position_open(pos, self._team, robot_id):\n return pos\n self.logger.debug(\"No legal position found open\")\n return np.array([0, 0, 0])",
"def relevant_position(position_1, position_2, domain):\n # distances = [] # distances regarding the domain, and the virutal domains\n position_1 = np.asarray(position_1)\n position_2 = np.asarray(position_2)\n distance_domain = np.linalg.norm(position_1 - position_2)\n dist_virtual = []\n position_v1 = position_2 + np.array([domain[0],0])\n position_v2 = position_2 + np.array([0,domain[0]])\n position_v3 = position_2 + np.array([-domain[0],0])\n position_v4 = position_2 + np.array([0,-domain[1]])\n position_virtual = [position_v1, position_v2, position_v3, position_v4]\n\n for i in range(4):\n dist_virtual.append(np.linalg.norm(position_1 - position_virtual[i]))\n dist_virtual_array = np.asarray(dist_virtual)\n arg_min = np.argmin(dist_virtual_array)\n\n if dist_virtual_array[arg_min] < distance_domain:\n position = position_virtual[arg_min]\n\n else:\n position = position_2\n\n return position",
"def _get_target_position(self):\n\n if self.chasing:\n player_position = self.game.player.grid_position\n distance_from_player = Vector(player_position).distance(self.grid_position)\n if distance_from_player > self.flee_distance:\n # Target position is player if the player is more than 4 tiles away\n target_position = player_position\n return target_position\n\n # Returns bottom left in scatter mode or if within flee_distance from player\n target_position = -1, -1\n return target_position",
"def distance_to(self, pos):\n return np.linalg.norm(self.pos - pos)",
"def findProb(self):\r\n # Build accession dictionary\r\n self.buildAccessionDict()\r\n\r\n # Loop through the hidden path\r\n for i in range(0, len(self.hidden)):\r\n # Multiply the probability by the emission value of the ith value of x according to the ith value of pi\r\n self.prob *= self.emission[self.accessionDict[self.hidden[i]]][self.accessionDict[self.x[i]]]\r\n print(self.prob)",
"def _getInformantBestPos(self,particle, swarm):\n best_fitness = sys.float_info.max\n best_pos = None\n for i in particle.informants:\n if best_fitness > swarm[i].fitness:\n best_fitness = swarm[i].fitness\n best_pos = swarm[i].position\n return best_pos",
"def guess_position(self):\r\n # Look for the player meta first. Duh.\r\n if self.player is not None:\r\n return self.player.position\r\n\r\n stats = [\r\n (self.passing_att, 'QB'),\r\n (self.rushing_att, 'RB'),\r\n (self.receiving_tar, 'WR'),\r\n (self.defense_tkl, 'DEF'),\r\n (self.defense_ast, 'DEF'),\r\n (self.kicking_tot, 'K'),\r\n (self.kicking_fga, 'K'),\r\n (self.punting_tot, 'P'),\r\n ]\r\n return sorted(stats, reverse=True)[0][1]",
"def _get_target_position(self):\n\n if self.chasing:\n player_position = self.game.player.grid_position\n player_direction_vector = self.game.player.current_direction.value\n # Could have used Pink's target position, but calculating here reduces confusion\n two_cells_ahead_of_player = Vector(player_position) + (2 * player_direction_vector)\n red_beetle_position = self.game.red_enemy.grid_position\n # Double the vector between 2 cells ahead of the player and the red beetle's position\n target_position = 2 * Vector(two_cells_ahead_of_player) - Vector(red_beetle_position)\n return target_position\n\n else:\n # Bottom right in scatter mode\n target_position = (self.game.level.columns + 1, -1)\n return target_position",
"def get_opponent_pos(self):\r\n return self.opponent_pos",
"def get_distance_from_desired_point(self, current_position):\n curr_position = np.array([current_position.x, current_position.y, current_position.z])\n des_position = np.array([self.desired_pose.pose.position.x,\\\n self.desired_pose.pose.position.y,\\\n self.desired_pose.pose.position.z])\n distance = self.get_distance_between_points(curr_position, des_position)\n\n return distance",
"def get_distance(self):\n state_desc = self.env.get_state_desc()\n return state_desc[\"body_pos\"][\"pelvis\"][0]",
"def get_position(self):\n return self.player_position",
"def closest_dirt(self):\r\n position = self.bot_pos\r\n dirts = self.get_dirts(position[0],position[1])\r\n if dirts:\r\n i, j = min(dirts,\r\n key=lambda dirt_pos:((position[0]-dirt_pos[0])**2+(position[1]-dirt_pos[1])**2)**0.5\r\n )\r\n return (i,j)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pick an individual from positions.
|
def pickIndividual(positions, posInd):
potentialInd = []
for i in positions:
for j in posInd[i]:
potentialInd.append(j)
indIndex = np.random.choice(potentialInd, 1)[0]
return int(indIndex)
|
[
"def _pick_move(self, qvalues):\n unif = random.random()\n if unif < self.qlearning.epsilon: # greedy\n cell = self._greedy_pick_cell(qvalues)\n else: # random\n cell = self._random_pick_cell(qvalues)\n return cell",
"def Pick_point(self, loc):\n \n x, y = loc\n renderer = self.rens[0]\n picker = vtk.vtkCellPicker()\n picker.SetTolerance(0.01)\n picker.Pick(x, y, 0, renderer)\n points = picker.GetPickedPositions()\n numPoints = points.GetNumberOfPoints()\n #if no points selected, exits function\n if numPoints<1: return\n # Calls function to create a sphere at selected point\n pnt = points.GetPoint(0)\n self.mark(pnt[0], pnt[1], pnt[2])\n # Creating label at selected point\n label = vtk.vtkStringArray()\n label.SetName('label')\n label.InsertNextValue(\" Mid Patella\")\n lPoints = vtk.vtkPolyData()\n lPoints.SetPoints(points)\n lPoints.GetPointData().AddArray(label)\n \n hier = vtk.vtkPointSetToLabelHierarchy()\n hier.SetInputData(lPoints)\n hier.SetLabelArrayName('label')\n hier.GetTextProperty().SetColor(0,0,0)\n hier.GetTextProperty().SetFontSize(30)\n \n lMapper = vtk.vtkLabelPlacementMapper()\n lMapper.SetInputConnection(hier.GetOutputPort())\n lMapper.SetBackgroundColor(0.3,0.3,0.3)\n lMapper.SetBackgroundOpacity(0.8)\n lMapper.SetMargin(10)\n \n lActor = vtk.vtkActor2D()\n lActor.SetMapper(lMapper)\n self.labels.append(lActor) # keep track of all label actors\n \n self.rens[0].AddActor(lActor)\n self.Render()\n return pnt",
"def GetPickPosition(self):\n vtk_cell = self.mesh.GetCell(self.cell_id)\n cell = [vtk_cell.GetPointId(point_id) for point_id\n in range(vtk_cell.GetNumberOfPoints())]\n self.point_id = cell[0]\n return self.mesh.points[self.point_id]",
"def select(self, pos):\n for s in sorted(self.hole.groups['all'].sprites(), key=attrgetter('_layer'), reverse=True):\n if s.rect.collidepoint(pos.as_2d_tuple()):\n return s\n\n return None",
"def getPickedPoint(self, index: 'int const'=0) -> \"SoPickedPoint *\":\n return _coin.SoRayPickAction_getPickedPoint(self, index)",
"def pick(self):\n self.run_op(op_name='pick', op_func=stack.pick, num_of_args=1, push=False)",
"def getPickedPoint(self, index = 0):\n return _coin.SoRayPickAction_getPickedPoint(self, index)",
"def pick(self, action: 'SoPickAction') -> \"void\":\n return _coin.SoCoordinate4_pick(self, action)",
"def pickMontyDoor(door1,door2,door3,doors,PlayerDoor):\r\n MontyDoor = randint(1,3)\r\n while MontyDoor == PlayerDoor:\r\n MontyDoor = randint(1,3)\r\n ## With the Monty's door selected, we must now make sure he didnt pick the same\r\n ## door as the Player as well as with the car behind it.\r\n unchosenDoor = [1,2,3]\r\n unchosenDoor.pop((PlayerDoor - 1))\r\n unchosenDoor.pop((MontyDoor - 1))",
"def GetPickPosition(self):\n if self.hemi == \"vol\":\n self.point_id = self.cell_id\n return self.brain._data[\"vol\"][\"grid_coords\"][self.cell_id]\n else:\n vtk_cell = self.mesh.GetCell(self.cell_id)\n cell = [\n vtk_cell.GetPointId(point_id)\n for point_id in range(vtk_cell.GetNumberOfPoints())\n ]\n self.point_id = cell[0]\n return self.mesh.points[self.point_id]",
"def pick(self, action: 'SoPickAction') -> \"void\":\n return _coin.SoTextureCoordinateObject_pick(self, action)",
"def selected(self):\n selected = self._selected\n if self.multisel:\n return tuple(self._pos[index] for index in selected)\n\n return self._pos[selected]",
"def get_random_spin_position(self):\n\n try:\n random_position = random.choice(list(self._all_coordinate))\n except:\n from pdb import set_trace;set_trace() ############################## Breakpoint ##############################\n\n return random_position",
"def getSpawnPoint(self):\n return random.choice(self.spawnPoints)",
"def pick(self, action: 'SoPickAction') -> \"void\":\n return _coin.SoCoordinate3_pick(self, action)",
"def random_pos(self):\n temp = 0\n while temp < len(self.objects):\n line = random.randint(0, 14)\n column = random.randint(0, 14)\n if self.object_position(line, column, self.objects[temp].letter):\n temp += 1",
"def get_piece(self, position):\n return self._positions[str(position)].piece",
"def pick(self, action: 'SoPickAction') -> \"void\":\n return _coin.SoTextureCoordinateSphere_pick(self, action)",
"def pickplant(self):\n for tree in self.plants:\n if (self.gimme_distance((tree[0],tree[1])) <\n self.gimme_distance(self.myplant[:2])):\n\n self.myplant = (tree[0],tree[1],tree[2])",
"def _generate_candidate(self, i):\n\n a = i\n while a == i:\n a = self.random.randrange(self.n)\n b = i\n while b == i or b == a:\n b = self.random.randrange(self.n)\n c = i\n while c == i or c == a or c == b:\n c = self.random.randrange(self.n)\n\n new_pos = self.population[i][:]\n\n dim = len(self.domains)\n r = self.random.randrange(dim)\n\n for d in range(dim):\n if d == r or self.random.random() < self.cr:\n new_pos[d] = self.population[a][d] + self.f * (self.population[b][d] - self.population[c][d])\n return new_pos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make sure the point is inside the image if it is not, move it to the closest border
|
def limit_point_to_be_inside_image(point):
smallest_x = 0
smallest_y = 0
largest_x = IMG_HEIGHT-1
largest_y = IMG_WIDTH-1
limited_point = np.int0(np.array([
max(smallest_x, min(point[0], largest_x)),
max(smallest_y, min(point[1], largest_y))
]))
return limited_point
|
[
"def __validate_point(self, point):\n\n if point.x() < 0:\n point.setX(0)\n\n if point.y() < 0:\n point.setY(0)\n\n img_width = self._data.shape[1] - 1\n if point.x() > img_width:\n point.setX(img_width)\n\n img_height = self._data.shape[0] - 1\n if point.y() > img_height:\n point.setY(img_height)\n\n return point",
"def test_corner_points_shifted(self):\n self.rectangle.location = geometry.Point2D(10, 0)\n\n self.assertEqual(self.rectangle.top_left_point, geometry.Point2D(10, 10))\n self.assertEqual(self.rectangle.top_right_point, geometry.Point2D(15, 10))\n\n self.assertEqual(self.rectangle.bottom_left_point, geometry.Point2D(10, 0))\n self.assertEqual(self.rectangle.bottom_right_point, geometry.Point2D(15, 0))",
"def point_on_rectangle(rect, point, border=False):\n px, py = point\n rx, ry, rw, rh = tuple(rect)\n x_inside = y_inside = False\n\n if px < rx:\n px = rx\n elif px > rx + rw:\n px = rx + rw\n elif border:\n x_inside = True\n\n if py < ry:\n py = ry\n elif py > ry + rh:\n py = ry + rh\n elif border:\n y_inside = True\n\n if x_inside and y_inside:\n # Find point on side closest to the point\n if min(abs(rx - px), abs(rx + rw - px)) > \\\n min(abs(ry - py), abs(ry + rh - py)):\n if py < ry + rh / 2.:\n py = ry\n else:\n py = ry + rh\n else:\n if px < rx + rw / 2.:\n px = rx\n else:\n px = rx + rw\n\n return px, py",
"def snapToNearestGridPoint(self):\n self.x = round(self.x/50)*50\n self.y = round(self.y/50)*50",
"def difference_on_border(point, border):\n center_x = np.abs(border[0][0] - border[1][0])\n return point[0] - center_x",
"def covers(self, pt):\n# dx=self.center.x-pt.x\n# dy=self.center.y-pt.y\n# distance=math.sqrt(dx**2+dy**2)\n \n if round(self.center.distance(pt),8)<=round(self.radius,8):\n return True\n else:\n return False",
"def round_rectangle_tool_inside(x0, y0, x1, y1, radius, z_safe, z_surf, tool_dia, cuts):\n _RoundRectOffset(x0, y0, x1, y1, radius, z_safe, z_surf, tool_dia, cuts, -0.5 * tool_dia)",
"def where_is_point(point, wall_tolerance = 30):\n\n bounds = box_bounds()\n where_am_i = 'away from boundary'\n\n # [min x, max x, min y, max y]\n # if list element is 0 then not near an extrema, i.e, not near either an extreme x and/or y value.\n # if list element is 1 then near the corresponding extrema, i.e, near either an extreme x and/or y value.\n near_extrema = [0, 0, 0, 0]\n\n if bounds['min_x'] - wall_tolerance <= point[0] <= bounds['min_x'] + wall_tolerance:\n near_extrema[0] = 1\n where_am_i = 'near left wall'\n\n if bounds['min_y'] - wall_tolerance <= point[1] <= bounds['min_y'] + wall_tolerance:\n near_extrema[2] = 1\n where_am_i = 'near top wall'\n\n if bounds['max_x'] - wall_tolerance <= point[0] <= bounds['max_x'] + wall_tolerance:\n near_extrema[1] = 1\n where_am_i = 'near right wall'\n\n if bounds['max_y'] - wall_tolerance <= point[1] <= bounds['max_y'] + wall_tolerance:\n near_extrema[3] = 1\n where_am_i = 'near bottom wall'\n\n if sum(near_extrema) > 1:\n if near_extrema == [1, 0, 1, 0]:\n where_am_i = 'near top left corner'\n elif near_extrema == [0, 1, 1, 0]:\n where_am_i = 'near top right corner'\n elif near_extrema == [1, 0, 0, 1]:\n where_am_i = 'near bottom left corner'\n else:\n where_am_i = 'near bottom right corner'\n\n return where_am_i",
"def _auto_set_start_point(self):\n # sum the image along each axis within the central 1/3 (avoids outlier influence from say, gantry shots)\n top_third = int(self.image.array.shape[0]/3)\n bottom_third = int(top_third * 2)\n left_third = int(self.image.array.shape[1]/3)\n right_third = int(left_third * 2)\n central_array = self.image.array[top_third:bottom_third, left_third:right_third]\n\n x_sum = np.sum(central_array, 0)\n y_sum = np.sum(central_array, 1)\n\n # Calculate Full-Width, 80% Maximum\n fwxm_x_point = SingleProfile(x_sum).get_FWXM_center(80) + left_third\n fwxm_y_point = SingleProfile(y_sum).get_FWXM_center(80) + top_third\n\n # find maximum points\n x_max = np.unravel_index(np.argmax(central_array), central_array.shape)[1] + left_third\n y_max = np.unravel_index(np.argmax(central_array), central_array.shape)[0] + top_third\n\n # which one is closer to the center\n fwxm_dist = Point(fwxm_x_point, fwxm_y_point).dist_to(self.image.center)\n max_dist = Point(x_max, y_max).dist_to(self.image.center)\n\n if fwxm_dist < max_dist:\n center_point = Point(fwxm_x_point, fwxm_y_point)\n else:\n center_point = Point(x_max, y_max)\n\n self.circle_profile.center = center_point",
"def mark_image(image, ball):\n\t# Draw the outer circle\n\tcv2.circle(image, (ball[0], ball[1]), ball[2], (0, 255, 0), 2)\n\t#Draw the centre of the circle\n\tcv2.circle(image, (ball[0], ball[1]), 2, (0, 128, 255), 3)\n\treturn image",
"def move_image(self, xamt, yamt):\n width,height = self.GetClientSize()\n x,y = self.center\n x -= xamt\n y -= yamt\n def compute(): \n w, h = width/2/self.scale, height/2/self.scale\n return x-w, y-h, x+w, y+h\n ul_x, ul_y, dr_x, dr_y = compute()\n if ul_x < 0:\n x += -ul_x\n ul_x, ul_y, dr_x, dr_y = compute()\n if dr_x > self.img.shape[1]:\n x -= dr_x-self.img.shape[1]\n ul_x, ul_y, dr_x, dr_y = compute()\n if ul_y < 0:\n y += -ul_y\n ul_x, ul_y, dr_x, dr_y = compute()\n if dr_y > self.img.shape[0]:\n y -= dr_y-self.img.shape[0]\n ul_x, ul_y, dr_x, dr_y = compute()\n self.set_center((x,y))",
"def shift_to_center(self, desired_center_image_point, imshape):\n\n current_coords_of_the_point = desired_center_image_point\n target_coords_of_the_point = np.float32([imshape[1], imshape[0]]) / 2\n self.intrinsic_matrix[:2, 2] += (\n target_coords_of_the_point - current_coords_of_the_point)",
"def get_relative_location_on_cell_border(cell_min_point, cell_length_x, cell_length_y, point, zero_tolerance):\n border_id_p = -1\n if cwt(point[0], cell_min_point[0], zero_tolerance) == 0:\n border_id_p = 0\n border_id_p += (point[1]-cell_min_point[1]) / cell_length_y\n elif cwt(point[1], cell_min_point[1] + cell_length_y, zero_tolerance) == 0:\n border_id_p = 1\n border_id_p += (point[0]-cell_min_point[0]) / cell_length_x\n elif cwt(point[0], cell_min_point[0] + cell_length_x, zero_tolerance) == 0:\n border_id_p = 2\n border_id_p += (1 - (point[1]-cell_min_point[1]) / cell_length_y)\n elif cwt(point[1], cell_min_point[1], zero_tolerance) == 0:\n border_id_p = 3\n border_id_p += (1- (point[0]-cell_min_point[0]) / cell_length_x)\n return border_id_p",
"def inside(self, point):\n inv_trans = np.linalg.inv(self.transformation)\n scale = self.scale\n point_w = np.matmul(inv_trans[:3, :3], point) + inv_trans[:3, 3]\n for i in range(3):\n if abs(point_w[i]) > scale[i] / 2.:\n return False\n return True",
"def _coords_inside_image(*args, **kwargs): # real signature unknown\n pass",
"def _point_in_curve(self, point, curve_group, tol=0.01):\n # closed curve mid-point\n # shoot a ray (new linear curve) and check if it intersects with any of \n pass",
"def _on_board(self, point):\n return self.board[point]!= BORDER",
"def boundaries(im_original, im_copy, b_left, b_right, track_name='Sebring'):\n _, thresh_b = cv.threshold(im_copy, 25, 255, 0)\n contours_b, _ = cv.findContours(thresh_b, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n\n contour1 = np.squeeze(contours_b[0])\n contour2 = np.squeeze(contours_b[1])\n\n x1 = contour1[:, 0]\n y1 = contour1[:, 1]\n x2 = contour2[:, 0]\n y2 = contour2[:, 1]\n\n # Check which contour is the left, and which is the right boundary of the given shape\n # We assume our tracks are clockwise. Point with biggest y will hence be on the left hand side\n # Remember that y axis is pointing down.\n if max(y1) > max(y2):\n (xl, yl, xr, yr) = (x1, y1, x2, y2)\n else:\n (xl, yl, xr, yr) = (x2, y2, x1, y1)\n\n # Manual correction for our sand region, if there are more contours than 2\n\n if len(contours_b) > 2:\n print('You have more than 2 contours in ' + track_name)\n pass\n # left: 0, 1\n # right: 3, 2\n if (track_name == 'Sebring') & (b_left == 8):\n c0 = np.squeeze(contours_b[0])\n c1 = np.squeeze(contours_b[1])\n c2 = np.squeeze(contours_b[2])\n c3 = np.squeeze(contours_b[3])\n xl = np.hstack((c0[:, 0], c1[:, 0], c2[:, 0]))\n xr = np.hstack((c3[:, 0]))\n yl = np.hstack((c0[:, 1], c1[:, 1], c2[:, 1]))\n yr = np.hstack((c3[:, 1]))\n\n else:\n print('You have to correct some contour!')\n\n # Matplotlib code to check if you combined contours correctly\n # if name == 'track_6':\n # plt.figure()\n # plt.plot(xl, yl, 'r.')\n # plt.plot(xr, yr, 'b.')\n # plt.title(track_name)\n # plt.show()\n\n\n # Make asphalt boundaries\n for idx in range(len(xl)):\n im_original[yl[idx], xl[idx]] = b_left\n for idx in range(len(xr)):\n im_original[yr[idx], xr[idx]] = b_right\n\n return xl, yl, xr, yr",
"def mark_bbox(event,x,y,flags,param):\n \n global draw_mode, ref_point, region_mode\n region_color = color_dict[region_mode]\n if event == cv2.EVENT_LBUTTONDOWN and not draw_mode:\n cv2.circle(img, (x,y), 1, region_color, 1)\n ref_point[0] = (x,y)\n draw_mode = True\n elif event == cv2.EVENT_LBUTTONDOWN and draw_mode:\n cv2.circle(img, (x,y), 1, region_color, 1)\n ref_point[1] = (x,y)\n p1, p2 = ref_point[0], ref_point[1]\n cv2.rectangle(img, p1, p2, region_color, 1)\n append_to_roi(p1,p2, region_mode)\n draw_mode = False",
"def is_box_in_image_bounds(input_image_shape,box):\n assert box['x1'] < box['x2']\n assert box['y1'] < box['y2']\n width, height, _ = input_image_shape\n if box[\"x1\"] < 0:\n return False\n if box[\"y1\"] < 0:\n return False\n if box[\"x2\"] >= width:\n return False\n if box[\"y2\"] >= height:\n return False\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that finds the orange in an image, make a bounding box around it, fits an ellipse in the bounding box and paints everything outside the ellipse in black. Returns the painted image and a boolean stating wheather any orange was found.
|
def get_pixels_inside_orange(hsv):
hsv_inside_orange = hsv.copy()
hsv_orange_mask = get_orange_mask(hsv)
hsv_save_image(hsv_orange_mask, "2b_orange_mask", is_gray=True)
orange_x, orange_y = np.where(hsv_orange_mask==255)
if len(orange_x) == 0:
# If no orange in image: return original image
return hsv, False
x_min = np.amin(orange_x)
x_max = np.amax(orange_x)
y_min = np.amin(orange_y)
y_max = np.amax(orange_y)
hsv_inside_orange[0:x_min,] = HSV_BLACK_COLOR
hsv_inside_orange[x_max+1:,] = HSV_BLACK_COLOR
hsv_inside_orange[:,0:y_min] = HSV_BLACK_COLOR
hsv_inside_orange[:,y_max+1:] = HSV_BLACK_COLOR
return hsv_inside_orange, True
|
[
"def test_ellipse(self):\n self.ia.open(datapath + \"gal.im\")\n reg = rg.fromtextfile(\n datapath + \"testEllipse90deg.crtf\",\n csys = self.ia.coordsys().torecord(),\n shape=self.ia.shape()\n )\n subi = self.ia.subimage(\"\", region=reg)\n self.ia.open(datapath + \"galwj2kellipse.im\")\n expec = self.ia.getchunk(getmask=True)\n self.ia.done()\n got = subi.getchunk(getmask=True)\n subi.done()\n self.assertTrue((got == expec).all())",
"def main():\n p1 = Point(0,0) \n p2 = Point(0,0)\n p3 = Point(0,0)\n e1 = Ellipse(p1, p2, 2)\n e2 = Ellipse(p2, p3, 3)\n overlap = ComputeRectangleValues(e1, e2)\n\n success = ComputeOverlapOfEllipses(overlap, 10000)\n print(\"The overlapping area of an ellipse is: \" + str(success.hits))",
"def draw_ellipse_mask(canvas, center_x, center_y, outer_radius, colour, threshold, fill=True):\n\n def _draw_4point(_canvas, _cx, _cy, x, y, _colour):\n # Draw the 8 symmetries\n print(\"_draw_8point\", _cy, _cx, y, x)\n print(\"_draw_8point\", _cy + y, _cx - x)\n print(\"_draw_8point\", _cy + y, _cx + x)\n print(\"_draw_8point\", _cy - y, _cx - x)\n print(\"_draw_8point\", _cy - y, _cx + x)\n\n _canvas[_cy + y, _cx - x] = _colour\n _canvas[_cy + y, _cx + x] = _colour\n _canvas[_cy - y, _cx - x] = _colour\n _canvas[_cy - y, _cx + x] = _colour\n\n i = 0\n j = outer_radius\n last_fade_amount = 0\n # fade_amount = 0\n\n max_opaque = 1.0\n\n while i < j:\n height = math.sqrt(max(outer_radius * outer_radius - i * i, 0))\n fade_amount = max_opaque * (math.ceil(height) - height)\n\n if fade_amount < last_fade_amount:\n # Opaqueness reset so drop down a row.\n j -= 1\n last_fade_amount = fade_amount\n\n # We're fading out the current j row, and fading in the next one down.\n if max_opaque - fade_amount > threshold:\n _draw_4point(canvas, center_x, center_y, i, j, colour)\n if fade_amount > threshold:\n _draw_4point(canvas, center_x, center_y, i, j - 1, colour)\n\n i += 1\n\n if fill:\n boundary_fill4(canvas, center_x, center_y, colour, colour)",
"def build_image_ellipses():\n\n img = np.zeros((500, 600, 3), dtype=\"uint8\")\n cv2.ellipse(img, (120, 60), (100, 50), 0, 0, 360, (255, 255, 0), -1)\n cv2.ellipse(img, (300, 60), (50, 50), 0, 0, 360, (0, 0, 255), -1)\n cv2.ellipse(img, (425, 200), (50, 150), 0, 0, 360, (255, 0, 0), -1)\n cv2.ellipse(img, (550, 250), (20, 240), 0, 0, 360, (255, 0, 255), -1)\n cv2.ellipse(img, (200, 200), (150, 50), 0, 0, 360, (0, 255, 0), -1)\n cv2.ellipse(img, (250, 400), (200, 50), 0, 0, 360, (0, 255, 255), -1)\n return img",
"def draw_ellipse(image, xy, a, b, facecolor, edgecolor):\n from PIL import Image, ImageDraw\n mask = Image.new( size=image.size, mode='RGBA')\n draw = ImageDraw.Draw(mask)\n draw.ellipse([xy[0]-a, xy[0]-b, xy[1]+a, xy[1]+b],\n fill=tuple(facecolor), outline=tuple(edgecolor))\n mask = mask.resize(image.size, Image.LANCZOS)\n mask = mask.rotate(45)\n image.paste(mask, mask=mask)\n return image",
"def face_highlight(img, bounding_boxes, scalar):\n img = img.astype('int32')\n # filter real faces based on detection confidence\n confidence_thresh = 0.85\n filtered_idx = bounding_boxes[:, 4]>=confidence_thresh\n filtered_bboxes = bounding_boxes[filtered_idx]\n\n # if no faces found, return a darker image\n if not len(filtered_bboxes):\n return np.clip(img-50, 0, 255).astype('uint8')\n\n nrof_faces = len(filtered_bboxes)\n\n # detect multiple faces or not\n det = filtered_bboxes[:, 0:4]\n det_arr = []\n img_size = np.asarray(img.shape)\n if nrof_faces>1:\n # if multiple faces found, we choose one face\n # which is located center and has larger size\n bounding_box_size = (det[:,2] - det[:,0]) * (det[:,3] - det[:,1])\n img_center = img_size / 2\n offsets = np.vstack([(det[:,0]+det[:,2])/2 - img_center[0],\n (det[:,1]+det[:,3])/2 - img_center[1]])\n offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)\n # some extra weight on the centering\n index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)\n det_arr.append(det[index, :])\n else:\n det_arr.append(np.squeeze(det))\n\n det = np.squeeze(det_arr)\n # compute expanding bounding box\n bb, box_size = get_square_crop_box(det, scalar)\n # get the valid pixel index of cropped face\n face_left = np.maximum(bb[0], 0)\n face_top = np.maximum(bb[1], 0)\n face_right = np.minimum(bb[2], img_size[0])\n face_bottom = np.minimum(bb[3], img_size[1])\n\n # highlight the face with circle\n xx, yy = np.mgrid[:img_size[0], :img_size[1]]\n center_x = int((bb[3] + bb[1])/2)\n center_y = int((bb[2] + bb[0])/2)\n circle_r2 = int(0.25 * box_size**2)\n circle = (xx - center_x) ** 2 + (yy - center_y) ** 2\n highlight_mat = circle > circle_r2\n highlight_mat = np.repeat(np.expand_dims(highlight_mat, 2), 3, axis=2)\n # highlight the face with square\n #highlight_mat = np.ones_like(img)\n #highlight_mat[face_top:face_bottom, face_left:face_right, :] = 0\n \n return np.clip(img-50*highlight_mat, 0, 255).astype('uint8')",
"def search(imagename):\n # loading astronaut image\n img = Image.open(imagename)\n img_arr = np.array(img)\n W,H = img.size\n\n # perform selective search\n img_lbl, regions = selectivesearch.selective_search(\n img_arr, scale=500, sigma=0.8, min_size=200)\n\n candidates = set()\n for r in regions:\n # excluding same rectangle (with different segments)\n if r['rect'] in candidates:\n continue\n # excluding regions smaller than 2000 pixels\n if r['size'] < 2000:\n continue\n # distorted rects\n x, y, w, h = r['rect']\n #if w / h > 1.2 or h / w > 1.2:\n # continue\n if r['rect'][2] >= W - 70 and r['rect'][3] >= H - 70:\n continue\n candidates.add(r['rect'])\n \"\"\"\n # draw rectangles on the original image\n fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))\n ax.imshow(img)\n for x, y, w, h in candidates:\n rect = mpatches.Rectangle(\n (x, y), w, h, fill=False, edgecolor='red', linewidth=1)\n ax.add_patch(rect)\n #plt.show()\n \"\"\"\n return candidates",
"def ellipse(\n img,\n center,\n axes,\n angle,\n startAngle,\n endAngle,\n color,\n thickness=...,\n lineType=...,\n shift=...,\n) -> img:\n ...",
"def find_eyes(self):\n\n self.create_output_directory()\n\n eyes_classifier = self.classifiers.get('eyes', None)\n eyes_cascade = cv2.CascadeClassifier(eyes_classifier)\n\n image = cv2.imread(self.input_image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n faces = eyes_cascade.detectMultiScale(gray, 1.2, 3)\n\n eyes = None\n\n for (x, y, w, h) in faces:\n roi_gray = gray[y:y + h, x:x + w]\n roi_color = image[y:y + h, x:x + w]\n\n eyes = eyes_cascade.detectMultiScale(roi_gray)\n\n for (x_eye, y_eye, w_eye, h_eye) in eyes:\n center = (int(x_eye + 0.5 * w_eye), int(y_eye + 0.5 * h_eye))\n radius = int(0.3 * (w_eye + h_eye))\n color = (0, 255, 0)\n thickness = 3\n cv2.circle(roi_color, center, radius, color, thickness)\n\n print('Found {number_of_faces} eyes in \\'{file_name}.\\''.format(\n number_of_faces=len(eyes),\n file_name=os.path.basename(self.input_image)\n ))\n\n output_dir = '{dir}/{file_name}'.format(\n dir=self.output_dir,\n file_name=os.path.basename(self.input_image)\n )\n\n cv2.imwrite(os.path.expanduser(output_dir), image)",
"def findPipsRGB(img,version=1):\n\n logEntry(INFO, \"Finding pips on Red, Green and Blue Dices\")\n\n pips = 0\n\n #dl = DrawingLayer((img.width, img.height))\n\n white_dice = img.smooth('median',(5,5))\n\n if version == 1:\n # Version 1 Approach Normal\n histo = white_dice.toGray()\n histo_eq = histo.equalize()\n showImage(histo_eq)\n\n max = histo_eq.maxValue()\n min = histo_eq.minValue()\n\n stretch = white_dice.stretch(min,max-100)\n\n only_dice_int = stretch.binarize().invert().erode(2).dilate(2)\n\n blobs = only_dice_int.findBlobs(minsize=40)\n\n if blobs is not None:\n\n layer1 = DrawingLayer((img.width, img.height))\n counter = 0\n for blob in blobs:\n #print [counter, blob.perimeter(), blob.area(), blob.angle(), blob.circleDistance()]\n #print [blob.radius(), blob.isCircle(), blob.isRectangle()]\n #print [(blob.minRectWidth() / blob.minRectHeight()),blob.isSquare()]\n blob.isMySquare = blob.minRectWidth() / blob.minRectHeight()\n #print [blob.isMySquare]\n #blob.draw(layer=layer1, color=Color.RED)\n #layer1.text(str(counter), blob.coordinates())\n counter = counter + 1\n\n #img.addDrawingLayer(layer1)\n #img.applyLayers()\n\n showImage(img)\n\n total_img_pix = img.height * img.width\n\n large_blobs = blobs.filter([b.area() > (0.25 * total_img_pix) for b in blobs])\n if large_blobs is not None and len(large_blobs) > 0:\n #img.clearLayers()\n showImage(img)\n darker_img = img / 1.5\n darker_img.filename = img.filename\n pips = findPipsRGB(darker_img)\n else:\n circles = blobs.filter([b.perimeter() < 55 and b.perimeter() > 30 and b.circleDistance() > 0.11 and (b.isMySquare >= 0.8 and b.isMySquare <= 1.1) for b in blobs])\n if circles is not None:\n if len(circles) > 0:\n logEntry(INFO, \"RGB dice. Found pip(s): \"+ str(len(circles)))\n pips = len(circles)\n else:\n logEntry(ERROR, \"No blobs found\")\n #img.clearLayers()\n return pips;",
"def detect_color(image, ball):\n\t#Crop the ball region for color detection\n\t#[y1:y2, x1:x2], x1y1 -top left, x2y2 - bottom right\n\tball_img = image[ball[1] - ball[2]/2 : ball[1]+ball[2]/2,\n\t\tball[0] - ball[2]/2 : ball[0] + ball[2]/2]\n\tcv2.imwrite(\"crop.jpg\", ball_img)\n\t#Convert cropped image to HSV\n\tball_img = cv2.cvtColor(ball_img, cv2.COLOR_BGR2HSV)\n\tcv2.imwrite(\"crop1.jpg\", ball_img)\n\t# get Color\n\tball_hsv_mean = cv2.mean(ball_img)\n\thue = ball_hsv_mean[0]\n\t\n\tif(hue < 11):\n\t\tcolor = 'RED'\n\telif (hue < 18):\n\t\tcolor = 'ORANGE'\n\telif (hue < 39):\n\t\tcolor = 'YELLOW'\n\telif (hue < 76):\n\t\tcolor = 'GREEN'\n\telif (hue < 131):\n\t\tcolor = 'BLUE'\n\telif (hue < 161):\n\t\tcolor = 'VIOLET'\n\telif (hue < 180):\n\t\tcolor = 'RED'\n\telse:\n\t\tcolor = 'UNKNOWN'\n\treturn color",
"def isWhite(img, circle):\n circle = [int(X) for X in circle]\n xc, yc, r = circle\n cropImg = img[yc-r:yc+r, xc-r:xc+r]\n average_color = cv2.mean(cropImg)\n if manyConds(average_color, [BAD1, BAD2, BAD3, BAD4], [d1, d2, d3,\n d4]) or \\\n isClose(average_color, d):\n return True\n else:\n # print average_color\n return False",
"def fill_elliptical_contours(image: np.ndarray, threshold: float = 0.5, round_ratio: float = ROUND_RATIO) -> np.ndarray:\n processed = cv.morphologyEx(image, cv.MORPH_CLOSE, Kernel.k3, iterations=4) # fill small gaps and close contours\n # find contours in 2 level hierarchy: inner and outer contours - inner contours in parent field have non -1 value\n contours, hierarchy = cv.findContours(processed, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)\n original = image.copy()\n img_area = image.shape[0] * image.shape[1]\n\n for i in range(0, len(contours)):\n # check for a contour parent (indicates being inner contour), also filter to big and to small contours\n if hierarchy[0][i][3] != -1 \\\n and img_area * VERTEX_AREA_UPPER >= cv.contourArea(contours[i]) >= img_area * VERTEX_AREA_LOWER:\n (x, y), (a, b), angle = cv.minAreaRect(contours[i]) # rotated bounding rect describe fitted ellipse\n if round_ratio >= a / b >= 1.0 / round_ratio: # check if fitted ellipse is round enough to be a vertex\n ellipse_cnt = cv.ellipse2Poly((int(x), int(y)), (int(a / 2.0), int(b / 2.0)), int(angle), 0, 360, 1)\n overlap_level = contours_overlap_level(ellipse_cnt, contours[i])\n if overlap_level >= threshold: # if ellipse and inner contour overlap enough then fill vertex (contour)\n cv.drawContours(original, contours, i, Color.OBJECT, thickness=cv.FILLED)\n\n else: # removing contours not meeting roundness condition\n cv.drawContours(original, contours, i, Color.BG, thickness=6)\n\n return original",
"def isRed(img, circle):\n circle = [int(X) for X in circle]\n xc, yc, r = circle\n cropImg = img[yc-r:yc+r, xc-r:xc+r]\n average_color = cv2.mean(cropImg)\n if red_lower[0] <= average_color[0] <= red_upper[0] and red_lower[1] <= \\\n average_color[1] <= red_upper[1] and red_lower[2] <= \\\n average_color[2] <= red_upper[2]:\n return True\n else:\n return False",
"def mark_image(image, ball):\n\t# Draw the outer circle\n\tcv2.circle(image, (ball[0], ball[1]), ball[2], (0, 255, 0), 2)\n\t#Draw the centre of the circle\n\tcv2.circle(image, (ball[0], ball[1]), 2, (0, 128, 255), 3)\n\treturn image",
"def OverlayFitEllipse(img_edges, confidence_parameters, new_controls, globalflags):\n #confidence parameters\n best_ellipse = confidence_parameters[0]\n pnts = confidence_parameters[1]\n norm_err = confidence_parameters[2]\n inliers = confidence_parameters[3]\n #global flags\n debug = globalflags[0]\n displayImages = globalflags[1]\n #create a color image\n img_color = cv2.merge((img_edges,img_edges,img_edges))\n if debug:print(\"Shape of color image is \" + str(img_color.shape))\n OverlayRANSACFit(img_color, pnts, inliers, best_ellipse)\n if displayImages == 1 :\n fig,(ax1,ax2) = plt.subplots(ncols =2 ,nrows =1, figsize=(8,4))\n ax1.set_title(\"Normalized error of the fit\")\n ax1.plot(norm_err, 'k-')\n ax2.set_title(str(new_controls))\n ax2.imshow(img_color)\n return img_color",
"def findPipsY(img,version=1):\n\n logEntry(INFO, \"Finding pips on Yellow Dice\")\n\n pips = 0\n\n showImage(img)\n\n white_dice = img.hueDistance(Color.YELLOW,minsaturation=80, minvalue=15)\n only_dice = img - white_dice\n\n only_dice_int = only_dice.binarize()\n showImage(only_dice_int)\n\n\n only_dice_fil = only_dice_int.floodFill((0,0),color=Color.BLACK)\n\n only_dice_fil = only_dice_fil.floodFill((img.width-1,0),color=Color.BLACK)\n\n only_dice_fil = only_dice_fil.floodFill((img.width-1,img.height-1),color=Color.BLACK)\n\n only_dice_fil = only_dice_fil.floodFill((0,img.height-1),color=Color.BLACK)\n\n showImage(only_dice_fil)\n\n total_img_pix = img.height * img.width\n\n blobs = only_dice_fil.findBlobs()\n if blobs is not None:\n counter = 0\n for blob in blobs:\n #print [counter, blob.perimeter(), blob.area(), blob.angle(), blob.circleDistance()]\n #print [blob.radius(), blob.isCircle(), blob.isRectangle()]\n #print [(blob.minRectWidth() / blob.minRectHeight()),blob.isSquare()]\n blob.isMySquare = blob.minRectWidth() / blob.minRectHeight()\n #print [blob.isMySquare]\n\n logEntry(INFO, \"Yellow Dice - pips \"+ str(len(blobs)))\n pips = len(blobs)\n else:\n darker_img = img / 1.5\n darker_img.filename = img.filename\n pips = findPipsYW(darker_img)\n return pips",
"def detect_colour(hsv_image, hsv_boundary, min_size, change_to_white):\n\n (lower, upper) = hsv_boundary\n\n # Convert boundaries to np arrays\n lower = np.array(lower, dtype=\"uint8\")\n upper = np.array(upper, dtype=\"uint8\")\n\n # Create mask of pixels in range if the range spans both sides of 0 (red) then it will need to be considered as\n # two masks\n if lower[0] > upper[0]:\n lower_bound = lower[0]\n lower[0] = 0\n mask1 = cv2.inRange(hsv_image, lower, upper)\n lower[0] = lower_bound\n upper[0] = 180\n mask2 = cv2.inRange(hsv_image, lower, upper)\n mask = cv2.bitwise_or(mask1, mask2)\n else:\n mask = cv2.inRange(hsv_image, lower, upper)\n\n res = cv2.bitwise_and(hsv_image, hsv_image, mask=mask)\n\n # find contours in the masked image\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n colour_found = False\n cnts = cnts[1]\n\n centre_array = []\n\n # loop over the contours - We should only have one significant region.\n for c in cnts:\n\n # if the contour is not sufficiently large, ignore it - #This number will need to depend on image size.\n size = cv2.contourArea(c)\n if size < min_size:\n continue\n\n colour_found = True\n # compute the center of the contour\n M = cv2.moments(c)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n centre_array.append([cX, cY, size])\n\n if __debug__:\n # draw the contour and center of the shape on the image\n image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)\n cv2.drawContours(image, [c], -1, (255, 55, 255), int(image.shape[0]/80))\n cv2.circle(image, (cX, cY), 1, (255, 55, 255), int(image.shape[0]/80))\n cv2.imwrite(os.path.join(config.debug_output_folder, datetime.datetime.now().strftime(\"%M%S.%f_\")\n + 'Colour_Detection.jpg'), image)\n \n if change_to_white:\n hsv_image[mask == 255] = [0, 0, 255]\n\n return centre_array",
"def shapeDetection(imagePath):\n img = cv2.imread(imagePath)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n ret,thresh = cv2.threshold(gray,127,255,1)\n\n contours,h = cv2.findContours(thresh,1,2)\n\n for cnt in contours:\n approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)\n #print (len(approx))\n if len(approx) > 15:\n #print (\"circle\")\n cv2.drawContours(img,[cnt],0,(0,255,255),-1)\n\n return img"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculates the angle between the line from the centroid to the arrowhead and the negative xaxis.
|
def calc_angle_centroid_arrowhead(centroid, arrowhead):
    """Return the angle, in degrees, between the line from the centroid
    to the arrowhead and the negative x-axis (quadrant-aware, via atan2
    on the mirrored x-component)."""
    offset = arrowhead - centroid
    return np.degrees(np.arctan2(offset[1], -offset[0]))
|
[
"def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle",
"def get_line_angle_in_degrees(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def angle(self):\n return np.degrees(np.arctan2(self.u_vector[1], self.u_vector[0]))",
"def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)",
"def angle_vector(self):\n from math import atan2, pi\n return (atan2(self.y, self.x)) / pi * 180",
"def get_angle(self, otherPoint):\n deltax = otherPoint.x - self.x\n deltay = otherPoint.y - self.y\n return math.atan2(deltay, deltax) * 180 / math.pi",
"def _get_startAngle(self) -> \"double\" :\n return _core.Arc2D__get_startAngle(self)",
"def angle_to(self, vector):\n angle = vector.heading - self.heading\n if angle < 0:\n angle += 360\n return angle",
"def _get_startAngle(self) -> \"double\" :\n return _core.EllipticalArc2D__get_startAngle(self)",
"def compute_angle(start_node, end_node):\n delta_x = end_node.map_coord[0] - start_node.map_coord[0]\n delta_y = end_node.map_coord[1] - start_node.map_coord[1]\n return np.arctan2(delta_y, delta_x)",
"def get_angle_between_atoms(self, atom0, atom1=None):\n vector0 = np.array([\n atom0.pixel_x - self.pixel_x,\n atom0.pixel_y - self.pixel_y])\n if atom1 is None:\n vector1 = np.array([\n self.pixel_x+1000,\n 0])\n else:\n vector1 = np.array([\n atom1.pixel_x - self.pixel_x,\n atom1.pixel_y - self.pixel_y])\n cosang = np.dot(vector0, vector1)\n sinang = np.linalg.norm(np.cross(vector0, vector1))\n return(np.arctan2(sinang, cosang))",
"def get_angle(self):\n mx, my = self.mouse.get_pos()\n angle = math.degrees(math.atan2((mx - C_X), (my - C_Y)))\n if angle < 0:\n angle += 360\n return int(angle)",
"def arrival_angle(self, last_but_one_x, last_but_one_y, last_x, last_y):\n theta = aim_to_point(last_but_one_x, last_but_one_y, last_x, last_y)\n # print \"DOA = \" + str(math.degrees(theta))\n return theta",
"def _get_startAngle(self) -> \"double\" :\n return _core.EllipticalArc3D__get_startAngle(self)",
"def angle(v):\n cos_theta = normalized(v)[0]\n theta = math.acos(cos_theta)\n if v[1] > 0:\n theta = -theta\n return rads_to_degs(theta)",
"def GetAngle(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetAngle(self)",
"def ND_angle(self):\n ND_angle = np.degrees(np.arctan(np.average(self.ND_params[0,:])))\n return ND_angle",
"def calc_angle_centroid_goal_point(centroid, goal_point):\n v_2 = goal_point - centroid\n dx, dy = v_2[0], v_2[1]\n alpha = np.degrees(np.arctan2(-dy, -dx))\n return alpha",
"def get_angle_to(self, aim):\n dx = self.rect.centerx - aim.rect.centerx\n dy = self.rect.centery - aim.rect.centery\n\n angle = np.arctan2(dy, dx) + np.pi/2\n\n if angle < 0:\n angle += 2*np.pi\n\n angle = np.rad2deg(angle)\n\n return angle"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculates the angle between the line from the centroid to the goal point and the negative x-axis. The goal point is between the two inner corners in the H.
|
def calc_angle_centroid_goal_point(centroid, goal_point):
    """Return the angle, in degrees, between the line from the centroid
    to the goal point and the negative x-axis. The goal point lies
    between the two inner corners of the 'H'."""
    offset = goal_point - centroid
    return np.degrees(np.arctan2(-offset[1], -offset[0]))
|
[
"def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle",
"def arrival_angle(self, last_but_one_x, last_but_one_y, last_x, last_y):\n theta = aim_to_point(last_but_one_x, last_but_one_y, last_x, last_y)\n # print \"DOA = \" + str(math.degrees(theta))\n return theta",
"def get_angle(self, otherPoint):\n deltax = otherPoint.x - self.x\n deltay = otherPoint.y - self.y\n return math.atan2(deltay, deltax) * 180 / math.pi",
"def get_angle(self, position, goal):\n position = np.array([position.x, position.y])\n diff = goal - position\n angle = math.atan2(diff[1], diff[0])\n return angle",
"def _halfway_x_y_angle(self):\n pixel = self._get_pixel()\n x_disp, y_disp = pixel[:, 0], pixel[:, 1]\n # Get distance along the path\n d = np.hstack(\n [0.0, np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]\n )\n xcen = np.interp(d[-1] / 2.0, d, x_disp)\n ycen = np.interp(d[-1] / 2.0, d, y_disp)\n\n # Find segment along which the mid-point lies\n imin = np.searchsorted(d, d[-1] / 2.0) - 1\n\n # Find normal of the axis label facing outwards on that segment\n normal_angle = self.normal_angle[imin] + 180.0\n return xcen, ycen, normal_angle",
"def get_angle_to(self, aim):\n dx = self.rect.centerx - aim.rect.centerx\n dy = self.rect.centery - aim.rect.centery\n\n angle = np.arctan2(dy, dx) + np.pi/2\n\n if angle < 0:\n angle += 2*np.pi\n\n angle = np.rad2deg(angle)\n\n return angle",
"def angle_vector(self):\n from math import atan2, pi\n return (atan2(self.y, self.x)) / pi * 180",
"def angle(self):\n return np.degrees(np.arctan2(self.u_vector[1], self.u_vector[0]))",
"def getBearingTo(self, p, o):\n relPoint = Point2D(p.x - self.x, p.y - self.y)\n absDir = relPoint.getDirection()\n return normalizeAngle(absDir - o)",
"def calc_angle_to_goal(self, state: State) -> float:\n\n curPos = state.position\n if hasattr(self.planningProblem.goal.state_list[0], 'position'):\n goalPos = self.planningProblem.goal.state_list[0].position.center\n return math.atan2(goalPos[1] - curPos[1], goalPos[0] - curPos[0])\n else:\n return 0",
"def angle(point1, point2):\n ax = ux(point1)\n ay = uy(point1)\n bx = ux(point2)\n by = uy(point2)\n return 180.0 * math.atan2(by-ay, bx-ax) / math.pi",
"def compute_angle(start_node, end_node):\n delta_x = end_node.map_coord[0] - start_node.map_coord[0]\n delta_y = end_node.map_coord[1] - start_node.map_coord[1]\n return np.arctan2(delta_y, delta_x)",
"def get_angle(x,y,target_x,target_y) -> float:\n red = math.atan2(-target_y + y,target_x - x) # Angle in radians\n return math.degrees(red)",
"def calc_angle_centroid_arrowhead(centroid, arrowhead):\n v_1 = arrowhead - centroid\n dx, dy = v_1[0], v_1[1]\n theta = np.degrees(np.arctan2(dy, -dx))\n return theta",
"def get_slanting_angle(self, disp):\r\n # for 2ch\r\n top_center = ( self.ideal_top_pts_2ch[0] + self.ideal_top_pts_2ch[1] ) / 2.0\r\n midline = normalize(self.ideal_lowest_point_2ch - top_center)\r\n baseline_2ch = normalize(self.ideal_top_pts_2ch[0] - self.ideal_top_pts_2ch[1])\r\n normal = find_plane_eq(self.ideal_top_pts_2ch[0],\r\n self.ideal_top_pts_2ch[1],\r\n self.ideal_lowest_point_2ch)[:3]\r\n\r\n horizontal_vec_2ch = normalize(np.cross(normalize(normal), midline))\r\n\r\n if np.dot(baseline_2ch, horizontal_vec_2ch) < 0.0:\r\n baseline_2ch = -baseline_2ch\r\n\r\n slanting_angle_2ch = vg.angle(baseline_2ch, horizontal_vec_2ch, assume_normalized = True, look = normal, units = 'deg')\r\n\r\n # for 4ch\r\n top_center = ( self.ideal_top_pts_4ch[0] + self.ideal_top_pts_4ch[1] ) / 2.0\r\n midline = normalize(self.ideal_lowest_point_4ch - top_center)\r\n baseline_4ch = normalize(self.ideal_top_pts_4ch[0] - self.ideal_top_pts_4ch[1])\r\n normal = find_plane_eq(self.ideal_top_pts_4ch[0],\r\n self.ideal_top_pts_4ch[1],\r\n self.ideal_lowest_point_4ch)[:3]\r\n horizontal_vec_4ch = normalize(np.cross(normalize(normal), midline))\r\n\r\n if np.dot(baseline_4ch, horizontal_vec_4ch) < 0.0:\r\n baseline_4ch = -baseline_4ch\r\n\r\n slanting_angle_4ch = vg.angle(baseline_4ch, horizontal_vec_4ch, assume_normalized = True, look = normal, units = 'deg')\r\n\r\n if disp:\r\n norml = np.linalg.norm(self.ideal_top_pts_2ch[0] - self.ideal_top_pts_2ch[1])\r\n baseline_2ch_act = get_arrow_act(self.ideal_top_pts_2ch[0], self.ideal_top_pts_2ch[0] + norml*baseline_2ch, 0.05, 5)\r\n horizontal_vec_2ch_act = get_arrow_act(self.ideal_top_pts_2ch[0], self.ideal_top_pts_2ch[0] + norml*horizontal_vec_2ch, 0.05, 5)\r\n baseline_4ch_act = get_arrow_act(self.ideal_top_pts_4ch[0], self.ideal_top_pts_4ch[0] + norml*baseline_4ch, 0.05, 5)\r\n horizontal_vec_4ch_act = get_arrow_act(self.ideal_top_pts_4ch[0], self.ideal_top_pts_4ch[0] + norml*horizontal_vec_4ch, 0.05, 5)\r\n 
self.endoActor.GetProperty().SetOpacity(0.6)\r\n\r\n renderers = []\r\n viewPorts = split_window(1,2)\r\n ren = vtk.vtkRenderer()\r\n ren.SetViewport(viewPorts[0,:])\r\n ren.AddActor(self.endoActor)\r\n ren.AddActor(baseline_2ch_act)\r\n ren.AddActor(horizontal_vec_2ch_act)\r\n renderers.append(ren)\r\n\r\n ren = vtk.vtkRenderer()\r\n ren.SetViewport(viewPorts[1,:])\r\n ren.AddActor(self.endoActor)\r\n ren.AddActor(baseline_4ch_act)\r\n ren.AddActor(horizontal_vec_4ch_act)\r\n renderers.append(ren)\r\n\r\n vtk_multiple_renderers(renderers, 800, 800)\r\n\r\n\r\n self.a4c_sla = slanting_angle_4ch\r\n self.a2c_sla = slanting_angle_2ch\r\n\r\n return slanting_angle_2ch, slanting_angle_4ch",
"def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)",
"def getAngle(r1, r2):\n\tray1=np.array(r1)\n\tray2=np.array(r2)\n\tinters,p=col.linesIntersect(ray1,ray2, getPoint=True)\n\tif inters:\n\t\tpts=[r1[0],r1[1], r2[0], r2[1]]\n\t\tif not tuple(p) in pts: raise Exception('lines are intersecting and not incident, angle not defined')\n\tp=np.array(p)\n\tpoints=[]\n\tfor ray in ray1,ray2:\n\t\tfurthestDist=-1\n\t\tfor point in ray:\n\t\t\tdist=getDistance(p,point)\n\t\t\tif dist>furthestDist:\n\t\t\t\tfurthest=point\n\t\t\t\tfurthestDist=dist\n\t\tpoints.append(point)\n\tp1=np.array(points[0])-p\n\tp2=np.array(points[1])-p\n\tth=acos(np.dot(p1,p2)/(getDistance(p,p1)*getDistance(p,p2)))\n\tif th>pi:\n\t\tif th>2*pi: raise Exception('something is wrong with getAngle')\n\t\tth=pi-th\n\treturn th",
"def angle(v):\n cos_theta = normalized(v)[0]\n theta = math.acos(cos_theta)\n if v[1] > 0:\n theta = -theta\n return rads_to_degs(theta)",
"def angle(p):\n return math.atan2(p[1], p[0])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter out the corners that belong to a right-angled corner, i.e. corners with a mean intensity value around 255/4 ≈ 64.
|
def clip_corners_on_intensity(corners, img, average_filter_size):
    """Keep only the corners whose local mean intensity corresponds to a
    roughly right-angled (60-120 degree) corner.

    corners: (N, 2) array of corner coordinates; column 0 indexes image
        rows and column 1 image columns (row/col order assumed from the
        indexing below -- TODO confirm against the caller).
    img: grayscale image where 255 is white and 0 is black.
    average_filter_size: size of the circular averaging filter handed to
        make_circle_average_blurry (helper defined elsewhere in this file).

    Returns (corners, intensities) for the surviving corners, or
    (None, None) when no corner passes the intensity test.
    """
    # One degree of corner angle maps to this much intensity.
    value_per_degree = 255.0/360.0
    min_degree, max_degree = 60, 120 # +- 30 from 90 degrees
    # Since 255 is white and 0 is black, subtract from 255
    # to get black intensity instead of white intensity
    min_average_intensity = 255 - max_degree*value_per_degree
    max_average_intensity = 255 - min_degree*value_per_degree
    number_of_corners = len(corners)
    print number_of_corners
    # Per-corner threshold vectors so the comparisons below are elementwise.
    min_intensity = np.array([min_average_intensity]*number_of_corners)
    max_intensity = np.array([max_average_intensity]*number_of_corners)
    # Local mean intensity around every pixel (external helper).
    img_average_intensity = make_circle_average_blurry(img, average_filter_size)
    corner_x = np.int0(corners[:,0])
    corner_y = np.int0(corners[:,1])
    # Keep corners whose blurred intensity lies strictly between the limits.
    corners_clipped_on_intensity = corners[
        np.logical_and(
            np.greater(
                img_average_intensity[corner_x,corner_y],
                min_intensity
            ), # Add top limit
            np.less(
                img_average_intensity[corner_x,corner_y],
                max_intensity
            ) # Add bottom limit
        )
    ]
    corner_x = np.int0(corners_clipped_on_intensity[:,0])
    corner_y = np.int0(corners_clipped_on_intensity[:,1])
    if np.ndim(corner_x) == 0:
        # Scalar case: wrap the single surviving corner back into 2-D form.
        # NOTE(review): with the boolean-mask indexing above corner_x looks
        # 1-D even for a single corner -- confirm this branch is reachable.
        corners = np.array([[corner_x, corner_y]])
        intensities = np.array([img_average_intensity[corner_x, corner_y]])
        number_of_corners = 1
    else:
        corners = np.stack((corner_x, corner_y), axis=1)
        intensities = np.array(img_average_intensity[corner_x, corner_y])
        number_of_corners = len(corners)
    print number_of_corners
    print "intensities: ", intensities
    if number_of_corners == 0:
        return None, None
    else:
        return corners, intensities
|
[
"def highlight_significant_corners(corners, image):\n\n # This line is equivalent to the nested loop below, but much faster.\n image[corners > 0.01 * corners.max()] = [0, 255, 0]\n\n # for rowIndex in range(len(corners)):\n # for pixelIndex in range(len(corners[0])):\n # if corners[rowIndex][pixelIndex] > (0.01 * corners.max()):\n # image[rowIndex][pixelIndex] = [0, 255, 0]",
"def mask_corners(row):\n mywcs = create_wcs(row)\n # Then figure out the corners\n x, y = row['naxis1'], row['naxis2'] # Mask dimensions\n corners = mywcs.all_pix2world([-0.5, -0.5, x-0.5, x-0.5],\n [-0.5, y-0.5, y-0.5, -0.5],\n 0)\n del mywcs\n return corners",
"def spread_out_corners(im, m, n, radius):\n corners = [np.empty((0,2), dtype=np.int)]\n x_bound = np.linspace(0, im.shape[1], n+1, dtype=np.int)\n y_bound = np.linspace(0, im.shape[0], m+1, dtype=np.int)\n for i in range(n):\n for j in range(m):\n # Use Harris detector on every sub image.\n sub_im = im[y_bound[j]:y_bound[j+1], x_bound[i]:x_bound[i+1]]\n sub_corners = harris_corner_detector(sub_im)\n sub_corners += np.array([x_bound[i], y_bound[j]])[np.newaxis, :]\n corners.append(sub_corners)\n corners = np.vstack(corners)\n legit = ((corners[:, 0] > radius) & (corners[:, 0] < im.shape[1]-radius) &\n (corners[:, 1] > radius) & (corners[:, 1] < im.shape[0]-radius))\n ret = corners[legit, :]\n return ret",
"def find_corners(self, list_of_walls):\n list_of_corners = CornerList()\n\n\n for first_wall in list_of_walls.wall_list:\n for second_wall in list_of_walls.wall_list:\n if first_wall == second_wall:\n continue\n if first_wall.wall_end == second_wall.wall_start:\n corner_angle = self.angle_between_lines(first_wall, second_wall)\n if 50 < corner_angle < 310:\n self.create_corner(list_of_corners, first_wall, second_wall)\n if first_wall.wall_start_rupture or first_wall.wall_start_break or first_wall.wall_end_rupture or first_wall.wall_end_break:\n # we are not only wanting normal corners but also potential corners\n\n # however we probably will need to refine the selection of potential corners\n # TODO refine the selection of potential corners :)\n self.create_potential_corner(list_of_corners, first_wall)\n\n\n\n\n return list_of_corners",
"def get_corners(walls, tilesize=16):\r\n corners = []\r\n for i in xrange(len(walls) - 1):\r\n for j in xrange(len(walls[0]) - 1):\r\n a = walls[i][j]\r\n b = walls[i + 1][j]\r\n c = walls[i][j + 1]\r\n d = walls[i + 1][j + 1]\r\n if a + b + c + d == 1:\r\n cornertype = b + 2 * c + 3 * d\r\n corners.append((tilesize * (i + 1), \r\n tilesize * (j + 1), cornertype))\r\n return corners",
"def analyse_corners(images):\n\n # Populate array of tuples (a,b) where a is the number of corners detected, and b is the image resolution\n results = []\n for i in images:\n resolution = (len(i[0]), len(i))\n results.append((resolution, st.get_num_corners(i)))\n\n # TODO: Add option for Harris corner detection\n\n # Sort array by image resolution in descending order\n results.sort(reverse=True)\n\n # Calculate and print the performance of the corner detection at each resolution\n highest_num_corners = results[0][1]\n for res, num_corners in results:\n percentage = round((num_corners / highest_num_corners) * 100, 2)\n print(\"Corners detected: {0} | {1}% of full quality image | Resolution: {2}\"\n .format(num_corners, percentage, res))\n\n return results",
"def _get_corners_from_centroids(centroids):\n # Identify breaks along columns\n breaks = _infer_interval_breaks(centroids, axis=1)\n # Identify breaks along rows\n corners = _infer_interval_breaks(breaks, axis=0)\n return corners",
"def get_corners(self):\n if self.corners is not None:\n return self.corners\n \n point = self.point\n width = self.width\n height = self.height \n \n xx = [point.x, point.x + width]\n yy = [point.y, point.y + height]\n keys = [\n \"bot_left\",\n \"bot_right\",\n \"top_left\",\n \"top_right\"\n ]\n points = np.dstack(np.meshgrid(xx, yy)).reshape(-1, 2)\n vals = list(map(lambda pt: Point(pt[0], pt[1]), points))\n self.corners = dict(zip(keys, vals))\n return self.corners",
"def crop_border(self):\n tc=self.I[0,0]\n iscorner= self.I!=tc\n \n a=iscorner.max(axis=0)\n #nonzer returns a tuple for coordinates so have to unpack\n anon=a.nonzero()[0]\n \n b=iscorner.max(axis=1)\n bnon=b.nonzero()[0]\n \n #crop borders containing zero voxel values\n self.I=self.I[bnon.min():bnon.max(), anon.min():anon.max()]",
"def find_corners(self, member='total'):\n if len(self.members) == 0:\n print(\"Please add exposures before looking for corners...\")\n return\n label_border = 10\n # Insure footprint has been determined\n if self.footprint_member != member:\n self.find_footprint(member=member)\n\n if member == 'total':\n # Use Harris corner detection to identify corners in the\n # total footprint\n # insure footprint has enough signal to detect corners\n fp = np.clip(self.footprint, 0, 1).astype(np.int16)\n\n # simple trick to remove noise and small regions 3x3 or less.\n scmask = ndimage.binary_dilation(ndimage.binary_erosion(fp, iterations=3), iterations=2)\n # Label each major contiguous region in the mask\n sclabels, nlabels = ndimage.label(scmask)\n slices = ndimage.find_objects(sclabels)\n\n # For each region, trace the edge, find the Harris corners,\n # then order the Harris corners counter-clockwise around the region\n # using the traced edge pixel positions.\n ordered_xy = []\n ordered_edges = []\n sky_corners = []\n for label, mask_slice in enumerate(slices):\n label += 1\n # Need to guarantee the slice ALWAYS has a border of non-assigned pixels\n label_shape = (mask_slice[0].stop - mask_slice[0].start + (label_border * 2),\n mask_slice[1].stop - mask_slice[1].start + (label_border * 2))\n label_mask = np.zeros(label_shape, sclabels.dtype)\n # get slice with just the region/label of interest\n label_mask[label_border:-1*label_border, label_border:-1*label_border] = sclabels[mask_slice].copy()\n # make sure no pixels from other regions are present in this mask\n label_mask[label_mask != label] = 0\n # reset label to be a binary mask only\n label_mask[label_mask == label] = 1000\n print('extracting corners for region {} in slice {}'.format(label, mask_slice))\n # Perform corner detection on each region/chip separately.\n mask_corners = corner_peaks(corner_harris(label_mask),\n min_distance=3,\n threshold_rel=0.2)\n xy_corners = mask_corners * 0.\n xy_corners[:, 0] = 
mask_corners[:, 1]\n xy_corners[:, 1] = mask_corners[:, 0]\n # shift corner positions to full array positions\n xy_corners += (mask_slice[1].start-label_border, mask_slice[0].start-label_border)\n\n # Create a mask from the total footprint consisting solely of the\n # pixels at the outer edge, ordered in clockwise fashion.\n #\n # get list of (X,Y) coordinates of all edges from each separate 'region' or chip\n edge_pixels = trace_polygon(label_mask > 0, mask_slice)\n\n # use the ordering of the traced edge pixels to order the corners in the same way\n cordist = distance.cdist(xy_corners, edge_pixels) # returns distances for each corner position\n ordered_indices = []\n for distarr, minval in zip(cordist, np.min(cordist, axis=1)):\n ordered_indices.append(np.where(distarr == minval)[0][0])\n radial_order = np.argsort(ordered_indices)\n ordered_xyc = xy_corners[radial_order].tolist()\n ordered_xyc.append(ordered_xyc[0]) # close polygon\n\n # save as output values\n ordered_xy.append(np.array(ordered_xyc, dtype=np.float64))\n sky_corners.append(self.meta_wcs.all_pix2world(ordered_xyc, 0))\n ordered_edges.append(edge_pixels)\n else:\n if member not in self.exp_masks:\n raise ValueError(\"Member {} not added to footprint\".format(member))\n ordered_xy = [self.exp_masks[member]['xy_corners']]\n sky_corners = [self.meta_wcs.all_pix2world(ordered_xy, 0)]\n\n self.edge_pixels = ordered_edges\n self.xy_corners = ordered_xy\n self.corners = sky_corners",
"def chessboard_with_corners(self):\n for img in self._img_with_corners:\n yield img",
"def on_corner(self):\n for c in self.corners:\n if abs(c[0]-self.position[0]) <= EPSILON and abs(c[1]-self.position[1]) <= EPSILON:\n return True\n return False",
"def harris_corner_detector(im):\n Ix = convolve(im, DX_DERIVE)\n Iy = convolve(im, DY_DERIVE)\n Ixy = sol4_utils.blur_spatial(Ix * Iy, KERNEL_SIZE)\n Ixx = sol4_utils.blur_spatial(Ix ** 2, KERNEL_SIZE)\n Iyy = sol4_utils.blur_spatial(Iy ** 2, KERNEL_SIZE)\n R = (Ixx * Iyy - Ixy ** 2) - K * (Ixx + Iyy) ** 2\n corners_inds = np.nonzero(non_maximum_suppression(R))\n corners_cords = np.array([corners_inds[1], corners_inds[0]]).T\n return corners_cords",
"def find_corners(tiles: List[Tile]) -> Tuple[int, int, int, int]:\n edge_counts = defaultdict(set)\n for tile in tiles:\n for tilestate in tile.states.values():\n for edge in (\n tilestate.bottom,\n tilestate.top,\n tilestate.left,\n tilestate.right,\n ):\n edge_counts[edge].add(tile.id)\n\n # Now find the 4 tiles that have 2 unique edges\n count_uniques = {}\n for edge_tiles in edge_counts.values():\n if len(edge_tiles) == 1:\n tile_id = next(iter(edge_tiles))\n count_uniques[tile_id] = count_uniques.get(tile_id, 0) + 1\n\n corners = set()\n for tile_id, count in count_uniques.items():\n if count == 4:\n corners.add(tile_id)\n return corners",
"def getCorners(self):\n px,py=self.position\n sx,sy=self.size\n corners=(px,py,px+sx,py+sy)\n return corners",
"def draw_corners(img, board_size, corners):\n if len(img.shape) < 3 or img.shape[2] < 3:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n return cv2.drawChessboardCorners(img, board_size, corners, 1)",
"def getCorners(self,window):\n sx,sy=self.size\n corners=(-sx//2,-sy//2,sx//2,sy//2)\n return corners",
"def empty_corner(self):\n return [square for square in range(0, len(self.board), 2) if not self.board[square]]",
"def harris_corner_detector(im):\n x_der_vec = np.array([1, 0, -1])[np.newaxis, :]\n y_der_vec = x_der_vec.T\n I_x = convolve2d(im, x_der_vec, mode='same', boundary='symm')\n I_y = convolve2d(im, y_der_vec, mode='same', boundary='symm')\n I_xx = I_x * I_x\n I_yy = I_y * I_y\n I_xy = I_x * I_y\n blur_I_xx = sol4_utils.blur_spatial(I_xx, 3)\n blur_I_yy = sol4_utils.blur_spatial(I_yy, 3)\n blur_I_xy = sol4_utils.blur_spatial(I_xy, 3)\n det = blur_I_xx * blur_I_yy - blur_I_xy * blur_I_xy\n trace = blur_I_xx + blur_I_yy\n R = det - 0.04 * (trace ** 2)\n corners = non_maximum_suppression(R)\n cor_arr = np.where(corners > 0)\n points = np.dstack((cor_arr[1], cor_arr[0]))[0]\n\n return points"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test whether storing distance weights works.
|
def test_store_weights():
    """Check that the adaptive distances log their weights to a JSON file
    after each initialize/update round, and that the logged values match
    the in-memory weights."""
    history = MockABC([{'s1': -1, 's2': -1, 's3': -1},
                       {'s1': -1, 's2': 0, 's3': 1}])
    observed = {'s1': 0, 's2': 0, 's3': 1}
    log_path = tempfile.mkstemp(suffix=".json")[1]
    print(log_path)

    def dist_s1(x, x_0):
        return abs(x['s1'] - x_0['s1'])

    def dist_s2(x, x_0):
        return np.sqrt((x['s2'] - x_0['s2'])**2)

    candidates = [
        AdaptivePNormDistance(log_file=log_path),
        AdaptiveAggregatedDistance([dist_s1, dist_s2], log_file=log_path),
    ]
    for dist in candidates:
        dist.initialize(0, history.sample_from_prior, x_0=observed)
        dist.update(1, history.sample_from_prior)
        dist.update(2, history.sample_from_prior)
        logged = load_dict_from_json(log_path)
        assert set(logged.keys()) == {0, 1, 2}
        # JSON stores arrays as lists, so convert ndarray weights first.
        reference = {
            t: w.tolist() if isinstance(w, np.ndarray) else w
            for t, w in dist.weights.items()
        }
        assert logged == reference
|
[
"def test_setup_database_consistent(self):\n\t\tself.assertWeightsNonnegative()",
"def test_weights_differ(self):\r\n #\r\n weights = set()\r\n weights.add(coconuts.SouthAsian().weight)\r\n weights.add(coconuts.MiddleEastern().weight)\r\n weights.add(coconuts.American().weight)\r\n self.assertEqual(3, len(weights))",
"def test_weights_direction(self):\n if not self.instance.supports_weights:\n raise SkipTest(f\"{self.instance} does not support weights\")\n\n # for sanity checking: give the largest weight to best rank => should improve\n idx = self.ranks.argmin()\n weights = numpy.ones_like(self.ranks, dtype=float)\n weights[idx] = 2.0\n weighted = self.instance(ranks=self.ranks, num_candidates=self.num_candidates, weights=weights)\n unweighted = self.instance(ranks=self.ranks, num_candidates=self.num_candidates, weights=None)\n if self.instance.increasing: # increasing = larger is better => weighted should be better\n self.assertLessEqual(unweighted, weighted)\n else:\n self.assertLessEqual(weighted, unweighted)",
"def test_load_valid_weighting():\n try:\n clfs['weighted'].save('test.pkl')\n clf = KNNClassifier.load('test.pkl')\n # Check that all fields are still the same\n assert isinstance(clf, KNNClassifier)\n assert clf._k == 3\n assert list(clf.encoder_.classes_) == classes\n assert clf._window == 1.\n assert clf._use_c == False\n assert clf._independent == False\n assert deepcopy(clf._random_state).normal() == deepcopy(rng).normal()\n assert_all_equal(clf.X_, X)\n assert_equal(clf.y_, clf.encoder_.transform(y))\n assert clf._n_features_ == 3\n # Check that weighting functions are the same for x=0 to x=1000\n xs = np.arange(1000, step=0.1)\n weighting = lambda x: np.exp(-x)\n assert_equal(clf._weighting(xs), weighting(xs))\n finally:\n os.remove('test.pkl')",
"def test_topic_weights(self):\n assert self.state.topic_weights == (10, 36, 6, 45)",
"def process_DistanceWeightingValues(self, node):\n distanceWeightingValues = [e.n for e in node.elts]\n if len(distanceWeightingValues) != len(self.weights):\n return\n self.weights = distanceWeightingValues",
"def has_weight(self) -> bool:\n return self.weight > 0.0",
"def assertWeightsNonnegative(self):\n\t\tfor action_class in ACTION_LIST:\n\t\t\tweight = action_class.weight_available()\n\t\t\tself.assertGreaterEqual(weight, 0)\n\t\t\tself.assertIsInstance(weight, int)",
"def test_expectation_weighted(self):\n self._test_expectation(weights=self._generate_weights())",
"def test_weights_coherence(self):\n if not self.instance.supports_weights:\n raise SkipTest(f\"{self.instance} does not support weights\")\n\n # generate two versions\n generator = numpy.random.default_rng(seed=21)\n repeats = generator.integers(low=1, high=10, size=self.ranks.shape)\n\n # 1. repeat each rank/candidate pair a random number of times\n repeated_ranks, repeated_num_candidates = [], []\n for rank, num_candidates, repeat in zip(self.ranks, self.num_candidates, repeats):\n repeated_ranks.append(numpy.full(shape=(repeat,), fill_value=rank))\n repeated_num_candidates.append(numpy.full(shape=(repeat,), fill_value=num_candidates))\n repeated_ranks = numpy.concatenate(repeated_ranks)\n repeated_num_candidates = numpy.concatenate(repeated_num_candidates)\n value_repeat = self.instance(ranks=repeated_ranks, num_candidates=repeated_num_candidates, weights=None)\n\n # 2. do not repeat, but assign a corresponding weight\n weights = repeats.astype(float)\n value_weighted = self.instance(ranks=self.ranks, num_candidates=self.num_candidates, weights=weights)\n\n self.assertAlmostEqual(value_repeat, value_weighted, delta=2)",
"def testWeightsAreNotTheSame(self):\r\n\t\tfor i in range(len(self.pop)):\r\n\t\t\tfor j in range(len(self.pop)):\r\n\t\t\t\tif i != j:\r\n\t\t\t\t\twi1 = array(self.pop[i].wi)\r\n\t\t\t\t\twi2 = array(self.pop[j].wi)\r\n\t\t\t\t\tcomparisons = where( wi1 == wi2, True, False)\r\n\t\t\t\t\tfor c in comparisons:\r\n\t\t\t\t\t\tself.assertFalse(c.all())\t\t\t\t\t\r\n\r\n\t\t\t\t\two1 = array(self.pop[i].wo)\r\n\t\t\t\t\two2 = array(self.pop[j].wo)\r\n\t\t\t\t\tcomparisons = where( wo1 == wo2, True, False)\r\n\t\t\t\t\tfor c in comparisons:\r\n\t\t\t\t\t\tself.assertFalse(c.all())",
"def can_ship(weight, distance):\n\n if weight * distance < 1000:\n return True\n else:\n return False",
"def test_broker_weight_cv(self):\n assert abs(self.state.broker_weight_cv - 0.4040) < 1e-4",
"def test_distance_of_vertices(self):\n for pair, answer in self.paths.items():\n _, answer_distance = answer\n _, distance = self.mst.find_path(*pair)\n self.assertEqual(answer_distance, distance)",
"def test_analytic_weighted_nlls(self):\n e = np.array([1, 2, 1, 3, 1])\n self.fitting_problem.data_e = e\n self.cost_func = WeightedNLLSCostFunc(self.fitting_problem)\n self.cost_func.jacobian = self.jacobian\n self.cost_func.hessian = self.hessian\n eval_result, _ = self.cost_func.hes_res(params=self.params)\n actual_hessian = grad2_r_weighted_nlls(\n self.fitting_problem.data_x, e, self.params)\n\n self.assertTrue(np.isclose(actual_hessian, eval_result).all())",
"def test_weight_quantizer_ls2_modes():\n torch.manual_seed(1234)\n quantizer_ls2 = weight_quantization.WeightQuantizerLS2(32)\n w = torch.ones(32, 16, 3, 3) * 2\n\n quantizer_ls2.train()\n w_q_train = quantizer_ls2(w)\n assert torch.all(w_q_train == 2.0)\n\n quantizer_ls2.eval()\n w = torch.rand(32, 16, 3, 3) # some random, but all positive tensor\n w_q_eval = quantizer_ls2(w)\n\n assert torch.all(w_q_train.eq(w_q_eval))",
"def test_weights_built(self):\n # Create the network\n with nengo.Network():\n a = nengo.Ensemble(200, 2)\n b = nengo.Ensemble(400, 2)\n a_b = nengo.Connection(\n a, b, solver=nengo.solvers.Lstsq(weights=True)\n )\n\n # Create the model and built the pre-synaptic Ensemble\n model = builder.Model()\n model.rng = np.random\n model.seeds[a] = 1\n model.seeds[b] = 2\n model.seeds[a_b] = 3\n ensemble.build_ensemble(model, a)\n ensemble.build_ensemble(model, b)\n\n # Now build the connection and check that the params seem sensible\n params = ensemble.build_from_ensemble_connection(model, a_b)\n assert params.decoders.shape == (200, 400)",
"def test_total_weight(self):\n assert self.state.total_weight == 97",
"def assert_non_null_weights(statedict: dict):\n for w in statedict.items():\n assert not np.any(w[1].cpu().numpy() == -101.0), (\n w[0] + \"contains parameters that were likely not initialized\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a service name, existing url_pattern, and url path prefix, derive a normalized/finalized url path based on them.
|
def derive_service_path(service_name, url_pattern=None, path_prefix=None):
    """Derive a normalized, finalized URL path for a service.

    Falls back to *service_name* when *url_pattern* is missing or empty,
    optionally prepends *path_prefix*, and guarantees the result starts
    with a leading slash.

    :param service_name: non-empty service name, used as the default pattern.
    :param url_pattern: optional explicit pattern; a leading "/" is stripped
        before joining so the prefix logic stays uniform.
    :param path_prefix: optional prefix to prepend ("/" is added if absent).
    :returns: the finalized path, always beginning with "/".
    :raises ValueError: if *service_name* is not a non-empty string.
    """
    # Py2/Py3 compatibility: ``basestring`` only exists on Python 2, so the
    # original raised NameError on Python 3.  Alias it locally instead.
    try:
        string_types = basestring  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = str  # Python 3
    if not isinstance(service_name, string_types) or not service_name:
        raise ValueError("Service name must be a non-empty string.")
    path_parts = []
    if not isinstance(url_pattern, string_types) or not url_pattern:
        url_pattern = service_name
    # Strip any leading slash so joining with the prefix cannot double it.
    if url_pattern.startswith("/"):
        url_pattern = url_pattern[1:]
    if isinstance(path_prefix, string_types) and path_prefix:
        if not path_prefix.startswith("/"):
            path_prefix = "/" + path_prefix
        path_parts.append(path_prefix)
    path_parts.append(url_pattern)
    url_pattern = "/".join(path_parts)
    # Ensure the final path is rooted.
    if not url_pattern.startswith("/"):
        url_pattern = "/" + url_pattern
    return url_pattern
|
[
"def get_url_pattern(urlname, args=[]):\r\n patterns = get_resolver(None).reverse_dict.getlist(urlname)\r\n if not args:\r\n return '/%s' % patterns[0][0][0][0]\r\n\r\n for pattern in patterns:\r\n if pattern[0][0][1] == args:\r\n return '/%s' % pattern[0][0][0]",
"def expand_url(url, protocol):\n if protocol == 'soap':\n ws_part = 'api/?wsdl'\n elif protocol == 'xmlrpc':\n ws_part = 'index.php/api/xmlrpc'\n else:\n ws_part = 'index.php/rest/V1'\n return url.endswith('/') and url + ws_part or url + '/' + ws_part",
"def concat_url(endpoint, url):\n return \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))",
"def api_path(base_url, path):\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n\n if path.startswith('/'):\n path = path[1:]\n\n return \"{}/{}\".format(base_url, path)",
"def gen_service_urls():\n base_url = common_bits.base_url\n all_base_service_urls = []\n service_urls = ['ah', 'any', 'esp', 'group', 'icmp', 'icmpv6', 'ip', 'tcp', 'tcpudp', 'udp']\n for item in service_urls:\n base_service_url = '{}/service/{}'.format(base_url,item)\n all_base_service_urls.append(base_service_url)\n\n return all_base_service_urls",
"def _rewrite_path(path, pathsubst, pathprefix):\n pathurl = util.url(path)\n substurl = util.url(pathsubst)\n prefixurl = util.url(pathprefix)\n\n if not substurl.user:\n if pathurl.user:\n substurl.user = pathurl.user\n else:\n substurl.user = prefixurl.user\n if not substurl.passwd:\n if pathurl.passwd:\n substurl.passwd = pathurl.passwd\n else:\n substurl.passwd = prefixurl.passwd\n\n substurl.path += pathurl.path[len(prefixurl.path):]\n return str(substurl)",
"def _resolve_url_base(self, url):\n return url[:url.rfind('/')]",
"def _get_path(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n\n hostname = url.split('/')\n if len(hostname) == 1:\n return ''\n else:\n return \"/%s\" % (\"/\".join(hostname[1:]))",
"def service_mapping():\n return \"/foo/{anything}/bar\"",
"def rewrite_url(full_url, local_catalog_url_base):\n # Only rewrite the URL if necessary\n if not full_url.startswith(local_catalog_url_base):\n return get_path_from_url(full_url, local_catalog_url_base)\n else:\n return full_url",
"def extend_with_root_redirects(patterns_obj, filled_services, service_type,\n base_path, with_slash=True):\n service_path = get_service_path(filled_services, service_type)\n if with_slash:\n service_path = service_path.rstrip('/') + '/'\n\n root_url_entry = None\n if base_path and base_path != '/':\n # redirect slash to /<base_path>/\n root_url_entry = url('^$', 'redirect_to',\n {'url': join_urls('/', base_path.rstrip('/'),\n '/')})\n\n base_path_pattern = prefix_pattern(base_path) + '$'\n base_path_pattern_no_slash = prefix_pattern(base_path).rstrip('/') + '$'\n\n # redirect /<base_path> and /<base_path>/ to service_path public endpoint\n base_url_entry = url(base_path_pattern, 'redirect_to', {'url':\n service_path})\n base_url_entry_no_slash = url(base_path_pattern_no_slash,\n 'redirect_to', {'url': service_path})\n # urls order matter. Setting base_url_entry first allows us to avoid\n # redirect loops when base_path is empty or `/`\n patterns_obj += patterns('django.views.generic.simple',\n base_url_entry, base_url_entry_no_slash)\n if root_url_entry:\n # register root entry only for non root base_path deployments\n patterns_obj += patterns('django.views.generic.simple', root_url_entry)",
"def make_entity_base_url(url):\n return url if url.endswith(\"/\") else url + \"/\"",
"def __rewrite_dest_url(dest_url, dest_sign_url):\n if dest_sign_url == 'gcs':\n dest_url = re.sub('davs', 'gclouds', dest_url)\n dest_url = re.sub('https', 'gclouds', dest_url)\n elif dest_sign_url == 's3':\n dest_url = re.sub('davs', 's3s', dest_url)\n dest_url = re.sub('https', 's3s', dest_url)\n\n if dest_url[:12] == 'srm+https://':\n dest_url = 'srm' + dest_url[9:]\n return dest_url",
"def build_url(job_url_id, suffix):\r\n url_prefix = re.match(r'^(.*)\\/\\d+\\/', job_url_id).group(1)\r\n return url_prefix + suffix",
"def _ger_full_url(self, endpoint):\r\n return '{}{}{}'.format(self.url, self._base_path, endpoint)",
"def url_pathcombine(self, href_str):\n split_urls = self.url.split('/') \n if href_str.lower().startswith('http'):\n return href_str\n elif href_str.startswith('#'):\n return ''\n else:\n current_host = ''\n if self.url.lower().__contains__('http'):\n current_host_list = split_urls[0:3]\n current_host = '/'.join(current_host_list)\n else:\n current_host = split_urls[0]\n current_root = split_urls[0:-1]\n (splitchar_num, href_path) = self._parse_href(href_str)\n new_url = self.url + \"/\" + href_path\n if splitchar_num == 0:\n new_url = \"/\".join(current_root) + \"/\" + href_path\n elif splitchar_num == 1:\n new_url = current_host + \"/\" + href_path\n elif splitchar_num == -1:\n if len(split_urls) > 2:\n new_url = \"/\".join(split_urls[0:-2]) + \"/\" + href_path\n else:\n return self.url + \"/\" + href_path\n return new_url",
"def build_restful_url(base_url, username, url_restful_service):\n # use regexp to support http and https\n # and to prepare for more complex ulrs\n pattern = re.compile(r\"(?P<protocol>http[s]?://)(?P<hostname>.*)\")\n match = pattern.match(base_url)\n protocol = match.group('protocol')\n hostname = match.group('hostname')\n\n # define basic strucutre of url and prepare substitution\n restful_url_template = (\n \"%(protocol)s%(username)s@%(hostname)s%(url_restful_service)s\")\n\n # attributes to be substituted\n attributes = {\n \"protocol\": protocol,\n \"username\": username,\n \"hostname\": hostname,\n \"url_restful_service\": url_restful_service\n }\n\n # substitute attributes of template\n restful_url = restful_url_template % attributes\n return restful_url",
"def url2path(url):\n return url.strip().replace(\"/\", \"_\")",
"def rebase_one(base, url, force_rebase):\n parsed = urlparse.urlparse(url)\n if parsed.scheme == parsed.netloc == '':\n return urlparse.urljoin(base, url)\n elif force_rebase:\n return base + url\n else:\n return url"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Once the daily data has been collected, we need to append the extra value and the description to every day.
|
def populate_extra_data(week_data, description):
    """Attach a derived extra value and a description to each day's data.

    Days in ``SQUARE_DAYS`` get the squared value under ``'square'``; days
    in ``DOUBLE_DAYS`` get the doubled value under ``'double'``; every day
    gets a ``'description'`` built from *description* and the extra value.

    Mutates *week_data* in place; returns None.

    :param week_data: mapping of day -> dict containing at least ``'value'``.
    :param description: text prefix for each day's ``'description'`` entry.
    """
    # .items() iterates identically on Python 2 and 3; no need for six here.
    for day, day_week_data in week_data.items():
        value = day_week_data['value']
        # Default the extra value to the raw value: the original left
        # ``extra_value`` unbound (NameError on the first such day) or
        # stale from the previous iteration when ``day`` was in neither
        # SQUARE_DAYS nor DOUBLE_DAYS, yet still used it in the description.
        extra_value = value
        if day in SQUARE_DAYS:
            extra_value = value ** 2
            day_week_data['square'] = extra_value
        elif day in DOUBLE_DAYS:
            extra_value = value * 2
            day_week_data['double'] = extra_value
        day_week_data['description'] = '{} {}'.format(description, extra_value)
|
[
"def add_new_data(self, value, comment,\n date=dt.datetime.now().strftime(\"%Y%m%d\")):\n day = pd.DataFrame([{'date': pd.Timestamp(date),\n 'value': int(value), 'comment': comment}])\n self.account = self.account.append(day)\n self.account = self.account.reset_index(drop=True)",
"def generate_daily_summary(weather_data):\n result = \"\"\n\n for item in weather_data:\n result += f\"---- {convert_date(item[0])} ----\\n\"\n result += f\" Minimum Temperature: {format_temperature(convert_f_to_c(item[1]))}\\n\"\n result += f\" Maximum Temperature: {format_temperature(convert_f_to_c(item[2]))}\\n\\n\"\n\n return result",
"def data(ignore_date=False):",
"def exampleCase2(self):\n\t\t \n\t\tdata = [['date', 'data']]\n\t\tdate_1 = datetime.datetime(2015, 8, 1)\n\t\tdate_2 = datetime.datetime(2017, 8, 1)\n\n\t\tfor _ in range(1800000):\n\t\t\tdata.append([date_1, self.randomText()])\n\t\t\t\n\t\tfor _ in range(1800000, 2000000):\n\t\t\tdata.append([date_2, self.randomText()])\n\n\t\tself.writeCSV(2, data)",
"def generate_summary(daily_forecast_data):\r\n\r\n output = \"\"\r\n\r\n for day in daily_forecast_data:\r\n \r\n date = convert_date(day[\"Date\"])\r\n\r\n min_temp = convert_f_to_c(day[\"Temperature\"][\"Minimum\"][\"Value\"])\r\n\r\n max_temp = convert_f_to_c(day[\"Temperature\"][\"Maximum\"][\"Value\"])\r\n\r\n day_desc = day[\"Day\"][\"LongPhrase\"]\r\n\r\n day_rain_probability = day[\"Day\"][\"RainProbability\"]\r\n\r\n night_desc = day[\"Night\"][\"LongPhrase\"]\r\n\r\n night_rain_probability = day[\"Night\"][\"RainProbability\"]\r\n\r\n formatted_day = f\"-------- {date} --------\\nMinimum Temperature: {format_temperature(min_temp)}\\nMaximum Temperature: {format_temperature(max_temp)}\\nDaytime: {day_desc}\\n{'':>3} Chance of rain: {day_rain_probability}%\\nNighttime: {night_desc}\\n{'':>3} Chance of rain: {night_rain_probability}%\\n\\n\" \r\n \r\n output += formatted_day\r\n \r\n return output",
"def daily_valuations(self):\n df = pd.DataFrame(self.close_prices, columns=[\"date\", \"price\"])\n df = df.set_index(\"date\")\n df[\"quantity\"] = float(\"nan\")\n df[\"market_val\"] = float(\"nan\")\n # the prices starting from the first date the security was held\n start_date = str(self.breakdown[0][0])\n\n df2 = df.loc[start_date:]\n df2 = df2.copy() # copied to prevent chained assignment\n # update the quantity at each date\n for row in self.breakdown:\n df2.at[str(row[0]), \"quantity\"] = row[1]\n df2[\"price\"] = df2[\"price\"].fillna(method=\"ffill\")\n df2[\"quantity\"] = df2[\"quantity\"].fillna(method=\"ffill\")\n\n df2[\"price\"] = pd.to_numeric(df2[\"price\"])\n df2[\"market_val\"] = round((df2[\"price\"] * df2[\"quantity\"]), 3)\n\n df2 = df2[[\"market_val\"]]\n new_name = f\"market_val_{self.ticker}\"\n new_header = {\"market_val\": new_name}\n df2 = df2.rename(columns=new_header)\n return df2",
"def set_daily_info(self, date):\n self.mode = MODE_DAILY\n self.date = date",
"def add_more_columns(self):\r\n self.all_data[self._year] = self.all_data.index.year\r\n self.all_data[self._month] = self.all_data.index.month\r\n self.all_data[self._day_of_week] = self.all_data.index.day_name()",
"def generate_summary(weather_data):\n count = 0\n min_temps = []\n max_temps = []\n date_time = []\n\n for index, item in enumerate(weather_data):\n if index != []:\n count += 1\n date_time.append(item[0])\n min_temps.append(item[1])\n max_temps.append(item[2])\n \n min_temp, index_date_min = find_min(min_temps)\n max_temp, index_date_max = find_max(max_temps)\n\n min_temps_c = convert_f_to_c(str(min_temp))\n max_temps_c = convert_f_to_c(str(max_temp))\n\n date_min = date_time[index_date_min]\n date_max = date_time[index_date_max]\n\n mean_min_c = convert_f_to_c(calculate_mean(min_temps))\n mean_max_c = convert_f_to_c(calculate_mean(max_temps))\n\n result = \"\"\n result += f\"{count} Day Overview\\n\"\n result += f\" The lowest temperature will be {format_temperature(min_temps_c)}, and will occur on {convert_date(date_min)}.\\n\"\n result += f\" The highest temperature will be {format_temperature(max_temps_c)}, and will occur on {convert_date(date_max)}.\\n\"\n result += f\" The average low this week is {format_temperature(mean_min_c)}.\\n\"\n result += f\" The average high this week is {format_temperature(mean_max_c)}.\\n\"\n\n return result",
"def populate_scienceInstruments_extra(self):\r\n\r\n pass",
"def insert_amount_daily(share_name):\n share_object = Share.objects(name=share_name).get()\n historical = share_object.historical\n days = historical['daily_data']\n quantity = share_object['quantity']\n fees = share_object['fees_usd']\n amount = share_object['amount_usd']\n new_hist = {}\n for day in days.iterkeys():\n new_hist[day] = days[day]\n new_hist[day]['quantity'] = quantity\n new_hist[day]['fees_usd'] = fees\n new_hist[day]['amount_usd'] = amount\n new_hist[day]['invested'] = new_hist[day]['amount_usd'] + new_hist[day]['fees_usd']\n new_hist[day]['name'] = share_name\n update_data = dict(daily_data=new_hist, last_update=datetime.datetime.now())\n update = Share.objects(name=share_name).update(set__historical=update_data)\n print update",
"def add_dates(self, split_words_list):\n if self.curr_id in self.individualdata:\n self.individualdata[self.curr_id][self.tempdata + split_words_list[1]] = split_words_list[2]\n elif split_words_list[1] == \"DATE\":\n husband = self.familydata[self.curr_id][\"HUSB\"]\n wife = self.familydata[self.curr_id][\"WIFE\"]\n self.individualdata[husband][self.tempdata + split_words_list[1]] = split_words_list[2]\n self.individualdata[wife][self.tempdata + split_words_list[1]] = split_words_list[2]",
"def add_specific_date(self, recurring_event_id, date, location_description):\n pass",
"def create_aggregate_df():\n all_dates_df = pd.read_csv(\"datasets/all_dates_without_nan_df.csv\")\n aggregate_df = pd.DataFrame()\n\n tmp_date = first_date\n\n i = 0\n\n while tmp_date.date() < last_date.date():\n\n # add 20 lines for each interval\n while i < 20:\n aggregate_df = aggregate_df.append(\n {'Date': str(tmp_date)[0:10] + \" - \" + str(tmp_date + datetime.timedelta(days=delta - 1))[0:10],\n 'Stock Name': stock_columns[i]}\n , ignore_index=True)\n i += 1\n\n tmp_date = tmp_date + datetime.timedelta(days=delta)\n i = 0\n\n\n # create dummies for the stock names\n df_dummies = pd.DataFrame(data=pd.get_dummies(aggregate_df['Stock Name']))\n aggregate_df = aggregate_df.join(df_dummies)\n\n day_counter = 1\n\n # create delta columns for each day in the interval\n for i in range(1, delta + 1):\n aggregate_df['Day ' + str(day_counter)] = np.nan\n day_counter += 1\n\n i = 0\n tmp_date = first_date\n j = 0\n\n # add the relevant value of stock for each day\n while i < len(aggregate_df) and 0 <= (last_date.date() - tmp_date.date()).days:\n print(i)\n for day_counter in range(1, delta + 1):\n j = 0\n while j < 20:\n if 0 <= (last_date.date() - tmp_date.date()).days:\n col = [col for col in aggregate_df.columns if aggregate_df.loc[j, col] == 1]\n index = (tmp_date.date() - first_date.date()).days\n aggregate_df['Day ' + str(day_counter)][i + j] = all_dates_df.loc[index, col]\n j += 1\n else:\n break\n tmp_date = tmp_date + datetime.timedelta(days=1)\n i += j\n aggregate_df.to_csv('aggregate_df.csv')",
"def add_eDOO_to_summary(df, delay_don=3, delay_dpl=2, blur=1):\n\n # create new columns\n df['eDOO'] = df['DOO'].copy()\n df['sDON'] = 0\n df['sDPL'] = 0\n\n file_dates = df.index.get_level_values('Date_file').unique().sort_values()\n # List of tuples (fdate, df_slice)\n map_input = [\n (dtf, df.loc[dtf].copy(), delay_don, delay_dpl, blur) # dataframe for one file, index Date_statistics\n for dtf in file_dates\n ]\n cols = ['eDOO', 'sDON', 'sDPL']\n\n msg = f'Adding eDOO for {len(map_input)} days ({{ncpu}} processes)'\n with PoolNCPU(msg) as pool:\n map_output = pool.map(_add_eDOO_to_df1, map_input)\n\n print('\\nMerging...', end='', flush=True)\n for (dtf, df1) in map_output:\n df.loc[(dtf,), cols] = df1[cols]\n print('done.')\n return df",
"def add_deltas_to_summary(df):\n\n # file dates for deltas (except the first one).\n dtfs = df.index.get_level_values('Date_file').unique().sort_values()[1:]\n # create new columns\n for col in ['dDOO', 'dDON', 'dDPL', 'dDtot']:\n df[col] = 0.0\n\n print('Getting deltas by file date', end='')\n for dtf in dtfs:\n print('.', flush=True, end='')\n dtf_prev = dtf - pd.Timedelta(1, 'd')\n dfcurr = df.loc[(dtf,)] # dataframe with only Date_statistics as index\n dfprev = df.loc[(dtf_prev,)]\n deltas = {}\n for col in ['DOO', 'DPL', 'DON', 'Dtot']:\n dcol = f'd{col}'\n deltas[dcol] = dfcurr[col] - dfprev[col]\n if dtf in dfcurr.index:\n last_entry = dfcurr.loc[dtf, col] # there is no previous\n else:\n last_entry = 0\n deltas[dcol].loc[dtf] = last_entry\n\n df_deltas = pd.DataFrame(deltas)\n # add index\n df_deltas['Date_file'] = dtf\n df_deltas.reset_index(inplace=True)\n df_deltas.set_index(['Date_file', 'Date_statistics'], inplace=True)\n df.loc[(dtf,), ['dDOO', 'dDON', 'dDPL', 'dDtot']] = df_deltas\n\n print('')\n return df",
"def everyday(self):\n\n # Telephony data\n telephony.Calls.copyfiles(self.date_report)\n self.tp.get_data(self.date_report)\n tp_data = self.tp.report_data(self.date_report, self.date_report)\n\n self.bar.update(5)\n\n # Calltouch data\n self.ct.get_data(self.date_report)\n ct_report = self.ct.report_data(self.date_report, self.date_report)\n ct_calls = ct_report.get('calls')\n ct_leads = ct_report.get('leads')\n\n self.bar.update(10)\n\n # Ads data\n self.ads.get_data(self.date_report)\n\n # Traffic data\n self.tr.get_data(self.date_report)\n\n self.bar.update(20)\n\n # Callbacks\n self.cb.get_data(self.date_report)\n callbacks = self.cb.report_data(self.date_report, self.date_report)\n num_lost_leads = callbacks.get('num_leads')\n lost_leads = callbacks.get('lost_leads')\n late_leads = callbacks.get('late_leads')\n\n self.bar.update(30)\n\n # Creating HTML data for email report\n html_data = self.er.html(tp_data, ct_calls, ct_leads, num_lost_leads, lost_leads, late_leads, link=None)\n subject = \"Отчет за {}\".format(self.date_report)\n\n self.bar.update(40)\n\n # Creating and sending email\n msg = self.er.create_mail(config.FROM_ADDR, config.TO_ADDR_DEBUG, subject, html_data)\n self.er.send_email(config.FROM_ADDR, config.TO_ADDR_DEBUG, msg)\n\n self.bar.update(50)",
"def get_daily_info(self):\n start_date = self.quest.start_date\n last_day = self.day_finished or self.latest_day\n info = []\n # get daily info from the database\n for datum in self.user.dailydistance_set.filter(\n day__range=[start_date, last_day]):\n info.append({\n \"day\": datum.day,\n \"daily_distance\": datum.miles,\n \"total_distance\": self.get_total_miles(end_date=datum.day),\n \"waypoint\": self.get_waypoint(day=datum.day),\n \"id\": datum.pk,\n \"fitbit\": not datum.manually_entered\n })\n # add any days that aren't in the database\n days_in_db = {datum[\"day\"] for datum in info}\n all_days_on_quest = set(\n util.daterange(start_date, last_day, inclusive=True))\n missing_days = all_days_on_quest - days_in_db\n for day in missing_days:\n info.append({\n \"day\": day,\n \"daily_distance\": \"-\",\n \"total_distance\": self.get_total_miles(end_date=day),\n \"waypoint\": self.get_waypoint(day=day),\n \"id\": None\n })\n\n info = sorted(info, key=lambda x: x[\"day\"], reverse=True)\n return info",
"def add_weather_info_to_data(data_frame):\n\tdata_frame['temperature'] = 0\n\tdata_frame['wind'] = 0\n\tdata_frame['humidity'] = 0\n\tdata_frame['precipitation'] = 0\n\tdata_frame['pressure'] = 1013\n\tfor index, row in data_frame.iterrows():\n\t\tts = datetime.datetime.strptime(row['time'], '%Y-%m-%d %H:%M:%S')\n\t\tweather_data = get_weather_for_time_stamp(ts)\n\t\tdata_frame.at[index, 'temperature'] = weather_data.get_temperature()\n\t\tdata_frame.at[index, 'wind'] = weather_data.get_wind()\n\t\tdata_frame.at[index, 'humidity'] = weather_data.get_humidity()\n\t\tdata_frame.at[index, 'precipitation'] = weather_data.get_precipitation()\n\t\tdata_frame.at[index, 'pressure'] = weather_data.get_pressure()\n\t\tif index%10 == 0:\n\t\t\tprint(index)\n\treturn data_frame"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
decodes by taking the DFT of the received_sequence and channel_impulse_response, then applying pointwise division to get the true symbol value
|
def demodulate_sequence(received_sequence, channel_impulse_response_start, channel_impulse_response_end, N, K, Q1, Q2):
    """Demodulate a received sequence block by block using an interpolated channel estimate.

    Takes the DFT of the channel impulse responses measured at the start and
    end of the transmission, estimates the per-bin phase drift over the usable
    bins [Q1, Q2), and delegates per-block equalisation (pointwise division by
    the interpolated channel response) to ``demodulate_block``.

    :param received_sequence: samples to demodulate; padded so its length is a
        multiple of the block size P = N + K.
    :param channel_impulse_response_start: impulse response at the start of the
        recording.
    :param channel_impulse_response_end: impulse response at the end of the
        recording.
    :param N: DFT length; must be even.
    :param K: extra samples per block (P = N + K) — presumably a cyclic-prefix
        length; TODO confirm against ``demodulate_block``.
    :param Q1: first usable frequency-bin index.
    :param Q2: one past the last usable frequency-bin index.
    :returns: 1-D numpy array of demodulated values, the concatenation of each
        block's ``demodulate_block`` output.
    :raises ValueError: if N is odd.
    """
    if (N % 2 != 0):
        raise ValueError("N must be an even integer")
    # Frequency responses of the channel at the start and end of the recording.
    H_start = dft(channel_impulse_response_start, N)
    H_end = dft(channel_impulse_response_end, N)
    # phase
    x = list(range(Q1, Q2))
    # Unwrap so the phase difference is continuous before fitting a line to it.
    phase_start = np.unwrap(np.angle(H_start))
    phase_end = np.unwrap(np.angle(H_end))
    phase_diff = np.subtract(phase_end, phase_start)
    phase_diff = phase_diff[Q1:Q2]
    # Degree-1 polyfit over the usable bins; the slope is the per-bin phase
    # drift accumulated between the start and end measurements.
    p = np.polyfit(x, phase_diff, 1)
    phase_gradient = p[0]
    # magnitude
    mag_start = np.abs(H_start)
    mag_end = np.abs(H_end)
    # block arithmetic
    P = N + K
    padded_sequence = pad_so_divisible(received_sequence, P)
    sequence_length = len(padded_sequence)
    num_blocks = int(sequence_length / P)
    demodulated_sequence = []
    for i in range(0, num_blocks):
        lower_index = i * P
        upper_index = lower_index + P
        # estimate interpolation
        # Per-block channel estimate interpolated between start/end responses.
        H_arr_block = gen_H_estimate(mag_start, mag_end, phase_start, phase_end, i, num_blocks)
        # Scale the fitted gradient by the block's signed distance from the
        # midpoint of the recording (positive for early blocks, negative late).
        phase_gradient_in_block = phase_gradient * (num_blocks - 2*i) / 2
        block = padded_sequence[lower_index : upper_index]
        demodulated_block = demodulate_block(block, H_arr_block, N, K, Q1, Q2, phase_gradient_in_block)
        demodulated_sequence = np.concatenate((demodulated_sequence, demodulated_block))
    return demodulated_sequence
|
[
"def _call(self, signal):\n\n signal = preemphasis(signal, self.pre_emph)\n\n frames = framesig(signal,\n self.win_len * self.fs,\n self.win_step * self.fs,\n self.win_fun)\n\n pspec = powspec(frames, self.nfft)\n # this stores the total energy in each frame\n energy = np.sum(pspec, 1)\n # if energy is zero, we get problems with log\n energy = np.where(energy == 0, np.finfo(float).eps, energy)\n\n # compute the filterbank energies\n feat = np.dot(pspec, self._filterbanks.T)\n # if feat is zero, we get problems with log\n feat = np.where(feat == 0, np.finfo(float).eps, feat)\n\n return feat, energy",
"def _impulse_response(x):\n pass",
"def DMFluxneuDet(flavor,Enu,ch,DMm,DMsig,body,param,osc): \n ##B From Arxiv: 0506298 ec. 21 & 24\n #DM_annihilation_rate_Earth = 1.0e14*(100*param.GeV/DMm)**2/param.sec #[annhilations/s]\n #DM_annihilation_rate_Sun = ((1.0*param.AU)/(param.EARTHRADIUS*param.km))**2*DM_annihilation_rate_Earth\n DM_annihilation_rate_Sun = float(np.sum(DMSunAnnihilationRate(DMm,DMsig,param)))# [eV]\n ##E\n \n flux = 0.0\n \n if param.neutype == \"neutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n elif param.neutype == \"antineutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + 
(DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2+1,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n else :\n print \"Wrong neutrino type.\"\n quit()",
"def _process_frame(self):\n # Generate the analysis frame and discard the input samples that will\n # not be needed anymore\n self._in_buffer.peek(self._analysis_frame)\n self._in_buffer.remove(self._analysis_hop)\n\n # Apply the analysis window\n windows.apply(self._analysis_frame, self._analysis_window)\n\n # Convert the analysis frame into a synthesis frame\n #synthesis_frame = self._converter.convert_frame(self._analysis_frame)\n synthesis_frame, stft_out = self._converter.convert_frame(self._analysis_frame) # duys\n # duys\n if stft_out is not None: \n #stft_out = np.vstack((stft_out, stft_out))\n self.stft_output_buffer.append(stft_out) \n stft_out = np.real(stft_out.reshape((self._channels, -1)))\n\n # --> duys\n # Apply the synthesis window\n #windows.apply(synthesis_frame, self._synthesis_window)\n #windows.apply(stft_out, self._synthesis_window)\n\n # Overlap and add the synthesis frame in the output buffer\n #self._out_buffer.add(synthesis_frame)\n #self._out_buffer.add(stft_out)\n\n # The overlap and add step changes the volume of the signal. The\n # normalize_buffer is used to keep track of \"how much of the input\n # signal was added\" to each part of the output buffer, allowing to\n # normalize it.\n #self._normalize_buffer.add(self._normalize_window)\n\n # Normalize the samples that are ready to be written to the output\n #normalize = self._normalize_buffer.to_array(end=self._synthesis_hop)\n #normalize[normalize < EPSILON] = 1\n #self._out_buffer.divide(normalize)\n \n # set ready before trying to read\n self._out_buffer.set_ready(self._synthesis_hop)\n \n # duys ---\n buf = np.empty((self._channels, self._out_buffer.length))\n n = self._out_buffer.peek(buf)\n self.stft_output_buffer2.append(buf[0,:]) \n # ----\n\n self._normalize_buffer.remove(self._synthesis_hop)",
"def frame_convert(frame, cals):\n \n # Calculate the frame conversion\n wavelength = (frame - cals[0]) * cals[2] + cals[1]\n \n return wavelength",
"def ffe_to_flux(spframe_hdu, calib_data):\n #eflux = #6 is sky, 0 is residuals\n eflux = spframe_hdu[6].data + spframe_hdu[0].data \n spflux = eflux/calib_data\n \n return spflux",
"def compare_response2(unit_impulse, cutoff_period):\n\n fig = plt.figure(figsize=(6, 6), dpi=300)\n ax = fig.add_subplot(1, 1, 1)\n ax.set_ylim(-.2, 1.5)\n worN = 4096\n dt = unit_impulse.index[1]-unit_impulse.index[0]\n # cuf off frequency as ratio of Nyquist frequency\n cf = 2.0*dt.seconds/(cutoff_period*3600)\n dt_hours_ratio = dt.seconds/3600.0\n # set consine_lanczos as 70h\n cl_size_hours1 = 70\n cl_size1 = int(cl_size_hours1*3600/dt.seconds)\n # default consine_lanczos size\n default_cl_size = int(1.25*2.0/cf)\n for (unit_impulse_response, label) in [(cosine_lanczos(unit_impulse, cutoff_period=\"40h\", filter_len=cl_size1), \"C_L,size=%s hours\" % cl_size_hours1),\n (cosine_lanczos(\n unit_impulse, cutoff_period=\"40h\"), \"C_L,size=%s default\" % default_cl_size),\n (unit_impulse.rolling(96, center=True,\n min_periods=96).mean(), \"boxcar24\"),\n (unit_impulse.rolling(99, center=True,\n min_periods=99).mean(), \"boxcar25\"),\n (godin(unit_impulse), \"godin\")]:\n b = unit_impulse_response.fillna(0.0)\n if((b.iloc[0, 0] != 0.0) or (b.iloc[-1, 0] != 0.0)):\n print(\"warning: unit impulse length for %s is not long enough \" % label)\n w, h = freqz(b.values, worN=worN)\n pw = w[1:]\n # convert frequence to period in hours\n period = 1.0/pw\n period = 2.0*np.pi*period*dt_hours_ratio\n hh = np.abs(h)\n ax.plot(period, hh[1:], linewidth=1, label=label)\n ax.set_xlim(0.1, 400)\n ax.axvline(x=cutoff_period, ymin=-0.2, linewidth=1, color='r')\n ax.annotate(\"cutoff period=%f h\" % cutoff_period, (cutoff_period, 1.2), xycoords='data',\n xytext=(50, 50), textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\"))\n ax.set_ylabel(r'Magnitude')\n ax.set_xlabel(r'Period(hours)')\n plt.grid(visible=True, which='both', color='0.9',\n linestyle='-', linewidth=0.5)\n plt.tight_layout()\n ax.legend(loc=\"lower right\")",
"def frame_energy(frame):\n return (abs(frame)**2).sum()",
"def magnitude_response(x, fs):\n nfft = len(x)\n df = fs / nfft\n f = np.arange(0, fs - df, df)\n y = fft(x)\n y_mag = 20 * np.log10(np.abs(y))\n return f[0:int(np.ceil(nfft/2))], y_mag[0:int(np.ceil(nfft/2))]",
"def _decode_player_utterance(self, player_utterance):\n # THIS CAN'T BE DONE UNTIL WE SETTLE ON AN ELEGANT WAY OF INCORPORATING TENSORFLOW\n # INTO THE TALK OF THE TOWN FRAMEWORK",
"def firFreqResponse(self, x, freq):\n fir = self.createFirFilter(fs, x)\n\n # Decimated sampling frequency [Hz]\n fd = fs / np.prod(self.firdecimation(x))\n\n return firFreqResponse(fd, freq, fir)",
"def IDFT(fourier_signal):\n\n vec2 = np.arange(0, fourier_signal.shape[0]) / fourier_signal.shape[0]\n\n vec1 = np.arange(0, fourier_signal.shape[0]).\\\n reshape((fourier_signal.shape[0], 1))\n\n exp = np.exp(vec1 * vec2 * UNITY_ROOT_CONSTANT)\n\n return np.dot(exp, fourier_signal) / fourier_signal.shape[0]",
"def _fast_sc_decode(self, y_message, frozen_bits=None):\n\n u_est = np.full(self._N, -1)\n\n # An array which shows for each LLR value out of N * (1 + log(N)) whether it was calculated\n is_calc_llr = [False] * self._N * (self._n + 1)\n\n # An array which stores values for N * (1 + log(N)) LLRs\n llr_array = np.full(self._N * (self._n + 1), 0.0, dtype=np.longfloat)\n\n for i in range(self._N):\n # Call the function to calculate LLR for i-th out of N polarized channels\n llr = self._fast_llr(i, y_message, u_est[:i], llr_array, is_calc_llr)\n\n if i in self._frozen_bits_positions:\n u_est[i] = frozen_bits[self._frozen_bits_positions.index(i)] if frozen_bits is not None else 0\n else:\n u_est[i] = 0 if llr > 0 else 1\n\n return u_est",
"def response_spectrum(self, ss, s1, soil_class, design_coefficient):\r\n\r\n self.data = {\r\n 'ss': ''\r\n }\r\n if soil_class == \"SA\":\r\n fa = 0.8\r\n fv = 0.8\r\n elif soil_class == \"SB\":\r\n fa = 1.0\r\n fv = 1.0\r\n elif soil_class == \"SC\":\r\n # fa\r\n if ss <= 0.5:\r\n fa = 1.2\r\n elif ss > 0.5 and ss < 1.0:\r\n fa = -0.4*ss+1.4\r\n else:\r\n fa = 1.0\r\n # fv\r\n fv = -s1+1.8\r\n elif soil_class == \"SD\":\r\n #fa\r\n if ss <= 0.25:\r\n fa = 1.6\r\n elif ss > 0.25 and ss < 0.75:\r\n fa = -0.88*ss+1.8\r\n elif ss >= 0.75 and ss < 1.25:\r\n fa = -0.4*ss+1.5\r\n else:\r\n fa = 1.0\r\n # fv\r\n if s1 <= 0.1:\r\n fv = 2.4\r\n elif s1 > 0.1 and s1 < 0.3:\r\n fv = -4*s1+2.8\r\n elif s1 >= 0.3 and s1 < 0.4:\r\n fv = -2*s1+2.4\r\n elif s1 >= 0.4 and s1 < 0.5:\r\n fv = -s1+2\r\n else:\r\n fv = 1.5\r\n else:\r\n # fa\r\n if ss <= 0.25:\r\n fa = 2.5\r\n elif ss > 0.25 and ss < 0.5:\r\n fa = -0.32*ss+3.3\r\n elif ss >= 0.5 and ss < 0.75:\r\n fa = -2*ss+2.7\r\n elif ss >= 0.75 and ss < 1.0:\r\n fa = -1.32*ss+2.1\r\n else:\r\n fa = 0.9\r\n # fv\r\n if s1 <= 0.1:\r\n fv = 3.5\r\n elif s1 > 0.1 and s1 < 0.2:\r\n fv = -3*s1+3.8\r\n elif s1 >= 0.2 and s1 < 0.4:\r\n fv = -4*s1+4\r\n else:\r\n fv = 2.4\r\n\r\n # sms and sm1\r\n sms = ss*fa\r\n sm1 = s1*fv\r\n\r\n # sds sd1 $s0\r\n sds = sms*design_coefficient\r\n sd1 = sm1*design_coefficient\r\n s0 = sds/2.5\r\n\r\n # T0, Ts\r\n t0 = 0.2*sd1/sds\r\n t_ = ts = sd1/sds\r\n\r\n # calculate another point\r\n x = [0, t0, ts]\r\n y = [s0, sds, sds]\r\n while t_ <= 8:\r\n x.append(t_)\r\n y.append(sd1/t_)\r\n t_ += 0.1\r\n\r\n # data[]\r\n # data['ss'] = ss\r\n # data['s1'] = s1\r\n # data['kelas_situs'] = soil_class\r\n # data['fa'] = fa\r\n # data['fv'] = fv\r\n # data['sms'] = sms\r\n # data['sm1'] = sm1\r\n # data['sds'] = sds\r\n # data['sd1'] = sd1\r\n # data['s0'] = s0\r\n # data['t0'] = t0\r\n # data['ts'] = ts\r\n # data['t'] = t\r\n # if t>0 and t<t0:\r\n # pass\r\n # elif t>t0 and t<ts:\r\n # sa = sds\r\n # 
else:\r\n # sa = sd1/t\r\n # data['sa'] = sa\r\n\r\n return x, y",
"def wfDerivative(signalRaw,sp=10.):\n signalDeriv = np.zeros(len(signalRaw))\n for i in range(len(signalRaw)-1):\n signalDeriv[i] = (signalRaw[i+1] - signalRaw[i])/sp\n return signalDeriv",
"def bbSpec(freq,temp,emis): \n if temp==0:\n return 0\n occ = 1.0/(np.exp(h*freq/(temp*kB)) - 1)\n e = emis(freq) if callable(emis) else emis\n return 2 * e * h * freq**3 /(c**2) * occ",
"def stereo_fm(x, fs=2.4e6, file_name='test.wav'):\n N1 = 10\n b = signal.firwin(64, 2 * 200e3 / float(fs))\n # Filter and decimate (should be polyphase)\n y = signal.lfilter(b, 1, x)\n z = ss.downsample(y, N1)\n # Apply complex baseband discriminator\n z_bb = discrim(z)\n # Work with the (3) stereo multiplex signals:\n # Begin by designing a lowpass filter for L+R and DSP demoded (L-R)\n # (fc = 12 KHz)\n b12 = signal.firwin(128, 2 * 12e3 / (float(fs) / N1))\n # The L + R term is at baseband, we just lowpass filter to remove \n # other terms above 12 kHz.\n y_lpr = signal.lfilter(b12, 1, z_bb)\n b19 = signal.firwin(128, 2 * 1e3 * np.array([19 - 5, 19 + 5]) / (float(fs) / N1),\n pass_zero=False);\n z_bb19 = signal.lfilter(b19, 1, z_bb)\n # Lock PLL to 19 kHz pilot\n # A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 \n # The VCO quiescent frequency is set to 19000 Hz.\n theta, phi_error = pilot_pll(z_bb19, 19000, fs / N1, 2, 10, 0.707)\n # Coherently demodulate the L - R subcarrier at 38 kHz.\n # theta is the PLL output phase at 19 kHz, so to double multiply \n # by 2 and wrap with cos() or sin().\n # First bandpass filter\n b38 = signal.firwin(128, 2 * 1e3 * np.array([38 - 5, 38 + 5]) / (float(fs) / N1),\n pass_zero=False);\n x_lmr = signal.lfilter(b38, 1, z_bb)\n # Coherently demodulate using the PLL output phase\n x_lmr = 2 * np.sqrt(2) * np.cos(2 * theta) * x_lmr\n # Lowpass at 12 kHz to recover the desired DSB demod term\n y_lmr = signal.lfilter(b12, 1, x_lmr)\n # Matrix the y_lmr and y_lpr for form right and left channels:\n y_left = y_lpr + y_lmr\n y_right = y_lpr - y_lmr\n\n # Decimate by N2 (nominally 5)\n N2 = 5\n fs2 = float(fs) / (N1 * N2) # (nominally 48 ksps)\n y_left_DN2 = ss.downsample(y_left, N2)\n y_right_DN2 = ss.downsample(y_right, N2)\n # Deemphasize with 75 us time constant to 'undo' the preemphasis \n # applied at the transmitter in broadcast FM.\n # A 1-pole digital lowpass works well here.\n a_de = np.exp(-2.1 * 1e3 * 2 * 
np.pi / fs2)\n z_left = signal.lfilter([1 - a_de], [1, -a_de], y_left_DN2)\n z_right = signal.lfilter([1 - a_de], [1, -a_de], y_right_DN2)\n # Place left and righ channels as side-by-side columns in a 2D array\n z_out = np.hstack((np.array([z_left]).T, (np.array([z_right]).T)))\n\n ss.to_wav(file_name, 48000, z_out / 2)\n print('Done!')\n # return z_bb, z_out\n return z_bb, theta, y_lpr, y_lmr, z_out",
"def part1():\n signal = read_input()\n base_pattern = [0, 1, 0, -1]\n for _ in range(100): # Repeat FFT 100 times\n next_signal = []\n for i in range(len(signal)): # Calculate each signal element\n pattern = cycle(d for d in base_pattern for _ in range(i + 1))\n next(pattern)\n next_element = 0\n for d in signal:\n next_element += d*next(pattern)\n next_signal.append(abs(next_element) % 10)\n signal = next_signal\n print(''.join(str(c) for c in signal[:8]))",
"def reconstruct_pu(self, receivers):\n self.fpts = receivers\n # Initialize variables\n self.p_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n self.uz_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n # Initialize bar\n bar = tqdm(total = len(self.controls.k0), desc = 'Reconstructing sound field...')\n for jf, k0 in enumerate(self.controls.k0):\n # For smooth transition from continous to discrete k domain\n kappa = np.sqrt(self.delta_kx*self.delta_ky/(2*np.pi*k0**2))\n # compute kz\n kz_f = form_kz(k0, self.kx_f, self.ky_f)\n k_vec_ref = np.array([self.kx_f, self.ky_f, kz_f])\n # Reflected or radiating part\n fz_ref = self.f_ref * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zp]).T\n psi_ref = fz_ref * kappa * np.exp(-1j * recs @ k_vec_ref)\n # Incident part\n if self.f_inc != 0:\n k_vec_inc = np.array([self.kx_f, self.ky_f, -kz_f])\n fz_inc = self.f_inc * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zm]).T\n psi_inc = fz_inc * kappa * np.exp(-1j * recs @ k_vec_inc)\n # Forming the sensing matrix\n if self.f_inc == 0:\n h_mtx = psi_ref\n else:\n h_mtx = np.hstack((psi_inc, psi_ref))\n # Compute p and uz\n self.p_recon[:,jf] = h_mtx @ self.pk[:,jf]\n if self.f_inc == 0:\n self.uz_recon[:,jf] = -((np.divide(kz_f, k0)) * h_mtx) @ self.pk[:,jf]\n else:\n self.uz_recon[:,jf] = -((np.divide(np.concatenate((-kz_f, kz_f)), k0)) * h_mtx) @ self.pk[:,jf]\n bar.update(1)\n bar.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Implements a classification (binary crossentropy) loss function for DragonNet architecture.
|
def binary_classification_loss(concat_true, concat_pred):
t_true = concat_true[:, 1]
t_pred = concat_pred[:, 2]
t_pred = (t_pred + 0.001) / 1.002
losst = tf.reduce_sum(K.binary_crossentropy(t_true, t_pred))
return losst
|
[
"def loss(y_true, y_pred):\n return categorical_crossentropy(y_true=y_true, y_pred=y_pred)",
"def loss_func(y_true, y_pred):\n\n return tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1, reduction=\"none\")(y_true, y_pred)",
"def _cross_entropy_loss(self, y_true_clf, y_pred_clf, training_mask):\n return torch.nn.functional.binary_cross_entropy(y_pred_clf*training_mask, (y_true_clf*training_mask))",
"def discriminator_loss(real_output, fake_output):\n\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss",
"def loss(self, X, Y):\n if self.l1 == 0:\n l1 = 0\n else:\n l1 = np.sum(np.absolute(self.w1)) + np.sum(np.absolute(self.b1)) + np.sum(np.absolute(self.w2)) + np.sum(np.absolute(self.b2)) + np.sum(np.absolute(self.w3)) + np.sum(np.absolute(self.b3))\n if self.l2 ==0:\n l2 = 0\n else:\n l2 = np.sum(np.power(self.w1,2)) + np.sum(np.power(self.b1,2)) + np.sum(np.power(self.w2,2)) + np.sum(np.power(self.b2,2)) + np.sum(np.power(self.w3,2)) + np.sum(np.power(self.b3, 2))\n return categorical_cross_entropy(self.fprop(X).T, onehot(Y,self.output_size)) + self.l1 * l1 + self.l2 * l2",
"def discriminator_loss(logits_real, logits_fake):\n \n ####################################\n # YOUR CODE HERE #\n ####################################\n \n D_x = bce_loss(logits_real, torch.ones(logits_real.size()).to(device))\n D_G_x = bce_loss(logits_fake, torch.zeros(logits_fake.size()).to(device))\n total = D_x + D_G_x\n total.mean()\n ########## END ##########\n return total",
"def masked_categorical_crossentropy(y_true, y_pred):\n mask = y_true[:, -1]\n # y_true = y_true[:, :-1]\n loss = K.categorical_crossentropy(target=y_true,\n output=y_pred,\n from_logits=True)\n mask = K.cast(mask, dtype=np.float32)\n loss *= mask\n return K.mean(loss, axis=-1)",
"def softmax_cross_entropy_loss(self):\n self._log_accuracy()\n loss_cls = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.proposals.data.get_field('gt_classes'),\n logits=self.pred_class_logits\n )\n )\n loss_cls = tf.cond(\n tf.shape(self.pred_class_logits)[0] > 0, lambda: loss_cls, lambda: 0.\n )\n return loss_cls",
"def logit_binary_cross_entropy_loss(x,\n y):\n return optax.sigmoid_binary_cross_entropy(x, y).mean()",
"def classification_loss(self, classifier, pos_box_ind, neg_box_ind):\n \n # Gather up the classifier values at the negative and \n # positive indexes:\n with tf.variable_scope(\"rpn_cls_loss\"):\n pos_class = tf.gather(classifier, pos_box_ind)\n\n pos_class = tf.cond(tf.rank(pos_class) > 2,\n true_fn = lambda: tf.squeeze(pos_class, axis=1),\n false_fn = lambda: pos_class)\n\n neg_class = tf.gather(classifier, neg_box_ind)\n\n neg_class = tf.cond(tf.rank(neg_class) > 2,\n true_fn = lambda: tf.squeeze(neg_class, axis=1),\n false_fn = lambda: neg_class)\n\n # Set up the \"true\" answers:\n pos_true = tf.zeros(tf.shape(pos_class)) + (0,1,)\n neg_true = tf.zeros(tf.shape(neg_class)) + (1,0,)\n\n # Now, collect pos and negative into one:\n true_labels = tf.concat((pos_true, neg_true), axis=0)\n class_labels = tf.concat((pos_class, neg_class), axis=0)\n\n # Finally, convert this into cross entropy loss:\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=true_labels,\n logits=class_labels))\n\n return cross_entropy",
"def loss(logits_seg, logits_con, labels_seg, labels_con):\n # TODO: weighting of loss terms. DCAN project uses proportion of values that equal 1 for contours and segments\n # The DCAN paper talks about weights for the 'auxiliary classifiers'. In the FCN which the paper refers, the \n # auxiliary classifiers are the pre-fused results from the different levels of the convnet. Should that be the same here?\n # Means there will be 6 of them - 3 for each label type. FCN code doesn't reveal much about weighting and the paper doesn't \n # help much either.\n \n# with tf.variable_scope(\"weights/segment\"):\n# weights_seg = tf.scalar_mul(SEG_RATIO, tf.cast(tf.equal(labels_seg, 0), tf.float32)) + \\\n# tf.scalar_mul(1 - SEG_RATIO, tf.cast(tf.equal(labels_seg, 1), tf.float32))\n# \n# with tf.variable_scope(\"weights/contour\"):\n# weights_con = tf.scalar_mul(CON_RATIO, tf.cast(tf.equal(labels_con, 0), tf.float32)) + \\\n# tf.scalar_mul(1 - CON_RATIO, tf.cast(tf.equal(labels_con, 1), tf.float32))\n# \n# loss_seg = tf.losses.sigmoid_cross_entropy(labels_seg, logits_seg, weights=weights_seg, scope='segment_loss')\n# loss_con = tf.losses.sigmoid_cross_entropy(labels_con, logits_con, weights=weights_con, scope='contour_loss')\n\n loss_seg = tf.losses.sigmoid_cross_entropy(labels_seg, logits_seg, scope='segment_loss')\n loss_con = tf.losses.sigmoid_cross_entropy(labels_con, logits_con, scope='contour_loss')\n\n total_loss = tf.add(tf.scalar_mul(1.0, loss_seg), tf.scalar_mul(1.0, loss_con), name='total_loss')\n \n return total_loss",
"def rpn_loss_cls(num_anchors):\n def rpn_loss_cls_fixed_num(y_true, y_pred):\n\n return lambda_rpn_class * \\\n K.sum(y_true[:, :, :, :num_anchors] *\n K.binary_crossentropy(y_pred[:, :, :, :], y_true[:, :, :, num_anchors:])) / \\\n K.sum(epsilon + y_true[:, :, :, :num_anchors])\n\n return rpn_loss_cls_fixed_num",
"def sigmoid_cross_entropy_loss(inputs, reduction='valid', **kwargs):\n reduction = reduction.upper()\n if context.executing_eagerly():\n return OpLib.execute(\n 'SigmoidCrossEntropyLoss', inputs, reduction=reduction)\n return OpLib.add('SigmoidCrossEntropyLoss', inputs,\n reduction=reduction, **kwargs)",
"def loss(self, y_pred: Any, y_true: Any) -> ITensor:\n raise NotImplementedError(\"Every loss must implement the call method.\")",
"def binary_cross_entropy_loss(x, y):\n return -jnp.mean(y * jnp.log(jnp.clip(x, a_min=utils.EPS)) +\n (1 - y) * jnp.log(jnp.clip(1 - x, a_min=utils.EPS)))",
"def make_discriminator(nb_categories):\n\n input_data = Input(shape=(128, 128, 1))\n x = Conv2D(D, (5, 5), strides=(2,2), padding='same')(input_data)\n x = LeakyReLU(alpha=0.2)(x)\n x = Conv2D(D * 2, (5, 5), strides=(2,2), kernel_initializer='he_normal',padding='same')(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = Conv2D(D * 4, (5, 5), strides=(2,2), kernel_initializer='he_normal',padding='same')(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = Conv2D(D * 8, (5, 5), strides=(2,2), kernel_initializer='he_normal',padding='same')(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = Conv2D(D * 16, (5, 5), strides=(2,2), kernel_initializer='he_normal', padding='same')(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = Flatten()(x)\n real_fake = Dense(1, kernel_initializer='he_normal', name='real_fake')(x) # no activation for wasserstein_loss\n categories = Dense(nb_categories, kernel_initializer='he_normal', name='categories', activation='softmax')(x)\n\n model = Model(input_data, [real_fake, categories])\n\n return model",
"def _ComputeClassificationLoss(self, predicted_class_logits,\n assigned_gt_labels, class_weights, loss_fn):\n p = self.params\n\n predicted_class_logits = py_utils.HasShape(predicted_class_logits,\n [-1, -1, p.num_classes])\n bs, npillars, ncls = py_utils.GetShape(predicted_class_logits, 3)\n class_weights = py_utils.HasShape(class_weights, [bs, npillars, ncls])\n assigned_gt_labels = py_utils.HasShape(assigned_gt_labels,\n [bs, npillars, ncls])\n\n class_loss = loss_fn(\n logits=predicted_class_logits, labels=assigned_gt_labels)\n class_loss *= class_weights\n class_loss_sum = tf.reduce_sum(class_loss)\n\n return class_loss_sum",
"def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_classes = W.shape[1]\n num_samples = X.shape[0]\n\n # scores = np.dot(X, W) # (N, C)\n # scores -= np.max(scores) # numerical stability\n\n loss = 0\n for sample_idx in range(num_samples):\n scores = X[sample_idx].dot(W) # (C,)\n scores -= np.max(scores)\n\n exp_sum = np.sum(np.exp(scores))\n\n target_vector = np.zeros_like(scores)\n target_vector[y[sample_idx]] = 1\n for y_class in range(num_classes):\n probability = np.exp(scores[y_class]) / exp_sum \n probability_error = probability - target_vector[y_class]\n\n dW[:,y_class] += probability_error * X[sample_idx] \n\n if y_class == y[sample_idx]:\n loss -= np.log(probability)\n\n\n\n # compute the mean of sample losses and gradients\n loss /= num_samples\n dW /= num_samples\n\n # add regularization loss and gradients\n loss += reg * np.sum(W * W)\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW",
"def discriminator_loss(disc_real_output,\n disc_generated_output,\n l2_weight = 0.0001,\n L2_OPT = False,\n WASSERSTEIN_OPT = False):\n # log(DIS)\n if (WASSERSTEIN_OPT):\n real_loss = tf.reduce_mean(disc_real_output) - tf.reduce_mean(disc_generated_output)\n generated_loss = tf.zeros_like(disc_real_output) # equal to zero-like tensor...\n else:\n real_loss = cross_entropy(tf.ones_like(disc_real_output), disc_real_output)\n generated_loss = cross_entropy(tf.zeros_like(disc_generated_output), disc_generated_output)\n # total_loss = real_loss + generated_loss\n\n # real_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n # labels=tf.ones_like(disc_real_output), logits=disc_real_output) # label=1\n\n # log(1-DIS(GEN))\n # generated_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n # labels=tf.zeros_like(disc_generated_output), logits=disc_generated_output) # label=0\n\n # L2 loss\n l2_loss = tf.reduce_mean(tf.abs(disc_real_output - disc_generated_output)) # loss with target...\n\n total_loss = real_loss + generated_loss + (L2_OPT * l2_weight * l2_loss)\n\n # total_disc_loss = tf.reduce_mean(real_loss) \\\n # + tf.reduce_mean(generated_loss) \\\n # + (l2_weight * l2_loss * L2_OPT)\n\n return total_loss"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tracks the mean absolute value of epsilon.
|
def track_epsilon(concat_true, concat_pred):
epsilons = concat_pred[:, 3]
return tf.abs(tf.reduce_mean(epsilons))
|
[
"def set_epsilon(self, epsilon):\r\n self.epsilon = epsilon",
"def decay_epsilon(self):\n if self.epsilon > EPSILON_MIN:\n self.epsilon = max(EPSILON_MIN, self.epsilon * EPSILON_DECAY_RATE)",
"def decayed_epsilon(self):\n # exploration rate is never smaller than 0.001 and greater than 0.9\n return max(0.001,\n min(0.9, 1.0 - np.log10(self.episode/self.decay_factor)))",
"def mean(self):\n\t\treturn 0.8",
"def epsilon(self,c):\n \n \n epsilon = np.log10(self.sumIsotopes(c)/self.model[\"p\"])+12\n return epsilon",
"def setEpsilon(self, epsilon) -> None:\n ...",
"def setThreshold(self, epsilon):\n self.epsilon = epsilon",
"def mean_abs_diff(x):\n\treturn np.mean(np.abs(np.diff(x)))",
"def set_epsilon(self, eps):\n self.eps = eps",
"def _estimate_epsilon(self,D):\n \n print(\"Optimizing epsilon.\"); sys.stdout.flush()\n\n epsilon_list = []\n num_clust_list = []\n noise_list = []\n\n # Go through a large number of values of epsilon \n for i in np.arange(0,np.max(D.dist_matrix),0.1):\n\n # generate clusters at this value of epsilon\n self.epsilon = i\n\n # This check is because dbscan throws an error if epsilon is too small...\n try:\n self.generate_clusters(D)\n except ValueError:\n continue\n\n # record the epsilon, number of clusters, and size of the noise cluster\n epsilon_list.append(i)\n num_clust_list.append(self.num_clusters)\n noise_list.append(len(self.cluster_labels[(self.cluster_labels['cluster'] == -1)].index))\n\n # spit out epsilon optimization if being verbose\n if self.verbose:\n print(epsilon_list[-1],num_clust_list[-1],noise_list[-1])\n sys.stdout.flush()\n \n if self.num_clusters > 1:\n count = self.cluster_labels.groupby(\"cluster\").count()\n count.to_pickle(os.path.join(self.out_path,\"episilon_{:.2e}.pickle\".format(i)))\n\n # If no clusters were found for *any* epsilon, complain\n if len(num_clust_list) < 1:\n err = \"No clusters found for any epsilon. Data set has too few sequences?\\n\"\n raise ValueError(err)\n\n # Normalize the number of clusters to the largest number seen\n clust_thresh = np.array(num_clust_list)/max(num_clust_list)\n\n # Get indices of each epsilon where the number of clusters is above\n # epsilon_size_cutoff.\n indices = np.where(clust_thresh > self.epsilon_size_cutoff)\n\n # Now find values of epsilon that maximize the size of the noise cluster\n max_noise = max([noise_list[i] for i in indices[0]])\n eps = [epsilon_list[i] for i in indices[0] if noise_list[i] == max_noise]\n \n # return the smallest epsilon compatible with this.\n return eps[0]",
"def _get_epsilon(self, is_evaluation):\n if is_evaluation:\n return 0.0\n\n decay_steps = min(self._step_counter, self._epsilon_decay_duration)\n decayed_epsilon = (\n self._epsilon_end + (self._epsilon_start - self._epsilon_end) *\n (1 - decay_steps / self._epsilon_decay_duration)**self._epsilon_power)\n return decayed_epsilon",
"def mean_score(self):\n pass",
"def plugged_mean(self) -> float:\n return self._plugged_mean",
"def decay_epsilon(self):\n if self.epsilon_linear_decay:\n if self.epsilon_greedy > self.epsilon_min:\n self.epsilon_greedy -= (1 - self.epsilon_decay_rate)\n else:\n if self.epsilon_greedy > self.epsilon_min:\n self.epsilon_greedy *= self.epsilon_decay_rate",
"def epsilon_inf(self):\n\n return cst.epsilon_0*(1. + np.sqrt(self.epsilon_11*self.epsilon_33\\\n - self.epsilon_13**2.))",
"def epsilon_find(sim_vars, erange):\n errors = []\n for epsilon in erange:\n sim_vars[\"epsilon\"] = epsilon\n original, recon = recon_pulse(sim_vars, plot=False, savefig=False)\n # compute error\n errors.append(rmse(original,recon))\n\n # plot RMSE error against epsilon\n plt.plot(erange, errors)\n plt.figure(num=1, size=[16,9])\n plt.xlabel(\"Epsilon\")\n plt.ylabel(\"RMSE\")\n plt.title(\"Reconstruction Error vs. Epsilon\")\n plt.grid(True)\n plt.show()",
"def __normalize__(self, features, eps=1e-14):\n return (features - self.__mean__) / (self.__std__ + eps)",
"def mean_precision(self, array_precision):\n mean_classes_precision_exp = []\n for exp in array_precision:\n mean_classes_precision_exp.append(np.mean(exp))\n return np.mean(mean_classes_precision_exp), np.std(mean_classes_precision_exp)",
"def mean(self):\n return self.value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get templates on its main and neighboring channels
|
def on_main_channel(templates):
pass
|
[
"def probe_templates(self, protocol, model_id):\n if protocol == \"1:1\":\n matches = self._read_match_file(\"1:1\", \"ijbc_11_G1_G2_matches.csv\")[model_id]\n return [self._templates[\"Mixed\"][m] for m in matches]\n elif protocol == \"Covariates\":\n matches = self._read_match_file(\"Covariates\", \"ijbc_11_covariate_matches.csv\")[model_id]\n return [self._templates[\"Covariates\"][m] for m in matches]\n else:\n # for 1:N protocols, return all probe files\n return self.get_templates(protocol, \"probe\").values()",
"def _get_templates(self, cr, uid, context=None):\n if context is None:\n context = {}\n record_ids = []\n email_template = self.pool.get('email.template')\n model = False\n if context.get('message_id'):\n mail_message = self.pool.get('mail.message')\n message_data = mail_message.browse(cr, uid, int(context.get('message_id')), context)\n model = message_data.model\n elif context.get('mail.compose.target.model') or context.get('active_model'):\n model = context.get('mail.compose.target.model', context.get('active_model'))\n if model:\n record_ids = email_template.search(cr, uid, [('model', '=', model)])\n return email_template.name_get(cr, uid, record_ids, context) + [(False,'')]\n return []",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def init_templates():\n\n templates = []\n\n # single stroke templates (all fingers doing the same if various fingers) (1 finger)\n templates.append(Template(\"T\", [\n # different PC for having different ways of drawing Template.name (T) for better recognition\n Point_cloud(\"T1\", [Point(30, 7, 1), Point(103, 7, 1),\n Point(66, 7, 2), Point(66, 87, 2)])\n ,\n Point_cloud(\"T2\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 77, 3)])\n ,\n Point_cloud(\"T3\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 50, 3)])\n ], None)\n )\n templates.append(Template(\"V\", [\n Point_cloud(\"V1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V2\", [Point(0, 7, 1), Point(25, 37, 1),\n Point(25, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V3\", [Point(30, 7, 1), Point(40, 25, 1),\n Point(40, 25, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V4\", [Point(30, 16, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 7, 2)])\n ,\n Point_cloud(\"V5\", [Point(30, 7, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 16, 2)])\n ], None)\n )\n templates.append(Template(\"D\", [\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(50, 53, 2),\n Point(50, 53, 3), Point(55, 37, 3),\n Point(55, 37, 4), Point(50, 21, 4),\n Point(50, 21, 5), Point(30, 7, 5)])\n ,\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(60, 53, 2),\n Point(60, 53, 3), Point(65, 37, 3),\n Point(65, 37, 4), Point(60, 21, 4),\n Point(60, 21, 5), Point(30, 7, 5)])\n ,\n ], None)\n )\n templates.append(Template(\"X\", [\n Point_cloud(\"X1\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2)])\n ,\n Point_cloud(\"X1_2\", [Point(30, 7, 1), Point(60, 34, 1),\n Point(60, 7, 2), Point(30, 34, 2)])\n ,\n Point_cloud(\"X2\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), 
Point(30, 47, 2),\n Point(30, 7, 3), Point(60, 7, 3)])\n ,\n Point_cloud(\"X3\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 47, 3), Point(60, 47, 3)])\n ,\n Point_cloud(\"X4\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 7, 3), Point(30, 47, 3)])\n ], None)\n )\n templates.append(Template(\"W\", [\n Point_cloud(\"W1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 20, 2),\n Point(50, 20, 3), Point(60, 37, 3),\n Point(60, 37, 4), Point(70, 7, 4)])\n ,\n Point_cloud(\"W2\", [Point(30, 7, 1), Point(50, 37, 1),\n Point(50, 37, 2), Point(70, 7, 2),\n Point(70, 7, 3), Point(90, 37, 3),\n Point(90, 37, 4), Point(110, 7, 4)])\n ], None)\n )\n\n templates.append(Template(\"L\", [\n Point_cloud(\"L1\", [Point(30, 27, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ,\n Point_cloud(\"L2\", [Point(30, 17, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ], None)\n )\n templates.append(Template(\"Z\", [\n Point_cloud(\"Z1\", [Point(30, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 27, 2),\n Point(30, 27, 3), Point(60, 27, 3)])\n ,\n Point_cloud(\"Z2\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(30, 35, 2),\n Point(30, 35, 3), Point(55, 30, 3)])\n ,\n Point_cloud(\"Z3\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(20, 37, 2),\n Point(20, 37, 3), Point(52, 33, 3)])\n ,\n Point_cloud(\"Z4\", [Point(30, 21, 1), Point(50, 8, 1),\n Point(50, 8, 2), Point(23, 30, 2),\n Point(23, 30, 3), Point(54, 27, 3)])\n ,\n Point_cloud(\"Z5\", [Point(40, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 25, 2),\n Point(30, 25, 3), Point(70, 27, 3)])\n ,\n Point_cloud(\"Z6\", [Point(20, 7, 1), Point(70, 7, 1),\n Point(70, 7, 2), Point(30, 28, 2),\n Point(30, 28, 3), Point(57, 27, 3)])\n ], None)\n )\n\n return templates",
"def get_channels():",
"def show_templates(self, user_id=None, channel_id=None):\n xl_release = super(ReleaseHelper, self).get_xl_release(user_id=user_id)\n message = get_templates_message(templates=xl_release.get_templates())\n message['token'] = self.vault_client.get_secret(path=\"bot_token\")\n message['channel'] = channel_id\n if \"text\" not in message:\n message['text'] = \"template list\"\n self.logger.info(\"show_templates -> message = %s\" % json.dumps(message, indent=4, sort_keys=True))\n return self.slack_client.post_message(message=message)",
"def whitened_templates(self) -> np.ndarray:\n if self.result is None:\n return None\n # get reconstructions from posterior, shaped as (chain, draw, ifo, time)\n # and stack into (ifo, time, sample)\n hs = self.result.posterior.h_det.stack(samples=('chain', 'draw'))\n # whiten the reconstructions using the Cholesky factors, L, with shape\n # (ifo, time, time). the resulting object will have shape (ifo, time, sample)\n return linalg.solve(self.result.constant_data.L, hs)",
"def templates_query(self):\n if self.tempchanged:\n try:\n fname = self.templatedf.loc[(self.templatedf.teff == self.teff.value) &\n (self.templatedf.logg == self.grav.value) &\n (self.templatedf.met == self.met.value)].iloc[0].name\n fname = self.templatedir + fname\n kwargs = self.kwargs\n kwargs['wavearr'] = self.spec.spectral_axis.value\n temp_spec = self.cutspec(freader(fname, **self.kwargs))\n except (IndexError, FileNotFoundError, OSError):\n self.gottemplate = False\n return\n self.templatefname = fname\n self.temp_spec = temp_spec\n self.gottemplate = True\n return",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def get_templates(cloud):\n print \"[+] checking available templates ...\"\n r = templates.get_list()\n rexp = raw_input(\"Enter a Name of OS:\")\n c_rexp = re.compile(rexp, re.IGNORECASE)\n for i in r[\"template_groups\"]:\n for j in i[\"templates\"]:\n for k in j[\"clouds\"]:\n if re.match(c_rexp, j[\"name\"]) and cloud.get(int(k['id'])):\n print j[\"name\"], cloud.get(int(k['id'])), \"Cloud id:\", k['id'], \"Templat id:\", k[\n 'system_template_id']\n cloud = raw_input(\"Enter Cloud id:\")\n template = raw_input(\"Enter Template id:\")\n print \"[+] Done\"\n return cloud, template",
"def _wikipedia_Page_templatePages(self):\n return [template for template in toolserver.Generators.getTemplatelinks(self)]",
"def test_gateway_template_discovery__mixed_templates(self) -> None:\n self._config.namespace = 'n1'\n self._config.data_store_exec = self._get_runnable_cmd(\n 0, {\n 'schema-version': 'v1',\n 'document-version': 'x',\n 'gateway-templates': [{\n 'namespace': None,\n 'protection': 'public',\n 'purpose': 'abc',\n 'template': 'xyz',\n }, {\n 'namespace': 'n1',\n 'protection': 'public',\n 'purpose': 'abc',\n 'template': '123',\n }, {\n 'namespace': None,\n 'protection': 'public',\n 'purpose': 'def',\n 'template': '456',\n }, {\n 'namespace': 'n1',\n 'protection': 'public',\n 'purpose': 'hij',\n 'template': '789',\n }, {\n 'namespace': 'n2',\n 'protection': 'public',\n 'purpose': 'hij',\n 'template': '789',\n }],\n 'service-templates': [],\n },\n )\n gateway = generate.GenerateGatewayConfiguration(self._config)\n templates = gateway.get_templates()\n self.assertEqual(\n {'abc': '123', 'hij': '789'},\n templates,\n )",
"def getBoundTemplates(self, uid):\n facade = self._getFacade()\n templates = facade.getBoundTemplates(uid)\n data = []\n for template in templates:\n label = '%s (%s)' % (template.titleOrId(), template.getUIPath())\n data.append([template.id, label])\n return DirectResponse.succeed(data=Zuul.marshal(data))",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def UpstreamImixTemplates(self):\r\n\t\treturn self._get_attribute('upstreamImixTemplates')",
"def generate_templates(self):\n\n templates = []\n cell_size = self.cell_size\n\n # Slide each size template over the entire shape model and generate templates\n for size in self.sizes:\n w = size[0]\n h = size[1]\n\n # Slide template with dimenions specified by size across the entire shape model\n for y in range(self.shape_model.shape[0] - h):\n for x in range(self.shape_model.shape[1] - w):\n\n mat_temp = np.copy(self.shape_model[y:y + h, x:x + w])\n unique = np.unique(mat_temp)\n\n # Check to make sure template holds some shape model information\n if len(unique) > 1:\n\n # Binary template: set values to 1 and 0 and add template\n if len(unique) == 2:\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = 0\n templates.append((x, y, size, mat_temp))\n\n # Ternary template: set values to -1, 0, 1 -- add template -- repeat with all permutations\n else:\n # Get unique value indices\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n idx3 = mat_temp == unique[2]\n\n mat_temp[idx1] = -1\n mat_temp[idx2] = 0\n mat_temp[idx3] = 1\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = -1\n mat_temp[idx3] = 0\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 0\n mat_temp[idx2] = 1\n mat_temp[idx3] = -1\n templates.append((x, y, size, mat_temp))\n\n self.templates = np.asarray(templates, dtype=object)\n self.remove_duplicates()\n self.shift_templates()\n self.normalize_templates()\n\n print('Created %d templates' % (len(self.templates)))\n return self.templates",
"def get_templates(self):\n index_templates = {}\n for path in glob.iglob(self.data_path + '/template/*.json'):\n logger.debug('Reading index template setup from {}'.format(path))\n index_template = None\n with open(path) as f:\n index_template = json.load(f)\n template_name = index_template['name']\n setup_body = index_template['body']\n index_templates[template_name] = setup_body\n return index_templates",
"def DownstreamImixTemplates(self):\r\n\t\treturn self._get_attribute('downstreamImixTemplates')",
"def get_templates_and_inputs(digit, number_of_templates):\n templates_index_list = random.sample(xrange(0, 10), number_of_templates)\n inputs_list = []\n templates_list = []\n for i in xrange(0, 10):\n if i in templates_index_list:\n templates_list.append(\n map(list,\n get_mfcc_feat(filename=\"./records/\" + str(digit) + '_' + str(i) + '.wav', winstep=0.01,\n nfilt=40,\n numcep=13, preemph=0.95, appendEnergy=False, outputFilename=\"./output/\" + str(digit)\n + '_' + str(i) + '.txt')[2]))\n\n else:\n inputs_list.append(\n map(list,\n get_mfcc_feat(filename=\"./records/\" + str(digit) + '_' + str(i) + '.wav', winstep=0.01,\n nfilt=40,\n numcep=13, preemph=0.95, appendEnergy=False, outputFilename=\"./output/\" + str(digit)\n + '_' + str(i) + '.txt')[2]))\n return templates_list, inputs_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Public method to grab feed from an object
|
def get_object_feed(self, endpoint, limit=250, since=None, until=None):
endpoint_id = self.get_object_id(endpoint)
url = self.base_url + '/' + endpoint_id + '/feed'
params = {'limit': limit, 'access_token': self.access_token}
if since is not None:
params['since'] = since
if until is not None:
params['until'] = until
resp = requests.get(url, params=params)
resp = json.loads(resp.content)
if 'error' in resp.keys():
raise FacebookError(resp)
return resp
|
[
"def get( self ):\n #using urlgrabber so it doesn't matter whether feed is a file or a url\n logger.debug(\"Opening feed: \" + self.feed)\n fd = urlopen( self.feed )\n feed = {}\n #is this an OPML file?\n try:\n outlines = OPML.parse( fd ).outlines\n logger.debug(\"Feed is OPML\")\n for opmlfeed in outlines:\n feed = {}\n feed[\"title\"] = opmlfeed[\"title\"]\n feed[\"url\"] = opmlfeed[\"xmlUrl\"]\n self.feedlist.append( feed )\n logger.debug(\"Feed has been imported: %s - %s\" % (feed[\"title\"], feed[\"url\"]))\n except Exception, e:\n feed = {}\n try:\n if self.title:\n feed[\"title\"] = self.title\n else:\n outlines = feedparser.parse( self.feed )[\"feed\"]\n feed[\"title\"] = outlines.title\n feed[\"url\"] = self.feed\n self.feedlist.append(feed)\n logger.debug(\"Feed has been imported: %s - %s\" % (feed[\"title\"], feed[\"url\"]))\n except Exception, e:\n print \"Feedparser exception:\", e\n sys.exit(-1)\n self.toXML()",
"def _getFeed(self):\n feed = FEED_DATA.get(self.data.url,None)\n if feed is None:\n # create it\n print 'Creating FEED_DATA[%s]'%self.data.url\n feed = FEED_DATA[self.data.url] = ItsatripFeed(self.data.url,\n self.data.timeout)\n return feed",
"def get_datafeed(item):\n if hasattr(item, 'get_datafeed'):\n return item.get_datafeed()\n return None",
"def _getFeed(self):\n feed = FEED_DATA.get(self.data['url'], None)\n if feed is None:\n # create it\n feed = FEED_DATA[self.data['url']] = RSSFeed(self.data['url'],\n self.data['timeout'])\n return feed",
"def get_feed(self):\n\t\turl = str(self.feed_url)\n\t\tf = feedparser.parse(url)\n\n\t\ttry:\n\t\t\tself.__feed_text = f['entries'][self.feed_number]['title'] + \"\\n\\n\" + f['entries'][self.feed_number]['summary']\n\t\t\tself.link = f[\"channel\"].get(\"link\", _(\"No link\"))\n\t\t\t\n\t\texcept IndexError:\n\t\t\tself.__feed_text = _('Refreshing...')",
"def get_datafeed(self):\n return self.datafeed",
"def _retrieveFeed(self):\n url = self.url\n if url!='':\n self._last_update_time_in_minutes = time.time()/60\n self._last_update_time = DateTime()\n try:\n data = tool.read_data(url, force=True)\n except urllib2.URLError, ex:\n try:\n data = tool.read_data(url)\n except:\n # we tried at least but have a failed load\n self._loaded = True \n self._failed = True\n return False\n self._parser = parser.Parser()\n self._parser.parse(data)\n self._title = u'Events'\n self._items = self._model2view(self._parser.items)\n self._loaded = True\n self._failed = False\n return True\n self._loaded = True\n self._failed = True # no url set means failed\n return False # no url set, although that actually should not really happen",
"def fetch(feed):\n d = feedparser.parse(feed.url)\n got_entries = len(d['entries'])\n fetch_ = Fetch()\n fetch_.feed_id = feed.id\n fetch_.result = str(got_entries)\n Session.add(fetch_)\n feed.last_fetched_at = datetime.datetime.now()\n Session.add(feed)\n count = 0\n for e in d['entries']:\n url = e.get('link')\n exists = Session.query(Entry).filter_by(url=url).first()\n if exists: \n continue\n title = e.get('title')\n \n # Try to get a published time, differs widely by feed.. \n published = e.get('published_parsed')\n if not published:\n published = e.get('updated_parsed')\n if not published:\n published = e.get('created_parsed')\n if not published:\n # If all aobe failed we will just use current gmtime\n published = time.gmtime()\n \n # Now convert published to a datetime \n published = datetime.datetime(*published[:6])\n\n summary = e.get('summary')\n \n # Now save the entry into the db...\n entry = Entry()\n entry.feed_id = feed.id\n entry.title = title\n entry.feed_title = feed.title\n entry.url = url\n entry.pubtime = published\n entry.summary = summary\n entry.host = get_host(feed.weburl)\n Session.add(entry)\n Session.commit()\n count += 1\n \n Session.commit()\n return count",
"def get_feedable_item(self, *args, **kwargs):\n return self.dataset_builder.get_feedable_item(*args, **kwargs)",
"def FindFeeds():\n rss_page = \"http://www.latimes.com/services/site/la-rssinfopage,0,5039586.htmlstory\"\n\n html = ukmedia.FetchURL( rss_page )\n soup = BeautifulSoup( html )\n\n feeds = []\n div = soup.find('div',{'id':'story-body'} )\n for td in div.table.findAll('td', {'class':'rssTitleCell'} ):\n a = td.a\n url = urlparse.urljoin( rss_page, a['href'] )\n\n title = ukmedia.FromHTMLOneLine( a.renderContents(None) )\n feeds.append( (title,url) )\n\n return feeds",
"def show_list_feed(request, slug, template_name='podcast/show_feed.html'):\n return object_detail(request,\n queryset=Show.objects.all(),\n mimetype='application/rss+xml',\n slug_field='slug',\n slug=slug,\n template_name=template_name)",
"def test_feed_generator(self):\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()",
"def getFeed(self, feedId):\n\t\ttry:\n\t\t\treturn self.getFeeds()[feedId]\n\t\texcept Exception:\n\t\t\tprint \"The ID %r doesn't exist.\" % feedId",
"def get_feeds(self):\n return self.feeds",
"def items(self, obj):\n posts = super(iTunesPodcastsFeed, self).items(obj)\n posts = [iTunesPodcastPost(item) for item in posts]\n return posts",
"def parse(feed):\n rss = fp.parse(feed)\n return (rss, rss.feed.title, rss.feed.subtitle)",
"def get(self, feed_id=None, uri=None):\n if None == feed_id == uri:\n raise ValueError('One of feed id or uri must be provided')\n\n pars = {'id': feed_id, 'uri': uri}\n tree = self._http.request(\"GetFeed\", \"GET\", pars)\n elm_feed = tree.find('feed')\n if ElementTree.iselement(elm_feed):\n return Feed.fromElementTree(elm_feed)\n\n raise ValueError('Feed not found')",
"def fetch_feed(url, etag=None, modified=None, agent=None):\n\td = None\n\ttry:\n\t\td = feedparser.parse(url, etag=etag, modified=modified, agent=conf.USER_AGENT)\n\texcept LookupError, e:\n\t\traise FeedParseError(url, None, 'Broken Character Encoding', e)\n\n\t# TODO: do we want to check the bozo bit?\n\n\t# check HTTP status for error states\n\tif http_status_not_modified(d):\n\t\treturn d\n\telif http_status_error(d):\n\t\traise FeedFetchError(url, d, 'HTTP status code %d: %s' % (d.status, HTTP.responses[d.status]))\n\n\t# we received a full document. Check if we could parse it\n\tif d.version==None or d.version=='':\n\t\t# TODO: no idea how to get real feedback on whether parsing worked or not. check feedparser docs again\n\t\traise FeedParseError(url, d, 'Unsupported Document Type')\n\n\treturn d",
"def fetchFeed( self ):\n global threadcount\n numgrabbed = 0\n #don't do conditional download if we are trying to catchup or any of the getall options match\n if self.options[\"catchup\"] or re.compile( self.title, re.I ).match( self.options[\"getall\"] ) or self.options[\"getallglobal\"]:\n logger.debug(\"Ignoring any conditional download\")\n logger.debug(\"Attempting to parse feed\")\n feed = feedparser.parse( self.url, agent=USER_AGENT )\n else:\n #if not catchup use last-modified or ETag to see if feed has changed since last download\n try:\n if self.feedLogDict.has_key( self.url ):\n if self.feedLogDict[self.url][\"e-tag\"]:\n feed = feedparser.parse( self.url, etag=self.feedLogDict[self.url][\"e-tag\"], agent=USER_AGENT )\n if feed.status == 304:\n raise PeapodError, \"etag\"\n elif self.feedLogDict[self.url][\"modified\"]:\n feed = feedparser.parse( self.url, modified=time.gmtime( float( self.feedLogDict[self.url][\"modified\"] ) ), agent=USER_AGENT )\n if feed.status == 304:\n raise PeapodError, \"last-modified\"\n else:\n try:\n logger.debug(\"Attempting to parse feed\")\n feed = feedparser.parse( self.url, agent=USER_AGENT )\n except Exception,e:\n logger.warn(\"Unable to parse feed: \" + self.url)\n threadcount = threadcount -1\n else:\n logger.debug(\"Attempting to parse feed\")\n feed = feedparser.parse( self.url, agent=USER_AGENT )\n except PeapodError, e:\n logger.info( str( e.value ) + \" unchanged, not fetching: \" + str( self.url ))\n threadcount = threadcount - 1\n #we can't just use makefeedlogentry here because we haven't actually downloaded the feed\n self.feedlog = self.feedlog + \"%s||%s||%s\\n\" % ( self.url, self.feedLogDict[self.url][\"e-tag\"], self.feedLogDict[self.url][\"modified\"] )\n return self.message, self.log, self.feedlog\n except AttributeError, e:\n logger.info(\"%s: %s : problem getting url\" % ( self.url, e ))\n if feed.has_key( \"headers\" ):\n logger.info( feed.headers )\n threadcount = threadcount - 1\n return 
self.message, self.log, self.feedlog\n# except:\n# print >> sys.stderr, \"Failed to fetch/parse %s\" % self.url\n# threadcount = threadcount - 1\n# return self.message,self.log\n\n #update feed.log\n self.makefeedlogentry( feed )\n\n # if we don't already have a title, then grab one from the feed\n if not self.title:\n # if the feed has no title then just bail out as it's probably gibberish\n if not feed.feed.has_key( 'title' ):\n logger.info(\"Ignoring feed - no title \" + self.url)\n return self.message, self.log, self.feedlog\n\n self.title = feed['feed']['title']\n\n # strip out any non-alphanumericals in the title so we can safely(ish) use it as a path-name\n #self.title = re.sub( \"\\W\\W*\", \"_\", self.title )\n #self.options[\"getall\"] = re.sub( \"\\W\\W*\", \"_\", self.options[\"getall\"] )\n\n logger.info(\"Fetching feed for \" + self.title)\n\n # set the base directory of the feed to the global \"savedir\" + the sanitised feed title\n if self.options[\"savestyle\"] == \"feed\":\n basedir = \"%s/%s\" % ( self.options[\"savedir\"], self.title )\n tmpdir = basedir\n elif self.options[\"savestyle\"] == \"date\":\n basedir = \"%s/%s\" % ( self.options[\"savedir\"], self.options[\"datedir\"] )\n tmpdir = self.options[\"savedir\"]\n elif self.options[\"savestyle\"] == \"none\":\n basedir = self.options[\"savedir\"]\n tmpdir = basedir\n else:\n basedir = self.options[\"savedir\"]\n tmpdir = basedir\n\n # if we've never seen this feed before, then make a directory for it\n if not os.path.exists( basedir ):\n logger.debug(\"Creating directory for feed: \" + basedir)\n os.makedirs( basedir )\n\n # this is the first time we've seen the feed - if we've been told only to download\n # the latest feed for new stuff then set the maxfetch counter to \"1\"\n if self.options[\"newfeedsingle\"] == 1:\n self.maxfetch = 1\n\n # check to see if we are to over-ride the maxfetch and download everything for this feed\n if re.compile( self.title, re.I ).match( 
self.options[\"getall\"] ) or self.options[\"getallglobal\"]:\n self.maxfetch = 1000000\n getall = 1\n logger.info(\"Fetching all podcasts for %s\" % self.title)\n else:\n getall = None\n\n # loop over each entry in the podcast feed (again, all praise feedparser.org!)\n timelist = []\n feeds = {}\n #make feed_count 3 months in the future so that we can deal with feeds that have a couple of\n #dodgy pubDates\n feed_count = int( time.mktime( time.localtime() ) ) + 7776000\n #before we get to downloading the podcasts it's a good idea to order the feed by published date\n for entry in feed.entries:\n mp3URL,content_type = self.getcontenturl(entry)\n if mp3URL:\n if entry.has_key( \"modified_parsed\" ):\n try:\n time_epoch = time.mktime( entry.modified_parsed )\n except TypeError:\n #this is for feeds that advertise pubDate but don't create entries\n try:\n grabber = downloadURL( mp3URL, basedir, tmpdir,bittorrent=self.options[\"bittorrent\"], bandwidth=self.bandwidth, content_type=content_type )\n except IOError:\n self.makefeedlogentry( None )\n continue\n entry[\"grabber\"] = grabber\n if grabber.info.has_key( \"last-modified\" ):\n if feedparser._parse_date( grabber.info[\"last-modified\"] ):\n time_epoch = time.mktime( feedparser._parse_date( grabber.info[\"last-modified\"] ) )\n else:\n time_epoch = feed_count\n feed_count = feed_count - 1\n else:\n time_epoch = feed_count\n feed_count = feed_count - 1\n else:\n logger.info(\"No pubDate information for \" + self.title)\n #podcasts which don't use pubDate use a fake time. 
These feeds end up getting\n #read from top to bottom like they would if we were not ordering by time\n try:\n grabber = downloadURL( mp3URL, basedir, tmpdir, bittorrent=self.options[\"bittorrent\"], bandwidth=self.bandwidth, path=self.options[\"path\"], content_type=content_type )\n except (KeyboardInterrupt, SystemExit):\n sys.exit()\n except Exception:\n self.makefeedlogentry( None )\n continue\n entry[\"grabber\"] = grabber\n if grabber.info.has_key( \"last-modified\" ):\n time_epoch = time.mktime( feedparser._parse_date( grabber.info[\"last-modified\"] ) )\n else:\n time_epoch = feed_count\n feed_count = feed_count - 1\n\n #occasionaly you get idiots who put two entries in with the same pubDate\n #we increment the second by 1 so that we get both podcasts\n while 1:\n if time_epoch in timelist:\n time_epoch = time_epoch - 1\n else:\n break\n timelist.append( time_epoch )\n feeds[time_epoch] = entry\n\n timelist.sort()\n timelist.reverse()\n\n #go through the podcasts from latest to earliest\n for time_epoch in timelist:\n entry = feeds[time_epoch]\n # get the \"enclosure\" tag which should contain our mp3/ogg/whatever\n mp3URL,content_type = self.getcontenturl(entry)\n if not mp3URL:\n #no enclosures so move on to next\n logger.info(\"No enlosures found.\")\n continue\n\n #quick check against guid first before bothering to head back to the webserver\n if self.dowehaveit( entry ):\n self.maxfetch = self.maxfetch -1\n if self.maxfetch <= 0:\n break\n else:\n continue\n\n # open it as a stream using the \"openanything\" module from \"Dive Into Python\" (thanks!)\n if entry.has_key( \"grabber\" ):\n grabber = entry[\"grabber\"]\n else:\n try:\n grabber = downloadURL( mp3URL, basedir, tmpdir, bittorrent=self.options[\"bittorrent\"], bandwidth=self.bandwidth, path=self.options[\"path\"], content_type=content_type )\n except (KeyboardInterrupt, SystemExit):\n sys.exit()\n except Exception, e:\n logger.info(\"Unable to download enclosure: \" + mp3URL)\n 
self.makefeedlogentry( None )\n continue\n\n if not grabber.trackname:\n #no filename indicates something went wrong so move on\n logger.info(\"Not downloading \" + mp3URL)\n self.makefeedlogentry( None )\n continue\n else:\n trackname = grabber.trackname\n savename = grabber.savename\n mp3URL = grabber.url\n\n # check to see if we've already got this track downloaded\n if trackname in self.filelist:\n\n # we have - so decrease the counter and check to see if we're done\n #check that the time on this podcast isn't in the future. If it is it's probably\n #a bad time. don't decrease maxfetch so that a bad pubdate doesn't clog up the feed\n logger.debug(\"Already have file. Skipping download\")\n if not int( time_epoch ) > int( time.mktime( time.localtime() ) ):\n if not getall:\n self.maxfetch = self.maxfetch -1\n if self.maxfetch <= 0:\n break\n else:\n continue\n else:\n continue\n\n logger.info(\"\\tDownloading %s -- %s\" % (self.title, mp3URL))\n logger.info(\"\\tTrackname \" + trackname)\n logger.info(\"\\tSavename \" + savename)\n logger.info(\"\\tMime-type \" + grabber.info[\"content-type\"])\n\n if self.options[\"tellnew\"]:\n self.message = self.message + savename + \" (\" + self.title + \")\\n\"\n\n\n if ( not ( self.options[\"dryrun\"] or self.options[\"catchup\"] ) ):\n #break for problems reading url\n try:\n grabber.get()\n except IOError, e:\n logger.info(\"Unable to download enclosure \" + mp3URL)\n self.makefeedlogentry( None )\n break\n\n # update our log of downloaded tracks\n if entry.has_key( 'id' ):\n self.log = self.log + \"%s||%s||%s\\n\" % ( savename, entry[\"id\"], int( time.time() ) )\n else:\n self.log = self.log + \"%s||None||%s\\n\" % ( savename, int( time.time() ) )\n\n #if we have python-vorbis or eyed3 re-write the file's id3/ogg tags\n #check that it's an mp3 or ogg to get round m4a corruption problem\n #we have to let bittorrent files through because we don't know what type they are\n if not ( self.options[\"dryrun\"] or 
self.options[\"catchup\"] or sys.platform.startswith(\"win\") ):\n if grabber.info[\"content-type\"] in ('audio/mpeg','application/ogg','audio/x-mpeg','application/x-bittorrent'):\n editTags( feed['feed'],entry, self.options, savename )\n\n #run post command if specified\n if self.options[\"post\"] and not ( self.options[\"dryrun\"] or self.options[\"catchup\"] ):\n cmd = \"%s %s\" % (self.options[\"post\"], savename)\n proc = Popen(cmd, shell=True, stderr=PIPE)\n\t\tposterrs = proc.stderr.read()\n\t\terrno = proc.wait()\n\t\tif errno:\n logger.warn(\"Post script failed:%s:%s\" % (cmd,posterrs))\n else:\n logger.debug(\"Post script ran:%s:%s\" % (cmd,posterrs))\n\n # update our track counters\n numgrabbed = numgrabbed + 1\n if not getall:\n self.maxfetch = self.maxfetch - 1\n\n # if we've hit our limit them bail out\n if self.maxfetch <= 0:\n break\n\n # indicate that we've finished with this thread to the global counter\n threadcount = threadcount - 1\n # and return with our messages and log\n return self.message, self.log, self.feedlog"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Queries the Facebook API to see if the input is valid.
|
def is_valid(self, object_name):
try:
resp = self.get_object_id(object_name)
return True
except FacebookError as e:
return False
|
[
"def validate_data(data):\n if not data.get(\"response_url\"):\n return False\n if not data.get(\"token\"):\n return False\n if not data.get(\"command\"):\n return False\n if not data.get(\"user_name\"):\n return False\n if not data.get(\"channel_name\"):\n return False\n return True",
"def valid_input(url_to_check: str) -> bool:\r\n if url_to_check == 'q':\r\n sys.exit()\r\n\r\n else:\r\n result = urlparse(url_to_check)\r\n\r\n if ('youtube.com' not in result.netloc) and ('youtube.com' not in result.path):\r\n return False\r\n\r\n if not ('list' in parse_qs(result.query).keys()):\r\n return False\r\n\r\n return True",
"def isvalid(self, schema_or_url, data):\n\n try:\n self.validate(schema_or_url, data)\n\n except JsonValidationError:\n return False\n\n return True",
"def _facebook_flow(oauth):\n appsecret_proof = sha256()\n appsecret_proof.update(request.form[\"token\"])\n appsecret_proof.update(oauth.SECRET)\n hash_ = appsecret_proof.digest()\n response = _validate_response(requests.get(\n \"{}/age_range?access_token={}\".format(\n urljoin(FACEBOOK_URL, request.form[\"user_id\"]), request.form[\"token\"]\n ),\n headers={\"appsecret_proof\", hash_}\n ))\n # validate age, must be 21 in USA. Because we are making a liquor app...\n if response.json()[\"min\"] != \"21\":\n raise exceptions.UserUnderageError()",
"def validate_token(auth_token):\n try:\n # create an instance of the facebook graph\n facebook_graph = facebook.GraphAPI(\n access_token=auth_token, version=\"3.0\"\n )\n # Get user data\n user_data = facebook_graph.request('/me?fields=id,name,email')\n return user_data\n except facebook.GraphAPIError:\n msg = \"Invalid or expired token\"\n return msg",
"def validate_access_token(access_token):\n try:\n graph = facebook.GraphAPI(access_token=access_token, version=\"3.1\")\n user_data = graph.request(\"/me?fields=name,email\")\n return user_data\n\n except GraphAPIError as error:\n raise ValidationError(\n {\"error\": {\"access_token\": \"Invalid token\", \"details\": str(error)}}\n )",
"def test_validation_ok(self, schema):\n data = {\n 'title': 'title',\n 'author': 'author',\n 'pages': 111,\n 'isReserved': False\n }\n\n errors = schema.validate(data)\n assert not errors",
"def input_valid(self, settings_to_test):\n return (True, \"ok\")\n #return (False, \"All arguments are assumed invalid until verified\")",
"def test_presence_validations(self):\r\n # missing required name field\r\n person = dict(email='example@example.com')\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'name' in errors\r\n assert 'enter a value' in errors['name'].lower()\r\n\r\n # missing required email field\r\n person = dict(name='Jeffrey')\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'enter a value' in errors['email'].lower()\r\n\r\n # everything required is now provided\r\n person = dict(name='Jeffrey', email='example@example.com', age=24)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 201\r\n personid = loads(response.data)['id']\r\n\r\n # check that the provided field values are in there\r\n response = self.app.get('/api/test/' + str(personid))\r\n assert response.status_code == 200\r\n data = loads(response.data)\r\n assert data['name'] == 'Jeffrey'\r\n assert data['email'] == 'example@example.com'",
"def test_facebook_provider(self):\n access_token = \"bad token\"\n self.assertFalse(providers.facebook_provider(access_token))",
"def validated(self, post_data):\n if 'subreddit' not in post_data or not isinstance(post_data['subreddit'], str):\n return False\n if 'start' not in post_data or not isinstance(post_data['start'], int):\n return False\n if 'end' not in post_data or not isinstance(post_data['end'], int):\n return False\n\n return True",
"def validate(self, request):\n\t\treturn True",
"def valid(self, token_id):",
"def _check_form_validity(self):\n\n for idsp in self._idsp_input:\n if not idsp.form_is_valid():\n self._invalid_input_eh()\n return\n\n self._valid_input_eh()",
"def validateURL(url):",
"def test_facebook_login_not_new_user(self, user_object):\n user_object.return_value = {\n 'email': 'jon@mail.com',\n 'name': 'Jon'\n }\n self.client.post(self.url_facebook,\n data=json.dumps(\n valid_facebook_token),\n content_type='application/json')\n response = self.client.post(self.url_facebook,\n data=json.dumps(\n invalid_facebook_token),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def is_valid(self, model_spec):\n try:\n self._check_spec(model_spec)\n except OutOfDomainError:\n return False\n\n return True",
"def request_is_valid(Klass, request):\n if request.method == 'POST':\n arr = request.POST\n elif request.method == 'GET':\n arr = request.GET\n else:\n raise AuthorizationException()\n \n if Klass._REQUEST_KEY not in arr:\n raise AuthorizationException()\n \n tok = arr[Klass._REQUEST_KEY]\n qs = Klass.objects.filter(value=tok)\n \n if not qs.exists():\n raise InvalidTokenException(tok)\n \n if qs[0].is_disabled():\n raise DisabledTokenException(qs[0])\n \n return True",
"def test_validate_feed(self):\n body = ValidationTaskResult()\n response = self.client.open(\n \"/v0/validate\",\n method=\"POST\",\n data=json.dumps(body),\n content_type=\"application/json\",\n )\n self.assert200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Utility function that determines if this sample should be kept
|
def should_keep(p_keep=1.):
return random.random() <= p_keep
|
[
"def should_sample(self):\n return self.span_context.enabled and self.sampler.should_sample",
"def exclude_samples(self):\n return ~self.frame_flagspace.flags.SAMPLE_SOURCE_BLANK",
"def is_sample(run):\n kind = get_kind(run)\n return kind in (\"sesans\", \"sans\")",
"def is_excluded_item(target):",
"def keep(card):\n return card['rarity'] != 'Basic Land'",
"def valid_sample(sample):\n if sample is None or sample.peak is None or sample.peak.pa is None:\n return False\n else:\n return True",
"def _is_keep(self, elt):\n return elt in self.elts_to_keep",
"def _should_save(self) -> bool:\n return random.randint(0, self.MAX_RAND_INT) == 0",
"def _is_noise(self, _):\n return False",
"def lair_choose_takeback(self):\n return True if len(self.player.discard) > 2 else False",
"def check_duplicatename(lims, lims_sample):\n result = True\n samples = lims.get_samples(name=lims_sample.name,\n udf={'customer': lims_sample.udf['customer']})\n for other_sample in samples:\n if other_sample.id != lims_sample.id:\n # same sample id twice!\n if other_sample.udf.get('cancelled') == 'yes':\n log.info(\"duplicate sample but the other is cancelled\")\n else:\n log.error(\"sample name duplicate: %s | %s\", lims_sample.id,\n other_sample.id)\n result = False\n return result",
"def needs_resampling(self):\n max_weight = 0\n for par in self.particles:\n max_weight = max(max_weight, par[0])\n\n return 1.0 / max_weight < self.resampling_threshold",
"def indel(record):\r\n\tfor i in record.ALT:\r\n\t\tif i:\r\n\t\t\tif len(i) > 1:\r\n\t\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\tif len(record.REF) >1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def test_exclude_case_by_sample_id(base_store: Store, helpers):\n\n # GIVEN a database with a sample with internal_id\n new_case = add_case(helpers, base_store)\n sample = helpers.add_sample(base_store)\n base_store.relate_sample(new_case, sample, \"unknown\")\n\n # WHEN getting active cases by non-existing sample id\n cases = base_store.cases(sample_id=\"dummy_id\")\n\n # THEN cases should not contain this case\n assert not cases",
"def still_needs(self, junk):\n return self._ghost_junk.contains(junk)",
"def _dont_keep_variable(self, variable):\n\n if variable=='N_LooseLeptons' and self._category!='all':\n return True\n elif variable=='N_TightLeptons' and self._category!='all':\n return True\n elif variable=='N_Jets' and self._category[0]!='6' and self._category!='all':\n return True\n elif variable=='N_BTagsM' and self._category!='54' and self._category!='64' and self._category!='all':\n return True\n else:\n return False",
"def _run_successful(sample):\n # TODO Implement more thoroughly than just checking if file is empty\n return os.stat(sample.mature_readcount).st_size >= 0 and os.stat(sample.hairpin_readcount).st_size >= 0",
"def _check_ignore_waveforms(self):\n return False",
"def _loud_enough(self):\n return self.num_loud_chunks > self.min_loud_chunks"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function. Loads all the beams in a directory into bricks and data
|
def _load_beams(self, dirpaths: Iterable[str]) -> [Iterable[Brick], Iterable[Brick]]:
random.seed(self._training['seed'])
beam_filepaths = []
for dirpath in dirpaths:
beam_filepaths.extend([os.path.join(dirpath, filename) for filename in os.listdir(dirpath)])
beam_filepaths = list(sorted(beam_filepaths))
#ref = self
#pool = mp.Pool()
#fps = [(self, fp) for fp in beam_filepaths]
#train_bricks, val_bricks = pool.map(f, fps)
#train_bricks, val_bricks = list(itertools.chain.from_iterable(train_bricks)), \
# list(itertools.chain.from_iterable(val_bricks))
train_bricks, val_bricks = [], []
for i, (filepath) in enumerate(beam_filepaths):
bricks, beam = self.load_beam(filepath=filepath)
if should_keep(p_keep=self._training['trainRatio']):
if beam.valid:
train_bricks.extend(bricks)
else:
continue
else:
val_bricks.extend(bricks)
console_logger.progress('Loading Beams -', i + 1, len(beam_filepaths), debounce_buffer=5)
console_logger.progress('Loading Beams -', i + 1, len(beam_filepaths), debounce_buffer=0.)
random.seed()
return train_bricks, val_bricks
|
[
"def load_beam(self, filepath: str) -> Iterable[Brick]:",
"def load_all_blocks(folder_path):\n\n import os\n import numpy as np\n gt_files = np.sort(os.listdir(folder_path))\n\n block_list=[]\n\n for file in gt_files:\n\n block_path=os.path.join(folder_path, file)\n\n block_list.append(load_block(block_path))\n\n return block_list",
"def fetch_datasets():\n for root, dir, files in os.walk(\"data\"):\n datasets = [f.replace(\".pickle\", \"\") for f in sorted(files)]\n return datasets",
"def load_batteries():\n # find specific directory with the data\n subpath = f\"Huizen&Batterijen\\wijk{INPUT}_batterijen.txt\"\n path = str(Path.cwd()).replace(\"scripts\", subpath)\n\n with open(path) as batteries_text:\n\n # read text file per line\n data_batteries = batteries_text.readlines()\n\n # delete headers\n data_batteries.pop(0)\n\n batteries = {}\n\n # Library color list toevoegen\n COLOUR_LIST = [\"m\", \"g\", \"c\", \"y\", \"b\",\n \"grey\", \"maroon\", \"yellow\", \"orange\",\n \"fuchsia\", \"lime\", \"peru\"]\n\n # for every batterie isolate coordinates and capacity\n for id, battery in enumerate(data_batteries):\n coordinates = battery.split(\"\\t\", 1)[0]\n cap = battery.split(\"\\t\", 1)[1].strip()\n x = re.sub(\"\\D\", \"\", coordinates.split(\",\", 1)[0])\n y = re.sub(\"\\D\", \"\", coordinates.split(\",\", 1)[1])\n colour = COLOUR_LIST[id]\n batteries[id] = Battery(cap, x, y, colour)\n\n # return dict to INIT\n return batteries",
"def _load_data(self) -> None:\n\n # load test split containing, for each class\n # the test filenames\n with open(\"scr_test_split.json\", \"r\") as f:\n test_split_dict = json.load(f)\n\n data = []\n targets = []\n for classname in self.classes:\n files = [el for el in os.listdir(os.path.join(self.root, classname))\n if el.endswith('.wav')]\n\n features = []\n for i, f in enumerate(files):\n # load appropriate files based on fixed split\n if self.split == 'test' and f not in test_split_dict[classname]:\n continue\n elif self.split == 'train' and f in test_split_dict[classname]:\n continue\n\n audio, sample_rate = torchaudio.load(os.path.join(self.root, classname, f))\n assert sample_rate == self.sample_rate\n features.append(self.mel_spectr(audio).permute(0, 2, 1))\n\n data.append(torch.cat(features, dim=0)) # batch-first sequence\n targets.append(torch.ones(data[-1].size(0)).long() * self.class_to_id[classname])\n\n self.data = torch.cat(data)\n self.targets = torch.cat(targets)",
"def load_breed_names():\n with open('breeds.pkl', 'rb') as file:\n breed_names = pickle.load(file)\n return breed_names",
"def load_data(links=dataset_links, data_dir='data', remove_old=False):\n\n if not os.path.isdir(data_dir) or remove_old:\n if os.path.isdir(data_dir):\n shutil.rmtree(data_dir)\n \n os.mkdir(data_dir)\n os.mkdir(os.path.join(data_dir, 'spam'))\n os.mkdir(os.path.join(data_dir, 'ham'))\n\n download_all_datasets(links, data_dir)\n\n data = []\n folders = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, f))]\n for fol in folders:\n files = [os.path.join(fol,f) for f in os.listdir(fol) if os.path.isfile(os.path.join(fol, f))]\n for file_number, fil in enumerate(files):\n with open(fil, errors='replace', encoding='utf-8') as f:\n print(file_number, \" \", f.name)\n d = f.read()\n try:\n doc = LambDocument(\n os.path.basename(f.name),\n d,\n os.path.basename(os.path.dirname(fil))\n )\n data.append(doc)\n except AssertionError:\n print(\"Skipping file #\", file_number, \", path: \", f.name)\n print(\"Encoding error\")\n\n return data",
"def load_and_pickle_datasets(augment=False):\n subdirs = ['vehicles/GTI_Far',\n 'vehicles/GTI_Left',\n 'vehicles/GTI_MiddleClose',\n 'vehicles/GTI_Right',\n '/object-dataset-select',\n 'non-vehicles/Extras',\n 'non-vehicles/GTI',\n 'non-vehicles-additional']\n\n ''' 1 if the corresponding element in `subdirs` is a directory with car images, 0 if it is a directory with non-car\n images '''\n subdirs_y = [1, 1, 1, 1, 1, 0, 0, 0]\n\n dataset_x, dataset_y = [], []\n for subdir, y in zip(subdirs, subdirs_y):\n path_to_subdir = Params.dataset_base_dir + '/' + subdir\n for fname in os.listdir(path_to_subdir):\n if not fname.endswith('.png'):\n continue\n image = cv2.imread(path_to_subdir + '/' + fname)\n assert image is not None\n image = format_image(image)\n dataset_x.append(image)\n label = Params.car_label if y == 1 else Params.non_car_label\n dataset_y.append(label)\n if augment and label == Params.non_car_label:\n flipped = np.fliplr(image)\n dataset_x.append(flipped)\n dataset_y.append(label)\n\n dataset_x, dataset_y = shuffle(dataset_x, dataset_y, random_state=Params.random_seed)\n ''' Break down the dataset in several pickled files, so they are small enough to be allowed on GitHub;\n generate n_intervals+1 pickled files '''\n n_intervals = 5\n entries_per_file = len(dataset_x) // n_intervals\n counter =0\n for offset in range(0, len(dataset_y), entries_per_file):\n chunk_x = dataset_x[offset:offset+entries_per_file]\n chunk_y = dataset_y[offset:offset + entries_per_file]\n pickle_fname= Params.pickled_dataset_bname + '-' + str(counter) + '.p'\n pickle.dump((chunk_x, chunk_y), open(pickle_fname, \"wb\"))\n counter +=1\n\n return dataset_x, dataset_y",
"def load(self, folder):\n self.g_AB.load_weights('%s/generatorAB.h5' % folder)\n self.g_BA.load_weights('%s/generatorBA.h5' % folder)",
"def load_blocks(self, verbose=True):\n nblocks = len(blocklist)\n # blocks = []\n if nblocks == 0:\n # add bdsim blocks folder\n blockpath = [Path(__file__).parent / 'blocks']\n\n # add RTB and MVTB if they exist\n try:\n import roboticstoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n try:\n import machinvevisiontoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n # blocklist = []\n\n # path = os.getenv('BDSIMPATH')\n # if path is not None:\n # for p in path.split(':'):\n # blockpath.append(Path(p)) \n \n if verbose:\n print('Loading blocks:')\n\n blocks = []\n for path in blockpath: # for each folder on the path\n if not path.exists():\n print(f\"WARNING: path does not exist: {path}\")\n continue\n for file in path.iterdir(): # for each file in the folder\n blocks_this_file = []\n\n # scan every file *.py to find block definitions\n # a block is a class that subclasses Source, Sink, Function, Transfer and\n # has an @block decorator.\n #\n # The decorator adds the classes to a global variable blocklist in the\n # component module's namespace.\n if not file.name.startswith('test_') and not file.name.startswith('__') and file.name.endswith('.py'):\n # valid python module, import it\n try:\n # module = importlib.import_module('.' + file.stem, package='bdsim.blocks')\n spec = importlib.util.spec_from_file_location(file.name, file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n # module = importlib.import_module('.' 
+ file.stem, package='bdsim.blocks')\n except SyntaxError:\n print(f\"-- syntax error in block definition file: {file}\")\n\n for cls in module.__dict__.values():\n if not inspect.isclass(cls) or \\\n inspect.getmro(cls)[-2].__name__ != 'Block' or \\\n not cls.__module__.startswith(file.name):\n continue\n\n # we have a block class candidate\n if cls.blockclass in ('source', 'transfer', 'function'):\n # must have an output function\n valid = hasattr(cls, 'output') and \\\n callable(cls.output) and \\\n len(inspect.signature(cls.output).parameters) == 2\n if not valid:\n raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n if cls.blockclass == 'sink':\n # must have a step function with at least one\n # parameter: step(self [,state])\n valid = hasattr(cls, 'step') and \\\n callable(cls.step) and \\\n len(inspect.signature(cls.step).parameters) >= 1\n if not valid:\n raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n\n blocks_this_file.append(blockname(cls))\n blocks.append(block(blockname(cls), cls, file))\n\n if verbose and len(blocks_this_file) > 0:\n print(' loaded {:d} blocks from {:s}: {:s}'.format(\n len(blocks_this_file),\n str(file),\n ', '.join(b for b in blocks_this_file)))\n \n\n # # components.blocklist grows with every block import\n # if len(blocklist) > nblocks:\n # # we just loaded some more blocks\n # if verbose:\n # print(' loading blocks from {:s}: {:s}'.format(str(file), ', '.join([blockname(cls) for cls in blocklist[nblocks:]])))\n \n # # perform basic sanity checks on the blocks just read\n # for cls in blocklist[nblocks:]:\n # print(cls)\n # if cls.blockclass in ('source', 'transfer', 'function'):\n # # must have an output function\n # valid = hasattr(cls, 'output') and \\\n # callable(cls.output) and \\\n # len(inspect.signature(cls.output).parameters) == 2\n # if not valid:\n # raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n # if 
cls.blockclass == 'sink':\n # # must have a step function with at least one\n # # parameter: step(self [,state])\n # valid = hasattr(cls, 'step') and \\\n # callable(cls.step) and \\\n # len(inspect.signature(cls.step).parameters) >= 1\n # if not valid:\n # raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n \n # blocks.append(block(blockname(cls), cls, file))\n\n # nblocks = len(blocklist)\n\n return blocks",
"def _load_folder(self, folder):\n for f in os.listdir(folder):\n self._load_file(os.path.join(folder, f))",
"def init_datasets(config_path: str):\n with open(config_path) as f:\n datasets = load(f)\n for dataset in datasets:\n name = dataset[\"name\"]\n file = dataset[\"file\"]\n filename = config_path.split(\"/\")[-1]\n\n df = pd.read_csv(file, sep=None, engine=\"python\")\n columns = df.columns.values.tolist()\n featuretypes = infer_featuretypes(df)\n metadata = {\n \"columns\": columns,\n \"featuretypes\": featuretypes,\n \"original-filename\": filename,\n }\n try:\n # uses PlatIAgro SDK to save the dataset\n # marks as read only, so users can't mess with these datasets\n save_dataset(name, df, metadata=metadata, read_only=True)\n except PermissionError:\n pass",
"def _load_lba_data(datafiles, dist, maxnum):\n datasets = {}\n for split, datafile in datafiles.items():\n dataset = LMDBDataset(datafile, transform=TransformLBA(dist, maxnum, move_lig=False))\n # Load original atoms\n dsdict = extract_coordinates_as_numpy_arrays(dataset, atom_frames=['atoms_pocket','atoms_ligand'])\n # Add the label data\n dsdict['neglog_aff'] = np.array([item['scores']['neglog_aff'] for item in dataset])\n # Convert everything to tensors\n datasets[split] = {key: torch.from_numpy(val) for key, val in dsdict.items()}\n return datasets",
"def load_data(data_dir):\n # Change into data directory\n os.chdir(data_dir)\n images = []\n labels = []\n\n # Repeat as many times as categories exist\n for i in range(NUM_CATEGORIES):\n\n # Enter \"i\"'th folder\n path = os.path.join(os.getcwd(), str(i))\n os.chdir(path)\n\n # Get all images in folder\n files = os.listdir('.')\n\n # For each image, add it to images array\n for image in files:\n \n # Read image and convert it to RGB from BGR\n img = cv2.imread(image)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # If not correct size, resize image\n shape = img.shape\n if shape != (IMG_HEIGHT, IMG_WIDTH, 3):\n img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH))\n\n # Update arrays\n images.append(img)\n labels.append(i)\n\n # Go back to data folder\n os.chdir(\"..\")\n\n return(images, labels)",
"def load_splits(self, folder):\n split_paths = [f\"{folder}/{self.prefix}_{name}.txt\" for name in self.sets]\n if all(os.path.exists(path) for path in split_paths):\n file_lists = [get_lines(path) for path in split_paths]\n labeled_sets = self.convert_splits(file_lists)\n for name, split_set in zip(self.sets, labeled_sets):\n setattr(self, name, split_set)\n return True\n return False",
"def _load(self):\n sample_list = []\n for path in self.case_path_lst:\n for root, dirs, files in os.walk(path):\n for current_file in files:\n pandas_file = pandas.read_csv(root + current_file)\n date = path.split('/')[2]\n use_case = path.split('/')[3]\n sample_file = current_file\n sample_list.append(CellSample(use_case=use_case,\n date=date,\n sample_file=sample_file,\n raw_data=pandas_file,\n x=None, y=None))\n self.raw_sample_list = sample_list",
"def loadink(inkfolder):\n #Find all .inkml files from the subdirectories of the input folder\n files = [os.path.join(root, name)\n for root, dirs, files in os.walk(inkfolder)\n for name in files\n if name.endswith((\".inkml\"))]\n\n ## Parallel Loading ##\n\n sys.stdout.write(\"\\rLoading: 0.0%\")\n pool = Pool(8)\n featclassdata = []\n for i, result in enumerate(pool.imap_unordered(getfeatures, (f for f in files), chunksize=10)):\n sys.stdout.write(\"\\rLoading: {0:0.1f}%\".format(100*(i+1)/len(files)))\n featclassdata.extend(result)\n sys.stdout.write(\"\\n\")\n\n return featclassdata",
"def load_datasets(labelfile: Union[io.IOBase, str], boxdir: str, dataconfig: DataConfig, datasets=None,\n recursive=False, shuffle=True, verify=True, start_workers=True):\n\n out_datasets = {\n }\n\n if recursive:\n # walk the box directory. Create dataset for each directory that contains '.box.xz' files.\n for root, dirs, files in os.walk(boxdir):\n dirname = os.path.basename(root)\n\n if datasets is not None and dirname not in datasets:\n continue\n\n # accumulate all boxfiles\n boxfiles = [os.path.join(root, boxfile) for boxfile in files if\n RE_BOXXZFILE.search(boxfile) or RE_BOXFILE.search(boxfile)]\n\n if not len(boxfiles):\n continue\n\n # add files to current dataset, but only if the current root dir is not the top level box directory\n if not os.path.abspath(root) == os.path.abspath(boxdir):\n out_datasets[dirname] = DataSet(labelfile, boxfiles, dataconfig, shuffle=shuffle, verify=verify,\n start_worker=start_workers)\n if isinstance(labelfile, io.IOBase):\n labelfile.seek(io.SEEK_SET)\n else:\n # recurse into top level directories\n for dirname in (d.name for d in os.scandir(boxdir) if d.is_dir()):\n if datasets is not None and dirname not in datasets:\n continue\n\n files = (f.name for f in os.scandir(os.path.join(boxdir, dirname)))\n boxfiles = [os.path.join(boxdir, dirname, boxfile) for boxfile in files if\n RE_BOXXZFILE.search(boxfile) or RE_BOXFILE.search(boxfile)]\n\n if not len(boxfiles):\n continue\n\n out_datasets[dirname] = DataSet(labelfile, boxfiles, dataconfig, shuffle=shuffle, verify=verify,\n start_worker=start_workers)\n if isinstance(labelfile, io.IOBase):\n labelfile.seek(io.SEEK_SET)\n\n if datasets is not None and \"\" in out_datasets:\n rootfiles = list()\n for ds in out_datasets.values():\n # add files to root dataset\n rootfiles.extend(ds.files)\n\n out_datasets[\"\"] = DataSet(labelfile, rootfiles, dataconfig, shuffle=shuffle, verify=False,\n start_worker=start_workers)\n\n return out_datasets",
"def MbrLoader():\n import idaapi;\n import idc;\n\n global SECTOR_SIZE, BOOT_START, BOOT_SIZE, BOOT_END, SECTOR2, MBRNAME\n\n # wait till end of analysis\n idc.Wait()\n\n # adjust segment\n idc.SetSegBounds(BOOT_START, BOOT_START, BOOT_START + BOOT_SIZE, idaapi.SEGMOD_KEEP)\n\n # load the rest of the MBR\n idc.loadfile(MBRNAME, SECTOR_SIZE, SECTOR2, SECTOR_SIZE)\n\n # Make code\n idc.AnalyzeArea(BOOT_START, BOOT_END)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads the bricks for a specific beam and returns it.
|
def load_beam(self, filepath: str) -> Iterable[Brick]:
|
[
"def _load_beams(self, dirpaths: Iterable[str]) -> [Iterable[Brick], Iterable[Brick]]:\n\n random.seed(self._training['seed'])\n beam_filepaths = []\n for dirpath in dirpaths:\n beam_filepaths.extend([os.path.join(dirpath, filename) for filename in os.listdir(dirpath)])\n beam_filepaths = list(sorted(beam_filepaths))\n #ref = self\n #pool = mp.Pool()\n #fps = [(self, fp) for fp in beam_filepaths]\n #train_bricks, val_bricks = pool.map(f, fps)\n #train_bricks, val_bricks = list(itertools.chain.from_iterable(train_bricks)), \\\n # list(itertools.chain.from_iterable(val_bricks))\n\n train_bricks, val_bricks = [], []\n\n for i, (filepath) in enumerate(beam_filepaths):\n bricks, beam = self.load_beam(filepath=filepath)\n if should_keep(p_keep=self._training['trainRatio']):\n if beam.valid:\n train_bricks.extend(bricks)\n else:\n continue\n else:\n val_bricks.extend(bricks)\n console_logger.progress('Loading Beams -', i + 1, len(beam_filepaths), debounce_buffer=5)\n console_logger.progress('Loading Beams -', i + 1, len(beam_filepaths), debounce_buffer=0.)\n\n random.seed()\n return train_bricks, val_bricks",
"def get_top_bricks(self):\n return self.top_bricks",
"def get_beer(self, id): \r\n response = json.loads(self._call(\"%s/%s\" % (Beer.resource_url, id), self._params({'withBreweries': 'Y'})).text)\r\n return Beer(response['data'])",
"def get_instance(self, payload):\n return BrandsInformationInstance(self._version, payload, )",
"def get_brick(var):\n return get_annotation(var, Brick)",
"def load_batteries():\n # find specific directory with the data\n subpath = f\"Huizen&Batterijen\\wijk{INPUT}_batterijen.txt\"\n path = str(Path.cwd()).replace(\"scripts\", subpath)\n\n with open(path) as batteries_text:\n\n # read text file per line\n data_batteries = batteries_text.readlines()\n\n # delete headers\n data_batteries.pop(0)\n\n batteries = {}\n\n # Library color list toevoegen\n COLOUR_LIST = [\"m\", \"g\", \"c\", \"y\", \"b\",\n \"grey\", \"maroon\", \"yellow\", \"orange\",\n \"fuchsia\", \"lime\", \"peru\"]\n\n # for every batterie isolate coordinates and capacity\n for id, battery in enumerate(data_batteries):\n coordinates = battery.split(\"\\t\", 1)[0]\n cap = battery.split(\"\\t\", 1)[1].strip()\n x = re.sub(\"\\D\", \"\", coordinates.split(\",\", 1)[0])\n y = re.sub(\"\\D\", \"\", coordinates.split(\",\", 1)[1])\n colour = COLOUR_LIST[id]\n batteries[id] = Battery(cap, x, y, colour)\n\n # return dict to INIT\n return batteries",
"def set_bricks(self):\n for c in range(BRICKS_IN_ROW):\n for q in range(BRICK_ROWS):\n self._bricks.append(GRectangle(y=GAME_HEIGHT-\n (BRICK_Y_OFFSET+(BRICK_SEP_V+BRICK_HEIGHT)*(q+1)),\n x=BRICK_SEP_H/2.0+c*(float(BRICK_WIDTH)+float(BRICK_SEP_H)),\n linecolor=BRICK_COLORS[q%10], fillcolor=BRICK_COLORS[q%10],\n height=BRICK_HEIGHT, width=BRICK_WIDTH))\n self.view.add(GImage(size=(GAME_WIDTH,GAME_HEIGHT),x=0,y=0,\n source=\"futurama\" + str(random.randrange(10)) + \".png\"))\n for p in self._bricks:\n self.view.add(p)",
"def load_qa_brick(filename):\n from desispec.qa.qa_brick import QA_Brick\n log=get_logger()\n if os.path.isfile(filename): # Read from file, if it exists\n qabrick = read_qa_brick(filename)\n log.info(\"Loaded QA file {:s}\".format(filename))\n else: # Init\n qabrick = QA_Brick()\n # Return\n return qabrick",
"def _get_lb(self, lb_or_id):\r\n if isinstance(lb_or_id, CloudLoadBalancer):\r\n ret = lb_or_id\r\n else:\r\n ret = self.get(lb_or_id)\r\n return ret",
"def get(self):\n\n try:\n num = request.args.get('num')\n if num:\n num = int(num)\n user_id = current_user.get_id() if request.args.get('current_user') == '1' else None\n result = search_records.get_heat_brands(num, user_id)\n json_res = {\n 'brands': [x.to_json() for x in result]\n }\n return json_res, HTTPStatus.OK\n except Exception as err:\n return handle_internal_error(str(err))",
"def getBricks(self):\n return (len(self._bricks))",
"def get_bikers(self, id_team: int) -> dict[int, Biker]:\n\t\tself.send(CMD_GETBIKERS, id_team)\n\t\t_, *bikers_raw = self.receive_command()\n\t\tfor biker_raw in bikers_raw:\n\t\t\tid_, x, y = [int(attr) for attr in biker_raw.split(\";\")]\n\t\t\tif id_ in self._bikers:\n\t\t\t\tbiker = self._bikers[id_]\n\t\t\t\tbiker.pos.x = x\n\t\t\t\tbiker.pos.y = y\n\t\t\telse:\n\t\t\t\tself._bikers[id_] = Biker(self._map, id_, x, y)\n\t\treturn self._bikers",
"def retrieve_active_plates(cls, beamline):\n pass",
"def load_bace_classification(\n featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',\n splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',\n transformers: List[Union[TransformerGenerator, str]] = ['balancing'],\n reload: bool = True,\n data_dir: Optional[str] = None,\n save_dir: Optional[str] = None,\n **kwargs\n) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:\n loader = _BaceLoader(featurizer, splitter, transformers,\n BACE_CLASSIFICATION_TASKS, data_dir, save_dir,\n **kwargs)\n return loader.load_dataset('bace_c', reload)",
"def get_bmap(self, safemode=True):\n # only return if b map data is complete\n if safemode:\n self._chkdata()\n bmap = self.data_array\n segs = bmap[1]\n bkgd = self._b(bmap[0])\n return BkgdMap(bkgd, segs)",
"def connect_to_brick():\n return nxt.locator.find_one_brick()",
"def retrieve_active_plates(self, beamline):\n pass",
"def bullfighters_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=BULLFIGHTER_TYPE_URI,\n rdf_type_name=BULLFIGHTER_TYPE_NAME, \n kls=Bullfighter)",
"def __init__(self, name: str, nodes: List, beam_list: List, load_list: List):\n self.temp_beams = beam_list\n self.materials = {}\n self.material: str = \"\"\n self.name: str = name\n self.window = Graphics.Construction(\"Bridge 1\", 1280, 720)\n self.nodes: List = nodes\n self.beams: List = []\n self.current_loads = 0\n self.load_list = load_list\n self.beams = []\n self.last_iteration = False\n self.max_beams = []\n self.set_beams()\n self.optional_loads: List = []\n self.iteration = 0\n\n # Declare later used data\n self.matrix = []\n self.B = []\n self.X = []\n self.weight = np.inf\n self.get_materials()\n self.inter_plot = False\n print(\"Construction created...\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load graphnet module from string name.
|
def load_module(class_name: str) -> Type:
# Get a lookup for all classes in `graphnet`
import graphnet.data
import graphnet.models
import graphnet.training
namespace_classes = get_all_grapnet_classes(
graphnet.data, graphnet.models, graphnet.training
)
return namespace_classes[class_name]
|
[
"def load(name):\n g = read_graphml(\"graphs//\" + name + \".graphml\", node_type=int)\n return g",
"def load_module(self, name):\n if name not in sys.modules:\n sys.modules[name] = getattr(maps, name.split('.')[2])\n return sys.modules[name]",
"def _LoadDriver(self, name):\n module_name = name\n logging.info('Load module %s...', module_name)\n package = __import__('chameleond.drivers', fromlist=[module_name])\n module = getattr(package, module_name)\n return getattr(module, 'ChameleondDriver')",
"def load_module(name):\n names = name.split(\".\")\n path = None\n for name in names:\n f, path, info = imp.find_module(name, path)\n path = [path]\n return imp.load_module(name, f, path[0], info)",
"def load_module(module_name, path):\n return SourceFileLoader(module_name, path).load_module()",
"def load_name(cg, name, local_names):\n if name in local_names:\n cg.load_fast(name)\n else:\n cg.load_global(name)",
"def load_by_name(self, name):\n return self.load(self.names.get(name, 0))",
"def module(self, name: str) -> Any:\n return self.active_fork.module(name)",
"def load(self, name):\n try:\n module = do_import(name)\n if not hasattr(module, \"register\") or \\\n not callable(module.register):\n self.debug.warning(\n \"Warnung, Modul '%s' hat keine Funktion 'register'\" %\n name)\n return\n\n module.register(self.service)\n except Exception, e:\n self.debug.error(e, \"Fehler in Modul %s:\" % (str(name)))",
"def load_module_dynamic(path: str, name: str = None, force_reload: bool = False) -> ModuleType:\n path = _validate_module_path(path)\n name = name or path_to_module_name(path)\n if not force_reload and name in sys.modules:\n return sys.modules[name]\n\n spec = util.spec_from_file_location(name, path)\n if spec is None:\n raise ModuleNotFoundError(f\"Could not load module @ {path}, no model specification found.\")\n module = util.module_from_spec(spec)\n\n try:\n spec.loader.exec_module(module)\n except Exception as ex:\n logging.error(f\"Failed to load model @ {path}\")\n raise ex\n\n sys.modules[name] = module\n logging.info(f\"Loaded module {name} from file: {path}\")\n return module",
"def loadnetwork(fname):",
"def load_extension(self, name):\n fullname = 'extensions.%s' % name\n try:\n if HAS_IMPORTLIB:\n mod = importlib.import_module('.' + fullname, package=__package__)\n else:\n mod = __import__(fullname, globals(), locals(), [''], 1)\n except Exception as err:\n import traceback\n traceback.print_exc()\n mod = None\n return mod",
"def load_backend(backend_name):\r\n try:\r\n if len(backend_name.split(\".\")) > 1:\r\n mod = import_module(backend_name)\r\n elif backend_name == \"sqlite\":\r\n mod = import_module(\"gaffer.gafferd.auth.SqliteAuthHandler\")\r\n return mod\r\n except ImportError:\r\n error_msg = \"%s isn't a socketpool backend\" % backend_name\r\n raise ImportError(error_msg)",
"def importName(modulename, name=None):\n if name is None:\n modulename, name = modulename.split(':', 1)\n module = __import__(modulename, globals(), {}, [name])\n return getattr(module, name)",
"def import_name(path, name=None):\n if name is None:\n path, name = path.rsplit(\".\", 1)\n module = importlib.import_module(path)\n obj = getattr(module, name, UNSPECIFIED)\n if obj is UNSPECIFIED:\n raise ImportError(\"Cannot import name %s from %s\" % (name, path))\n return obj",
"def load_module (self, mod_name, config=1, *args, **kw_args): \n debug (\"In MayaVi::load_module ()\")\n if not self.dvm_name:\n msg = \"You need to have some data opened to be able to \"\\\n \"load a module. Click on the 'File' menu to \"\\\n \"open some data.\"\n print_err (msg)\n return\n dvm = self.data_viz_mgr[self.cur_dvm_name]\n mm = dvm.get_current_module_mgr ()\n if not mm:\n msg = \"You need to have an active ModuleManager to \"\\\n \"load a module. Click on the 'New' button to \"\\\n \"create one or select an existing but inactive \"\\\n \"ModuleManager and click on the 'Show' button to \"\\\n \"activate it.\"\n print_err (msg)\n return\n try:\n Common.state.busy ()\n _imp = Common.mod_fil_import\n if mod_name[:5] == 'User.':\n mod_scr = _imp('Modules', mod_name[5:], globals(),\n locals(), 1)\n mod_name = mod_name[5:]\n else:\n mod_scr = _imp('Modules', mod_name, globals(),\n locals(), 0)\n \n m = eval (\"mod_scr.%s\"%mod_name)(mm, *args, **kw_args)\n\n mm.add_module_gui (m)\n if config:\n m.configure (self.gui.root)\n self._do_render ()\n Common.state.idle ()\n return m\n except Exception, v:\n exception ()\n Common.state.force_idle ()",
"def try_load_module_dynamic(path, name: str = None, force_reload: bool = False) -> ModuleType:\n try:\n return load_module_dynamic(path, name, force_reload)\n except ModuleNotFoundError:\n return None",
"def register_graph(self, graph, name: str) -> str:\n return self._neural_graph_manager.register(graph, name)",
"def load_network(name):\n return pickle.load(open('saves/' + name + '.txt', 'rb'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct `Dataset` instance from `source` configuration.
|
def from_config( # type: ignore[override]
cls,
source: Union[DatasetConfig, str],
) -> Union[
"Dataset",
"EnsembleDataset",
Dict[str, "Dataset"],
Dict[str, "EnsembleDataset"],
]:
if isinstance(source, str):
source = DatasetConfig.load(source)
assert isinstance(source, DatasetConfig), (
f"Argument `source` of type ({type(source)}) is not a "
"`DatasetConfig`"
)
assert (
"graph_definition" in source.dict().keys()
), "`DatasetConfig` incompatible with current GraphNeT version."
# Parse set of `selection``.
if isinstance(source.selection, dict):
return cls._construct_datasets_from_dict(source)
elif (
isinstance(source.selection, list)
and len(source.selection)
and isinstance(source.selection[0], str)
):
return cls._construct_dataset_from_list_of_strings(source)
cfg = source.dict()
if cfg["graph_definition"] is not None:
cfg["graph_definition"] = parse_graph_definition(cfg)
return source._dataset_class(**cfg)
|
[
"def make_data_source(self, batch_size: int, cutoff_date: datetime = None) -> DataSource:\n importers_module = importlib.import_module(\"vulnerabilities.importers\")\n klass = getattr(importers_module, self.data_source)\n\n ds = klass(\n batch_size,\n last_run_date=self.last_run,\n cutoff_date=cutoff_date,\n config=self.data_source_cfg,\n )\n\n return ds",
"def from_config(cls, config: Dict):\n if keys.DataSteps.DATA not in config[keys.GlobalKeys.STEPS]:\n raise Exception(\"Cant have datasource without data step.\")\n\n # this is the data step config block\n step_config = config[keys.GlobalKeys.STEPS][keys.DataSteps.DATA]\n source = config[keys.GlobalKeys.DATASOURCE][keys.DatasourceKeys.SOURCE]\n datasource_class = source_utils.load_source_path_class(source)\n datasource_name = config[keys.GlobalKeys.DATASOURCE][\n keys.DatasourceKeys.NAME]\n _id = config[keys.GlobalKeys.DATASOURCE][keys.DatasourceKeys.ID]\n return datasource_class(\n name=datasource_name, _id=_id, _source=source,\n **step_config[keys.StepKeys.ARGS])",
"def load_da(source_name, target_name, test=False, *args, **kwargs):\n # Sanity checks\n assert source_name in datasets.datasets, \\\n source_name + \" not a supported dataset, only \"+str(datasets.datasets)\n assert target_name in datasets.datasets, \\\n target_name + \" not a supported dataset, only \"+str(datasets.datasets)\n\n # Get dataset information\n source_num_classes = datasets.datasets[source_name].num_classes\n source_class_labels = datasets.datasets[source_name].class_labels\n target_num_classes = datasets.datasets[target_name].num_classes\n target_class_labels = datasets.datasets[target_name].class_labels\n\n # Get dataset tfrecord filenames\n def _path(filename):\n \"\"\" Files are in datasets/ subdirectory. If the file exists, return it\n as an array since we may sometimes want more than one file for a\n dataset. If it doesn't exist, ignore it (some datasets don't have a test\n set for example).\"\"\"\n fn = os.path.join(\"datasets\", filename)\n return [fn] if os.path.exists(fn) else []\n\n names = (source_name, target_name)\n source_train_filenames = _path(tfrecord_filename(*names, source_name, \"train\"))\n source_valid_filenames = _path(tfrecord_filename(*names, source_name, \"valid\"))\n source_test_filenames = _path(tfrecord_filename(*names, source_name, \"test\"))\n target_train_filenames = _path(tfrecord_filename(*names, target_name, \"train\"))\n target_valid_filenames = _path(tfrecord_filename(*names, target_name, \"valid\"))\n target_test_filenames = _path(tfrecord_filename(*names, target_name, \"test\"))\n\n # By default use validation data as the \"test\" data, unless test=True\n if not test:\n source_test_filenames = source_valid_filenames\n target_test_filenames = target_valid_filenames\n\n # However, also train on the source \"valid\" data since we don't actually\n # care about those numbers much and some datasets like Office are really\n # small.\n if FLAGS.train_on_source_valid:\n source_train_filenames += source_valid_filenames\n 
print(\"Warning: training on source \\\"valid\\\" data\")\n\n # For very small datasets, e.g. Office-31, where there might only be a\n # few thousand target examples, then we might ought to use everything\n # for training (unlabeled still though; only validation uses labels for\n # testing, but not during training).\n if FLAGS.train_on_target_valid:\n target_train_filenames += target_valid_filenames\n print(\"Warning: training on unlabeled target \\\"valid\\\" data\")\n\n # If test=True, then make \"train\" consist of both training and validation\n # data to match the original dataset.\n else:\n source_train_filenames += source_valid_filenames\n target_train_filenames += target_valid_filenames\n\n # Create all the train, test, evaluation, ... tf.data.Dataset objects within\n # a Dataset() class that stores them\n source_dataset = Dataset(source_num_classes, source_class_labels,\n source_train_filenames, source_test_filenames,\n *args, **kwargs)\n target_dataset = Dataset(target_num_classes, target_class_labels,\n target_train_filenames, target_test_filenames,\n *args, **kwargs)\n\n return source_dataset, target_dataset",
"def _fetch_dataset_as_dataclass(\n source: Literal[\"openml\", \"world_bank\", \"figshare\"],\n dataset_name: str,\n dataset_id: Union[int, str],\n target: Optional[str],\n load_dataframe: bool,\n data_directory: Optional[Union[Path, str]] = None,\n read_csv_kwargs: Optional[dict] = None,\n) -> Union[DatasetAll, DatasetInfoOnly]:\n if isinstance(data_directory, str):\n data_directory = Path(data_directory)\n\n if source == \"openml\":\n info = _fetch_openml_dataset(dataset_id, data_directory)\n elif source == \"world_bank\":\n info = _fetch_world_bank_data(dataset_id, data_directory)\n elif source == \"figshare\":\n info = _fetch_figshare(dataset_id, data_directory)\n else:\n raise ValueError(f\"Unknown source {source!r}\")\n\n if read_csv_kwargs is None:\n read_csv_kwargs = {}\n\n if target is None:\n target = []\n\n if load_dataframe:\n if source == \"figshare\":\n df = pd.read_parquet(info[\"path\"])\n else:\n df = pd.read_csv(info[\"path\"], **read_csv_kwargs)\n y = df[target]\n X = df.drop(target, axis=\"columns\")\n dataset = DatasetAll(\n name=dataset_name,\n description=info[\"description\"],\n source=info[\"source\"],\n target=target,\n X=X,\n y=y,\n path=info[\"path\"],\n read_csv_kwargs=read_csv_kwargs,\n )\n else:\n dataset = DatasetInfoOnly(\n name=dataset_name,\n description=info[\"description\"],\n source=info[\"source\"],\n target=target,\n path=info[\"path\"],\n read_csv_kwargs=read_csv_kwargs,\n )\n\n return dataset",
"def create_dataset(cfg, writer, logger):\n data_loader = CustomDatasetDataLoader(cfg, writer, logger)\n dataset = data_loader.load_data()\n return dataset",
"def make_datasource(fileset:dict, name: str, query: ObjectStream, ignore_cache: bool, backend_name: str = \"uproot\"):\n datasets = [ServiceXDataset(fileset[name][\"files\"], backend_name=backend_name, ignore_cache=ignore_cache)]\n return servicex.DataSource(\n query=query, metadata=fileset[name][\"metadata\"], datasets=datasets\n )",
"def test_source_dataset_factory_build(self):\n source_dataset = factories.SourceDatasetFactory.build()\n self.assertIsInstance(source_dataset, models.SourceDataset)",
"def read_metadata(cls, source):\n if not source.scraper_name or not source.scraperwiki_url:\n return\n\n scraper_name = source.scraper_name\n api_key = source.scraper_api_key.strip() \\\n if source.scraper_api_key else ''\n scraperwiki_url = source.scraperwiki_url.rstrip('/')\n\n data_list = cls.get_metadata_of_scraper(\n scraperwiki_url, scraper_name, api_key\n )\n\n if isinstance(data_list, dict):\n raise Exception('Obtained error', repr(data_list))\n\n errors = []\n total = 0\n bounded = cls.BOUNDED\n for list_elem in data_list:\n stripped = lambda i: list_elem[bounded[i]].strip()\n stripped_or_none = lambda i: stripped(i)\\\n if bounded[i] in list_elem else None\n\n try:\n total += 1\n dataset = Dataset()\n dataset.source = source\n dataset.url = stripped(0)\n\n download = stripped(1)\n if download is None:\n raise Exception(\n 'Dataset %s does not define a valid download URL' %\n dataset.url\n )\n if download.startswith('http:'):\n dataset.download = download\n else:\n dataset.download = '{}{}:{}'.format(\n SCRAPER_PROTOCOL,\n scraper_name,\n get_table_name_from_scraper(stripped(1))\n )\n\n dataset.name = stripped(2)\n dataset.curator = stripped(3)\n dataset.license = stripped(4)\n dataset.description = stripped_or_none(5)\n dataset.tags = stripped_or_none(6)\n dataset.bounding_box = stripped_or_none(7)\n dataset.other_meta = json.dumps(cls._get_unbounded(list_elem))\n dataset.save()\n except Exception as e:\n logger.exception('Invalid dataset')\n errors.append(repr(e))\n return {'total': total, 'errors': len(errors), 'report': errors}",
"def from_array(cls, x, title='generic', chunks='auto', lock=False, \n datatype = 'UNKNOWN', units = 'generic', quantity = 'generic', \n modality = 'generic', source = 'source', **kwargs):\n\n # create vanilla dask array\n if isinstance(x, da.Array):\n dask_array = x\n else:\n dask_array = da.from_array(np.array(x), chunks=chunks, lock=lock)\n\n # view as sub-class\n sid_dataset = view_subclass(dask_array, cls)\n sid_dataset.data_type = datatype\n sid_dataset.units = units\n sid_dataset.title = title\n sid_dataset.quantity = quantity\n\n sid_dataset.modality = modality\n sid_dataset.source = source\n\n sid_dataset._axes = {}\n for dim in range(sid_dataset.ndim):\n # TODO: add parent to dimension to set attribute if name changes\n sid_dataset.set_dimension(dim,\n Dimension(np.arange(sid_dataset.shape[dim]), string.ascii_lowercase[dim]))\n sid_dataset.metadata = {}\n sid_dataset.original_metadata = {}\n return sid_dataset",
"def data_factory(config, data_type):\n data_source = config[data_type]['data_source']\n input_file = config[data_type]['file']\n outdir = config['outdir']\n output_file = f'{outdir}/{os.path.basename(input_file)}'\n\n if data_source == 's3':\n return S3Data(input_file, output_file)\n elif data_source == 'local':\n return LocalData(input_file, output_file)\n else:\n raise ValueError(\n f'Unknown data_source: \"{data_source}\".'\n ' data_source must be either \"s3\" or \"local\".')",
"def create_dataloader(self, **kwargs: Any) -> DataLoader:\n return DataLoader(self, **kwargs)",
"def dataset_from_files(**kwargs):\n random_seed = kwargs.get(\"random_seed\", None)\n preprocess = kwargs.get(\"preprocessor\", lambda x: x)\n name = kwargs.get(\"name\", \"dataset\")\n series = None\n series_paths = _get_series_paths(kwargs)\n\n debug(\"Series paths: {}\".format(series_paths), \"datasetBuild\")\n\n if len(series_paths) > 0:\n log(\"Initializing dataset with: {}\".format(\", \".join(series_paths)))\n series = {s: Dataset.create_series(series_paths[s], preprocess)\n for s in series_paths}\n name = kwargs.get('name', _get_name_from_paths(series_paths))\n\n series_outputs = {SERIES_OUTPUT.match(key).group(1): value\n for key, value in kwargs.items()\n if SERIES_OUTPUT.match(key)}\n\n dataset = Dataset(name, series, series_outputs, random_seed)\n log(\"Dataset length: {}\".format(len(dataset)))\n return dataset",
"def test_source_dataset_factory_create(self):\n source_dataset = factories.SourceDatasetFactory.create()\n self.assertIsInstance(source_dataset, models.SourceDataset)",
"def create_datasource(name):\n config = util.read_app_config(override_config=False)\n if isinstance(config, dict):\n datasource_list = config[DC.DATA_SOURCES_PARENT]\n else:\n raise TypeError(\"config is not a dict\")\n config = next(filter(lambda x: x[DC.NAME] == name, datasource_list), None)\n\n if not config:\n raise ValueError(f\"Unable to find a configuration to a datasource with name: {name}\")\n\n try:\n return globals()[config[DC.TYPE]](config)\n except KeyError:\n return DataSource(config)",
"def load_single_dataset(config):\n from ..dataset import Dataset # breaks circular import\n\n dataset = Dataset(directory=config.dataset_folder[0],\n replace_rare_tokens=config.replace_rare_tokens)\n dataset.load()\n\n return [dataset]",
"def load():\n data = _get_data()\n names = data.dtype.names\n dataset = Dataset(data=data, names=names)\n return dataset",
"def __init__(self, driver_name, data_source_name, query, output_types):\n super(SqlDataset, self).__init__()\n self._driver_name = ops.convert_to_tensor(\n driver_name, dtype=dtypes.string, name=\"driver_name\")\n self._data_source_name = ops.convert_to_tensor(\n data_source_name, dtype=dtypes.string, name=\"data_source_name\")\n self._query = ops.convert_to_tensor(\n query, dtype=dtypes.string, name=\"query\")\n self._output_types = output_types",
"def build_dataset(args, input_data, target_data):\n dataset = SupervisedDataSet(len(input_data[0]), len(target_data[0]))\n for in_data, tg_data in izip(input_data, target_data):\n dataset.addSample(in_data, tg_data)\n\n if args['verbose']:\n print('Dataset built.')\n\n return dataset",
"def from_file(cls, cfg: ConfigType) -> 'DatasetPreparer':\n\n cfg = copy.deepcopy(cfg)\n data_preparer = cls(\n data_root=cfg['data_root'],\n dataset_name=cfg.get('dataset_name', ''),\n task=cfg.get('task', 'textdet'),\n nproc=cfg.get('nproc', 4),\n train_preparer=cfg.get('train_preparer', None),\n test_preparer=cfg.get('test_preparer', None),\n val_preparer=cfg.get('val_preparer', None),\n delete=cfg.get('delete', None),\n config_generator=cfg.get('config_generator', None))\n return data_preparer"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Concatenate multiple `Dataset`s into one instance.
|
def concatenate(
cls,
datasets: List["Dataset"],
) -> "EnsembleDataset":
return EnsembleDataset(datasets)
|
[
"def append_datasets(*datasets):\n logging.info(\"Running union on datasets\")\n return reduce(DataFrame.unionAll, datasets)",
"def concat(dataarrays: Sequence[\"DataArray\"], keep=\"last\") -> \"DataArray\":\n from mikeio import Dataset\n\n datasets = [Dataset([da]) for da in dataarrays]\n\n ds = Dataset.concat(datasets, keep=keep)\n da = ds[0]\n assert isinstance(da, DataArray)\n return da",
"def concat_datasets(datasets):\n s0 = []\n s1 = []\n labels = []\n for s0x, s1x, labelsx in datasets:\n s0 += s0x\n s1 += s1x\n labels += labelsx\n return (np.array(s0), np.array(s1), np.array(labels))",
"def combine(datasets):\n first = datasets[0]\n for i in range(1, len(datasets)):\n first.extend(datasets[i])\n return first",
"def concat(self, other):\n\t\tdatasets = []\n\t\tif isinstance(self, DatasetConcatenated):\n\t\t\tdatasets.extend(self.datasets)\n\t\telse:\n\t\t\tdatasets.extend([self])\n\t\tif isinstance(other, DatasetConcatenated):\n\t\t\tdatasets.extend(other.datasets)\n\t\telse:\n\t\t\tdatasets.extend([other])\n\t\treturn DatasetConcatenated(datasets)",
"def flatten_zip_dataset(*args):\n flattened = tf.data.Dataset.from_tensors(args[0])\n for ex in args[1:]:\n flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex))\n return flattened",
"def concatenate(dfs):\n\n cmplt = pd.concat(dfs)\n return cmplt",
"def combined_data ( sample ,\n varset , \n datasets ,\n name = '' ,\n title = '' ,\n args = () ) :\n \n labels = sample.labels()\n \n largs = [ ROOT.RooFit.Index ( sample ) ] \n\n for label in labels :\n\n dset = None \n if isinstance ( datasets , dict ) : dset = datasets[label]\n else :\n for ds in dataset :\n if label == ds[0] :\n dset = ds[1]\n break\n \n assert isinstance ( dset , ROOT.RooAbsData ),\\\n 'Invalid data set for label %s' % label\n \n largs.append ( ROOT.RooFit.Import ( label , dset ) )\n\n name = name if name else dsID()\n title = title if title else 'Data for simultaneous fit/%s' % sample.GetName()\n\n args = args + tuple ( largs )\n\n vars = ROOT.RooArgSet()\n if isinstance ( varset , ROOT.RooArgSet ) : vars = varset\n elif isinstance ( varset , ROOT.RooAbsReal ) : vars.add ( varset )\n else :\n for v in varset : vars.add ( v )\n \n return ROOT.RooDataSet ( name , title , vars , *args )",
"def join(self, other_datasets: Iterable[DatasetBase]) -> None:\n if not all(isinstance(d, JsonIndexDataset) for d in other_datasets):\n raise ValueError(\"This function can only join a list of JsonIndexDataset\")\n # pyre-ignore[16]\n self.frame_annots.extend([fa for d in other_datasets for fa in d.frame_annots])\n # pyre-ignore[16]\n self.seq_annots.update(\n # https://gist.github.com/treyhunner/f35292e676efa0be1728\n functools.reduce(\n lambda a, b: {**a, **b},\n # pyre-ignore[16]\n [d.seq_annots for d in other_datasets],\n )\n )\n all_eval_batches = [\n self.eval_batches,\n *[d.eval_batches for d in other_datasets], # pyre-ignore[16]\n ]\n if not (\n all(ba is None for ba in all_eval_batches)\n or all(ba is not None for ba in all_eval_batches)\n ):\n raise ValueError(\n \"When joining datasets, either all joined datasets have to have their\"\n \" eval_batches defined, or all should have their eval batches undefined.\"\n )\n if self.eval_batches is not None:\n self.eval_batches = sum(all_eval_batches, [])\n self._invalidate_indexes(filter_seq_annots=True)",
"def concat_2d(ds: Dataset, dims: Tuple[Hashable, Hashable]) -> DataArray:\n arrs = []\n for var in ds:\n arr = ds[var]\n if arr.dims[0] != dims[0]:\n continue\n if arr.ndim > 2:\n raise ValueError(\n \"All variables must have <= 2 dimensions \"\n f\"(variable {var} has shape {arr.shape})\"\n )\n if arr.ndim == 2:\n # Rename concatenation axis\n arr = arr.rename({arr.dims[1]: dims[1]})\n else:\n # Add concatenation axis\n arr = arr.expand_dims(dim=dims[1], axis=1)\n arrs.append(arr)\n return xr.concat(arrs, dim=dims[1])",
"def concatenate(cls, variables, axis=0, _preserve=True):\n variable0 = variables[0]\n\n if len(variables) == 1:\n return variable0.copy()\n\n out = variable0.copy() # data=False)\n\n data = Data.concatenate(\n [v.get_data() for v in variables], axis=axis, _preserve=_preserve\n )\n out.set_data(data, copy=False)\n\n return out",
"def concat(self, other: \"SampleBatch\") -> \"SampleBatch\":\n return concat_samples([self, other])",
"def combine_feature_sets(features):\n return pd.concat(features, axis=1)",
"def merge_dataset(data_1, label_1, data_2, label_2):\n merged_data = data_1 + data_2\n merged_label = label_1 + label_2\n\n merged_data = np.array(merged_data)\n merged_label = np.array(merged_label)\n\n return merged_data, merged_label",
"def __add__(self, dataset):\n for attr in ['extent', 'crs', 'sensor', 'acquisition_mode', 'proc_steps', 'outname_base']:\n if getattr(self, attr) != getattr(dataset, attr):\n raise ValueError('value mismatch: {}'.format(attr))\n # self.filename.append(dataset.filename)\n for key in dataset.measurements.keys():\n if key in self.measurements.keys():\n raise RuntimeError('only different measurements can be combined to one dataset')\n self.measurements.update(dataset.measurements)\n return self",
"def add_all(\n self, data_sets: dict[str, AbstractDataset], replace: bool = False\n ) -> None:\n for name, data_set in data_sets.items():\n self.add(name, data_set, replace)",
"def dataset_merge():\n os.chdir(\"./dataset\")\n merged_vect = []\n\n # read all the dataset file\n for pkl in glob.glob(\"*.pkl\"):\n with open(pkl, 'rb') as pkl:\n for i in pickle.load(pkl):\n merged_vect.append(i)\n\n # merge everything inside a single file\n with open('merged.pkl', 'wb') as pkl:\n pickle.dump(merged_vect, pkl)\n\n # remove old dataset\n for dataset in glob.glob(\"dataset*.pkl\"):\n os.remove(dataset)",
"def _concatenate(data1, data2):\r\n\r\n data1_shape, data2_shape = data1.shape[1:], data2.shape[1:]\r\n\r\n if data1_shape == data2_shape:\r\n return np.concatenate((data1, data2), axis=0)\r\n\r\n raise TypeError('Add data failed. Entries are not of correct shape.\\n'\r\n 'Expected {}, but got {}'.format(data1_shape, data2_shape))",
"def extend(self, datasets: Iterable[_TypeMultiBlockLeaf]) -> None:\n # Code based on collections.abc\n if isinstance(datasets, MultiBlock):\n for key, data in zip(datasets.keys(), datasets):\n self.append(data, key)\n else:\n for v in datasets:\n self.append(v)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct `Dataset` for each entry in dict `self.selection`.
|
def _construct_datasets_from_dict(
cls, config: DatasetConfig
) -> Dict[str, "Dataset"]:
assert isinstance(config.selection, dict)
datasets: Dict[str, "Dataset"] = {}
selections: Dict[str, Union[str, List]] = deepcopy(config.selection)
for key, selection in selections.items():
config.selection = selection
dataset = Dataset.from_config(config)
assert isinstance(dataset, (Dataset, EnsembleDataset))
datasets[key] = dataset
# Reset `selections`.
config.selection = selections
return datasets
|
[
"def _construct_dataset_from_list_of_strings(\n cls, config: DatasetConfig\n ) -> \"Dataset\":\n assert isinstance(config.selection, list)\n datasets: List[\"Dataset\"] = []\n selections: List[str] = deepcopy(cast(List[str], config.selection))\n for selection in selections:\n config.selection = selection\n dataset = Dataset.from_config(config)\n assert isinstance(dataset, Dataset)\n datasets.append(dataset)\n\n # Reset `selections`.\n config.selection = selections\n\n return cls.concatenate(datasets)",
"def _to_dataset(self):\n from mikeio import Dataset\n\n return Dataset(\n {self.name: self}\n ) # Single-item Dataset (All info is contained in the DataArray, no need for additional info)",
"def build_eval_dataset(self):\n pass",
"def build_synthetic_dataset(self):\n pass",
"def _load_selection_data(self):\n for env_id in Config.ENV_IDS:\n o_data = torch.load('data/'+Config.ENV_NAME+'_o_'+env_id+'.pth')\n a_data = torch.load('data/'+Config.ENV_NAME+'_a_'+env_id+'.pth')\n t_data = torch.load('data/'+Config.ENV_NAME+'_t_'+env_id+'.pth')\n self.select_data[env_id] = (o_data, a_data, t_data)",
"def _produce_train_dataset(self):\r\n pass",
"def _build_datasets(*args, **kwargs):\n datasets = OrderedDict()\n _add_arg_datasets(datasets, args)\n _add_kwarg_datasets(datasets, kwargs)\n return datasets",
"def _make_subset(self, indices: np.ndarray, name: str) -> \"PathologyDataset\":\n data = copy.deepcopy(self)\n data.X = data.X[indices]\n if data.has_y:\n data.y = data.y[indices]\n data.ninstances = len(data.X)\n data.name = name\n data.to_intermediate()\n return data",
"def dataset(self):\n data = tablib.Dataset()\n data.headers = self.keys()\n data.append(self.values())\n\n return data",
"def get_subset(self, subset: Subset) -> \"DatasetEntity\":\n dataset = DatasetEntity(\n items=[item for item in self._items if item.subset == subset],\n purpose=self.purpose,\n )\n return dataset",
"def gen_dataset(self):\n full_set = []\n for i in range(self.set_size):\n # the full set is portioned with roughly 1/4 of each image category\n if i > self.set_size * 0.75:\n full_set.append(self._gen_image(self.img_size, 'blob', self.noise, self.fig_centered))\n elif i > self.set_size * 0.5:\n full_set.append(self._gen_image(self.img_size, 'bars', self.noise, self.fig_centered))\n elif i > self.set_size * 0.25:\n full_set.append(self._gen_image(self.img_size, 'rect', self.noise, self.fig_centered))\n else:\n full_set.append(self._gen_image(self.img_size, 'cross', self.noise, self.fig_centered))\n np.random.shuffle(full_set)\n\n if (sum(self.train_val_test) - 0.01)**2 < 1 or (sum(self.train_val_test) - 0.01)**2 == 1:\n # Dividing the shuffled full set into training set, validation set and test set\n train_proportion = round(self.train_val_test[0] * len(full_set))\n val_proportion = round(self.train_val_test[1] * len(full_set))\n test_proportion = round(self.train_val_test[2] * len(full_set))\n self.train_set = full_set[:train_proportion]\n self.val_set = full_set[train_proportion:train_proportion + val_proportion]\n self.test_set = full_set[train_proportion + val_proportion:train_proportion + val_proportion + test_proportion]\n else:\n print(\"trainValTest values must sum to exactly 1\")\n\n draw_selection = self.test_set[:20] # Drawing a selection from the test set\n if self.draw:\n for image in draw_selection:\n self.draw_image(image)",
"def get_dataset(self):\n linear_qs = AminoAcid.objects.filter(amino_acid__in=self.linear)\\\n .values_list('data__linear_smile')\n methylated_qs = AminoAcid.objects.filter(amino_acid__in=self.methylated)\\\n .values_list('data__methylated_smile')\n linear_dataset = list(map(lambda x: x[0], linear_qs))\n methylated_dataset = list(map(lambda x: x[0], methylated_qs))\n dataset = linear_dataset + methylated_dataset\n return dataset",
"def create_dataset(player_stats, matches):\n pass",
"def fromDatasetTypes(cls, datasetTypes: Iterable[DatasetType], *,\n universe: DimensionUniverse) -> _DatasetDict:\n return cls({datasetType: {} for datasetType in datasetTypes}, universe=universe)",
"def add_dataset(self, **kwargs) -> None:\n dataset = XLDataset(**kwargs)\n\n if dataset.split == \"training\":\n self.training.append(dataset)\n elif dataset.split == \"validation\":\n self.validation.append(dataset)\n elif dataset.split == \"test\":\n self.test.append(dataset)\n else:\n raise ValueError(f\"Unknown value for 'split' in \"\n \"{dataset.pxid}.\")",
"def group_dataset(self, group):\n ds = Dataset()\n ds.update(dict(\n [(tag,data_element) for tag,data_element in self.items() if tag.group==group]\n ))\n return ds",
"def get_dataset(self):\n\n # https://developer.nvidia.com/blog/preparing-state-of-the-art-models-for-classification-and-object-detection-with-tlt/\n train_download = not os.path.exists(os.path.join(self.load_path, \"train\"))\n trainval_2012 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n trainval_2007 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n test_download = not os.path.exists(os.path.join(self.load_path, \"test\"))\n valset = datasets.VOCDetection(os.path.join(self.load_path, \"test\"), image_set='test',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=test_download)\n train_loader_2007 = torch.utils.data.DataLoader(trainval_2007, batch_size=1, shuffle=False, num_workers=2)\n train_loader_2012 = torch.utils.data.DataLoader(trainval_2012, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [os.path.join(self.save_path, \"train\"), os.path.join(self.save_path, \"test\")]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n indices_data = {}\n # create folders to save data\n for loader_name, loader in [('train', train_loader_2007),\n ('train', train_loader_2012),\n ('test', val_loader)]:\n for (img, annotation) in tqdm(loader):\n\n #print(annotation)\n # there may be multiple labels, they are concatenated to: 'label1_label2_'\n label = ''\n int_label = []\n\n elems = annotation['annotation']['object']\n # if only 1 label - it is a dictionary, but not list of 
dictionaries\n # for consistency reasons and to be able to use the loop later\n if not isinstance(elems, list):\n elems = [elems]\n\n # get bboxes, compute object size, add all object sizes and divide by img size (h*w)\n obj_sizes = 0\n num_instances = 0\n\n for elem in elems:\n # every name is in a list\n # there may be multiple instances of the same object\n # those are disregarded for label\n\n if not (bool(int(elem['difficult'][0])) and loader_name == 'test'):\n if not str(self.class_to_idx[elem['name'][0]]) in label:\n label += str(self.class_to_idx[elem['name'][0]]) + '_'\n int_label.append(self.class_to_idx[elem['name'][0]])\n\n num_instances += 1\n # percentage of objects in the image: sum obj_size/img_size\n obj_sizes += (int(elem['bndbox']['xmax'][0]) - int(elem['bndbox']['xmin'][0])) * \\\n (int(elem['bndbox']['ymax'][0]) - int(elem['bndbox']['ymin'][0]))\n obj_sizes /= float(int(annotation['annotation']['size']['width'][0]) *\n int(annotation['annotation']['size']['height'][0]))\n\n img_name = label + '_' + annotation['annotation']['filename'][0]\n\n directory = os.path.join(os.path.join(self.save_path, loader_name), label)\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n\n indices_data[os.path.join(directory, img_name)] = (int_label,\n obj_sizes, num_instances)\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': np.array(list(indices_data.values()), dtype=object)[:, 0],\n 'obj_sizes': np.array(list(indices_data.values()), dtype=object)[:, 1],\n 'num_instances': np.array(list(indices_data.values()), dtype=object)[:, 2]})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) 
and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n # when computing dataset metrics, an original image should be used\n # - without randomness of RandomCrop\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root = os.path.join(self.save_path, \"train\"), transform=train_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n valset = IndxImageFolder(root=os.path.join(self.save_path, \"test\"), transform=test_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n\n return trainset, valset",
"def as_dataset(self) -> \"Dataset\":\n\n # Initialize dataset\n dset = dataset.Dataset()\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"year\"])\n\n # Add time\n epochs = list()\n for year, doy, seconds in zip(self.data[\"year\"], self.data[\"doy\"], self.data[\"seconds\"]):\n epochs.append(datetime.strptime(\"{:.0f} {:.0f}\".format(year, doy), \"%Y %j\") + timedelta(seconds=seconds))\n\n dset.add_time(name=\"time\", val=epochs, scale=\"gps\", fmt=\"datetime\", write_level=\"operational\")\n\n # Add system field\n if \"system\" in self.data.keys():\n systems = []\n for system in self.data[\"system\"]:\n systems.append(enums.gnss_name_to_id[system.lower()].value)\n\n dset.add_text(\"system\", val=systems)\n\n # Add system field\n if \"satellite\" in self.data.keys():\n satellites = []\n for system, satellite in zip(dset.system, self.data[\"satellite\"]):\n satellites.append(system + str(satellite).zfill(2))\n\n dset.add_text(\"satellite\", val=satellites)\n\n # Add text and float fields\n fields = set(self.data.keys()) - {\"year\", \"doy\", \"seconds\", \"system\", \"satellite\"}\n for field in fields:\n if self.data[field].dtype.kind in {\"U\", \"S\"}: # Check if numpy type is string\n dset.add_text(field, val=self.data[field])\n continue\n\n dset.add_float(field, val=self.data[field])\n\n return dset",
"def create_dataset(self, samples: List[Dict[str, Any]]) -> CompleteDataset:\r\n _, texts, labels = zip(*[split_sample(sample) for sample in samples])\r\n tokens = self.tokenize(texts)\r\n return CompleteDataset(tokens=tokens, labels=labels)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct `Dataset` for each entry in list `self.selection`.
|
def _construct_dataset_from_list_of_strings(
cls, config: DatasetConfig
) -> "Dataset":
assert isinstance(config.selection, list)
datasets: List["Dataset"] = []
selections: List[str] = deepcopy(cast(List[str], config.selection))
for selection in selections:
config.selection = selection
dataset = Dataset.from_config(config)
assert isinstance(dataset, Dataset)
datasets.append(dataset)
# Reset `selections`.
config.selection = selections
return cls.concatenate(datasets)
|
[
"def _to_dataset(self):\n from mikeio import Dataset\n\n return Dataset(\n {self.name: self}\n ) # Single-item Dataset (All info is contained in the DataArray, no need for additional info)",
"def build_eval_dataset(self):\n pass",
"def build_synthetic_dataset(self):\n pass",
"def _construct_datasets_from_dict(\n cls, config: DatasetConfig\n ) -> Dict[str, \"Dataset\"]:\n assert isinstance(config.selection, dict)\n datasets: Dict[str, \"Dataset\"] = {}\n selections: Dict[str, Union[str, List]] = deepcopy(config.selection)\n for key, selection in selections.items():\n config.selection = selection\n dataset = Dataset.from_config(config)\n assert isinstance(dataset, (Dataset, EnsembleDataset))\n datasets[key] = dataset\n\n # Reset `selections`.\n config.selection = selections\n\n return datasets",
"def get_dataset(self):\n linear_qs = AminoAcid.objects.filter(amino_acid__in=self.linear)\\\n .values_list('data__linear_smile')\n methylated_qs = AminoAcid.objects.filter(amino_acid__in=self.methylated)\\\n .values_list('data__methylated_smile')\n linear_dataset = list(map(lambda x: x[0], linear_qs))\n methylated_dataset = list(map(lambda x: x[0], methylated_qs))\n dataset = linear_dataset + methylated_dataset\n return dataset",
"def get_subset(self, subset: Subset) -> \"DatasetEntity\":\n dataset = DatasetEntity(\n items=[item for item in self._items if item.subset == subset],\n purpose=self.purpose,\n )\n return dataset",
"def gen_dataset(self):\n full_set = []\n for i in range(self.set_size):\n # the full set is portioned with roughly 1/4 of each image category\n if i > self.set_size * 0.75:\n full_set.append(self._gen_image(self.img_size, 'blob', self.noise, self.fig_centered))\n elif i > self.set_size * 0.5:\n full_set.append(self._gen_image(self.img_size, 'bars', self.noise, self.fig_centered))\n elif i > self.set_size * 0.25:\n full_set.append(self._gen_image(self.img_size, 'rect', self.noise, self.fig_centered))\n else:\n full_set.append(self._gen_image(self.img_size, 'cross', self.noise, self.fig_centered))\n np.random.shuffle(full_set)\n\n if (sum(self.train_val_test) - 0.01)**2 < 1 or (sum(self.train_val_test) - 0.01)**2 == 1:\n # Dividing the shuffled full set into training set, validation set and test set\n train_proportion = round(self.train_val_test[0] * len(full_set))\n val_proportion = round(self.train_val_test[1] * len(full_set))\n test_proportion = round(self.train_val_test[2] * len(full_set))\n self.train_set = full_set[:train_proportion]\n self.val_set = full_set[train_proportion:train_proportion + val_proportion]\n self.test_set = full_set[train_proportion + val_proportion:train_proportion + val_proportion + test_proportion]\n else:\n print(\"trainValTest values must sum to exactly 1\")\n\n draw_selection = self.test_set[:20] # Drawing a selection from the test set\n if self.draw:\n for image in draw_selection:\n self.draw_image(image)",
"def _make_subset(self, indices: np.ndarray, name: str) -> \"PathologyDataset\":\n data = copy.deepcopy(self)\n data.X = data.X[indices]\n if data.has_y:\n data.y = data.y[indices]\n data.ninstances = len(data.X)\n data.name = name\n data.to_intermediate()\n return data",
"def _load_selection_data(self):\n for env_id in Config.ENV_IDS:\n o_data = torch.load('data/'+Config.ENV_NAME+'_o_'+env_id+'.pth')\n a_data = torch.load('data/'+Config.ENV_NAME+'_a_'+env_id+'.pth')\n t_data = torch.load('data/'+Config.ENV_NAME+'_t_'+env_id+'.pth')\n self.select_data[env_id] = (o_data, a_data, t_data)",
"def add_dataset(self, **kwargs) -> None:\n dataset = XLDataset(**kwargs)\n\n if dataset.split == \"training\":\n self.training.append(dataset)\n elif dataset.split == \"validation\":\n self.validation.append(dataset)\n elif dataset.split == \"test\":\n self.test.append(dataset)\n else:\n raise ValueError(f\"Unknown value for 'split' in \"\n \"{dataset.pxid}.\")",
"def parse_datasets(self , selector, response):\n datasets = []\n for row in selector.xpath('//table[@class=\"Tabular\"]//tr[td]'):\n base_title = row.xpath(\"td[1]//text()\").extract()[0].strip()\n for link in row.xpath(\"td[2]//a\"):\n dataset = DatasetItem()\n dataset.set_default('dataset/base_title', base_title)\n item = DistributionItem()\n dataset.add_distribution(item)\n dataset[\"documentation_title\"] = documentation_title(response)\n dataset[\"documentation_url\"] = documentation_url(response)\n\n date_arr = link.xpath(\".//text()\").extract()\n date_long = \"\".join( date_arr )\n date = re.sub( ' +', '', date_long )\n\n item['description'] = \" \".join([base_title , date])\n item['access_url'] = urlparse.urljoin(\"http://www.eba.europa.eu\", link.xpath(\"@href\").extract()[0])\n item['distribution_type'] = \"dcat:Download\"\n item['distribution_format'] = \"XLS\"\n\n dataset['title'] = item['description']\n# dataset['description'] = item['description']\n dataset['description'] = \"Aggregated statistical data on a key aspect of the implementation of prudential framework in each Member State.\"\n dataset['keyword_eng'] = \"Credit, Credit and financial institutions, European banking, Financial market, Market risk, Market supervision, Monetary and financial indicators, Regulation and policy, Risk analysis, Supervisory convergence\"\n dataset['issued'] = date\n dataset['uri'] = item['access_url']\n\n datasets.append(dataset)\n return datasets",
"def build_dataset(args, input_data, target_data):\n dataset = SupervisedDataSet(len(input_data[0]), len(target_data[0]))\n for in_data, tg_data in izip(input_data, target_data):\n dataset.addSample(in_data, tg_data)\n\n if args['verbose']:\n print('Dataset built.')\n\n return dataset",
"def _produce_train_dataset(self):\r\n pass",
"def select_sources(self, selection):\n\n # store selection\n self.selection = selection\n\n # make selection\n self.unit_vector = [self.unit_vector[i] for i in selection]\n self.distance = [self.distance[i] for i in selection]\n\n self.N = len(self.distance)\n\n self.coord = self.coord[selection]\n try:\n self.flux = self.flux[selection]\n self.flux_weight = self.flux_weight[selection]\n except:\n pass",
"def as_dataset(self) -> \"Dataset\":\n\n # Initialize dataset\n dset = dataset.Dataset()\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"year\"])\n\n # Add time\n epochs = list()\n for year, doy, seconds in zip(self.data[\"year\"], self.data[\"doy\"], self.data[\"seconds\"]):\n epochs.append(datetime.strptime(\"{:.0f} {:.0f}\".format(year, doy), \"%Y %j\") + timedelta(seconds=seconds))\n\n dset.add_time(name=\"time\", val=epochs, scale=\"gps\", fmt=\"datetime\", write_level=\"operational\")\n\n # Add system field\n if \"system\" in self.data.keys():\n systems = []\n for system in self.data[\"system\"]:\n systems.append(enums.gnss_name_to_id[system.lower()].value)\n\n dset.add_text(\"system\", val=systems)\n\n # Add system field\n if \"satellite\" in self.data.keys():\n satellites = []\n for system, satellite in zip(dset.system, self.data[\"satellite\"]):\n satellites.append(system + str(satellite).zfill(2))\n\n dset.add_text(\"satellite\", val=satellites)\n\n # Add text and float fields\n fields = set(self.data.keys()) - {\"year\", \"doy\", \"seconds\", \"system\", \"satellite\"}\n for field in fields:\n if self.data[field].dtype.kind in {\"U\", \"S\"}: # Check if numpy type is string\n dset.add_text(field, val=self.data[field])\n continue\n\n dset.add_float(field, val=self.data[field])\n\n return dset",
"def __samples_to_datalist(self, training_data: Iterable[TrainingData]) -> DataList:\n texts = [format_text(td.text) for td in training_data]\n labels = [td.label for td in training_data]\n return DataList(texts=texts, labels=labels)",
"def create_dataset(self, samples: List[Dict[str, Any]]) -> CompleteDataset:\r\n _, texts, labels = zip(*[split_sample(sample) for sample in samples])\r\n tokens = self.tokenize(texts)\r\n return CompleteDataset(tokens=tokens, labels=labels)",
"def _make_train_eval_dataset(self):\n return self._train_dataset.take(-1) # Take all.",
"def get_dataset(self):\n\n # https://developer.nvidia.com/blog/preparing-state-of-the-art-models-for-classification-and-object-detection-with-tlt/\n train_download = not os.path.exists(os.path.join(self.load_path, \"train\"))\n trainval_2012 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n trainval_2007 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n test_download = not os.path.exists(os.path.join(self.load_path, \"test\"))\n valset = datasets.VOCDetection(os.path.join(self.load_path, \"test\"), image_set='test',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=test_download)\n train_loader_2007 = torch.utils.data.DataLoader(trainval_2007, batch_size=1, shuffle=False, num_workers=2)\n train_loader_2012 = torch.utils.data.DataLoader(trainval_2012, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [os.path.join(self.save_path, \"train\"), os.path.join(self.save_path, \"test\")]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n indices_data = {}\n # create folders to save data\n for loader_name, loader in [('train', train_loader_2007),\n ('train', train_loader_2012),\n ('test', val_loader)]:\n for (img, annotation) in tqdm(loader):\n\n #print(annotation)\n # there may be multiple labels, they are concatenated to: 'label1_label2_'\n label = ''\n int_label = []\n\n elems = annotation['annotation']['object']\n # if only 1 label - it is a dictionary, but not list of 
dictionaries\n # for consistency reasons and to be able to use the loop later\n if not isinstance(elems, list):\n elems = [elems]\n\n # get bboxes, compute object size, add all object sizes and divide by img size (h*w)\n obj_sizes = 0\n num_instances = 0\n\n for elem in elems:\n # every name is in a list\n # there may be multiple instances of the same object\n # those are disregarded for label\n\n if not (bool(int(elem['difficult'][0])) and loader_name == 'test'):\n if not str(self.class_to_idx[elem['name'][0]]) in label:\n label += str(self.class_to_idx[elem['name'][0]]) + '_'\n int_label.append(self.class_to_idx[elem['name'][0]])\n\n num_instances += 1\n # percentage of objects in the image: sum obj_size/img_size\n obj_sizes += (int(elem['bndbox']['xmax'][0]) - int(elem['bndbox']['xmin'][0])) * \\\n (int(elem['bndbox']['ymax'][0]) - int(elem['bndbox']['ymin'][0]))\n obj_sizes /= float(int(annotation['annotation']['size']['width'][0]) *\n int(annotation['annotation']['size']['height'][0]))\n\n img_name = label + '_' + annotation['annotation']['filename'][0]\n\n directory = os.path.join(os.path.join(self.save_path, loader_name), label)\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n\n indices_data[os.path.join(directory, img_name)] = (int_label,\n obj_sizes, num_instances)\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': np.array(list(indices_data.values()), dtype=object)[:, 0],\n 'obj_sizes': np.array(list(indices_data.values()), dtype=object)[:, 1],\n 'num_instances': np.array(list(indices_data.values()), dtype=object)[:, 2]})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) 
and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n # when computing dataset metrics, an original image should be used\n # - without randomness of RandomCrop\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root = os.path.join(self.save_path, \"train\"), transform=train_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n valset = IndxImageFolder(root=os.path.join(self.save_path, \"test\"), transform=test_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n\n return trainset, valset"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Name of the table containing eventlevel truth information.
|
def truth_table(self) -> str:
return self._truth_table
|
[
"def __str__(self):\n return_string = \"Truth Table type=\"\n return_string += 'REPORTING' if self.type == TruthTableType.REPORTING else 'TRANSITION'\n return_string += '\\n'\n for k,v in self.header.items():\n if k not in ['next_state', 'output']:\n return_string += '[' + k + '=' + ','.join(v) + ']'\n else:\n return_string += '[' + k + '=' + v + ']'\n return_string += '\\n'\n return_string += '--------------------------------------\\n'\n for transition_dict in self.transitions:\n for k,v in transition_dict.items():\n return_string += '[' + k + '=' + ','.join(v) + ']'\n return_string += '\\n'\n return return_string",
"def forecast_table_name():\n return None",
"def table_name(self) -> str:\n return jsii.get(self, \"tableName\")",
"def _historic_tbl_name(self):\n return 'historic_{0}'.format(self._import_relation_name)",
"def get_predicate_labels(event):\n\n\tlabels = [ACTIVE_LABEL]\n\n \tif event == 'problem_check':\n\t\tlabels.append(PROBLEM_LABEL)\n\n if event == 'thread_create':\n labels.append(POST_FORUM_LABEL)\n\n if event == 'play_video':\n labels.append(PLAY_VIDEO_LABEL)\n\n return labels",
"def statistical_test_name(self) -> str:\n raise NotImplementedError",
"def vrijednostTable (cls):\n\n return 1",
"def tableReport(self):\n # Print eventList.\n for i, e in enumerate(self.eventList):\n print(\"Event \" + str(i) + \" from task \" + str(e.idx))\n print(e.case())\n print(e.delta)\n\n # Print statusTable.\n for x in range(self.n):\n print(\"task\" + str(x) + \": \")\n for y in range(5):\n print(self.statusTable[x][y])",
"def is_tautology(self) -> bool:\r\n return all((_[1] for _ in self.truth_table))",
"def output_tb_name(self) -> str:\n try:\n return self.attr_getter(\"_output_tb_name\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the sim testbench name yet\")",
"def statusName(dictname):\n return (dictname, \"S\")",
"def __tablename__(self) -> str:\n return gen_tablenames(self.__name__)",
"def get_statement_event_name():\n return 'Term Statement'",
"def tb_name(self) -> str:\n try:\n return self.attr_getter(\"_tb_name\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the testbench name yet\")",
"def true_var_name(self):\n return self._true_var_name",
"def generate_enum_table(self):\n return False",
"def trace_table(self):\n dtype = np.float32\n\n tab = utils.GTable()\n tab.meta['CONFFILE'] = os.path.basename(self.beam.conf.conf_file)\n\n tab['wavelength'] = np.cast[dtype](self.beam.lam*u.Angstrom)\n tab['trace'] = np.cast[dtype](self.beam.ytrace + self.beam.sh_beam[0]/2 - self.beam.ycenter)\n\n sens_units = u.erg/u.second/u.cm**2/u.Angstrom/(u.electron/u.second)\n tab['sensitivity'] = np.cast[dtype](self.beam.sensitivity*sens_units)\n\n return tab",
"def triggerLevel(self,trigname,quiet=False):\n if (trigname[:2]=='L1'): return 1\n if (trigname[:2]=='L2'): return 2\n if (trigname[:2]=='EF'): return 3\n if (not quiet): print('WARNING: Trigger name',trigname,'does not define trigger level, assume L1')\n return 1",
"def get_event_name(self, event):\n return self.get_pygame().event.event_name(event.type)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the event index corresponding to a `sequential_index`.
|
def _get_event_index(
self, sequential_index: Optional[int]
) -> Optional[int]:
|
[
"def _get_index(self, beacon_config, label):\n\n indexes = [index for index, item in enumerate(beacon_config) if label in item]\n if not indexes:\n return -1\n else:\n return indexes[0]",
"def get_index(self) -> int:\n return self._index",
"def get_index(self, label):\n\t\treturn self._label_to_index[label]",
"def convert_state_index_to_var_index(\n state_index: int, on_para_eq_constraint: bool = True\n) -> int:\n var_index = state_index - 1 if on_para_eq_constraint else state_index\n return var_index",
"def _get_event_by_index(self, index):\n if self._getevent_warn:\n msg = (\n \"Seeking event by iterating through events.. (potentially long process)\"\n )\n self.log.warning(msg)\n self._getevent_warn = False\n\n if self._current_event and index < self._current_event.count:\n self._reset()\n\n for event in self._source:\n if event.count == index:\n return event\n raise IndexError(f\"Event index {index} not found in file\")",
"def convert_var_index_to_state_index(\n var_index: int, on_para_eq_constraint: bool = True\n) -> int:\n state_index = var_index + 1 if on_para_eq_constraint else var_index\n return state_index",
"def find_index(s: AnyPandas, index: Union[int, DateLike]) -> int:\n if isinstance(index, (int, np.int64)):\n return int(index)\n date_index = to_datetime(index, errors=\"coerce\")\n\n if date_index is NaT:\n raise ValueError(f\"{index} cannot be converted to datetime\")\n loc = np.argwhere(s.index == date_index).squeeze()\n if loc.size == 0:\n raise ValueError(\"index not found\")\n return int(loc)",
"def get_next_seq(self) -> int:\n res = self._event_key\n self._event_key += 1\n if self._event_key > 0xFFFF_FFFF:\n self._event_key = 1\n return res",
"def findIndex(sequence, function):\n return next(__builtin__.filter(lambda x: function(x[1]), enumerate(sequence)), None)[0]",
"def get_aso_id_from_idx(df, aso_idx):\n return df.iloc[aso_idx].aso_id",
"def get_state_index(self, stateaction_index):\n return self.get_index_component(stateaction_index, self.state_space)",
"def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))",
"def get_index(self):\n if hasattr(self, '_v_index'):\n return self._v_index\n else:\n return sys.maxint",
"def get_index(usage_key, children):\n children = [str(child) for child in children]\n return children.index(usage_key)",
"def index(self, state):\n try:\n idx = self.basis_lut[tuple(state)]\n return idx\n except:\n return -1",
"def get_vehicle_index(self, vehicle):\n index = None\n for i,v in enumerate(self.state['vehicles']):\n if v.id == vehicle.id:\n index = i\n break\n return index",
"def get_index(self, ticket):\n if hasattr(ticket, '_v_index'):\n return ticket._v_index\n else:\n return self.__items.index(ticket)",
"def getIndex(self, index: 'int const') -> \"int\":\n return _coin.SoPath_getIndex(self, index)",
"def position_to_index(self, position):\n return position[1] + position[0] * self._grid_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add custom graph label define using function `fn`.
|
def add_label(
self, fn: Callable[[Data], Any], key: Optional[str] = None
) -> None:
if isinstance(fn, Label):
key = fn.key
assert isinstance(
key, str
), "Please specify a key for the custom label to be added."
assert (
key not in self._label_fns
), f"A custom label {key} has already been defined."
self._label_fns[key] = fn
|
[
"def axis_label(label):\n\n def result(func):\n func.__axis_label__ = label\n return func\n\n return result",
"def id_for_label(value):\n return f\"labels->{value}\"",
"def set_label(self, label):",
"def handle_label(self, label, namespace):\n raise NotImplementedError",
"def add_label(self, bit, name):\n self.line_labels[bit] = name",
"def generate_label_cmd(vm_cmd, asm_file):\n global g_curr_func\n\n label_name = vm_cmd[1]\n cmd_string = \"(\" + label_name + \")\"\n if g_curr_func:\n cmd_string = \"(\" + str(g_curr_func) + \"$\" + label_name + \")\"\n # Write cmd_string to asm file.\n asm_file.write(cmd_string + NEW_LINE)",
"def addLabel(self, label, instruction_number):\n self._labels[label] = instruction_number",
"def make_ticklabels(ax: Axes, fn, dim='xy'):\n for d in dim:\n getter = getattr(ax, f'get_{d}ticks')\n setter = getattr(ax, f'set_{d}ticklabels')\n setter([fn(item) for item in getter()])",
"def label(self, name):\n self.labels[name] = self.node\n return self",
"def process_label(self, process_fn, **kwargs):\n\n def processor(x):\n x['label'] = process_fn(label=x['label'], **kwargs)\n return x\n\n self.dataset = self.dataset.map(lambda x: processor(x),\n self.num_parallel_calls)",
"def label(self, graph, node, valid_name):\n return self.depending_library.link_label(graph, node, valid_name)",
"def _add_label(self, labelname, label_dict, firstpoint, secondpoint, boxtype=-1, original_coords=False):\n\t\tix,iy = firstpoint\n\t\tx,y = secondpoint\n\t\tupper_left = (np.min([ix,x]), np.min([iy,y]))\n\t\tlower_right = (np.max([ix,x]), np.max([iy,y]))\n\t\tself.label_dict[labelname] = [upper_left, lower_right, boxtype]\n\t\t\n\t\tself._add_log_label(self.label_dict[labelname])",
"def add_label(self, label: Union[tuple, int], description: str):\n\n self.labels[label] = description",
"def node_label(node):\n return NODE_LABELS[type(node)]",
"def write_label(self, label):\n self._write_asm_commands(['({})'.format(label)])",
"def wsmark(label: str):\n l = LABEL(label)\n\n def _wsmark(func):\n PYFN_MAP[l] = func\n return func\n\n return _wsmark",
"def write_label(self, label):\n self.vm_lines.append('label ' + label + \"\\n\")",
"def augment_label(label, n):\n return [label]*n",
"def register(name, fn):\n return el.Dotted.register(name, fn)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return graph `Data` object at `index`.
|
def __getitem__(self, sequential_index: int) -> Data:
if not (0 <= sequential_index < len(self)):
raise IndexError(
f"Index {sequential_index} not in range [0, {len(self) - 1}]"
)
features, truth, node_truth, loss_weight = self._query(
sequential_index
)
graph = self._create_graph(features, truth, node_truth, loss_weight)
return graph
|
[
"def index(self,index):\n node = self.front\n counter = 0\n while counter < index:\n node = node.getNext()\n counter += 1\n return node.getData()",
"def datum(self, *index):\n data = self.get_data(None)\n if data is None:\n raise ValueError(\n \"ERROR: Can't return an element when there is no data array\"\n )\n\n return data.datum(*index)",
"def get_at_index(self, index):\n if not (0 <= index < self.size):\n raise ValueError('List index out of range: {}'.format(index))\n current_node = self.head\n count = 0\n while current_node:\n if count == index:\n return current_node.data\n current_node = current_node.next\n count += 1",
"def _getExtractionSubgraphAt(self, index):\n if 0 <= index < len(self.extractionSubgraphs):\n return copy.deepcopy(self.extractionSubgraphs[index])\n else:\n raise KeyError('Invalid index value')",
"def get_at_index(self, idx):\n return self._get_node_at_index(idx).data",
"def __getitem__(self, index: int) -> List[object]:\n return [d[index] for d in self.datasets]",
"def getObject(self, index: long) -> object:\n ...",
"def __getitem__(self, index):\r\n if len(self) <= index:\r\n raise IndexError\r\n\r\n curr = self.first\r\n # Iterate to (index)-th node\r\n for i in range(index):\r\n curr = curr.next\r\n return curr.item",
"def getNode(self, index: 'int const') -> \"SoNode *\":\n return _coin.SoPath_getNode(self, index)",
"def _getNode(self, index: int):\n node = self.head\n while index > 0:\n node = node.next\n index -= 1\n return node, node.next",
"def __getitem__ ( self , index ) :\n return self._histos[ index ]",
"def get_node(self, index):\n if index.isValid():\n node = index.internalPointer()\n if node:\n return node\n\n return self.root",
"def __getitem__(self, index):\n outputs = []\n for dataset in self.datasets:\n outputs.append(dataset.__getitem__(index))\n return outputs",
"def _getDecompressedNetworkAt(self, index):\n if 0 <= index < len(self.decompressedFrames):\n return copy.deepcopy(self.decompressedFrames[index])\n else:\n raise KeyError('Invalid index value')",
"def get_node(self, index) -> DoubleLinkedListNode:\n if(self._isEmpty()):\n return None\n\n currentNode = self.begin\n \n while(index > 0):\n currentNode = currentNode.nxt\n if(currentNode is None):\n return None\n index -= 1\n \n return currentNode",
"def grab_at(self, index: int) -> Embed:\n\n return Embed.from_dict(deepcopy(self.history[index]))",
"def __getitem__(self, index):\n\n # index is a single number\n if isinstance(index, Number):\n model = self._models[index]\n model_index = self._model_indices[index]\n if model.get_stored_output_values() is None:\n return None\n else:\n output_values = model.get_stored_output_values()\n return output_values[model_index]\n\n # index is a slice\n elif isinstance(index, slice):\n result = []\n for i in range(index.start, index.stop):\n val = self[i]\n if val != None:\n result.append(val)\n else:\n return None\n return result",
"def get_instance(self, index):\n return self.instances[index]",
"def __getitem__(self, index):\n if isinstance(index, int):\n return self.__savepoint_list[index]\n return self.__make_savepoint_collection(index, True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve selection as string to list of indices. Selections are expected to have pandas.DataFrame.querycompatible syntax, e.g., ``` "event_no % 5 > 0" ``` Selections may also specify a fixed number of events to randomly sample, e.g., ``` "10000 random events ~ event_no % 5 > 0" "20% random events ~ event_no % 5 > 0" ```
|
def _resolve_string_selection_to_indices(
self, selection: str
) -> List[int]:
return self._string_selection_resolver.resolve(selection)
|
[
"def _sample_to_idxs(df: pd.DataFrame, sample: str) -> List[int]:\n if sample.startswith((\"SRR\", \"DRR\", \"ERR\")):\n idxs = df.index[df.run_accession == sample].tolist()\n assert len(idxs) == 1, f\"sample {sample} with idxs: {idxs}\"\n elif sample.startswith((\"SRX\", \"ERX\", \"DRX\")):\n idxs = df.index[df.experiment_accession == sample].tolist()\n assert len(idxs) >= 1, len(idxs)\n else:\n assert False, (\n f\"sample {sample} not a run, this should not be able to happen!\" f\" Please make an issue about this!\"\n )\n return idxs",
"def queried_indices(self):\n result = None\n selected_indices = {\n key: value\n for (key, value) in self._query_str_dict.items()\n if type(value) == cudf.Series\n }\n if len(selected_indices) > 0:\n result = cudf.DataFrame(selected_indices).fillna(False).all(axis=1)\n\n return result",
"def list_of_select(split_query_list):\n #Finds where the select token is and creates an easy to iterate\n #over list\n location = split_query_list.index('select') + 1\n select_list = split_query_list[location].split(',')\n\n return select_list",
"def _parse_feature_index_str(self, feature_indice_included_str,\n feature_indice_excluded_str):\n feature_index_included = []\n if feature_indice_included_str and feature_indice_excluded_str:\n raise ValueError(\n 'Only one of feature_indice_included_str and feature_indice_excluded_str can be non-empty.'\n )\n if not feature_indice_included_str and not feature_indice_excluded_str:\n logging.info('No manual feature selection. All feature included.')\n return feature_index_included\n\n if feature_indice_included_str:\n feature_index_included = feature_indice_included_str.split(',')\n if not all(\n int(i) >= 0 and int(i) < self.num_feature\n for i in feature_index_included):\n raise ValueError(\n f'values in feature_indice_included_str needs to be within [0, {self.num_feature}).'\n )\n feature_index_included = [int(i) for i in feature_index_included]\n if feature_indice_excluded_str:\n feature_index_excluded = feature_indice_excluded_str.split(',')\n if not all(\n int(i) >= 0 and int(i) < self.num_feature\n for i in feature_index_excluded):\n raise ValueError(\n f'values in feature_indice_excluded_str needs to be within [0, {self.num_feature}).'\n )\n feature_index_included = [\n f for f in range(self.num_feature)\n if str(f) not in feature_index_excluded\n ]\n\n return feature_index_included",
"def _split_query(q):\n\n res = []\n done = False\n while not done:\n if _p_query.match(q):\n q, idx = _p_query.match(q).groups()\n res.append(int(idx))\n else:\n res.append(q)\n done = True\n return list(reversed(res))",
"def select(df, *args):\n df = df[list(args)]\n return df",
"def _query_chooser(query):\n return [_DEFAULT_SHARD_ID]\n\n shard_ids = []\n for column, operator, value in _get_query_comparisons(query):\n pass\n\n if len(shard_ids) == 0:\n return [_DEFAULT_SHARD_ID]\n else:\n return shard_ids",
"def get_queries(df):\n q_snpedia = []\n\n for index, row in df.iterrows():\n if len(row.genotype) == 2:\n q_snpedia.append(f\"{row.rsid}({row.genotype[0]};{row.genotype[1]})\")\n else:\n q_snpedia.append(f\"{row.rsid}({row.genotype[0]})\")\n\n df[\"query\"] = q_snpedia\n\n return df",
"def msseltoindex(self, *args, **kwargs):\n return _ms.ms_msseltoindex(self, *args, **kwargs)",
"def search_series(self, search_string):\n return",
"def init_select(selection_size: int, dataset_size: int) -> list:\n selected_inds = rand_gen.choice(dataset_size, selection_size, replace=False)\n selected_inds.sort()\n selected_inds = selected_inds.tolist()\n return selected_inds",
"def select_cols(df,list_col):\n df = df[list_col] ##loading data using read_csv from pandas\n return df #returning the data structure ",
"def relabel_expr(expr: pd.DataFrame) -> pd.Series:\n\n # Separate lists for ease of indexing and replacing columns, though\n # elements in each list directly correspond. ensembl_ids_in_entrez\n # will be used to index into `expr`, then entrez_ids will be used\n # as a replacement index\n ensembl_ids_in_entrez = []\n entrez_ids = []\n for ensembl_id_full in expr.index:\n ensembl_id, *junk = ensembl_id_full.split('.')\n if ensembl_id in ensembl_entrez_mapping:\n ensembl_ids_in_entrez.append(ensembl_id_full)\n entrez_ids.append(ensembl_entrez_mapping[ensembl_id])\n\n subset = expr.loc[ensembl_ids_in_entrez, :].iloc[:, 0]\n subset.index = entrez_ids\n\n entrez_id_counts = Counter(entrez_ids)\n single_genes = {gene for (gene, count) in entrez_id_counts.items() if count == 1}\n duplicated_genes = {gene for (gene, count) in entrez_id_counts.items() if count > 1}\n\n s = pd.Series(index=entrez_id_counts)\n s.loc[single_genes] = subset.loc[single_genes]\n for gene in duplicated_genes:\n s.loc[gene] = subset.loc[gene].median()\n\n return s",
"def string_to_index(df, input_cols, output_cols=None, columns=None, **kargs):\n df_actual = df\n\n if columns is None:\n input_cols = parse_columns(df, input_cols)\n if output_cols is None:\n output_cols = [name_col(input_col, STRING_TO_INDEX) for input_col in input_cols]\n output_cols = get_output_cols(input_cols, output_cols)\n else:\n input_cols, output_cols = zip(*columns)\n\n indexers = [StringIndexer(inputCol=input_col, outputCol=output_col, **kargs).fit(df) for input_col, output_col\n in zip(list(set(input_cols)), list(set(output_cols)))]\n\n pipeline = Pipeline(stages=indexers)\n df = pipeline.fit(df).transform(df)\n\n df = df.preserve_meta(df_actual, Actions.STRING_TO_INDEX.value, output_cols)\n\n return df",
"def get_specialized_query_selectors(q_select: AbstractSet[str]) -> List[dict]:\n if not q_select:\n return list(specialized_query_selectors)\n\n new_selection = [a for a in specialized_query_selectors if a['labels'] & q_select]\n if not new_selection:\n raise ValueError('q_select %s not in %s', q_select, specialized_query_selectors)\n\n return new_selection",
"def _process_query(self, query: str) -> List[int]:\n tokens = word_tokenize(query)\n tokens = [t.lower() for t in tokens if len(t) < 25 and self.pattern.match(t)]\n \n\n stop_words = set(stopwords.words('english'))\n query = []\n stemmer = PorterStemmer()\n for t in tokens:\n t_stem = stemmer.stem(t)\n if t_stem in stop_words:\n continue\n query.append(self.ph.get_id_by_term(t_stem))\n print(\"query:\", query)\n return query",
"def slice_select(x,dim,ind,return_indices=False):\n \n indices = (slice(None),) * dim + ((ind),)\n return x[indices] if not return_indices else indices # return indices if requested",
"def asindices(hdr, spec):\n\n flds = list(map(text_type, hdr))\n indices = list()\n if not isinstance(spec, (list, tuple)):\n spec = (spec,)\n for s in spec:\n # spec could be a field index (takes priority)\n if isinstance(s, int) and s < len(hdr):\n indices.append(s) # index fields from 0\n # spec could be a field\n elif s in flds:\n idx = flds.index(s)\n indices.append(idx)\n flds[idx] = None # replace with None to mark as used\n else:\n raise FieldSelectionError(s)\n return indices",
"def _add_select_statement(self):\n query = \"select \" + \"\".join([index_col + \", \" for index_col in self.index_col]) + \"\\n\"\n return query"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove columns that are not present in the input file. Columns are removed from `self._features` and `self._truth`.
|
def _remove_missing_columns(self) -> None:
# Check if table is completely empty
if len(self) == 0:
self.warning("Dataset is empty.")
return
# Find missing features
missing_features_set = set(self._features)
for pulsemap in self._pulsemaps:
missing = self._check_missing_columns(self._features, pulsemap)
missing_features_set = missing_features_set.intersection(missing)
missing_features = list(missing_features_set)
# Find missing truth variables
missing_truth_variables = self._check_missing_columns(
self._truth, self._truth_table
)
# Remove missing features
if missing_features:
self.warning(
"Removing the following (missing) features: "
+ ", ".join(missing_features)
)
for missing_feature in missing_features:
self._features.remove(missing_feature)
# Remove missing truth variables
if missing_truth_variables:
self.warning(
(
"Removing the following (missing) truth variables: "
+ ", ".join(missing_truth_variables)
)
)
for missing_truth_variable in missing_truth_variables:
self._truth.remove(missing_truth_variable)
|
[
"def drop_features(self):\n\n if self.drop_columns is not None:\n cols = self.features_df.columns\n feature_list = list()\n for col in cols:\n if (col.split('_')[0] in self.drop_columns) | (col in self.drop_columns):\n feature_list += [col]\n\n self.features_df.drop(columns=feature_list, inplace=True)\n self.features = self.features_df.values\n else:\n self.features = self.features_df.values",
"def clean(self):\n for column in self.columns:\n column.change_misc_values()\n column.drop_greater_than()",
"def stripCols(self):\n for frame in self.files.values():\n for col in frame.columns:\n frame[col] = frame[col].str.strip()",
"def filter_measurement_columns(self, columns):\n columns = [\n x\n for x in columns\n if not self.ignore_feature(x[0], x[1], True, wanttime=True)\n ]\n\n #\n # put Image ahead of any other object\n # put Number_ObjectNumber ahead of any other column\n #\n def cmpfn(x, y):\n if x[0] != y[0]:\n if x[0] == \"Image\":\n return -1\n elif y[0] == \"Image\":\n return 1\n else:\n return cellprofiler_core.utilities.legacy.cmp(x[0], y[0])\n if x[1] == M_NUMBER_OBJECT_NUMBER:\n return -1\n if y[1] == M_NUMBER_OBJECT_NUMBER:\n return 1\n return cellprofiler_core.utilities.legacy.cmp(x[1], y[1])\n\n columns = sorted(columns, key=functools.cmp_to_key(cmpfn))\n #\n # Remove all but the last duplicate\n #\n duplicate = [\n c0[0] == c1[0] and c0[1] == c1[1]\n for c0, c1 in zip(columns[:-1], columns[1:])\n ] + [False]\n columns = [x for x, y in zip(columns, duplicate) if not y]\n return columns",
"def prune(self):\n # Sort rows and columns in descending order\n # NOTE: This means they don't destroy state as they are removed\n e_rows = sorted(self.empty_rows(), reverse=True)\n e_cols = sorted(self.empty_columns(), reverse=True)\n # Remove identified rows and columns\n for row in e_rows: self.remove_row(row)\n for col in e_cols: self.remove_column(col)\n # Return the dropped rows & columns\n return (e_rows, e_cols)",
"def keep_columns_of_interest(self):\n re_string = r'^I/V_\\w*$'\n name_of_iv_columns_to_keep = []\n name_of_eiv_columns_to_keep = []\n\n full_list_name_of_columns = self.data.raw.columns.values\n pd_vdrive_raw_data = self.data.raw.copy()\n for _index, _label in enumerate(full_list_name_of_columns):\n m = re.match(re_string, _label)\n if m:\n name_of_iv_columns_to_keep.append(pd_vdrive_raw_data.columns.values[_index])\n name_of_eiv_columns_to_keep.append(pd_vdrive_raw_data.columns.values[_index + 1])\n\n self.data.raw_iv = pd_vdrive_raw_data.filter(name_of_iv_columns_to_keep)\n self.data.raw_eiv = pd_vdrive_raw_data.filter(name_of_eiv_columns_to_keep)",
"def columns_to_ignore(self) -> list:\n pass",
"def clean_data(self, data):\n # features that are more than 20% NA are dropped first\n ft_na_lim = np.floor(data.shape[0] * 0.2)\n na_ct = np.isnan(data).sum(axis=0)\n drop_cols = data.columns[(na_ct > ft_na_lim)]\n data.drop(drop_cols.values, axis=1, inplace=True)\n # data = data[data.columns[~data.columns.isin(drop_cols)]]\n self.features = data.columns[1:]\n\n # observations that are more than 20% NA are dropped next\n obs_na_lim = np.floor(data.shape[1] * 0.2)\n na_ct = np.isnan(data).sum(axis=1)\n drop_rows = data.index[(na_ct > obs_na_lim)]\n data.drop(drop_rows.values, axis=0, inplace=True)\n # data = data[~data.index.isin(drop_rows)]\n self.observations[\"train\"] = data.index\n self.dropped_observations.append(drop_rows.values)\n\n # replace +/-inf with large numbers, and NA with mean\n self.find_infs_nas(data)\n data = self.replace_infs_nas(data)\n return data",
"def EliminateCols(self, ess_dof_list):\n return _handle.OperatorHandle_EliminateCols(self, ess_dof_list)",
"def clean_data(self):\n # Remove any row that is SCRATCHED\n self.rows[:] = [row for row in self.rows if \"SCRATCHED\" not in row]\n # Remove the list items we aren't using\n self.rows.pop() # Remove the last item in list\n for row in self.rows:\n split_string = row[10].split(\"$\", 2)\n if len(split_string) >= 3:\n row[10] = split_string[1]\n # Remove the columns we aren't using\n columns_to_remove = [0, 3, 4, 6, 7]\n for column in sorted(columns_to_remove, reverse=True):\n del row[column]",
"def clean_data(df_or_fpath, clean_columns=None):\n if isinstance(df_or_fpath, str):\n df = pd.read_csv(df_or_fpath, encoding='gbk')\n else:\n df = df_or_fpath\n \n df = drop_non_feature_columns(df)\n \n # Calculate invalid rate of columns\n invalid_rate = df.isin(INVALID_VALUES).apply(pd.value_counts)\n invalid_rate = invalid_rate.fillna(0)\n invalid_rate = invalid_rate.loc[True] / invalid_rate.sum()\n\n # Determine columns should be cleaned\n if clean_columns is not None:\n discard_columns, strong_clean_columns, weak_clean_columns = clean_columns\n else:\n discard_columns = invalid_rate.index[invalid_rate > DISCARD_THRESHOLD]\n logging.debug('Discard columns: {}'.format(discard_columns))\n\n strong_clean_columns = invalid_rate.index[invalid_rate.between(FILL_THRESHOLD+1e-6, DISCARD_THRESHOLD)]\n logging.debug('Strong clean columns: {}'.format(strong_clean_columns))\n\n weak_clean_columns = invalid_rate.index[invalid_rate <= FILL_THRESHOLD]\n logging.debug('Weak clean columns: {}'.format(weak_clean_columns))\n\n logging.debug('Total columns: {}, Discard columns: {}, Strong clean columns: {}, Weak clean columns: {}'.format(\n len(invalid_rate.index), len(discard_columns), len(strong_clean_columns), len(weak_clean_columns)))\n\n # Case 1:\n # Invalid rate of specific column is higher than DISCARD_THRESHOLD\n # Action:\n # Delete this column\n clean_df = df.drop(discard_columns, axis=1, errors='ignore')\n logging.debug('DataFrame shape for case 1: {}'.format(clean_df.shape))\n\n # Case 2:\n # Invalid rate of specific column is less or equal than DISCARD_THRESHOLD and larger than FILL_THRESHOLD\n # Action:\n # Split this column into two columns:\n # 1. one as one-hot column, 1 for valid value, 0 for invalid value\n # 2. 
the other copies data from the original column, but use normalization func to normalize valid value,\n # and replace invalid value with CONST2\n strong_clean_df = strong_clean(clean_df, strong_clean_columns)\n logging.debug('DataFrame shape for case 2: {}'.format(strong_clean_df.shape))\n\n # Case 3:\n # Invalid rate of specific column is less or equal than FILL_THRESHOLD\n # Action:\n # Normalize valid values, replace invalid values with CONST1\n weak_clean_df = weak_clean(clean_df, weak_clean_columns)\n logging.debug('DataFrame shape for case 3: {}'.format(weak_clean_df.shape))\n\n # Concatenate cleaned data frame with apply id and apply date series\n final_df = pd.concat([strong_clean_df, weak_clean_df], axis=1)\n final_df = final_df.reindex(sorted(final_df.columns), axis=1)\n logging.debug('DataFrame shape after cleaned: {}'.format(final_df.shape))\n \n return final_df, (discard_columns, strong_clean_columns, weak_clean_columns)",
"def remove_nonfeature_cols(\n self, df: pd.DataFrame, non_features: List[str], index: List[str]\n ) -> pd.DataFrame:\n ##### YOUR CODE GOES HERE #####\n pass",
"def exclude(self, metadata):\n self.exclude_columns = [] #metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/CategoricalData\")\n cols = metadata.get_columns_with_semantic_type(\"http://schema.org/DateTime\")\n timecols = metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/Time\")\n for col in cols:\n self.exclude_columns.append(col)\n for col in timecols:\n self.exclude_columns.append(col)\n\n targets = metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/SuggestedTarget\")\n for t in targets:\n if t in self.exclude_columns:\n self.exclude_columns.remove(t)",
"def drop_column(self, column):\n\t\tfor df in self.processed_data:\n\t\t\tdf.drop(column, axis=1, inplace=True)\n\n\t\t# Clean up target_columns and predictive_columns.\n\t\tif type(column) == str:\n\t\t\tcolumn = list(column)\n\t\tfor c in column:\n\t\t\tif c in self.predictive_columns:\n\t\t\t\tself.predictive_columns.remove(c)\n\t\t\telif c in self.target_columns:\n\t\t\t\tself.target_columns.remove(c)\n\n\t\tprint(headerize('Success'))\n\t\tprint('Columns dropped:\\n\\t', column)\n\t\tprint()\n\t\tself.show_data_shapes()",
"def drop_columns(df: pd.DataFrame) -> pd.DataFrame:\n new_features = set(df.columns.tolist()) - SelectedFeatures.get_all_features()\n if len(new_features):\n print('>>> New features found in df: {}'.format(new_features))\n whitelist = SelectedFeatures.get_whitelist()\n for key in [k for k in df.columns if k not in whitelist]:\n df = df.drop(key, 1)\n return df",
"def delete_empty_cols(self) -> pd.DataFrame:\n full_cols = []\n for col in self.dataframe.columns:\n if self.dataframe[col].isnull().sum() / len(self.dataframe) \\\n < config.BAD_FULLNESS_RATE:\n full_cols.append(col)\n print('data_cleaning.py: Delete empty cols...')\n self.dataframe = self.dataframe[full_cols]\n return self.dataframe",
"def removeSkips(data: np.ndarray, columns: np.ndarray, colmeta: dict) -> None:\n\n for col in colmeta:\n if colmeta[col]['type'] == 'skip':\n colidxs = np.argwhere(columns!=col).flatten()\n columns = columns[colidxs]\n data = data[:, colidxs]",
"def remove_column_(self, column_name: str):\n self._check_values_type()\n for dataset in self.values():\n dataset.remove_column_(column_name=column_name)",
"def missing_preprocess(features, df=None):\n\n # number of missing in each row\n # print(df.isnull().sum(axis=1))\n\n # number of missing in each feature\n # print(df.isnull().sum())\n\n # number of instances\n num_instances = df.shape[0]\n # number of features\n num_features = df.shape[1]\n\n # detect empty rows\n if any(df.isnull().sum(axis=1) == num_features):\n print(df[df.isnull().sum(axis=1) == num_features])\n print(\"Above empty rows are detected and removed \\n\")\n df = df.dropna(how='all') # remove empty rows\n\n large_missing_cols = [] # list of columns with extreme large proportion of missing data\n for col in df.columns[:-1]: # exclude target class\n if df[col].isnull().sum() > 0.9 * num_instances:\n large_missing_cols.append(col)\n if large_missing_cols:\n print(\"Feature {} has extreme large proportion of missing data\".format(large_missing_cols))\n ans = input('Do you want to delete the above features? [y/n]')\n if ans == 'y':\n df.drop(large_missing_cols, 1, inplace=True)\n else:\n pass\n print(df.columns)\n features_new = df.columns.values\n return df, features_new"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list missing columns in `table`.
|
def _check_missing_columns(
self,
columns: List[str],
table: str,
) -> List[str]:
for column in columns:
try:
self.query_table(table, [column], 0)
except ColumnMissingException:
if table not in self._missing_variables:
self._missing_variables[table] = []
self._missing_variables[table].append(column)
except IndexError:
self.warning(f"Dataset contains no entries for {column}")
return self._missing_variables.get(table, [])
|
[
"def get_columns_with_missing_values(self): #df dataframe\n missing_df = self.get_count_of_missing_values()\n missing_data = missing_df[missing_df[0] != 0]\n return missing_data",
"def get_missing_columns(column_labels: set, df: pd.DataFrame) -> List:\n return [\n column\n for column in column_labels\n if column not in df\n ]",
"def get_columns_without_missing_values(self): #df dataframe\n missing_df = self.get_count_of_missing_values()\n clean_data = missing_df[missing_df[0] == 0]\n return clean_data",
"def empty_columns(self):\n empty = []\n for idx in range(self.cols):\n if not any((x for x in self.data if x[idx] != 0)):\n empty.append(idx)\n return empty",
"def test_column_values(self):\n for column in self.table.columns:\n assert len(column.values) == 0",
"def columns_null(self) -> List[str]:\n if not self.__columns_null:\n columns = []\n if isinstance(self.dataframe, PyDataFrame):\n for column in self.columns_names:\n count = self.dataframe.filter(F.col(column).isNull()).count()\n if count > 0:\n columns.append(column)\n elif isinstance(self.dataframe, pd.DataFrame):\n nan_cols = self.dataframe.columns[self.dataframe.isna().any()].tolist()\n columns.extend(nan_cols)\n self.__columns_null = columns\n return self.__columns_null",
"def info_missing_table(df_pd):\n mis_val = df_pd.isnull().sum() #count total of null in each columns in dataframe\n#count percentage of null in each columns\n mis_val_percent = 100 * df_pd.isnull().sum() / len(df_pd) \n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1) \n #join to left (as column) between mis_val and mis_val_percent\n mis_val_table_ren_columns = mis_val_table.rename(\n columns = {0 : 'Missing Values', 1 : '% of Total Values'}) \n#rename columns in table\n mis_val_table_ren_columns = mis_val_table_ren_columns[\n mis_val_table_ren_columns.iloc[:,1] != 0].sort_values('% of Total Values', ascending=False).round(1) \n \n print (\"Your selected dataframe has \" + str(df_pd.shape[1]) + \" columns.\\n\" #.shape[1] : just view total columns in dataframe \n \"There are \" + str(mis_val_table_ren_columns.shape[0]) + \n \" columns that have missing values.\") #.shape[0] : just view total rows in dataframe\n return mis_val_table_ren_columns",
"def what_columns(table):\n print [c.name for c in table.c]",
"def _add_missing_cols(msg_list, fields=None):\n new_list = []\n required_cols = ['type', 'id', 'view_href', 'subject', 'last_publish_time']\n\n # Add any defined fields to the list of required columns\n if fields and fields != '*':\n parsed_fields = fields.split(',')\n for field in parsed_fields:\n if field not in required_cols:\n required_cols.append(field)\n\n # Loop through the messages and add any missing columns\n for msg in msg_list:\n for col in required_cols:\n if col not in msg:\n msg[col] = ''\n new_list.append(msg)\n return new_list",
"def required_colnames(self):\n return self._required_colnames[:]",
"def check_for_required_columns(problems: list, table: str, df: DataFrame) -> list:\n r = cs.GTFS_REF\n req_columns = r.loc[(r[\"table\"] == table) & r[\"column_required\"], \"column\"].values\n for col in req_columns:\n if col not in df.columns:\n problems.append([\"error\", f\"Missing column {col}\", table, []])\n\n return problems",
"def check_ingress_required_columns(self, col_names):\n if not set(col_names).issuperset(INGRESS_REQUIRED_COLUMNS):\n if not set(col_names).issuperset(INGRESS_ALT_COLUMNS):\n missing_columns = [x for x in INGRESS_REQUIRED_COLUMNS if x not in col_names]\n return missing_columns\n return None",
"def getColumns(self):\r\n # Save list of main columns\r\n for row in self.table:\r\n for entry in row:\r\n if str(row[0]).startswith('!') and not str(row[0]).startswith('!!'):\r\n delimiter = misc.getDelimiter(row)\r\n column_names = list(row)\r\n break\r\n\r\n # Insert mandatory first column if not existent\r\n inserted_column = False\r\n #if not column_names[0].title() == '!' + self.table_type.title():\r\n # column_names.insert(0, '!' + self.table_type.title())\r\n # inserted_column = True\r\n\r\n # Get column positions\r\n columns = {}\r\n for i, column in enumerate(column_names):\r\n columns[column] = i\r\n\r\n return column_names, columns, inserted_column, delimiter",
"def create_missing_dimension_row(columns: Sequence[dict]) -> List[str]:\n na_values_row = []\n for column in columns:\n if column.get(\"skipped\", False):\n continue\n elif column.get(\"identity\", False):\n na_values_row.append(\"0\")\n else:\n if not column.get(\"not_null\", False):\n # Use NULL for any nullable column and use type cast (for UNION ALL to succeed)\n na_values_row.append(\"NULL::{}\".format(column[\"sql_type\"]))\n elif \"timestamp\" in column[\"sql_type\"]:\n na_values_row.append(\"'0000-01-01 00:00:00'\")\n elif \"boolean\" in column[\"type\"]:\n na_values_row.append(\"false\")\n elif \"string\" in column[\"type\"]:\n na_values_row.append(\"'N/A'\")\n else:\n na_values_row.append(\"0\")\n return na_values_row",
"def _remove_missing_columns(self) -> None:\n # Check if table is completely empty\n if len(self) == 0:\n self.warning(\"Dataset is empty.\")\n return\n\n # Find missing features\n missing_features_set = set(self._features)\n for pulsemap in self._pulsemaps:\n missing = self._check_missing_columns(self._features, pulsemap)\n missing_features_set = missing_features_set.intersection(missing)\n\n missing_features = list(missing_features_set)\n\n # Find missing truth variables\n missing_truth_variables = self._check_missing_columns(\n self._truth, self._truth_table\n )\n\n # Remove missing features\n if missing_features:\n self.warning(\n \"Removing the following (missing) features: \"\n + \", \".join(missing_features)\n )\n for missing_feature in missing_features:\n self._features.remove(missing_feature)\n\n # Remove missing truth variables\n if missing_truth_variables:\n self.warning(\n (\n \"Removing the following (missing) truth variables: \"\n + \", \".join(missing_truth_variables)\n )\n )\n for missing_truth_variable in missing_truth_variables:\n self._truth.remove(missing_truth_variable)",
"def getColumns(self, tableName):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n c = self.conn.execute('pragma table_info(%s)' % tableName)\n return c.fetchall()",
"def get_all_filled_columns(board):\n filled_columns = tuple()\n for col in range(1, dimension(board)+1):\n if is_filled_column(board, col):\n filled_columns = (col,) + filled_columns\n\n return filled_columns",
"def columns_negative(self) -> List[str]:\n columns = []\n\n if not self.__columns_negative:\n if isinstance(self.dataframe, PyDataFrame):\n for column in self.columns_names:\n count = 0\n try:\n count = self.dataframe.filter((F.col(column) < 0)).count()\n except AnalysisException:\n pass\n if count > 0:\n columns.append(column)\n elif isinstance(self.dataframe, pd.DataFrame):\n for column in self.columns_names:\n if is_numeric_dtype(self.dataframe[column]):\n dt_filtered = self.dataframe[self.dataframe[column] < 0]\n count = dt_filtered.shape[0]\n if count > 0:\n columns.append(column)\n self.__columns_negative = columns\n return self.__columns_negative",
"def check_none(d):\n nones = []\n for c in d.colnames:\n nnone = np.sum(d[c] == None)\n if nnone > 0:\n nones.append([c, np.where(d[c] == None)[0]])\n\n return(nones)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return iterator for sequence of strongly connected components.
|
def __iter__(self):
return iter(self._components)
|
[
"def iterator(self, *args, **kwargs):\n return _decomp.component_set_iterator(self, *args, **kwargs)",
"def iter(self) -> Iterator[Sequence]:\n ...",
"def __iter__(self):\n for coreg in self.pipeline:\n yield coreg",
"def __iter__(self):\n for node in self.nodes(): # use same order as nodes()\n yield node._element # but yield each element",
"def get_connected_components(self):\n start_id = choice(list(self.__vertex_dict.keys()))\n\n # must be a list, can't be a set because random.choice does not it\n remaining_ids = list(self.__vertex_dict.keys())\n remaining_ids.remove(start_id) \n\n seen = set()\n seen.add(start_id)\n\n queue = deque()\n queue.append(self.get_vertex(start_id))\n\n components = []\n com = []\n while queue:\n v_obj = queue.pop()\n v_id = v_obj.get_id()\n com.append(v_id)\n\n neighbors = v_obj.get_neighbors()\n\n for n in neighbors:\n n_id = n.get_id()\n if n_id not in seen:\n seen.add(n_id)\n queue.appendleft(n)\n remaining_ids.remove(n_id)\n\n # if there is no vertex left in the queue\n if len(queue) == 0:\n components.append(com)\n # if there are no more components left to traverse through\n if len(remaining_ids) == 0:\n break\n com = []\n new_start = choice(remaining_ids)\n seen.add(new_start)\n queue.appendleft(self.get_vertex(new_start))\n remaining_ids.remove(new_start)\n\n return components",
"def cycles(self) -> List[GraphComponent]:\n return [\n compo\n for _, compo in self.tarjan_scc().items()\n if len(compo) > 1 or compo[0] in self.edges[compo[0]]\n ]",
"def __iter__(self):\n return iter(self._connector.graph_names())",
"def __iter__(self):\n self.check_iterable()\n for element in self.iterable:\n self.next_val = element\n self.notify()\n yield element",
"def __iter__(self):\n return iter(self._elements)",
"def __iter__(self):\n node = self.head\n while node.next != None:\n yield node\n node = node.next",
"def __iter__(self):\n pairs = self.get_pairs()\n for v in pairs: # pairs is a sequence, and sequences are also\n # iterable. TODO: consider changing this to return iter(pairs).\n yield v",
"def __iter__(self):\n i = 0\n while True:\n i = (i + 1) % self._circumference\n yield self[i]",
"def nodes_iter(self):\n\n return self.nodes()",
"def edge_iter(self):\n return xrange(self.ecount())",
"def iter_parts(self):\n def walk_parts(source, visited=list()):\n for rel in source.rels.values():\n if rel.is_external:\n continue\n part = rel.target_part\n if part in visited:\n continue\n visited.append(part)\n yield part\n new_source = part\n for part in walk_parts(new_source, visited):\n yield part\n\n for part in walk_parts(self):\n yield part",
"def attracting_components(G):\n scc = list(nx.strongly_connected_components(G))\n cG = nx.condensation(G, scc)\n for n in cG:\n if cG.out_degree(n) == 0:\n yield scc[n]",
"def get_connected_components(self):\r\n # Reset the network.\r\n self.reset_network()\r\n\r\n # Keep track of the number of nodes visited.\r\n num_visited = 0\r\n\r\n # Make the result list of lists.\r\n components = []\r\n\r\n # Repeat until all nodes are in a connected component.\r\n while num_visited < len(self.all_nodes):\r\n # Find a node that hasn't been visited.\r\n start_node = None\r\n for node in self.all_nodes:\r\n if not node.visited:\r\n start_node = node\r\n break\r\n\r\n # Make sure we found one.\r\n assert start_node != None\r\n\r\n # Add the start node to the stack.\r\n stack = []\r\n stack.append(start_node)\r\n start_node.visited = True\r\n num_visited += 1\r\n\r\n # Add the node to a new connected component.\r\n component = []\r\n components.append(component)\r\n component.append(start_node)\r\n\r\n # Process the stack until it's empty.\r\n while len(stack) > 0:\r\n # Get the next node from the stack.\r\n node = stack.pop()\r\n\r\n # Process the node's links.\r\n for link in node.links:\r\n # Only use the link if the destination\r\n # node hasn't been visited.\r\n to_node = link.node1\r\n if not to_node.visited:\r\n # Mark the node as visited.\r\n to_node.visited = True\r\n\r\n # Mark the link as part of the tree.\r\n link.visited = True\r\n num_visited += 1\r\n\r\n # Add the node to the current connected component.\r\n component.append(to_node)\r\n\r\n # Push the node onto the stack.\r\n stack.append(to_node)\r\n\r\n # Return the components.\r\n return components",
"def initial_strong_components(graph: nx.DiGraph) -> List[Set[Atom]]:\n iscc = []\n sccs = nx.strongly_connected_components(graph)\n for scc in sccs:\n atoms = set(scc)\n predecessors_set = set()\n for atom in atoms:\n predecessors_set = predecessors_set.union(set(graph.predecessors(atom)))\n if predecessors_set.issubset(atoms):\n iscc.append(atoms)\n return iscc",
"def __iter__(self) -> Iterator[_SetElementT]:\n return iter(self._elements)",
"def connected_components(graph):\n \n visited= []\n components = []\n for node in sorted(graph.nodes()): \n print(node)\n if node not in visited:\n temp = Search(graph,node)\n components.append(temp)\n for node in temp:\n visited.append(node)\n return components"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the property access on the auto config workflow.
|
def test_auto_configure_properties(project):
config_name = "Test"
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert auto_config.design_execution is None
assert auto_config.score is None
assert len(auto_config.candidates) == 0
|
[
"def test_config_customproperties_get(self):\n pass",
"def test_config_get(self):\n pass",
"def test_properties(self):\n prj = self._read_string(\"\"\"\nProject 3372\n $prop1 value1\nTask a\n $prop2 value2\n $prop3 \"long property value\"\n\"\"\")\n self.assertEqual(prj.getProperty('prop1'), 'value1')\n a = prj.getTask('.a')\n self.assertEqual(a.getProperty('prop1'), None) # not defined on a\n self.assertEqual(a.getProperty('prop2'), 'value2')\n self.assertEqual(a.getProperty('prop3'), 'long property value')",
"def example_property(self):",
"def test_config_put(self):\n pass",
"def verifyConfiguration(self):",
"def test_get_kv_config(self):\n pass",
"def test_sysconfig_get(self):\n pass",
"def test_get_auto_vars(self):\n actual = get_auto_vars(\"config\")\n self.assertEqual(\n actual,\n {\n \"config/global.auto.tfvars\": {\n Variable(\"foo\", \"bar\"),\n Variable(\"dog\", \"cat\"),\n },\n \"config/app1/app.auto.tfvars\": {\n Variable(\"bar\", \"bye\"),\n Variable(\"baz\", \"bat\"),\n },\n \"config/app3/app.auto.tfvars\": {\n Variable(\"bar\", \"bye\"),\n Variable(\"baz\", \"bat\"),\n },\n \"config/team/team.auto.tfvars\": {\n Variable(\"foo\", \"cat\"),\n },\n \"config/app5/app.auto.tfvars\": {\n Variable(\"foo\", (((\"key\", \"value\"),),)),\n },\n },\n )",
"def test_get_run_settings(self):\n pass",
"def test_get_cloud_settings(self):\n pass",
"def test_dev(self):\r\n dev = Config.dev()\r\n self.assertIsInstance(dev, bool)\r\n \r\n Config.data['dev'] = 'True'\r\n dev = Config.dev()\r\n self.assertFalse(dev)\r\n \r\n Config.data['dev'] = True\r\n dev = Config.dev()\r\n self.assertTrue(dev)\r\n \r\n Config.data['dev'] = 'Yes'\r\n dev = Config.dev()\r\n self.assertFalse(dev)",
"def test_can_access_attributes_with_dot(self):\n\n assert app_config.name is not None",
"def test_observedproperty_property_value(self):\n observedproperty = ObservedProperty.objects.get(name='Temperature')\n properties = [\n 'name',\n 'definition',\n 'description'\n ]\n baseurl = reverse('observedproperty-detail',\n kwargs={'version': 'v1.0',\n 'pk': observedproperty.id\n })\n for property in properties:\n url = baseurl + '/' + property\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(property, response.data)\n if response.data[property]:\n url = baseurl + '/' + property + '/$value'\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data)",
"def test_get_debugging(self):\n\n debugging = self.configer.configOptionValue(\"Debugging\", \"debug\")\n\n if debugging is False:\n self.assertFalse(debugging, \"Debugging/debug is not set to False.\")\n elif debugging is True:\n self.assertTrue(debugging, \"Debugging/debug is not set to True.\")\n else:\n self.assertTrue(False,\n \"Debugging/debug does not have a valid value.\")",
"def test_properties(self):\n self.assertEqual(self.site.a, 0.25)\n self.assertEqual(self.site.b, 0.35)\n self.assertEqual(self.site.c, 0.45)\n self.assertEqual(self.site.x, 2.5)\n self.assertEqual(self.site.y, 3.5)\n self.assertEqual(self.site.z, 4.5)\n self.assertTrue(self.site.is_ordered)\n self.assertFalse(self.site2.is_ordered)\n self.assertEqual(self.propertied_site.properties[\"magmom\"], 5.1)\n self.assertEqual(self.propertied_site.properties[\"charge\"], 4.2)",
"def test_text_config(self):\r\n config = configuration.PropertiesConfiguration(open(os.path.join(os.environ['TEST_DATA'], 'data/ant_config_test.txt'), 'r'))\r\n \r\n assert config['text.a'] == 'text.value.A'\r\n assert config['text.b'] == 'text.value.B'",
"def test_properties(self):\n # Prepare some properties\n key1 = self.random_str()[:5]\n key2 = self.random_str()[:5]\n\n val1 = self.random_str()\n val2 = self.random_str()\n\n # Start the shell process\n process = subprocess.Popen(\n [sys.executable, '-m', 'pelix.shell',\n '-D', '{}={}'.format(key1, val1), '{}={}'.format(key2, val2)],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n try:\n # List properties, stop and get output\n output = to_str(process.communicate(to_bytes(\"properties\"))[0])\n\n found = 0\n for line in output.splitlines(False):\n if key1 in line:\n self.assertIn(val1, line)\n found += 1\n elif key2 in line:\n self.assertIn(val2, line)\n found += 1\n\n self.assertEqual(found, 2, \"Wrong number of properties\")\n finally:\n try:\n # Kill it in any case\n process.terminate()\n except OSError:\n # Process was already stopped\n pass",
"def test_property(self):\n datasite = self.get_repo()\n page = pywikibot.Page(datasite, 'P6')\n property_page = next(datasite.preload_entities([page]))\n self.assertIsInstance(property_page, pywikibot.PropertyPage)\n self.assertTrue(hasattr(property_page, '_content'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the score execution on auto config workflow.
|
def test_auto_config_execute(project):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.predictors.register(resources["predictor"])
project.predictor_evaluation_workflows.register(resources["pew"])
project.design_spaces.register(resources["design_space"])
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert auto_config.design_workflow is None
# Inputs for execute
objective = ScalarMaxObjective(descriptor_key="Fake Target")
with pytest.raises(ValueError):
auto_config.execute(score=objective)
# Now create a config with a working design workflow
project.design_workflows.register(resources["design_workflow"])
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert auto_config.status == "DESIGN WORKFLOW CREATED"
# Mock function to bypass create_default_score call internally
def _default_score(*args, **kwargs):
return LIScore(objectives=[], baselines=[])
with mock.patch("citrine.builders.auto_configure.create_default_score", _default_score):
auto_config.execute(score=objective)
assert auto_config.design_execution is not None
|
[
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_configure_predictor_evaluation(project, caplog):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n evaluator = CrossValidationEvaluator(name=\"Eval\", description=\"\", responses=set())\n\n # Create default w/ a valid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_succeeded):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=None,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Create default w/ an invalid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_failed):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=None,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW FAILED\"\n assert any(r.levelno == logging.WARNING for r in caplog.records)\n\n # Create manual w/ a valid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_succeeded):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=evaluator,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Create manual w/ a failed response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", 
fake_wait_while_failed):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=evaluator,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW FAILED\"\n assert any(r.levelno == logging.WARNING for r in caplog.records)",
"def main():\n with open(\"src/game.yaml\") as file:\n game = Game(**yaml.safe_load(file))\n solver = Solver(game)\n solver.run()\n if game.rule:\n print(np.where(solver.selling == 1))\n else:\n buying = np.where(solver.buying[:, :, 1:] == 1)\n for step in set(buying[0]):\n print(step, [state[1:] for state in zip(*buying) if state[0] == step])\n # print(np.round(solver.scores[0, 0, 1], 2))\n print(*map(partial(round, ndigits=4), solver.scores[0, 0, :]))",
"def update_score(self, config):\n\t\tif self.time == 0.0 or self.count == 0 or len(self.sut.allBranches()) == 0 or len(self.sut.allStatements()) == 0:\n\t\t\tself.score = float('inf')\n\t\telse:\n\t\t\tself.score = len(self.sut.allBranches()) * 1.0e9 / self.time",
"def execute(self, context):\n\t\tpred_state = context.agent.get_predicate_state()\n\t\tfor a_inst in self.sequence:\n\t\t\tif rospy.is_shutdown():\n\t\t\t\treturn 0.0\n\n\t\t\tprecon_diff = AssertionDrivenPredicateState(a_inst.precons).difference(context, pred_state)\n\t\t\tif len(precon_diff) == 0:\n\t\t\t\tif self.execute_subaction(context, a_inst.action_wrapper.instantiate_action(context, a_inst.assignments)) < 0.8:\n\t\t\t\t\treturn 0.0\n\t\t\telse:\n\t\t\t\tcontext.log('Execution of action sequence terminated because the preconditions for {} are not met. Mismatch:\\n {}'.format(\n\t\t\t\t\tpbainst_str(a_inst),\n\t\t\t\t\tpfd_str(precon_diff, '\\n ')))\n\t\t\t\treturn 0.0\n\t\treturn 1.0",
"def _run_evaluation(self) -> None:",
"def run_experiments():\n if False: # Change to False when done testing always_roll\n result = eval_strategy_range(always_roll, 1, 10)\n print('Best always_roll strategy:', result)\n\n if False: # Change to True when ready to test make_comeback_strategy\n result = eval_strategy_range(make_comeback_strategy, 5, 15)\n print('Best comeback strategy:', result)\n\n if True: # Change to True when ready to test make_mean_strategy\n result = eval_strategy_range(make_mean_strategy, 1, 10)\n print('Best mean strategy:', result)\n\n \"*** You may add additional experiments here if you wish ***\"",
"def test_classification(init_env, config_file):\n run_all_steps(init_env, config_file)",
"def test_score_hrt(self) -> None:\n self._test_score(score=self.instance.score_hrt, columns=slice(None), shape=(self.batch_size, 1))",
"def test_are_games_in_progress(self):\n pass",
"def test_perfect_game(self):\n self.roll_many(20, 10)\n assert 300 == self.g.score()",
"def do_scores(self, _):\r\n try:\r\n print(self.game.show_scores_of_the_current_game())\r\n except AttributeError:\r\n print(\"You need to start the game before\"\r\n \" trying to see the scores\")",
"def test_start_run_tensorboard(self):\n pass",
"def administer(self):\n\n score = 0\n\n for question in self.questions:\n evaluation = question.ask_and_evaluate()\n\n if evaluation:\n score += 1\n\n return score",
"def administer(self):\n\n score = 0\n total_questions = 0\n\n for question in self.questions:\n evaluation = question.ask_and_evaluate()\n total_questions += 1\n\n if evaluation:\n score += 1\n\n if score >= (total_questions / 2.0):\n return \"Pass\"\n else:\n return \"Fail\"",
"def test_score_t(self) -> None:\n self._test_score(\n score=self.instance.score_t, columns=slice(0, 2), shape=(self.batch_size, self.instance.num_entities)\n )",
"def run(self):\n log('Now running')\n\n results = {'learn_scores': [], 'test_scores': [], 'behavior_count': {}}\n\n \"\"\"Todo:\n * This as one list, probably by checking if agent is instance of\n BehaviorLearningAgent (needs refactoring).\n \"\"\"\n # Initialize Results\n if self.pacman_class == agents.BehaviorLearningPacmanAgent:\n results['behavior_count'][self.pacman.agent_id] = {}\n\n if self.ghost_class == agents.BehaviorLearningGhostAgent:\n for ghost in self.ghosts:\n results['behavior_count'][ghost.agent_id] = {}\n\n # Load policies from file\n policies = self.__load_policies_from_file__(self.policy_file)\n\n # Initialize agents\n for agent in self.all_agents:\n self.__initialize__(agent)\n\n for x in xrange(self.learn_runs):\n log('LEARN game {} (of {})'.format(x + 1, self.learn_runs))\n\n score = self.__process_game__(policies, results)\n results['learn_scores'].append(score)\n\n for agent in self.all_agents:\n agent.enable_test_mode()\n\n for x in xrange(self.test_runs):\n log('TEST game {} (of {})'.format(x + 1, self.test_runs))\n\n score = self.__process_game__(policies, results)\n results['test_scores'].append(score)\n\n if self.policy_file:\n self.__save_policies__(policies)\n\n if self.mse is True:\n total = 0\n total = (self.mseCounters[0]/(self.learn_runs+self.test_runs))\n log('Total mse: {}'.format(total))\n\n if os.path.isfile(\"./no_comm.txt\"):\n f = open('no_comm.txt', 'a')\n else:\n f = open('no_comm.txt', 'w')\n f.write(str(total))\n f.write(\"\\n\")\n\n\n elif self.comm == 'mse':\n log('Mean Square Error {}'.format(self.mseCount/(self.learn_runs +\n self.test_runs)))\n if os.path.isfile(\"./comm.txt\"):\n f = open('comm.txt', 'a')\n else:\n f = open('comm.txt', 'w')\n f.write(str(self.mseCount/(self.learn_runs + self.test_runs)))\n f.write(\"\\n\")\n\n log('Learn scores: {}'.format(results['learn_scores']))\n log('Test scores: {}'.format(results['test_scores']))\n\n self.__write_to_file__(self.output_file, results)",
"def loop(env, agent, training):\n reward = 0\n done = False\n score = 0\n special_data = {}\n special_data['ale.lives'] = 3\n ob = env.reset()\n while not done:\n \n action = agent.act(ob, reward, done, training=training)\n ob, reward, done, _ = env.step(action)\n score += reward\n # env.render()\n \n # Close the env and write monitor result info to disk\n # print (\"Your score: %d\" % score)\n return score",
"def test_all_scenarios(self):\n\n exr_bash = self.prep_exr()\n percents = [1, 50, 90]\n # TODO: Don't use a for loop, use the trials kwarg\n for i in range(0, 2):\n Simulator().run(attack_types=Attack.runnable_attacks,\n adopt_policies=list(Non_Default_Policies.__members__.values()),\n percents=percents,\n exr_bash=exr_bash)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the table build stage of auto configure.
|
def test_auto_config_table_build(project):
config_name = "Test"
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert len(auto_config.assets) == 0
auto_config._table_build_stage(
material="Fake Material",
mode=AutoConfigureMode.PLAIN
)
assert len(auto_config.assets) == 2
|
[
"def test_build_from_database(self):",
"def build_tables():\n yield setup_tables()\n IOLoop.current().stop()",
"def test_build_creation(self):",
"def test_auto_configure_design_space_build(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n design_space = resources[\"design_space\"]\n\n # When validation succeeds\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_ready):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert len(auto_config.assets) == 5\n assert auto_config.status == \"DESIGN SPACE CREATED\"\n\n # When validation fails\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_invalid):\n with pytest.raises(RuntimeError):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert auto_config.status == \"DESIGN SPACE INVALID\"",
"def aaa_add_init_config_with_table(duthost):\n cmds = []\n cmds.append(\"config aaa authentication login local\")\n cmds.append(\"config aaa authorization local\")\n cmds.append(\"config aaa accounting local\")\n\n output = duthost.shell_cmds(cmds=cmds)['results']\n logger.info(output)\n for res in output:\n pytest_assert(not res['rc'],\n \"AAA init config failed\"\n )",
"def basicsetup(testcase):\n testcase.connection = util.generatedatabase(\":memory:\")\n for app in [\"Core\",\"Anime\",\"AnimeLife\"]:\n module,config = util.loadapp(app)\n util.loadtables(module,config,testcase.connection)\n\n populate_tables(testcase)",
"def test_create_product_table(self):\n _db = Database.instance(\":memory:\")\n try:\n create_product_table()\n except Exception as e:\n print(e)",
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def tacacs_add_init_config_with_table(duthost):\n cmds = []\n cmds.append(\"config tacacs authtype pap\")\n cmds.append(\"config tacacs passkey testing123\")\n cmds.append(\"config tacacs timeout 5\")\n\n output = duthost.shell_cmds(cmds=cmds)['results']\n logger.info(output)\n for res in output:\n pytest_assert(not res['rc'],\n \"TACACS init config failed\"\n )",
"def setUp(self):\n # Level 0 0 table. I.e., first table on level 0\n self.category0 = DynamicTable(name='level0_0', description=\"level0_0 DynamicTable\")\n self.category0.add_row(id=10)\n self.category0.add_row(id=11)\n self.category0.add_row(id=12)\n self.category0.add_row(id=13)\n self.category0.add_column(data=['tag1', 'tag2', 'tag2', 'tag1', 'tag3', 'tag4', 'tag5'],\n name='tags',\n description='custom tags',\n index=[1, 2, 4, 7])\n self.category0.add_column(data=np.arange(4),\n name='myid',\n description='custom ids',\n index=False)\n\n # Aligned table\n self.aligned_table = AlignedDynamicTable(name='aligned_table',\n description='parent_table',\n columns=[VectorData(name='a1', description='a1', data=np.arange(4)), ],\n colnames=['a1', ],\n category_tables=[self.category0, ])\n\n # Parent table\n self.parent_table = DynamicTable(name='parent_table',\n description='parent_table',\n columns=[VectorData(name='p1', description='p1', data=np.arange(4)),\n DynamicTableRegion(name='l1', description='l1',\n data=np.arange(4), table=self.aligned_table)])\n # Super-parent table\n dtr_sp = DynamicTableRegion(name='sl1', description='sl1', data=np.arange(4), table=self.parent_table)\n vi_dtr_sp = VectorIndex(name='sl1_index', data=[1, 2, 3], target=dtr_sp)\n self.super_parent_table = DynamicTable(name='super_parent_table',\n description='super_parent_table',\n columns=[VectorData(name='sp1', description='sp1', data=np.arange(3)),\n dtr_sp, vi_dtr_sp])",
"def _setup_configure_test(self):\n self._config_test = self._test_lib.trfgen_configure_test\n self._config_test.restype = None\n self._config_test.argtypes = (ctypes.c_int32, s1ap_types.struct_test)",
"def test_build_mgi(self):\n\n mgi = Phenoscoring(MGITestConfig()) \n mgi.update()\n \n # in contrast to complete config, this db should have fewer rows\n desctab = ModelDescriptionTable(self.dbfile) \n self.assertEqual(desctab.count_rows(), 6)\n modeltab = ModelPhenotypeTable(self.dbfile) \n self.assertEqual(modeltab.count_rows(), 9)",
"def test_update_existing_build(self):",
"def test_auto_configure_predictor_registration(project):\n # Start from having a table config and table\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 2\n assert auto_config.status == \"TABLE CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n\n # Mock a valid predictor response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_ready):\n auto_config._predictor_registration_stage(\n predictor=predictor,\n print_status_info=False\n )\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR CREATED\"\n\n # Mock an invalid predictor response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_invalid):\n with pytest.raises(RuntimeError):\n auto_config._predictor_registration_stage(\n predictor=predictor,\n print_status_info=False\n )\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR INVALID\"",
"def setUp(self):\n with database() as db:\n db.query('DROP TABLE IF EXISTS test_data')\n db.query('CREATE TABLE test_data (variable INTEGER)')",
"def dbSetUp(self):\n pass",
"def test_create_tables(self):\n self._db.create_tables()\n tables = json.loads(self._db.get_database_info())\n expected_tables = db_connection.Database.get_columns().keys()\n for table in expected_tables:\n assert table in tables.keys()",
"def test_multiple_build_retrieval(self):",
"def setup_build_properties(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the predictor registration stage of auto configure.
|
def test_auto_configure_predictor_registration(project):
# Start from having a table config and table
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert len(auto_config.assets) == 2
assert auto_config.status == "TABLE CREATED"
# Inputs to pass to method
predictor = resources["predictor"]
# Mock a valid predictor response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_ready):
auto_config._predictor_registration_stage(
predictor=predictor,
print_status_info=False
)
assert len(auto_config.assets) == 3
assert auto_config.status == "PREDICTOR CREATED"
# Mock an invalid predictor response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_invalid):
with pytest.raises(RuntimeError):
auto_config._predictor_registration_stage(
predictor=predictor,
print_status_info=False
)
assert len(auto_config.assets) == 3
assert auto_config.status == "PREDICTOR INVALID"
|
[
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_configure_predictor_evaluation(project, caplog):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n evaluator = CrossValidationEvaluator(name=\"Eval\", description=\"\", responses=set())\n\n # Create default w/ a valid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_succeeded):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=None,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Create default w/ an invalid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_failed):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=None,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW FAILED\"\n assert any(r.levelno == logging.WARNING for r in caplog.records)\n\n # Create manual w/ a valid response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_succeeded):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=evaluator,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Create manual w/ a failed response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", 
fake_wait_while_failed):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n auto_config._predictor_evaluation_stage(\n predictor=predictor,\n evaluator=evaluator,\n print_status_info=False\n )\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW FAILED\"\n assert any(r.levelno == logging.WARNING for r in caplog.records)",
"def _setup_configure_test(self):\n self._config_test = self._test_lib.trfgen_configure_test\n self._config_test.restype = None\n self._config_test.argtypes = (ctypes.c_int32, s1ap_types.struct_test)",
"def test_config():\n check_model_exist()\n test_suite = InferenceTest()\n test_suite.load_config(model_path=\"./resnet50_quant/resnet50_quant\")\n test_suite.config_test()",
"def test_post_kv_config(self):\n pass",
"def test_dynaconf():\n assert settings.TESTING is True",
"def verifyConfiguration(self):",
"def test_auto_config_execute(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n project.design_spaces.register(resources[\"design_space\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.design_workflow is None\n\n # Inputs for execute\n objective = ScalarMaxObjective(descriptor_key=\"Fake Target\")\n\n with pytest.raises(ValueError):\n auto_config.execute(score=objective)\n\n # Now create a config with a working design workflow\n project.design_workflows.register(resources[\"design_workflow\"])\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.status == \"DESIGN WORKFLOW CREATED\"\n\n # Mock function to bypass create_default_score call internally\n def _default_score(*args, **kwargs):\n return LIScore(objectives=[], baselines=[])\n\n with mock.patch(\"citrine.builders.auto_configure.create_default_score\", _default_score):\n auto_config.execute(score=objective)\n assert auto_config.design_execution is not None",
"def test_init(self):\n assert self.registration_behaviour.is_registered is False\n assert self.registration_behaviour.registration_in_progress is False\n assert self.registration_behaviour.failed_registration_msg is None\n assert self.registration_behaviour._nb_retries == 0",
"def test_create_registry(self):\n pass",
"def test_config():\n assert not create_app().testing\n assert create_app({\"TESTING\": True}).testing",
"def test_config_put(self):\n pass",
"def test_2_construct_config(self):\n environment = self._dummy_environment('test_2')\n\n plugin_dict = {\n 'plugin_id': UCTT_PLUGIN_ID_DUMMY,\n }\n plugins_dict = {\n 'one': plugin_dict,\n 'two': plugin_dict,\n 'three': plugin_dict,\n }\n\n environment.config.add_source(PLUGIN_ID_SOURCE_DICT, priority=80).set_data({\n UCTT_PROVISIONER_CONFIG_PROVISIONERS_LABEL: plugins_dict,\n UCTT_CLIENT_CONFIG_CLIENTS_LABEL: plugins_dict,\n UCTT_WORKLOAD_CONFIG_WORKLOADS_LABEL: plugins_dict,\n\n UCTT_PROVISIONER_CONFIG_PROVISIONER_LABEL: plugin_dict,\n UCTT_CLIENT_CONFIG_CLIENT_LABEL: plugin_dict,\n UCTT_WORKLOAD_CONFIG_WORKLOAD_LABEL: plugin_dict\n })\n\n self.assertIsInstance(\n environment.add_fixture_from_config(\n type=Type.PROVISIONER,\n label=UCTT_PROVISIONER_CONFIG_PROVISIONER_LABEL).plugin,\n DummyProvisionerPlugin)\n self.assertIsInstance(\n environment.add_fixture_from_config(\n type=Type.CLIENT, label=UCTT_CLIENT_CONFIG_CLIENT_LABEL).plugin,\n DummyClientPlugin)\n self.assertIsInstance(\n environment.add_fixture_from_config(\n type=Type.WORKLOAD, label=UCTT_WORKLOAD_CONFIG_WORKLOAD_LABEL).plugin,\n DummyWorkloadPlugin)\n\n provisioners = environment.add_fixtures_from_config(\n type=Type.PROVISIONER, label=UCTT_PROVISIONER_CONFIG_PROVISIONERS_LABEL)\n\n self.assertIsInstance(provisioners, Fixtures)\n self.assertEqual(len(provisioners), 3)\n\n two = provisioners.get_plugin(instance_id='two')\n\n self.assertIsInstance(two, DummyProvisionerPlugin)\n self.assertEqual(\n provisioners.get_plugin(\n type=Type.PROVISIONER).instance_id,\n 'one')",
"def test_pro_bowlers(self):\n pass",
"def test_classification(init_env, config_file):\n run_all_steps(init_env, config_file)",
"def test_config_get(self):\n pass",
"def test_good_config():\n\n valid_discovery_config(\"esphome\", {\"host\": \"test\", \"port\": 6052})",
"def setup_environment():",
"def test_auto_configure_design_space_build(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n design_space = resources[\"design_space\"]\n\n # When validation succeeds\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_ready):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert len(auto_config.assets) == 5\n assert auto_config.status == \"DESIGN SPACE CREATED\"\n\n # When validation fails\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_invalid):\n with pytest.raises(RuntimeError):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert auto_config.status == \"DESIGN SPACE INVALID\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the predictor evaluation stage of auto configure.
|
def test_auto_configure_predictor_evaluation(project, caplog):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
project.predictors.register(resources["predictor"])
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert len(auto_config.assets) == 3
assert auto_config.status == "PREDICTOR CREATED"
# Inputs to pass to method
predictor = resources["predictor"]
evaluator = CrossValidationEvaluator(name="Eval", description="", responses=set())
# Create default w/ a valid response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_succeeded):
auto_config._predictor_evaluation_stage(
predictor=predictor,
evaluator=None,
print_status_info=False
)
assert len(auto_config.assets) == 4
assert auto_config.status == "PREDICTOR EVALUATION WORKFLOW CREATED"
# Create default w/ an invalid response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_failed):
caplog.clear()
with caplog.at_level(logging.WARNING):
auto_config._predictor_evaluation_stage(
predictor=predictor,
evaluator=None,
print_status_info=False
)
assert len(auto_config.assets) == 4
assert auto_config.status == "PREDICTOR EVALUATION WORKFLOW FAILED"
assert any(r.levelno == logging.WARNING for r in caplog.records)
# Create manual w/ a valid response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_succeeded):
auto_config._predictor_evaluation_stage(
predictor=predictor,
evaluator=evaluator,
print_status_info=False
)
assert len(auto_config.assets) == 4
assert auto_config.status == "PREDICTOR EVALUATION WORKFLOW CREATED"
# Create manual w/ a failed response
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_failed):
caplog.clear()
with caplog.at_level(logging.WARNING):
auto_config._predictor_evaluation_stage(
predictor=predictor,
evaluator=evaluator,
print_status_info=False
)
assert len(auto_config.assets) == 4
assert auto_config.status == "PREDICTOR EVALUATION WORKFLOW FAILED"
assert any(r.levelno == logging.WARNING for r in caplog.records)
|
[
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_config_execute(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n project.design_spaces.register(resources[\"design_space\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.design_workflow is None\n\n # Inputs for execute\n objective = ScalarMaxObjective(descriptor_key=\"Fake Target\")\n\n with pytest.raises(ValueError):\n auto_config.execute(score=objective)\n\n # Now create a config with a working design workflow\n project.design_workflows.register(resources[\"design_workflow\"])\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.status == \"DESIGN WORKFLOW CREATED\"\n\n # Mock function to bypass create_default_score call internally\n def _default_score(*args, **kwargs):\n return LIScore(objectives=[], baselines=[])\n\n with mock.patch(\"citrine.builders.auto_configure.create_default_score\", _default_score):\n auto_config.execute(score=objective)\n assert auto_config.design_execution is not None",
"def test_dynaconf():\n assert settings.TESTING is True",
"def test_auto_configure_predictor_registration(project):\n # Start from having a table config and table\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 2\n assert auto_config.status == \"TABLE CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n\n # Mock a valid predictor response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_ready):\n auto_config._predictor_registration_stage(\n predictor=predictor,\n print_status_info=False\n )\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR CREATED\"\n\n # Mock an invalid predictor response\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_invalid):\n with pytest.raises(RuntimeError):\n auto_config._predictor_registration_stage(\n predictor=predictor,\n print_status_info=False\n )\n assert len(auto_config.assets) == 3\n assert auto_config.status == \"PREDICTOR INVALID\"",
"def test_config():\n check_model_exist()\n test_suite = InferenceTest()\n test_suite.load_config(model_path=\"./resnet50_quant/resnet50_quant\")\n test_suite.config_test()",
"def check_config( self ) :\n\n self._logger.info( 'exercising execution engine...' )\n\n return True",
"def verifyConfiguration(self):",
"def test_post_kv_config(self):\n pass",
"def config_test(self) -> None:\n try:\n util.run_script([self.conf('ctl'), \"-c\", self.nginx_conf, \"-t\"])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))",
"def test_config1(self):\n # Create system call.\n call = \"python tests/hydra/my_app.py --config-name config1.yaml\"\n\n # Run the call as subprocess.\n subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)\n\n # Make sure that .hydra dir is not present.\n assert not path.exists(f\".hydra\")\n # Make sure that default hydra log file is not present.\n assert not path.exists(f\"my_app.log\")",
"def test_config():\n assert not create_app().testing\n assert create_app({\"TESTING\": True}).testing",
"def test_config_get(self):\n pass",
"def test_get_auto_vars(self):\n actual = get_auto_vars(\"config\")\n self.assertEqual(\n actual,\n {\n \"config/global.auto.tfvars\": {\n Variable(\"foo\", \"bar\"),\n Variable(\"dog\", \"cat\"),\n },\n \"config/app1/app.auto.tfvars\": {\n Variable(\"bar\", \"bye\"),\n Variable(\"baz\", \"bat\"),\n },\n \"config/app3/app.auto.tfvars\": {\n Variable(\"bar\", \"bye\"),\n Variable(\"baz\", \"bat\"),\n },\n \"config/team/team.auto.tfvars\": {\n Variable(\"foo\", \"cat\"),\n },\n \"config/app5/app.auto.tfvars\": {\n Variable(\"foo\", (((\"key\", \"value\"),),)),\n },\n },\n )",
"def test():\r\n print \"Testing the rcfile\"\r\n for item in env.keys():\r\n print \"%s => %s\" % (item, env.get(item))",
"def test_environment(self):\n pass",
"def is_auto_gen_test_config(self, module_name):\n if self.is_module(module_name):\n mod_info = self.get_module_info(module_name)\n auto_test_config = mod_info.get('auto_test_config', [])\n return auto_test_config and auto_test_config[0]\n return False",
"def test_get_run_settings(self):\n pass",
"def test_auto_configure_design_space_build(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.tables.build_from_config(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 4\n assert auto_config.status == \"PREDICTOR EVALUATION WORKFLOW CREATED\"\n\n # Inputs to pass to method\n predictor = resources[\"predictor\"]\n design_space = resources[\"design_space\"]\n\n # When validation succeeds\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_ready):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert len(auto_config.assets) == 5\n assert auto_config.status == \"DESIGN SPACE CREATED\"\n\n # When validation fails\n with mock.patch(\"citrine.builders.auto_configure.wait_while_validating\", fake_wait_while_invalid):\n with pytest.raises(RuntimeError):\n auto_config._design_space_build_stage(\n predictor=predictor,\n design_space=design_space,\n print_status_info=False\n )\n assert auto_config.status == \"DESIGN SPACE INVALID\"",
"def test_prepare_environment(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the design space build stage of auto configure.
|
def test_auto_configure_design_space_build(project):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
project.predictors.register(resources["predictor"])
project.predictor_evaluation_workflows.register(resources["pew"])
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert len(auto_config.assets) == 4
assert auto_config.status == "PREDICTOR EVALUATION WORKFLOW CREATED"
# Inputs to pass to method
predictor = resources["predictor"]
design_space = resources["design_space"]
# When validation succeeds
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_ready):
auto_config._design_space_build_stage(
predictor=predictor,
design_space=design_space,
print_status_info=False
)
assert len(auto_config.assets) == 5
assert auto_config.status == "DESIGN SPACE CREATED"
# When validation fails
with mock.patch("citrine.builders.auto_configure.wait_while_validating", fake_wait_while_invalid):
with pytest.raises(RuntimeError):
auto_config._design_space_build_stage(
predictor=predictor,
design_space=design_space,
print_status_info=False
)
assert auto_config.status == "DESIGN SPACE INVALID"
|
[
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_config_table_build(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert len(auto_config.assets) == 0\n\n auto_config._table_build_stage(\n material=\"Fake Material\",\n mode=AutoConfigureMode.PLAIN\n )\n assert len(auto_config.assets) == 2",
"def test_dynaconf():\n assert settings.TESTING is True",
"def setUp(self):\n #cbrandom.toggleDebugMode(True)",
"def setup_build_properties(self):",
"def check_config( self ) :\n\n self._logger.info( 'exercising execution engine...' )\n\n return True",
"def test_build_creation(self):",
"def test_auto_config_execute(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.register(resources[\"table_config\"])\n project.predictors.register(resources[\"predictor\"])\n project.predictor_evaluation_workflows.register(resources[\"pew\"])\n project.design_spaces.register(resources[\"design_space\"])\n\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.design_workflow is None\n\n # Inputs for execute\n objective = ScalarMaxObjective(descriptor_key=\"Fake Target\")\n\n with pytest.raises(ValueError):\n auto_config.execute(score=objective)\n\n # Now create a config with a working design workflow\n project.design_workflows.register(resources[\"design_workflow\"])\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n assert auto_config.status == \"DESIGN WORKFLOW CREATED\"\n\n # Mock function to bypass create_default_score call internally\n def _default_score(*args, **kwargs):\n return LIScore(objectives=[], baselines=[])\n\n with mock.patch(\"citrine.builders.auto_configure.create_default_score\", _default_score):\n auto_config.execute(score=objective)\n assert auto_config.design_execution is not None",
"def _setup_configure_test(self):\n self._config_test = self._test_lib.trfgen_configure_test\n self._config_test.restype = None\n self._config_test.argtypes = (ctypes.c_int32, s1ap_types.struct_test)",
"def have_configs(self):\n self.assert_(self.config.has_option('PDC','passwd'))\n self.assert_(self.config.has_option('PDC','principal'))\n self.assert_(self.config.has_option('PDC','host'))\n self.assert_(self.config.has_option('PDC','smrl2_dir'))\n self.assertTrue(self.config.has_option('PDC','user'))",
"def testSetup(self) -> None:\n test_state = state.DFTimewolfState(config.Config)\n processor = gcp_crt.GCPCloudResourceTree(test_state)\n processor.SetUp(project_id='test-project-hkhalifa',\n location='us-central1-a',\n resource_name='vm1',\n resource_id='1809669853321684335',\n resource_type='gcp_instance',\n mode='offline')\n self.assertEqual(processor.project_id, 'test-project-hkhalifa')\n self.assertEqual(processor.resource_name, 'vm1')\n self.assertEqual(processor.resource_type, 'gcp_instance')\n self.assertEqual(processor.mode, gcp_crt_helper.OperatingMode.OFFLINE)",
"def _check_custom_build(self):\n pass",
"def func_config_summary(args=None):\n\n print(\"buildtest version: \", BUILDTEST_VERSION)\n print(\"buildtest Path:\", shutil.which(\"buildtest\"))\n\n print(\"\\n\")\n print(\"Machine Details\")\n print(\"{:_<30}\".format(\"\"))\n print(\"Operating System: \", system.system[\"os\"])\n print(\"Hostname: \", system.system[\"host\"])\n print(\"Machine: \", system.system[\"machine\"])\n print(\"Processor: \", system.system[\"processor\"])\n print(\"Python Path\", system.system[\"python\"])\n print(\"Python Version:\", system.system[\"pyver\"])\n print(\"User:\", getpass.getuser())\n\n print(\"\\n\")\n\n print(\"Buildtest Settings\")\n print(\"{:_<80}\".format(\"\"))\n print(f\"Buildtest Settings: {buildtest_configuration.file}\")\n\n executors = []\n for executor_type in buildtest_configuration.target_config.get(\"executors\").keys():\n for name in buildtest_configuration.target_config[\"executors\"][\n executor_type\n ].keys():\n executors.append(f\"{executor_type}.{name}\")\n\n print(\"Executors: \", executors)\n\n print(\"Buildspec Cache File:\", BUILDSPEC_CACHE_FILE)\n print(\"\\n\")\n\n print(\"Buildtest Schemas\")\n print(\"{:_<80}\".format(\"\"))\n print(\"Available Schemas:\", supported_schemas)",
"def configure(self):\n if self.name == 'ncm-ncd':\n self.configure_ncm_ncd()\n\n if self.name == 'maven-tools':\n self.configure_maven_tools()\n\n if self.name == 'CAF':\n self.configure_caf()\n\n if self.name == 'CCM':\n self.configure_ccm()\n\n if self.name == 'configuration-modules-grid':\n self.configure_components_grid()\n\n if self.name == 'configuration-modules-core':\n self.configure_components()\n\n if self.name == 'template-library-core':\n self.configure_template_library_core()",
"def create_design_config(self) -> bool:\n design_config = self.design_config_path()\n\n # Load input files and check that they are all Verilog.\n if not self.check_input_files([\".v\", \".sv\"]):\n return False\n abspath_input_files = list(map(lambda name:\n os.path.join(os.getcwd(), name), self.input_files))\n\n # Add any verilog_synth wrappers (which are needed in some \n # technologies e.g. for SRAMs) which need to be synthesized.\n abspath_input_files += self.technology.read_libs([\n hammer_tech.filters.verilog_synth_filter\n ], hammer_tech.HammerTechnologyUtils.to_plain_item)\n\n # Generate constraints\n input_sdc = os.path.join(self.run_dir, \"input.sdc\")\n unit = self.get_time_unit().value_prefix + self.get_time_unit().unit\n with open(input_sdc, \"w\") as f:\n f.write(\"set_units -time {}\\n\".format(unit))\n f.write(self.sdc_clock_constraints)\n f.write(\"\\n\")\n f.write(self.sdc_pin_constraints)\n\n # TODO: i am blindly reading in all libs for all corners. but this is\n # not a performance issue for nangate45\n extra_lefs = set([extra_lib.library.lef_file for extra_lib in self.technology.get_extra_libraries()\n if extra_lib.library.lef_file is not None])\n extra_libs = set([extra_lib.library.nldm_liberty_file for extra_lib in self.technology.get_extra_libraries()\n if extra_lib.library.nldm_liberty_file is not None])\n\n with open(design_config, \"w\") as f:\n f.write(dd(\"\"\"\n export DESIGN_NICKNAME = {design}\n export DESIGN_NAME = {design}\n export PLATFORM = {node}\n export VERILOG_FILES = {verilogs}\n export SDC_FILE = {sdc}\n\n export ADDITIONAL_LEFS = {extra_lefs}\n export ADDITIONAL_LIBS = {extra_libs}\n\n # These values must be multiples of placement site, which is\n # (x=0.19 y=1.4) for nangate45\n export DIE_AREA = {die_area}\n export CORE_AREA = {core_area}\n\n export CLOCK_PERIOD = {period}\n\n \"\"\".format(\n design=self.top_module,\n node=self.get_setting(\"vlsi.core.technology\"),\n verilogs=\" \".join(abspath_input_files),\n 
sdc=input_sdc,\n extra_lefs=\" \".join(extra_lefs),\n extra_libs=\" \".join(extra_libs),\n die_area=self._floorplan_bbox(),\n core_area=self._floorplan_bbox(),\n period=self._clock_period_value(),\n )))\n return True",
"def test_config_ide(self, mock_config, mock_paths, mock_preference):\n # Mock SDkConfig flow to not to generate real jdk config file.\n mock_preference.return_value = None\n module_path = os.path.join(self._TEST_DIR, 'test')\n idea_path = os.path.join(module_path, '.idea')\n os.makedirs(idea_path)\n shutil.copy(IdeUtilUnittests._MODULE_XML_SAMPLE, idea_path)\n util_obj = ide_util.IdeUtil()\n util_obj.config_ide(module_path)\n self.assertFalse(mock_config.called)\n self.assertFalse(mock_paths.called)",
"def do_configure():\n if flag_do_fetch:\n fetch_in_volume()\n dochdir(ssdroot)\n targdir = flag_subvol\n if flag_snapshot:\n targdir = flag_snapshot\n do_configure_binutils(targdir)\n do_setup_cmake(targdir)",
"def setUp(self):\r\n\r\n self.DUT = Component()",
"def test_config():\n assert not create_app().testing\n assert create_app({\"TESTING\": True}).testing"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handle entity registry update events.
|
def entity_registry_updated(self, event):
if event.data["action"] == "remove":
self.remove_empty_devices()
|
[
"async def update_entity_registry() -> None:\n\n get_base().entities = await hass_entities()",
"async def update_registries() -> None:\n\n await update_area_registry()\n await update_entity_registry()",
"def on_entity_update(self, event):\n self.entity.cubolt_entity.on_entity_update(event)",
"async def async_handle_node_update(hass: HomeAssistant, node: OZWNode):\n dev_registry = await get_dev_reg(hass)\n # grab device in device registry attached to this node\n dev_id = create_device_id(node)\n device = dev_registry.async_get_device({(DOMAIN, dev_id)})\n if not device:\n return\n # update device in device registry with (updated) info\n for item in dev_registry.devices.values():\n if item.id != device.id and item.via_device_id != device.id:\n continue\n dev_name = create_device_name(node)\n dev_registry.async_update_device(\n item.id,\n manufacturer=node.node_manufacturer_name,\n model=node.node_product_name,\n name=dev_name,\n )",
"def discovery_callback(config: TasmotaEntityConfig) -> None:\n _LOGGER.debug(\n \"Got update for entity with hash: %s '%s'\",\n self._discovery_hash,\n config,\n )\n if not self._tasmota_entity.config_same(config):\n # Changed payload: Notify component\n _LOGGER.debug(\"Updating component: %s\", self.entity_id)\n self.hass.async_create_task(self.discovery_update(config))\n else:\n # Unchanged payload: Ignore to avoid changing states\n _LOGGER.debug(\"Ignoring unchanged update for: %s\", self.entity_id)",
"async def _refresh_entities(self):\n data = {\n QUICK_MODE: quick_mode_to_json(self._quick_mode),\n HOLIDAY_MODE: holiday_mode_to_json(self._holiday_mode),\n }\n self._hass.bus.async_fire(REFRESH_EVENT, data)",
"def _tracker_updater(self):\n event_dispatcher.subscribe(\n [NewTrackerAddedEvent, TrackerConfigChangedEvent],\n self._config_changes_handler)\n event_dispatcher.subscribe([TrackerDeletedEvent],\n self._tracker_deletion_handler)\n\n gevent.spawn(event_dispatcher.dispatch)",
"def agent_updated(self, context, payload):\n self.fullsync = True\n LOG.info(_LI(\"agent_updated by server side %s!\"), payload)",
"async def test_device_update_listener(\n hass: HomeAssistant,\n device_registry: dr.DeviceRegistry,\n entity_registry: er.EntityRegistry,\n) -> None:\n device = get_device(\"Office\")\n\n mock_setup = await device.setup_entry(hass)\n await hass.async_block_till_done()\n\n with patch(DEVICE_FACTORY, return_value=mock_setup.api):\n hass.config_entries.async_update_entry(mock_setup.entry, title=\"New Name\")\n await hass.async_block_till_done()\n\n device_entry = device_registry.async_get_device(\n identifiers={(DOMAIN, mock_setup.entry.unique_id)}\n )\n assert device_entry.name == \"New Name\"\n for entry in er.async_entries_for_device(entity_registry, device_entry.id):\n assert (\n hass.states.get(entry.entity_id)\n .attributes[ATTR_FRIENDLY_NAME]\n .startswith(\"New Name\")\n )",
"def update(self, event):\n raise NotImplementedError('update event is not implemented')",
"def entity_added(self, entity):\r\n\t\tpass",
"def input(self, _event):\n if _event.type == GAME_EVENT and _event.reason == GameEventType.HUD_UPDATE:\n for entity in self.observing:\n entity.artifacts[SpriteArtifact.NAME].sprite.updatehud(_event.caps, _event.lifes, _event.points)\n self.dirty = True",
"async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n data: SynologyDSMData = hass.data[DOMAIN][entry.unique_id]\n async_add_entities(\n SynoDSMUpdateEntity(data.api, data.coordinator_central, description)\n for description in UPDATE_ENTITIES\n )",
"def _handle_EditConfigHookEvent (self, event):\n log.debug(\"Received %s event...\" % event.__class__.__name__)\n request_id = event.callback.request_id\n deploy_status = self.status_mgr.get_status(id=request_id)\n if event.was_error():\n log.debug(\"Update failed status for service request: %s...\" %\n request_id)\n deploy_status.set_domain_failed(domain=event.domain)\n else:\n log.debug(\"Update success status for service request: %s...\" % request_id)\n deploy_status.set_domain_ok(domain=event.domain)\n if isinstance(event.callback.data, NFFG):\n log.log(VERBOSE, \"Changed topology:\\n%s\" % event.callback.data.dump())\n domain_mgr = self.domains.get_component_by_domain(event.domain)\n if domain_mgr is None:\n log.error(\"DomainManager for domain: %s is not found!\" % event.domain)\n return\n if isinstance(domain_mgr, UnifyDomainManager) and domain_mgr.polling:\n log.debug(\"Polling in domain: %s is enabled! Skip explicit update...\"\n % event.domain)\n domain_mgr.update_topology_cache()\n if CONFIG.one_step_update():\n log.debug(\"One-step-update is enabled. Skip explicit domain update!\")\n else:\n self.DoVManager.update_domain(domain=event.domain,\n nffg=event.callback.data)\n log.debug(\"Installation status: %s\" % deploy_status)\n if not deploy_status.still_pending:\n if deploy_status.success:\n log.info(\"All installation process has been finished for request: %s! \"\n \"Result: %s\" % (deploy_status.id, deploy_status.status))\n if CONFIG.one_step_update():\n log.info(\"One-step-update is enabled. Update DoV now...\")\n self.DoVManager.set_global_view(nffg=deploy_status.data)\n elif deploy_status.failed:\n log.error(\"All installation process has been finished for request: %s! \"\n \"Result: %s\" % (deploy_status.id, deploy_status.status))\n if CONFIG.one_step_update():\n log.warning(\"One-step-update is enabled. 
\"\n \"Skip update due to failed request...\")\n if CONFIG.rollback_on_failure():\n self.__do_rollback(status=deploy_status,\n previous_state=self.DoVManager.get_backup_state())\n result = InstallationFinishedEvent.get_result_from_status(deploy_status)\n log.info(\"Overall installation result: %s\" % result)\n # Rollback set back the domains to WAITING status\n if not deploy_status.still_pending:\n is_fail = InstallationFinishedEvent.is_error(result)\n self._layer_API._process_mapping_result(nffg_id=request_id,\n fail=is_fail)\n self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,\n id=request_id,\n result=result)\n else:\n log.debug(\"Installation process is still pending! Waiting for results...\")",
"def register_for_changed_events(self):\n pass",
"def em_update(self):\n raise NotImplementedError",
"def register_for_changed_log_entries(self):\n pass",
"def do_update(self):\n pass",
"def handle_update(update: Update, context: CallbackContext) -> None:\n global bot\n\n # have the message handler dispatch the message\n dispatch(bot, update)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove devices with no entities.
|
def remove_empty_devices(self):
entity_registry = er.async_get(self.hass)
device_registry = dr.async_get(self.hass)
device_list = dr.async_entries_for_config_entry(
device_registry, self.config_entry.entry_id
)
for device_entry in device_list:
entities = er.async_entries_for_device(
entity_registry, device_entry.id, include_disabled_entities=True
)
if not entities:
_LOGGER.debug("Removing orphaned device: %s", device_entry.name)
device_registry.async_update_device(
device_entry.id, remove_config_entry_id=self.config_entry.entry_id
)
|
[
"def destroy(self):\n for item in self.__dict__:\n self.removeDevice(item)",
"def delete(self, *devices):\n for d in devices:\n d.delete()",
"def test_setup_component_without_devices(self):\n self.hass.data[dyson.DYSON_DEVICES] = []\n add_devices = MagicMock()\n dyson.setup_platform(self.hass, None, add_devices)\n add_devices.assert_not_called()",
"def entity_registry_updated(self, event):\n if event.data[\"action\"] == \"remove\":\n self.remove_empty_devices()",
"def reset(self):\n self.devices_home = []",
"def filter_empty_sensors(self,dl,head=0):\n res = []\n for s in self.sensors:\n if dl.tmpo_has_data(s.sensor_id,head):\n res.append(s)\n\n self.sensors = res",
"def teardown_class(cls):\n cls.logger.info(f\"AWS IoT Core test teardown - removing devices: {cls.devices}\")\n # for device_id in cls.devices:\n # delete_device(device_id)",
"async def cleanup_device_registry(\n hass: HomeAssistant, device_manager: TuyaDeviceManager\n) -> None:\n device_registry = dr.async_get(hass)\n for dev_id, device_entry in list(device_registry.devices.items()):\n for item in device_entry.identifiers:\n if DOMAIN == item[0] and item[1] not in device_manager.device_map:\n device_registry.async_remove_device(dev_id)\n break",
"def empty( self ):\n LectionInSystem.objects.filter( system=self ).delete()",
"def detachGPU(self):\n cards = self.requestedCards()\n for c in cards:\n if len(self.cards[c]) == 2:\n pciV, pciA = self.cards[c]\n self.detachDeviceLink(c, pciV, \"video\")\n self.detachDeviceLink(c, pciA, \"audio\")\n if len(self.cards[c]) == 1:\n pciV = self.cards[c][0]\n self.detachDeviceLink(c, pciV, \"video\")",
"def delete(self):\n length=len(self.userResources) \n for i in range(length-1, -1, -1):\n if self.userResources[i]._idevice is None:\n self.userResources[i]._idevice = self\n self.userResources[i].delete()\n if self.parentNode:\n self.ChangedParentNode(self.parentNode, None)\n self.parentNode.idevices.remove(self)\n self.parentNode = None",
"def getUnmeteredDevices() -> List[str]:\n allDevices = getDeviceInfo()\n deviceMapping = getDeviceMapping()\n # All directly metered appliances\n meteredAppliances = []\n for k in deviceMapping:\n meteredAppliances.extend(deviceMapping[k][\"appliances\"])\n meteredAppliances.extend(getChangingDevices())\n unmetered = [m for m in allDevices if m not in meteredAppliances]\n return unmetered",
"async def test_device_remove(\n hass: HomeAssistant,\n mqtt_mock: MqttMockHAClient,\n caplog: pytest.LogCaptureFixture,\n device_reg,\n entity_reg,\n setup_tasmota,\n) -> None:\n config = copy.deepcopy(DEFAULT_CONFIG)\n mac = config[\"mac\"]\n\n async_fire_mqtt_message(\n hass,\n f\"{DEFAULT_PREFIX}/{mac}/config\",\n json.dumps(config),\n )\n await hass.async_block_till_done()\n\n # Verify device entry is created\n device_entry = device_reg.async_get_device(\n connections={(dr.CONNECTION_NETWORK_MAC, mac)}\n )\n assert device_entry is not None\n\n async_fire_mqtt_message(\n hass,\n f\"{DEFAULT_PREFIX}/{mac}/config\",\n \"\",\n )\n await hass.async_block_till_done()\n\n # Verify device entry is removed\n device_entry = device_reg.async_get_device(\n connections={(dr.CONNECTION_NETWORK_MAC, mac)}\n )\n assert device_entry is None",
"def clear_cloud(fs):\r\n\r\n fs.remove_all()\r\n print(\"Your entire filesystem has been emptied.\\n\")",
"def empty_appd_collection():\n appd_coll.delete_many({})",
"def unknown_devices(self):\n return self._id_manager.unknown_devices",
"async def _async_remove_all_device_links(self, address: Address):\n if self._devices.get(address) is None:\n return\n for rec in self[address].aldb.find(target=self.modem.address, in_use=True):\n if rec.group != 0 or rec.is_controller: # do not process group 0 responder\n self[address].aldb.modify(mem_addr=rec.mem_addr, in_use=False)\n await self[address].aldb.async_write()",
"def removeDevice(self, item, number = None):\n if number == None: # remove all\n print \"removing all\", item, \"devices...\"\n if item in self.__dict__:\n for device in self.__dict__[item]:\n device.setVisible(0)\n device.setActive(0)\n device.destroy()\n del self.__dict__[item]\n else:\n raise AttributeError,\"no such device: '%s'\" % item\n else:\n print \"removing %s[%d] device...\" % (item, number)\n if item in self.__dict__:\n device = self.__dict__[item][number]\n device.setVisible(0)\n device.setActive(0)\n device.destroy()\n del self.__dict__[item][number]\n else:\n raise AttributeError,\"no such device: %s[%d]\" % (item, number)\n return \"Ok\"",
"def list_devices(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Via the requests library, performs a post with the payload to the path
|
def post(self, payload={}, path=""):
return requests.post(self.base_url + path,
data=json.dumps(payload),
headers=self.headers)
|
[
"def post(self, path, data):\n return(self._request('POST', path, json=data))",
"def _post(self, path, data):\n url = self._url(path)\n if self.debug:\n pprint(data)\n resp = self.session.post(url, data=data)\n if self.debug:\n print('CODE', resp.status_code)",
"def post(self, url, params=b'', headers=None, extra_environ=None,\n status=None, upload_files=None, expect_errors=False):\n return self._gen_request('POST', url, params=params, headers=headers,\n extra_environ=extra_environ,status=status,\n upload_files=upload_files,\n expect_errors=expect_errors)",
"def post(self, resource, jsonbody=None, files=None):\n return self.request(\n method=\"post\", resource=resource, jsonbody=jsonbody, files=files\n )",
"def post(self, payload, **kwargs):\n url, content, kwargs = self._process_arguments(payload, **kwargs)\n\n deferred = self._post(url, content, **kwargs)\n deferred.addCallback(self.check_response)\n deferred.addCallback(treq.content)\n deferred.addErrback(self.log_error)\n\n return deferred",
"def send_post_request(url, data):\n post_data = {\n 'data': data\n }\n return requests.post(url, data=post_data)",
"def requester(payload):\n\n r = requests.post(url,headers=encabezado,json=payload)\n return r",
"def api_call_post(path, fields):\n try:\n return api_interpret(itapi.post(path, fields))\n except ITTechnicalAPIError as e:\n return api_interpret(e.response, e.status)",
"def send_request(self, path, post=None, json_data=None, headers=None,\n method=None):\n if headers is None:\n headers = {}\n if json_data is not None:\n post = json.dumps(json_data)\n headers['Content-Type'] = 'application/json'\n request = webapp2.Request.blank(path, POST=post, headers=headers)\n if method:\n request.method = method\n return request.get_response(main.app)",
"def post(self, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params):\n return self.request('POST', url=url, headers=headers, timeout=timeout, **params)",
"def do_post(cs, args):\n\n url = args.url\n\n #translate the endpoint shortcut into an actual url\n (endpoint, token) = get_endpoint_and_token(args)\n\n curl_args = ''\n if url:\n curl_args = endpoint + url\n\n curl_args = curl_args + \" -H \\\"X-Auth-Token: \" + token + \"\\\"\"\n curl_args = curl_args + \" -H \\\"Content-Type: application/json\\\"\"\n #this will tell curl to read data from stdin\n curl_args = curl_args + \" -X POST -d @-\"\n\n out = curl(args, curl_args)\n if args.debug:\n print out\n else:\n try:\n parsed = json.loads(out)\n print json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': '))\n except:\n print out",
"def test_request_post(self):\n r = self.base._request('/post', 'POST', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/post')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')\n self.assertEqual(r['form']['foo'], 'bar')",
"def post(self, url, data):\n json_data = json.dumps(data)\n request = urllib.request.Request(\n self.compose_url(url),\n json_data.encode('utf-8'),\n {'Content-Type': 'application/json'}\n )\n return self.send_request(request)",
"def post(self, url_path):\n # Check that the ShortURL is valid.\n short_url = yield self._CheckShortURL(url_path)\n\n # Invoke the derived class to handle the request.\n self._HandlePost(short_url, **short_url.json)",
"def _post_request(self, apiname, params):\n url = \"http://{host:}:{port:}/api/{api:}\".format(\n host=self.host, port=self.port, api=apiname\n )\n r = requests.post(url, json=params)\n try:\n return r.json()\n except json.decoder.JSONDecodeError:\n print(r.text)\n raise",
"def _session_post(self, url, data=None, **kwargs):\n return self.session.request(\n method='post', url=url, data=data, **kwargs\n )",
"def post_mock(url, params=None, payload=payload):\n if not payload:\n payload = {\n 'url': url,\n 'params': params,\n }\n return RequestResponseStub(payload=payload)",
"def _post(self, data=None, url_name=None, url_args=None,\r\n url_kwargs=None, get_kwargs=None, url=None, *args, **kwargs):\r\n url = url or self._url(url_name, url_args, url_kwargs, get_kwargs)\r\n data = self.post_data if data is None else data\r\n return self.client.post(path=url, data=data, *args, **kwargs)",
"def do_post(self,data=None,params={}):\n if data and params:\n raise ValueError('Either data or params can be submitted to be the POST body, but not both.')\n \n post_data = json.dumps(data) if data else params\n \n response = requests.post('%s/%s.json' % (self.service_url,self.descriptor['slug']),\n data=post_data,\n auth=(self.user,self.password))\n \n return self.process_response(response)",
"def post(self, endpoint, payload):\n if self.host not in endpoint:\n endpoint = self.host + str(endpoint)\n response = self.session.post(endpoint, str(payload))\n response.raise_for_status()\n\n return response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Via the requests library, performs a put with the payload to the path
|
def put(self, payload={}, path=""):
return requests.put(self.base_url + path,
data=json.dumps(payload),
headers=self.headers)
|
[
"def put(self, path, data):\n return(self._request('PUT', path, json=data))",
"def _put(self, subpath: str, data: any = None) -> None:\n self._request('put', subpath, data, None)",
"def put(self, url, params=b'', headers=None, extra_environ=None,\n status=None, upload_files=None, expect_errors=False):\n return self._gen_request('PUT', url, params=params, headers=headers,\n extra_environ=extra_environ,status=status,\n upload_files=upload_files,\n expect_errors=expect_errors)",
"def test_request_put(self):\n r = self.base._request('/put', 'PUT', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/put')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')\n self.assertEqual(r['form']['foo'], 'bar')",
"def put_url(urlstr, src_path):\n with open(src_path, \"rb\") as f:\n res = requests.put(urlstr, data=f)\n raise_for_status_and_print_error(res)",
"def put(self, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params):\n return self.request('PUT', url=url, headers=headers, timeout=timeout, **params)",
"def do_put(url, content_type = 'text/plain', accept = 'text/plain', categories = [], attributes = [], links = [], locations = []):\n return [], [], [], []",
"def put(self):\n failed, model, entity = self._get_model_and_entity(True, True)\n if failed: return\n jobj = jsonutil.receive_json(self.request)\n jobj = jsonutil.update_entity(entity, jobj)\n self._serve(jobj)\n updated_entity_path = \"/%s/%s\" % (self._classname, jobj['id'])\n self.response.set_status(200, 'Updated entity %s' % updated_entity_path)",
"def put(self, uri, auth=None, **kwargs):\n if (auth is None):\n auth = (self.user, self.password)\n return requests.put(uri, auth=auth, **kwargs)",
"async def put(self, container, path, access_key=None, raise_for_status=None, body=None, append=None):\n return await self._transport.request(\n container,\n access_key or self._access_key,\n raise_for_status,\n v3io.dataplane.request.encode_put_object,\n locals(),\n )",
"def handle_put_simple(url, req):\n client = boto3.client(\"s3\")\n key = get_key(url.path)\n\n # for simple upload, return 200 for initial dry_run request\n if \"dry_run\" in url.path:\n return httmock.response(200, \"OK\")\n\n # simple upload\n data = req.body.read(req.body.filesize)\n client.put_object(Body=data, Bucket=\"test-bucket\", Key=key)\n return httmock.response(200, \"OK\")",
"def put(self, endpoint, payload):\n if self.host not in endpoint:\n endpoint = self.host + str(endpoint)\n response = self.session.put(endpoint, str(payload))\n response.raise_for_status()\n\n return response",
"def put(self, id):\n data = request.json\n update_trucker(id, data)\n return None, 204",
"def put(self, url, representation, media_type, headers=None):\n extra_headers = {'Content-Type': media_type}\n if headers is not None:\n extra_headers.update(headers)\n return self._request(\n url, representation, 'PUT', extra_headers=extra_headers)",
"def test_update_document_using_put(self):\n pass",
"def put_ajax(\n self,\n path,\n data=\"\",\n content_type=CONTENT_TYPE,\n follow=False,\n secure=False,\n **extra,\n ):\n return self.put(path, data, content_type, follow, secure, **extra)",
"def put(self, api_function, payload):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return requests.put('https://{0}/wapi/{1}/{2}'\n .format(self.url, self.vers, api_function),\n data=payload,\n headers={'Authorization': 'Basic {0}'\n .format(self.creds)},\n verify=False)",
"def test_put_valid(self):\n data = {'age': 100}\n response = self.app.put('/api/a/actors/Bruce Willis',\n data=json.dumps(data),\n headers=self.headers)\n data = json.loads(response.get_data(as_text=True))\n self.assertEqual(data['age'], 100)\n\n data = {'year': 2005}\n response = self.app.put('/api/a/movies/The First Deadly Sin',\n data=json.dumps(data),\n headers=self.headers)\n data = json.loads(response.get_data(as_text=True))\n self.assertEqual(data['year'], 2005)",
"def put_request_with_retries(url, json_payload=None, plaintext_payload=None, return_json=True, url_encoded_payload=None,\n khoros_object=None, auth_dict=None, headers=None, multipart=False, content_type=None,\n verify=None, proxy_user_object=None):\n url = _add_json_query_to_uri(url, return_json)\n content_type = '' if not content_type else content_type\n return payload_request_with_retries(url, 'put', json_payload=json_payload, plaintext_payload=plaintext_payload,\n url_encoded_payload=url_encoded_payload, return_json=return_json,\n khoros_object=khoros_object, auth_dict=auth_dict, headers=headers,\n multipart=multipart, content_type=content_type.lower(), verify=verify,\n proxy_user_object=proxy_user_object)",
"def put(self, *args, **kwargs):\n self.method_not_allowed_error('PUT')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Via the requests library, performs a patch with the payload to the path
|
def patch(self, payload={}, path=""):
return requests.patch(self.base_url + path,
data=json.dumps(payload),
headers=self.headers)
|
[
"def method_patch(self, uri, **kwargs):\r\n return self._api_request(uri, \"PATCH\", **kwargs)",
"def sample_patch_request(host, username, password, resource, data):\n # build the URL\n url = urlunparse(('https', host, resource, None, None, None))\n print \"PATCH: %s\" % url\n\n return requests.patch(url, json=data, auth=HTTPBasicAuth(username, password), verify=False)",
"def patch(self):\n req_op = self.get_argument('op')\n req_path = self.get_argument('path')\n req_value = self.get_argument('value', None)\n req_from = self.get_argument('from', None)\n\n response = ontology_patch_handler(req_op, req_path, req_value,\n req_from)\n self.write(response)",
"def patch(self, uri, auth=None, **kwargs):\n if (auth is None):\n auth = (self.user, self.password)\n return requests.patch(uri, auth=auth, **kwargs)",
"def rf(monkeypatch, rf):\n def rf_patch(self, path, data='', content_type='application/octet-stream',\n **extra):\n \"\"\"Prepare PATCH request\"\"\"\n return self.generic('PATCH', path, data, content_type, **extra)\n\n from django.test.client import RequestFactory\n monkeypatch.setattr(RequestFactory, 'patch', rf_patch, raising=False)\n return rf",
"def test_patch_path(self):\n self.assertEqual(\n utils.patch_path(\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1',\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml',\n ), '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml')",
"def json_patch():\n\n try:\n data = request.json\n\n if isinstance(data, list) and len(data) == 2:\n\n doc = data[0]\n patch = data[1]\n\n result = jsonpatch.apply_patch(doc, patch)\n\n response = jsonify({'data': result})\n return response\n\n else:\n msg = {'errors': [{'title': 'Invalid request',\n 'details': 'Body was not a array of two JSON elements, ex: [{},{}]. '\n 'First element is the JSON doc to patch, second element is the JSON patch'}]}\n return jsonify(msg), 400\n\n except Exception as e:\n msg = {'errors': [{'title': 'Invalid request', 'details': str(e)}]}\n return jsonify(msg), 400",
"def test_disallow_patch_many(self):\r\n response = self.app.patch('/api/person', data=dumps(dict(name='foo')))\r\n assert response.status_code == 405",
"def parallel_patch(\n self,\n method_args: List[Dict[str, Any]],\n max_workers: int = 5,\n ) -> ListResponse:\n for method_arg in method_args:\n method_arg[\"method\"] = \"patch\"\n\n return self.parallel_request(method_args=method_args, max_workers=max_workers)",
"def patch(self, endpoint=None, data=None, json=None, callback=None, callback_kwargs=None):\n return self._call(\"PATCH\",\n endpoint=endpoint,\n data=data,\n json=json,\n callback=callback,\n callback_kwargs=callback_kwargs)",
"def update_request():",
"def test_update_recipe_id(client):\n resp = client.patch('/recipe/1', json={'id': '2'})\n assert resp.status_code == server.HTTP_BAD_REQUEST",
"def patch():\n if getattr(httplib, PATCH_FLAG, False):\n return\n # we set an attribute to avoid multiple wrapping\n setattr(httplib, PATCH_FLAG, True)\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPConnection._send_request',\n _send_request\n )\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPConnection.getresponse',\n _xray_traced_http_getresponse\n )\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPResponse.read',\n _xray_traced_http_client_read\n )",
"def patch(self, request, project_id):\n data = json.loads(request.body)\n action = data.get('action')\n try:\n operation = getattr(self, action.lower())\n except AttributeError:\n LOGGER.error(\n 'Invalid action.',\n extra=request.POST.dict(),\n exc_info=True\n )\n raise ApiException(\n 'Invalid action.',\n 403,\n request.POST.dict()\n )\n return operation(request, project_id, **data)",
"def patch(self, *args, **kwargs):\n self.method_not_allowed_error('PATCH')",
"def test_patch_request_response_404_when_resource_does_not_exists(api):\n data = b'abcd\\nefgh\\nijkl\\nmnop\\n'\n resp = request_creation(len(data), api)\n assert resp.status_code == 201\n\n headers = {\n 'Content-Type': 'application/offset+octet-stream',\n 'Upload-Offset': '0',\n 'Tus-Resumable': '1.0.0'\n }\n resource_path = f'/files/{str(uuid.uuid4())}'\n resp = api.requests.patch(resource_path, headers=headers, data=data[0:5])\n\n assert resp.status_code == 404",
"def test_custom_client_patch_methods():\n client = BlogTestClient()\n responses.add(responses.PATCH, 'http://dev/api/blogs/1',\n body='''\n {\"id\": 1, \"title\": \"blog title\",\n \"slug\": \"blog-title\",\n \"content\": \"This is some content\"}''',\n status=200,\n content_type='application/json')\n data = {\n \"title\": \"blog title\",\n }\n result = client.patch_blog(uid=1, data=data)\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == 'http://dev/api/blogs/1'\n assert responses.calls[0].request.body == json.dumps(data)\n assert responses.calls[0].request.method == 'PATCH'\n assert isinstance(result, list)\n assert isinstance(result[0], BlogResource)\n resource = result[0]\n assert resource.title == 'blog title'",
"def test_update_comment_of_specific_redflag(self):\n self.app.post(\"/api/v1/red-flags/1/comment\", headers={'Content-Type': 'application/json'},\n data = json.dumps(self.redflag))\n response = self.app.patch(\"/api/v1/red-flags/1/comment\", headers={'Content-Type': 'application/json'},\n data = json.dumps({\"comment\" : \"police wanted money to pass the offense\"}))\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200) \n self.assertIn(\"Successfully updated redflag comment\",\n str(result))",
"def test_patch_request_apply_received_bytes_at_given_offset(api):\n data = b'abcd\\nefgh\\nijkl\\nmnop\\n'\n resp = request_creation(len(data), api)\n assert resp.status_code == 201\n resource_path = resp.headers['Location']\n\n for i in range(0, 4):\n headers = {\n 'Content-Type': 'application/offset+octet-stream',\n 'Upload-Offset': f'{i * 5}',\n 'Tus-Resumable': '1.0.0'\n }\n resp = api.requests.patch(resource_path, headers=headers, data=data[i*5:(i+1)*5])\n\n assert resp.status_code == 204\n assert resp.headers['Upload-Offset'] == str((i+1) * 5)\n assert resp.headers['Tus-Resumable'] == '1.0.0'\n assert resp.headers['Cache-Control'] == 'no-store'",
"def _patch_update():\n def patched_update(self, *args, **kwargs):\n \"\"\"\n Patched version of Resource.update which send update requests\n containing only the properties specified as arguments to the\n method. If no properties are specified all of them are sent in the\n request.\n \"\"\"\n # pylint: disable=protected-access\n orig_props = self._properties\n\n # user specified which properties to update: set properties dict\n # to contain only them so that the update request do not update\n # unwanted fields\n if args or kwargs:\n self._properties = dict()\n if '$uri' in orig_props:\n self._properties['$uri'] = orig_props['$uri']\n\n # perform the request\n self._properties.update(*args, **kwargs)\n self.save()\n\n # restore all properties\n if args or kwargs:\n orig_props.update(self._properties)\n self._properties = orig_props\n # patched_update()\n potion_resource.Resource.update = patched_update"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Via the requests library, performs a get with the payload to the path
|
def get(self, payload={}, path=""):
return requests.get(self.base_url + path,
data=json.dumps(payload),
headers=self.headers)
|
[
"def get(path, **kwargs):\n return generic_request('GET', path, **kwargs)",
"def get(self, path, **kwargs):\n return(self._request('GET', path, params=kwargs))",
"def _get(self, url, query_params=None):\n return self._request(\"GET\", url, query_params)",
"def _get(self, url, *args, **kwargs):\n kwargs = self._update_headers(kwargs, {'api-key': self._api_key})\n return requests.get(url, *args, **kwargs)",
"def get(base: str, *path: str):\n url = reduce(urllib.parse.urljoin, path, base)\n with urllib.request.urlopen(url) as response:\n return json.loads(response.read().decode())",
"def test_request_get(self):\n r = self.base._request('/get', 'GET', {\n 'foo': 'bar'\n })\n\n self.assertEqual(r['url'], 'https://httpbin.org/get?foo=bar')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')",
"def request(path='/',\n is_https=IS_HTTPS,\n host=HOST,\n port=PORT):\n if is_https:\n host_part = 'https://%s:%s' % (host, str(port))\n else:\n host_part = 'http://%s:%s' % (host, str(port))\n url = '%s%s' % (host_part, path)\n return SESSION.get(url, timeout=2, allow_redirects=False)",
"def do_get(cs, args):\n\n url = args.url\n\n #translate the endpoint into an actual url\n (endpoint, token) = get_endpoint_and_token(args)\n \n curl_args = ''\n if url:\n curl_args = endpoint + url\n\n curl_args = curl_args + \" -H \\\"X-Auth-Token: \" + token + \"\\\"\"\n\n out = curl(args, curl_args)\n if args.debug:\n print out\n else:\n try:\n parsed = json.loads(out)\n print json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': '))\n except:\n print out",
"def _get(self, path):\n url = self._url(path)\n resp = self.session.get(url)\n dic = resp.json()\n if self.debug:\n print('CODE', resp.status_code)\n pprint(dic)\n return dic",
"def get(self, path):\n self.__init__(self.addr, self.port)\n msg = 'GET ' + path + ' HTTP/1.1\\r\\n\\r\\n'\n self.ss.send(msg)\n self.resp = self.ss.recv(1000)\n self.close()",
"def get(url, requests=20):\n cmd = ['ab',\n '-T', 'application/json',\n '-c', '1',\n '-n', str(requests),\n url]\n run(cmd)",
"def request(req_url, params={}, print_status=False):\n response = requests.get(req_url, params)\n if (print_status): print(\"...response for <{}> was {}...\".format(response.url, response.status_code),flush=True)\n return response",
"def api_call_get(path):\n try:\n return api_interpret(itapi.call(path))\n except ITTechnicalAPIError as e:\n return api_interpret(e.response, e.status)",
"def request(self, method, path, data):\n method = method.upper()\n kwargs = {\"headers\": {\"x-api-token\": self.token}}\n if method == \"GET\":\n kwargs[\"params\"] = data\n else:\n kwargs[\"json\"] = data\n\n req = requests.request(\n method,\n f\"https://api.lokalise.com/api2/projects/{self.project_id}/{path}\",\n **kwargs,\n )\n req.raise_for_status()\n return req.json()",
"def get(self, resource):\n return self.request(method=\"get\", resource=resource)",
"def request(method, url, data=None, json=None, headers={}, stream=None, timeout=None):\n\t...",
"def http_req(self, obj_path, method='GET',\n data=None, with_auth=True):\n full_url = urljoin(self._baseurl, obj_path)\n headers = {'Content-Type': 'application/json;charset=utf-8',\n 'Accept': 'application/json;charset=utf-8',\n 'OData-Version': '4.0'}\n auth = None\n if with_auth:\n if self._token:\n headers['X-Auth-Token'] = self._token\n else:\n auth = self._auth\n req_func = requests.get\n if method == 'POST':\n req_func = requests.post\n elif method == 'DELETE':\n req_func = requests.delete\n elif method == 'PATCH':\n req_func = requests.patch\n # Actually run the request\n res = req_func(full_url, auth=auth, json=data,\n headers=headers, verify=self._verify)\n obj_name = obj_path\n if 'Location' in res.headers:\n obj_name = res.headers['Location']\n auth_token = None\n if 'X-Auth-Token' in res.headers:\n auth_token = res.headers['X-Auth-Token']\n if len(res.text):\n obj_data = res.json()\n return (obj_name, obj_data, auth_token)\n else:\n return None",
"def request(path, method='GET', params=None, debug=False):\n url = '{}/api/v3{}?private_token={}&per_page={}'.format(\n config.gitlab_url, path, config.gitlab_api_token, 1000)\n if debug: print(\"[REQUEST] \" + url)\n return requests.request(method, url, data=json.dumps(params), verify=False).json()",
"def _get(self, url_name=None, url_args=None, url_kwargs=None,\r\n get_kwargs=None, url=None, *args, **kwargs):\r\n url = url or self._url(url_name, url_args, url_kwargs, get_kwargs)\r\n return self.client.get(path=url, *args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Via the requests library, performs a delete with the payload to the path
|
def delete(self, payload={}, path=""):
return requests.delete(self.base_url + path,
data=json.dumps(payload),
headers=self.headers)
|
[
"def delete_request():",
"def _delete(self, subpath: str) -> None:\n self._request('delete', subpath, None, None)",
"def test_request_delete(self):\n r = self.base._request('/delete', 'DELETE', {\n 'foo': 'bar'\n })\n self.assertEqual(r['url'], 'https://httpbin.org/delete?foo=bar')\n self.assertEqual(r['headers']['Client'], 'foo.bar')\n self.assertEqual(r['headers']['Token'], 'foobar')",
"def delete(request):\n if request.method == \"POST\":\n if request.POST['type'] == 'file':\n uri = _download_url_to_ofs_url(request.POST['uri'])\n data = tsc.query_data([\"uri\", \"eq\", uri], limit=1, single=True)\n if data:\n resp = tsc.delete(data.id)\n return HttpResponse(json.dumps(dict(status='ok')),\n content_type=\"application/json\")\n elif request.POST['type'] == 'dir':\n path = _path_from_json(request.POST['path'])\n ddir = _path_dimes(path)\n _delete_dir(ddir)\n return HttpResponse(json.dumps(dict(status='ok')),\n content_type=\"application/json\")\n return HttpResponse(json.dumps(dict(status='failed')),\n content_type=\"application/json\")",
"def do_delete(cs, args):\n\n url = args.url\n\n #translate the endpoint shortcut into an actual url\n (endpoint, token) = get_endpoint_and_token(args)\n\n curl_args = ''\n if url:\n curl_args = endpoint + url\n\n curl_args = curl_args + \" -H \\\"X-Auth-Token: \" + token + \"\\\"\"\n curl_args = curl_args + \" -X DELETE\"\n\n out = curl(args, curl_args)\n if args.debug:\n print out\n else:\n try:\n parsed = json.loads(out)\n print json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': '))\n except:\n print out",
"def _make_delete_call(cls, url: str, body: OptionalJSON = None, params: Params = None,\n error_msg: str = None) -> OptionalJSON:\n\n response = requests.delete(url=url, headers=cls.headers, data=body, params=params)\n return cls._process_response(response, error_msg)",
"def delete(func: Callable, allowed_exceptions: List = None,\n title: str = None, req_obj_type: Callable = None) -> HTTPMethod:\n return HTTPMethod('delete', func, allowed_exceptions=allowed_exceptions,\n title=title, req_obj_type=req_obj_type)",
"def delete(self, url):\n self._request(url, method='DELETE')\n return None",
"def delete(self, uri, auth=None, **kwargs):\n if (auth is None):\n auth = (self.user, self.password)\n return requests.delete(uri, auth=auth, **kwargs)",
"def delete(api_key, url, data, return_formatted=True):\n headers = {}\n if api_key:\n headers[\"x-api-key\"] = api_key\n response = requests.delete(url, headers=headers)\n response.raise_for_status()\n response = response.json()\n if return_formatted:\n print(\"Response\")\n print(\"--------\")\n print(response)\n else:\n return response",
"def test_delete_recipe(client):\n resp = client.delete('/recipe/1')\n assert resp.status_code == server.HTTP_METHOD_NOT_ALLOWED",
"def test_delete_valid(self):\n response = self.app.delete('/api/a/actors/Christopher Lloyd')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.delete('/api/a/movies/Blind Date')\n self.assertEqual(response.status_code, 200)",
"def delete(self, endpoint, **kwargs):\n url = TextItApiClient.get_api_url(endpoint)\n return self.request('delete', url, **kwargs)",
"def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int:\n return self.execute_command(\"JSON.DEL\", key, str(path))",
"def test_delete_without_additional_request_headers(self):\n responses.add(\n responses.DELETE,\n re.compile(r'^https://.*/Case/some-case-id$'),\n body='{}',\n status=http.OK\n )\n\n sf_type = _create_sf_type()\n result = sf_type.delete(record_id='some-case-id')\n\n self.assertEqual(result, http.OK)",
"def delete(self):\n return attachment_service.delete_attachment(get_jwt_identity(), get_uuid(request)), 201",
"def delete(self, request, *args, **kwargs):\n pk = kwargs.get('pk')\n job = Job.objects.get(pk=pk)\n job.delete()\n return Http200()",
"def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n response = {'url': str(success_url)}\n self.object.delete()\n return HttpResponse(json.dumps(response),\n content_type='application/json')",
"def delete(self, request=None, mtype=None, this_thread=False):\n\n return \"OK\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes comments and extra spaces from the file and put it's commands in a list
|
def _clean(self):
    """Strip blank lines and full-line comments from the source file.

    Each surviving line is split on whitespace and the resulting token
    list is appended to the internal VM-command list.
    """
    for raw_line in self.__file.readlines():
        text = raw_line.replace('\n', '')
        # Keep anything that is neither empty nor a full-line '//' comment.
        if text != '' and not text.startswith('//'):
            self.__vm_commands.append(text.split())
|
[
"def initialise(input_file):\n\tlines = [] \t\t\t# stores lines from input_file \n\tcommands = []\t\t# stores commands from \"cleaned\" lines\n\n\twith open(input_file) as f:\n\t\tfor line in f:\n\t\t\tlines.append(line)\n\n\tfor line in lines:\n\t\tif line[0] != \"/\" and line != \"\\n\":\n\t\t\t# ignore whitespace on LHS (necessary for files with tabbing)\n\t\t\tline = line.lstrip()\n\t\t\t# ignore comments on same line in .asm file\n\t\t\tline = line.split()[0]\n\t\t\t# ignore newline chars on RHS of every line\n\t\t\tcommands.append(line.rstrip(\"\\n\"))\n\n\treturn commands",
"def strip_comments(lines: list[str]) -> list[str]:\n global results\n results = []\n for line in lines:\n index = line.find('#')\n if index >= 0:\n modified = line[0:index]\n else:\n modified = line\n modified = modified.strip()\n if len(modified) > 0:\n results.append(modified)\n return results",
"def parse_file(self, pythonpath):\r\n\r\n text = \"\".join(read_batchfile(pythonpath, file_ending='.ev'))\r\n\r\n def replace_insert(match):\r\n \"Map replace entries\"\r\n return \"\\#\\n\".join(self.parse_file(match.group()))\r\n\r\n # insert commands from inserted files\r\n text = re.sub(r\"^\\#INSERT (.*?)\", replace_insert, text, flags=re.MULTILINE)\r\n # get all commands\r\n commands = re.split(r\"^\\#.*?$\", text, flags=re.MULTILINE)\r\n #remove eventual newline at the end of commands\r\n commands = [c.strip('\\r\\n') for c in commands]\r\n commands = [c for c in commands if c]\r\n\r\n return commands",
"def instructions_list(file_name):\r\n with open(file_name, 'r') as f:\r\n lst = f.read().rsplit()\r\n f.close()\r\n return lst",
"def parse_file(f):\n install = []\n remove = []\n for line in f:\n # strip out comments and whitespace:\n line, _, _ = line.partition('#')\n line = line.strip()\n\n if line:\n first = line[0]\n if first == '%':\n continue\n\n if first == '@':\n # Remove any options\n line, _, _ = line.partition('--')\n\n if first == '-':\n # Add the package (without the \"-\") to the remove list. The\n # line (with the \"-\") is still added to the install list.\n package = line[1:].strip()\n remove.append(package)\n else:\n install.append(line)\n return (install, remove)",
"def reloc_commands(placeholder_dir: str,\n build_dir: str,\n scratch_dir: str,\n simulator: str,\n testname: str,\n src: str) -> List[List[str]]:\n ret = []\n with open(src) as src_file:\n for line in src_file:\n line = line.strip()\n if not line:\n continue\n\n ret.append([reloc_word(simulator,\n placeholder_dir, build_dir,\n scratch_dir, testname, w)\n for w in shlex.split(line)])\n return ret",
"def parse_gitignore(fin):\n parsed = []\n if hasattr(fin, 'readlines'):\n lines = fin.readlines()\n elif hasattr(fin, 'splitlines'):\n lines = fin.splitlines()\n else:\n raise TypeError('Expecting a file-like object or a string')\n\n for line in lines:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n\n flags = 0\n if line.startswith('!'):\n flags |= MATCH_INVERSE\n line = line[1:]\n elif line.startswith('\\!'):\n line = line[1:]\n elif line.startswith('\\#'):\n line = line[1:]\n line = re.sub('/+', '/', line)\n if line.startswith('/'):\n line = line[1:]\n if line.endswith('/'):\n flags |= MATCH_END_WITH_DIRECTORY\n line = line[:-1]\n\n pattern = [re.compile(fnmatch.translate(part))\n for part in line.split('/')]\n parsed.append((pattern, flags))\n\n return parsed",
"def _filter_comments(self, conf_file):\n f = open(conf_file)\n conf = ''\n while True:\n c = f.read(1)\n if c == '':\n break\n conf += c\n # If we just appended a commenter:\n if conf.endswith('#'):\n self._skip_until(f, '\\n')\n # Strip the '#' we appended earlier\n conf = conf[:-1]\n elif conf.endswith('//'):\n self._skip_until(f, '\\n')\n # Strip the '//' we appended earlier\n conf = conf[:-2]\n elif conf.endswith('/*'):\n self._skip_until(f, '*/')\n # Strip the '/*' we appended earlier\n conf = conf[:-2]\n f.close()\n return conf",
"def tokenize_comments(ruddit: str) -> List[List[str]]:\n tokenizer = RegexpTokenizer(r'\\w+')\n with open(ruddit, 'r', encoding='utf-8') as cfile:\n\n tokenized_comments = []\n reader = csv.reader(cfile)\n for i, line in enumerate(reader):\n\n # Ignore the column headers\n if (i == 0):\n continue\n\n # Tokenize the lower-cased comment\n word_tokens = tokenizer.tokenize(line[0].lower())\n filtered_sentence = [w for w in word_tokens if w not in stop_words]\n tokenized_comments.append(filtered_sentence)\n\n cfile.close()\n\n return tokenized_comments",
"def _cleanlines(textfile):\n result = []\n with open(textfile, 'r') as f:\n for line in f:\n ix = line.find('#')\n if ix >= 0:\n line = line[:ix]\n line = line.strip()\n if line:\n result.append(line)\n return result",
"def load_file(filename: str) -> list:\n with open(filename) as f:\n raw_instructions = f.readlines()\n instructions = [path.split(',') for path in raw_instructions]\n return instructions",
"def fetch_command_token_lists(command_starting_words, input_filename, indent = None):\n \n tokens = []\n files_encountered = []\n generate_tokens_for_file(files_encountered, tokens, input_filename, indent)\n commands = dict()\n gather_commands(tokens, command_starting_words, commands)\n return commands",
"def read_delimited_file(\n fname: str, delimiter: str = \"\\n\", comment: str = \"#\"\n) -> List[str]:\n with open(fname) as source:\n contents = source.read().strip()\n retval = contents.split(delimiter)\n # Filter out comment entries\n retval = [item for item in retval if not item.startswith(comment)]\n return retval",
"def read_commands(self, filename=None):\n if filename is None:\n filename = self.spec_filename\n self.logger.debug('Reading spec %s' % filename)\n f = open(self.maker.path(filename))\n context = dict(find_links=[],\n src_base=os.path.join(self.project.build_properties['virtualenv_path'], 'src'),\n always_unzip=False)\n commands = []\n uneditable_eggs = []\n # Used to flag state when we are looking for multi-line settings, like:\n # setting = value\n # line 2\n in_setting = False\n for line in f:\n line = line.rstrip()\n if not line or line.strip().startswith('#'):\n continue\n if in_setting:\n if line.strip() != line:\n # Leading whitespace; multi-line setting\n continue\n else:\n in_setting = False\n if self._setting_re.search(line):\n # We just skip settings here\n in_setting = True\n continue\n if line.startswith('-f') or line.startswith('--find-links'):\n if line.startswith('-f'):\n line = line[2:]\n else:\n line = line[len('--find-links'):].lstrip('=')\n context['find_links'].append(line.strip())\n continue\n if line.startswith('--always-unzip') or line.startswith('-Z'):\n context['always_unzip'] = True\n continue\n if line.startswith('-e') or line.startswith('--editable'):\n if uneditable_eggs:\n commands.append((self.install_eggs, uneditable_eggs))\n uneditable_eggs = []\n if line.startswith('-e'):\n line = line[2:]\n else:\n line = line[len('--editable'):].lstrip('=')\n line = line.strip()\n if line.startswith('svn+') and not line.startswith('svn+ssh'):\n line = line[4:]\n commands.append((self.install_editable, line))\n continue\n uneditable_eggs.append(line.strip())\n if uneditable_eggs:\n commands.append((self.install_eggs, uneditable_eggs))\n return context, commands",
"def extract_comments(filename):\n try:\n with open(filename, 'r') as source_file:\n comments = []\n file_content = source_file.read()\n tokens = list(javalang.tokenizer.tokenize(file_content))\n\n prev_line = ''\n prev_comment_text = '-'\n for token in tokens:\n if token.__class__.__name__ == 'Comment':\n comment_text = token.value\n if comment_text.startswith('/*'):\n is_multiline = True\n comment_text = comment_text.replace('/*', '', 1)\n comment_text = comment_text.replace('*/', '', 1)\n end_line = token.position[0]\n start_line = end_line - comment_text.count('\\n')\n else:\n is_multiline = False\n comment_text = token.value.rstrip().replace('//', '', 1)\n end_line = token.position[0] - 1\n start_line = token.position[0] - 1\n\n comment = common.Comment(comment_text, start_line, end_line, is_multiline)\n\n if not is_multiline:\n line_counter = 0\n for line in file_content.splitlines():\n if start_line - 1 == line_counter:\n if re.match(r\"^[ \\t]*//\" + re.escape(comment_text) + r\"[ \\t]*$\", line) and \\\n re.match(r\"^[ \\t]*//\" + re.escape(prev_comment_text) + r\"[ \\t]*$\", prev_line):\n comment = combine_consecutive_comments(comments, comment)\n\n prev_comment_text = comment_text\n prev_line = line\n break\n line_counter += 1\n file_content = remove_comment(file_content, comment_text, is_multiline)\n comments.append(comment)\n tag_comments(comments, file_content, eof_line_number=file_content.count('\\n'))\n return comments\n except OSError as exception:\n raise common.FileError(str(exception))",
"def lex(self):\n #These regexes defines all comments: (//comment\\n).\n #Anything (.*) can go in comment, including nothing (hence * instead of +).\n comment_reg = '//.*\\n'\n #Split prog around non-comment sections, then join to remove comments.\n new_inp = \"\".join(re.split(comment_reg, self.inp))\n #Separate into list called 'items' of strings which will become tokens\n items = re.findall('\\w+|[,+*/(){}\\[\\];-]|[<=>]+|\"[^\\'\\r\\n]*\"', new_inp)\n tokens = TokenList([self.choose_tok(x) for x in items])\n tokens.ls.append(Token(EOF, \"eof\")) #no end-of-file in string input\n return tokens",
"def get_comments(filename):\n with open(filename, 'rbU') as fh: \n commented = (line for line in fh if line.startswith('#'))\n return commented",
"def get_keywords(keywords_file):\n kw_file = open(keywords_file)\n keywords = []\n for line in kw_file:\n # \"#\" are supposed to be comments on txt file\n if line[0] != '#':\n keywords.append(line[:-1])\n print(keywords)\n return keywords",
"def splitLine(line: str) -> list:\n splitter = shlex.shlex(line)\n splitter.commenters = ';'\n splitter.quotes = '\"'\n splitter.whitespace_split = True\n return list(splitter)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create thumbnails for company logo.
|
def create_company_logo_thumbnails(company_id):
    """Generate the ``company_logos`` thumbnail set for one company.

    company_id: primary key of the ``Company`` row whose ``logo`` image
    attribute is processed by the shared ``create_thumbnails`` helper.
    """
    create_thumbnails(
        pk=company_id,
        model=Company,
        size_set="company_logos",
        image_attr="logo",
    )
|
[
"def logo():",
"def test_create_thumbnail(self):\r\n\r\n raise SkipTest\r\n\r\n if not os.path.isdir(TEST_OUT):\r\n os.makedirs(TEST_OUT)\r\n\r\n input_raster_uri = os.path.join(REGRESSION_DATA, 'png_reg_raster.png')\r\n output_uri = os.path.join(TEST_OUT, 'png_thumbnail.png')\r\n reg_uri = os.path.join(REGRESSION_DATA, 'png_thumbnail_reg.png')\r\n\r\n size = (256, 256)\r\n\r\n style.create_thumbnail(input_raster_uri, output_uri, size)\r\n\r\n self.assertFiles(output_uri, reg_uri)",
"def generate_thumbnail(cls):\n from anima.dcc.mayaEnv import auxiliary\n\n # reload(auxiliary)\n result = auxiliary.generate_thumbnail()\n if result:\n pm.informBox(\"Done!\", \"Thumbnail generated successfully!\")\n else:\n pm.informBox(\"Fail!\", \"Thumbnail generation was unsuccessful!\")",
"def logos():\n for n in range(12):\n yield \"eh_logo_%d.tiff\" % (n + 1)",
"def create_photos_web_images(self,kwargs):\n\n files = json.load(kwargs['mappings'])\n print('[+] Processing {} files'.format(len(files)))\n\n names = cfg.WEB_IMAGE_NAMES\n sizes = cfg.WEB_IMAGE_SIZES\n\n # resize image\n for f in tqdm(files[:1]):\n # construct source filepath\n sha256 = f['sha256']\n ext = f['ext']\n sha256_tree = fiox.sha256_tree(sha256)\n fp_src = join(kwargs['input'],sha256_tree,'{}{}'.format(sha256,ext))\n try:\n im = cv.imread(fp_src)\n except:\n print('[-] Could not load: {}'.format(fp_src))\n continue\n if im is None or im.shape[0] == 0:\n print('[-] Bad file: {}'.format(fp_src))\n continue\n\n # make all sizes\n for abbr,w in zip(names,sizes):\n fp_dir_out = join(kwargs['output'],sha256_tree,sha256,abbr)\n fiox.ensure_dir(fp_dir_out)\n fp_im = join(fp_dir_out,'index.jpg')\n im_pil = imx.ensure_pil(im,bgr2rgb=True)\n w_orig,h_orig = im_pil.size\n h = int((w / w_orig) * h_orig)\n im_pil = im_pil.resize((w,h), Image.ANTIALIAS)\n #im_pil.save(fp_im, 'PNG', quality=)\n im_pil.save(fp_im, 'JPEG', quality=kwargs['quality'])",
"def get_thumbnail_200(self, object):\n format = upper(imghdr.what('media/' + object.image.name))\n new_height = 200\n image = Image.open('media/' + object.image.name)\n width, height = image.size\n new_width = int(new_height * width / height)\n im = get_thumbnail(object, f'{new_width}x{new_height}', crop='center', quality=99, format=format)\n return \"http://127.0.0.1:8000\" + im.url",
"def generateThumbnail(self, imagePath, imagePrefix, extension, posterFrame=1001): #, posterFrame=os.environ['STARTFRAME']):\r\n\t\tinPrefix = os.path.join(imagePath, imagePrefix)\r\n\t\toutPrefix = os.path.join(imagePath, '.icThumbs', imagePrefix)\r\n\t\toutFile = '%s.%s.jpg' % (outPrefix, posterFrame)\r\n\t\t# if not os.path.isfile(outFile):\r\n\t\t# \tprint(\"Generating thumbnails...\")\r\n\t\t# \tos_wrapper.createDir(os.path.join(imagePath, '.icThumbs'))\r\n\t\t# \tdjvOps.prcImg(inPrefix, outPrefix, posterFrame, posterFrame, extension.split('.', 1)[1], outExt='jpg', resize=(512,288))\r\n\t\treturn outFile",
"def get_thumbnail_400(self, object):\n format = upper(imghdr.what('media/' + object.image.name))\n new_height = 400\n image = Image.open('media/' + object.image.name)\n width, height = image.size\n new_width = int(new_height * width / height)\n im = get_thumbnail(object, f'{new_width}x{new_height}', crop='center', quality=99, format=format)\n return \"http://127.0.0.1:8000\" + im.url",
"def generate_image(self) -> None:",
"def logo(**style_props):\n return pc.image(\n src=styles.LOGO_URL,\n **style_props,\n )",
"def _create_thumbnail(red_file, green_file, blue_file, output_path,\n x_constraint=None, nodata=-999, work_dir=None, overwrite=True):\n nodata = int(nodata)\n\n # GDAL calls need absolute paths.\n thumbnail_path = pathlib.Path(output_path).absolute()\n\n if thumbnail_path.exists() and not overwrite:\n _LOG.warning('File already exists. Skipping creation of %s', thumbnail_path)\n return None, None, None\n\n # thumbnail_image = os.path.abspath(thumbnail_image)\n\n out_directory = str(thumbnail_path.parent)\n work_dir = os.path.abspath(work_dir) if work_dir else tempfile.mkdtemp(prefix='.thumb-tmp', dir=out_directory)\n try:\n # working files\n file_to = os.path.join(work_dir, 'rgb.vrt')\n warp_to_file = os.path.join(work_dir, 'rgb-warped.vrt')\n outtif = os.path.join(work_dir, 'thumbnail.tif')\n\n # Build the RGB Virtual Raster at full resolution\n run_command(\n [\n \"gdalbuildvrt\",\n \"-overwrite\", \"-separate\",\n file_to,\n str(red_file), str(green_file), str(blue_file)\n ],\n work_dir\n )\n assert os.path.exists(file_to), \"VRT must exist\"\n\n # Determine the pixel scaling to get the correct width thumbnail\n vrt = gdal.Open(file_to)\n intransform = vrt.GetGeoTransform()\n inpixelx = intransform[1]\n # inpixely = intransform[5]\n inrows = vrt.RasterYSize\n incols = vrt.RasterXSize\n\n # If a specific resolution is asked for.\n if x_constraint:\n outresx = inpixelx * incols / x_constraint\n _LOG.info('Input pixel res %r, output pixel res %r', inpixelx, outresx)\n\n outrows = int(math.ceil((float(inrows) / float(incols)) * x_constraint))\n\n run_command([\n \"gdalwarp\",\n \"--config\", \"GDAL_CACHEMAX\", str(GDAL_CACHE_MAX_MB),\n \"-of\", \"VRT\",\n \"-tr\", str(outresx), str(outresx),\n \"-r\", \"near\",\n \"-overwrite\", file_to,\n warp_to_file\n ], work_dir)\n else:\n # Otherwise use a full resolution browse image.\n outrows = inrows\n x_constraint = incols\n warp_to_file = file_to\n outresx = inpixelx\n\n _LOG.debug('Current GDAL cache max %rMB. 
Setting to %rMB', gdal.GetCacheMax() / 1024 / 1024, GDAL_CACHE_MAX_MB)\n gdal.SetCacheMax(GDAL_CACHE_MAX_MB * 1024 * 1024)\n\n # Open VRT file to array\n vrt = gdal.Open(warp_to_file)\n driver = gdal.GetDriverByName(\"GTiff\")\n outdataset = driver.Create(outtif, x_constraint, outrows, 3, gdalconst.GDT_Byte)\n\n # Loop through bands and apply Scale and Offset\n for band_number in (1, 2, 3):\n band = vrt.GetRasterBand(band_number)\n\n scale, offset = _calculate_scale_offset(nodata, band)\n\n # Apply gain and offset\n outdataset.GetRasterBand(band_number).WriteArray(\n (numpy.ma.masked_less_equal(band.ReadAsArray(), nodata) * scale) + offset\n )\n _LOG.debug('Scale %r, offset %r', scale, offset)\n\n # Must close datasets to flush to disk.\n # noinspection PyUnusedLocal\n outdataset = None\n # noinspection PyUnusedLocal\n vrt = None\n\n # GDAL Create doesn't support JPEG so we need to make a copy of the GeoTIFF\n run_command(\n [\n \"gdal_translate\",\n \"--config\", \"GDAL_CACHEMAX\", str(GDAL_CACHE_MAX_MB),\n \"-of\", \"JPEG\",\n outtif,\n str(thumbnail_path)\n ],\n work_dir)\n\n _LOG.debug('Cleaning work files')\n finally:\n # Clean up work files\n if os.path.exists(work_dir):\n shutil.rmtree(work_dir)\n\n # Newer versions of GDAL create aux files due to the histogram. Clean them up.\n for f in (red_file, blue_file, green_file):\n f = pathlib.Path(f)\n aux_file = f.with_name(f.name + '.aux.xml')\n if aux_file.exists():\n _LOG.info('Cleaning aux: %s', aux_file)\n os.remove(str(aux_file.absolute()))\n\n return x_constraint, outrows, outresx",
"def make_thumbnail(src, dest):\n im = Image.open(src)\n im = im.crop((0, 0, 1024, 768))\n im.thumbnail((400, 300))\n im.save(dest)\n subprocess.call(['optipng', dest])",
"def createWebAlbum(self, logger = lambda name: 0):\n if os.path.exists(self.destfolder):\n showerror(title=\"GWTPhotoAlbumCreator.py - error!\",\n message=\"Directory:\\n%s\\n\"%self.destfolder+\n \"already exists!\\nPlease chose a different \"+\n \"output directory name.\")\n return False \n save_info = createGWTPhotoAlbum.info\n info = {}; info.update(save_info)\n \n info[\"title\"] = self.title\n info[\"subtitle\"] = self.subtitle\n info[\"image clickable\"] = \"true\"\n if self.gallery: info[\"presentation type\"] = \"gallery\"\n else: info[\"presentation type\"] = \"slideshow\"\n info[\"disable scrolling\"] = \"true\"\n if self.fullscreen:\n if self.filmstrip:\n info[\"layout type\"] = \"fullscreen\"\n info[\"layout data\"] = \"IOF\"\n else:\n info[\"layout type\"] = \"fullscreen\"\n info[\"layout data\"] = \"CIP\"\n else:\n if self.filmstrip:\n info[\"layout type\"] = \"tiled\"\n info[\"layout data\"] = \"ICF\"\n else:\n info[\"layout type\"] = \"tiled\"\n info[\"layout data\"] = \"ICP\"\n \n if self.overblend:\n info[\"image fading\"] = \"1000\"\n else:\n info[\"image fading\"] = \"-750\"\n \n filelist = [self.files[entry] for entry in self.entries]\n sizes = list(self.resolutions)\n sizes.sort()\n sizes.insert(0, THUMBNAIL)\n if self.originals:\n createGWTPhotoAlbum.create_picture_archive = True\n #createGWTPhotoAlbum.archive_quality = 80\n if self.bottomline == \"\":\n self.bottomline = '<a href=\"pictures.zip\">download all pictures</a>'\n #sizes.append(FULLSIZE)\n else:\n createGWTPhotoAlbum.create_picture_archive = False \n info[\"bottom line\"] = self.bottomline\n \n #for key in self.captions:\n # self.captions[key] = re.sub(\"\\\\\\n\", \"<br />\", self.captions[key])\n \n createGWTPhotoAlbum.info.update(info)\n logger(\"creating directory: \"+ self.destfolder)\n remove_old_directories(self.destfolder)\n create_directories(self.destfolder)\n createGWTPhotoAlbum.quick_scaling = False\n logger(\"assembling images...\") \n assemble(filelist, 
self.destfolder, sizes, self.compression[0], \n self.compression[1], self.captions, logger)\n logger(\"deploying AJAX scripts in: \"+ self.destfolder) \n deploy(createGWTPhotoAlbum.deploy_pack, self.destfolder, \n self.addjsontohtml)\n if self.createhtml:\n logger(\"creating static html pages for browsers without javascript.\")\n create_noscript_html(filelist, self.destfolder, sizes)\n create_index_page(self.destfolder, self.createhtml, self.addjsontohtml)\n createGWTPhotoAlbum.info.update(save_info)\n return True",
"def create_thumbnails(self):\n \n initial = log_time(\"create_thumbnails\")\n sizes = sorted(self.sizes.items(), key=lambda x: x[1][0], reverse=True)\n last = None\n \n for name, dimension in sizes:\n self.create_thumbnail(name, last=last)\n last = name\n assert self.thumbnail_exists(name)\n\n log_time(\"end create_thumbnails\", initial=initial)",
"def thumbnail(self):\n MAX_THUMB_LENGTH = 200\n max_img_length = max(self.get_image_width(), self.get_image_height())\n ratio = max_img_length > MAX_THUMB_LENGTH and float(max_img_length) / MAX_THUMB_LENGTH or 1\n thumb_width = self.get_image_width() / ratio\n thumb_height = self.get_image_height() / ratio\n return '<img src=\"%s\" width=\"%s\" height=\"%s\"/>' % (self.image, thumb_width, thumb_height)",
"def create_half_res_images(self) -> None:\n\n utils.create_if_not_exists(self.half_res_img_path)\n pngs = utils.sorted_glob(self.img_pngs_glob)\n\n for orig_path in pngs:\n img = cv2.imread(orig_path)\n img_half = im_helpers.resize_percent(img, 50)\n cv2.imwrite(f'{self.half_res_img_path}/{os.path.basename(orig_path)}', img_half)",
"def apply_thumbnail(self, size = None, kind = None, **kwargs ):\n self._clone = False\n self._is_derived = True\n self._is_thumbnail = True\n self._is_base = False\n if size is None or (size[0] is None and size[1] is None):\n size = self._configuration.thumbnail_default_size\n # x_size = self._configuration.thumbnail_default_xsize\n # y_size = self._configuration.thumbnail_default_ysize\n # else:\n x_size = size[0]\n y_size = size[1]\n \n if x_size is None:\n x_size = y_size\n\n if y_size is None:\n y_size = x_size\n\n if kind is None:\n kind = self._configuration.thumbnail_default_format\n self._size = (x_size, y_size)\n self._image_kind = kind\n\n self._equalise = self._configuration.thumbnail_equalise\n self._liquid = self._configuration.thumbnail_liquid_resize\n self._sharpen = self._configuration.thumbnail_sharpen\n\n # Allow overriding of the config defaults\n if \"equalise\" in kwargs:\n self._equalise = kwargs[\"equalise\"]\n\n if \"sharpen\" in kwargs:\n self._sharpen = kwargs[\"sharpen\"]\n\n if \"liquid\" in kwargs:\n self._liquid = kwargs[\"liquid\"]\n\n #Place simple one letter codes in the name\n encoded_options = \"\"\n if self._equalise:\n encoded_options += \"e\"\n if self._liquid:\n encoded_options += \"l\"\n if self._sharpen:\n encoded_options += \"s\"\n self._operations.append(\"thumbnail({},{},{})\".format(x_size, y_size, encoded_options))\n\n self._image_name = str(self)",
"def create_logo(self, seqs=[]):\n # seperate headers\n headers, instances = [list(x) for x in zip(*seqs)]\n\n if self.options.sequence_type is 'rna':\n alphabet = Alphabet('ACGU')\n elif self.options.sequence_type is 'protein':\n alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY')\n else:\n alphabet = Alphabet('ACGT')\n motif_corebio = SeqList(alist=instances, alphabet=alphabet)\n data = wbl.LogoData().from_seqs(motif_corebio)\n\n format = wbl.LogoFormat(data, self.options)\n\n if self.output_format == 'png':\n return wbl.png_formatter(data, format)\n elif self.output_format == 'png_print':\n return wbl.png_print_formatter(data, format)\n elif self.output_format == 'jpeg':\n return wbl.jpeg_formatter(data, format)\n else:\n return wbl.eps_formatter(data, format)",
"def create_thumbs(self):\n for m in Movie.query.filter(Movie.thumb == False).all():\n tname = m.hash_id + THUMB_EXT\n tname_full = os.path.join(THUMB_DIR, tname)\n p = subprocess.Popen(get_thumb_cmd(m.location, tname_full), \\\n stdout=subprocess.PIPE).communicate()\n if os.path.isfile(tname_full):\n if os.path.getsize(tname_full) > 10000:\n m.thumb = True\n else:\n os.remove(tname_full)\n database.session.commit()\n return 'Created thumbnails'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
connects to broker and provides convenience methods
|
def __init__(self, hostname, port, vhost, userid, password):
    """Open a connection to the message broker.

    hostname/port: address of the broker; vhost: virtual host to attach
    to; userid/password: credentials. The live connection is kept on
    ``self.broker`` for the convenience methods to use.
    """
    self.broker = BrokerConnection(
        hostname=hostname,
        port=port,
        userid=userid,
        password=password,
        virtual_host=vhost,
    )
|
[
"def __init__(self, broker):\n\n self.broker = broker",
"def connect_mqtt(self):\n\n\t\tdef on_connect(client, userdata, flags, rc):\n\t\t\t\"\"\"\n\t\t\tThis method is the callback for a connection try.\n\t\t\t:param client: the client\n\t\t\t:param userdata: the submitted userdata\n\t\t\t:param flags: the submitted connection flags\n\t\t\t:param rc: the response code\n\t\t\t\"\"\"\n\t\t\tif rc == 0:\n\t\t\t\tprint(f\"[{self.game}]: Connected to MQTT Broker!\")\n\t\t\telse:\n\t\t\t\tprint(f\"[{self.game}]: Failed to connect, return code %d\\n\", rc)\n\n\t\tclient = mqtt_client.Client(self.client_id)\n\t\tclient.on_connect = on_connect\n\t\tclient.connect(self.broker, self.port)\n\t\treturn client",
"def connect(username, pwd, brokerName):\n global channel, _parameters\n try:\n _parameters = pika.URLParameters(\"amqps://{}:{}@{}/space-maker-vhost?heartbeat_interval=30&socket_timeout=1\".format(username, pwd, brokerName))\n connection = pika.BlockingConnection(_parameters)\n channel = connection.channel()\n return True\n except:\n logger.exception(\"broker connection failure\")\n return False",
"def make_celery_conn(backend='pyampq', broker='pyampq'):\n raise NotImplementedError",
"def __init__(self, hostname=\"127.0.0.1\", userid=\"guest\", password=\"guest\",\n virtual_host=\"/\", port=5672):\n\n self.connection = BrokerConnection(hostname=hostname, \n userid=userid, password=password, \n virtual_host=virtual_host, port=port,\n insist=False, ssl=False)\n self.channel = self.connection.channel()\n self.exchange = Exchange(name=self.name, type=\"topic\", durable=True,\n channel=self.channel)\n self.queue = Queue(self.name, exchange=self.exchange,\n routing_key=self.routing_key)\n self.queue = self.queue(self.channel)\n self.queue.declare()\n self.queue.consume(consumer_tag=\"\", callback=self.callback, no_ack=True)\n self.connection.connect()\n return",
"def connect(self):\r\n self.log('Connecting to Interactive Brokers TWS...')\r\n try:\r\n if USING_NOTEBOOK:\r\n util.startLoop()\r\n ib = IB()\r\n ib.connect('127.0.0.1', 7497, clientId=1)\r\n self.log('Connected') \r\n self.log()\r\n return ib\r\n except:\r\n self.log('Error in connecting to TWS!! Exiting...')\r\n self.log(sys.exc_info()[0])\r\n exit(-1)",
"def connect_mqtt():\n # Connect to the MQTT client\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def __connect_with_credentials(self):\n\t\tself.client_.username_pw_set(\"xgvutxaa\", \"9cMIpVoL4Ujj\")\n\t\tself.client_.connect('spectacular-pharmacist.cloudmqtt.com',1883,3600)",
"def connect_and_subscribe():\n global client\n client = MQTTClient(machine_id, broker)\n client.set_callback(mqtt_callback)\n client.connect()\n print(\"Connected to {}\".format(broker))\n for topic in (b'config', b'set'):\n t = topic_name(topic)\n client.subscribe(t)\n print(\"Subscribed to {}\".format(t))",
"def run(self):\n if self._username and self._password:\n self.username_pw_set(self._username, self._password)\n self.connect_async(self._mqtt_ip, self._mqtt_port)\n self.loop_start()",
"def connect(mqtt_conf):\n logger.info(\"Creating MQTT client.\")\n client = mqtt.Client()\n client.on_publish = on_publish\n\n username = mqtt_conf.get('USERNAME', '')\n password = mqtt_conf.get('PASSWORD', '')\n\n client.username_pw_set(username, password)\n logger.info(\"Connecting to MQTT server\")\n\n host = mqtt_conf.get('HOST', 'localhost')\n port = mqtt_conf.get('PORT', 1883)\n client.connect(host, port)\n return client",
"def __init__(self, delegate=None):\n self.client = mqtt.Client()\n self.delegate = delegate\n self.subscription_topic_name = None\n self.publish_topic_name = None\n self.rose_broker = \"mosquitto.csse.rose-hulman.edu\"",
"async def connect_to_amqp(self):\n self._status = StatusCodes.CONNECTING\n rabbitmq_config = await self.get_rabbitmq_configuration()\n await self.set_queues_prefix_iostreams()\n self.amqp_connection = AMQPConnection(\n **rabbitmq_config, ioloop=self.loop, on_error_callback=self.on_error_callback\n )\n await self.amqp_connection.connect(rabbitmq_config[\"connection_timeout\"])",
"def initialize(self):\n self.logger.info(\"Initializing connection to the MQTT broker.\")\n self.client = AWSIoTMQTTClient(self.client_id)\n self.client.configureEndpoint(self.endpoint, portNumber=8883)\n self.client.configureCredentials(CAFilePath=self.root_ca, KeyPath=self.private_key,\n CertificatePath=self.client_certificate)\n self.client.configureConnectDisconnectTimeout(self.conn_disconnect_timeout)\n self.client.configureMQTTOperationTimeout(self.mqtt_oper_timeout)\n if self.client.connect():\n self.logger.info(\"Connected!\")",
"def __init_mqtt(self):\n\n def on_connect(client, userdata, flags, rc):\n \"\"\"Callback for when the connection is established with the mqtt broker\"\"\"\n try:\n logging.info('MQTT Paho Connected with result code ' + str(rc))\n self.flag_connected = True\n logging.info('Subscribing to invoke topic')\n client.subscribe(self.invoke_topic)\n client.subscribe(self.cloud_to_device_topic)\n\n\n except Exception as e:\n logging.warning(\"on_connect with result error %s\" % e)\n\n def on_message(client, userdata, msg):\n \"\"\"Callback for when a message is received by client\"\"\"\n logging.info('MQTT message arrived')\n print('MQTT message arrived')\n logging.debug('topic %s' % msg.topic)\n print('topic %s' % msg.topic)\n logging.debug('payload %s' % msg.payload)\n print('payload %s' % msg.payload)\n self.handle_mqtt_messages(msg.topic, msg.payload)\n\n def on_disconnect(client, userdata, rc):\n \"\"\"Callback for when the connection is lost\"\"\"\n self.flag_connected = False\n logging.info('MQTT Disconnected!!')\n\n self.paho_client_mqtt = mqtt.Client(client_id=self.device_id, protocol=self.broker_mqtt_protocol)\n self.paho_client_mqtt.on_connect = on_connect\n self.paho_client_mqtt.on_message = on_message\n self.paho_client_mqtt.on_disconnect = on_disconnect\n self.paho_client_mqtt.username_pw_set(username=self.username)\n self.paho_client_mqtt.tls_set(ca_certs=self.broker_mqtt_CACert,\n certfile=self.device_cert,\n keyfile=self.device_key,\n cert_reqs=ssl.CERT_REQUIRED,\n tls_version=ssl.PROTOCOL_TLSv1_2,\n ciphers=None)\n self.paho_client_mqtt.tls_insecure_set(True)",
"def connect(self):\n self._mqtt = IoTMQTT(self, self._wifi_manager, self._hostname, self._device_id, self._shared_access_key, self._token_expires, self._logger)\n self._mqtt.connect()",
"def configure_client():\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n client.will_set('status/mqttc', payload=\"disconnected\", qos=1, retain=True)\n print('connecting')\n client.connect('broker', 1883, 60)",
"def __init__(self):\n global mq_broker_url\n global params\n self.amqp_url = mq_broker_url\n self._connection = pika.BlockingConnection(params)\n self._channel = self.connection.channel()\n self._knownQueues = []\n self._knownExchanges = []",
"def connect_and_subscribe(sub_callback=None):\n with open(\"credentials.json\", \"r\") as f:\n credentials = ujson.load(f)\n \n try:\n from umqtt.robust import MQTTClient\n except ImportError as e:\n import upip\n upip.install('micropython-umqtt.simple')\n upip.install('micropython-umqtt.robust')\n from umqtt.robust import MQTTClient\n \n # Set Options for MQTT-Broker\n client = MQTTClient(ubinascii.hexlify(machine.unique_id()), credentials[\"mqtt\"][\"host\"], credentials[\"mqtt\"][\"port\"])\n # Set callback to handle Messages\n if sub_callback is not None:\n client.set_callback(sub_callback)\n # Connect\n client.connect(clean_session=False)\n for topic in credentials[\"mqtt\"][\"topics\"]:\n client.subscribe(topic)\n time.sleep(3)\n client.check_msg()\n return client"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Declares the exchange and the queue, and binds the queue to the exchange. exchange: exchange name; exchange_type: direct, topic, or fanout; binding: binding to queue (optional); queue: queue to bind to the exchange using binding (optional).
|
def declare(self, exchange, exchange_type, binding="", queue=""):
    """Declare the exchange and the queue, binding the queue to the
    exchange when a binding key is supplied.

    exchange      -- exchange name
    exchange_type -- "direct", "topic" or "fanout"
    binding       -- binding key for the queue (optional)
    queue         -- queue to bind to the exchange (optional)
    """
    # Exactly one of binding/queue was supplied.  A queue without a binding
    # is tolerated only on a fanout exchange (which ignores routing keys).
    # NOTE(review): the error wording reads oddly for this condition -- it
    # fires when the two are NOT given together; confirm intended message.
    if bool(binding) != bool(queue):
        if queue and exchange_type != "fanout":
            raise Error("binding and queue are not mutually exclusive")
    declarer = Consumer(connection=self.broker,
                        exchange=exchange, exchange_type=exchange_type,
                        routing_key=binding, queue=queue)
    declarer.declare()
    declarer.close()
|
[
"def declare_exchange_to_queue_binding(self, exchange: str, queue: str, *, routing_key: str = \"\",\n arguments: dict = None,\n vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/bindings/{vhost}/e/{exchange}/q/{queue}\", vhost=vhost, exchange=exchange,\n queue=queue)\n data = dict(routing_key=routing_key, arguments=arguments or {})\n return self.request('post', endpoint, data=data)",
"def bind(self, exchange, routing_key=None, arguments=None, nowait=False):\n if isinstance(exchange, Exchange):\n exchange = exchange.name\n\n return self.channel.queue_bind(\n queue=self.name,\n exchange=exchange,\n routing_key=routing_key or '',\n arguments=arguments,\n nowait=nowait\n )",
"def get_exchange_to_queue_binding(self, exchange: str, queue: str, props: str, *, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/bindings/{vhost}/e/{exchange}/q/{queue}/{props}\",\n vhost=vhost, exchange=exchange, queue=queue, props=props)\n return self.request('get', endpoint)",
"def __init__(self, broker_vhost='/', queue_name=None,\n exchange_name=None, queue_type='direct', routing_key=None):\n\n self._msgs = []\n self._broker_timeout = BROKER_CONNECT_TIMEOUT\n self._queue_name = queue_name\n self._exchange_name = exchange_name if exchange_name is not None \\\n else BROKER_DESTINATION\n self._routing_key = routing_key if routing_key is not None else \\\n BROKER_DESTINATION\n self._queue_type = queue_type\n\n self.broker = 'amqp://{}/{}'.format(\n BROKER_URL,\n broker_vhost,\n )",
"def bind(self, queue, routing_key=None, arguments=None, nowait=False):\n if isinstance(queue, Queue):\n queue = queue.name\n\n # [to update: return object?]\n return self.channel.queue_bind(\n queue=queue,\n exchange=self.name,\n routing_key=routing_key or '',\n arguments=arguments,\n nowait=nowait\n )",
"def __init__(self, amqp_url, queue, routing_key, exchange='pika', exchange_type='topic'):\n\n self.exchange = exchange\n self.exchange_type = exchange_type\n self.queue = queue\n self.routing_key = routing_key\n self._connection = None\n self._channel = None\n self._closing = False\n self._consumer_tag = None\n self._url = amqp_url",
"def setup_exchange(self, exchange_name):\n logger.debug('Declaring exchange %s', exchange_name)\n self._channel.exchange_declare(\n exchange_name, callback=self.on_exchange_declareok,\n exchange_type=self.exchange_type, durable=True)",
"def delete_exchange_to_queue_binding(self, exchange: str, queue: str, props: str, *, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/bindings/{vhost}/e/{exchange}/q/{queue}/{props}\",\n vhost=vhost, exchange=exchange, queue=queue, props=props)\n return self.request('delete', endpoint)",
"def __on_queue_declareok(self, _):\n LOGGER.info('Binding %s to %s with %s',\n self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.__on_bindok,\n self.queue,\n self.exchange,\n self.routing_key)",
"def setup_exchange(self, exchange_name, durable=False):\n # Note: using functools.partial is not required, it is demonstrating\n # how arbitrary data can be passed to the callback when it is called\n cb = functools.partial(\n self.on_exchange_declareok, userdata=exchange_name)\n self._channel.exchange_declare(\n exchange=exchange_name,\n exchange_type=self.EXCHANGE_TYPE,\n durable=durable,\n callback=cb)",
"def create_queue(self, queue: Queue, address: Address, durable: bool = True):",
"def keystone_amq_rabbitmq(self):\n\n #Khai báo kết nối\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.rpc_host,\n credentials=pika.PlainCredentials(\n username=self.rpc_user,\n password=self.rpc_pass)))\n #Khai báo channel\n channel = connection.channel()\n #Khai báo queue, tạo nếu cần thiết,\n # exclusive chỉ cho phép thao tác bởi phiên hiện tại\n result = channel.queue_declare(exclusive=True)\n #Lấy tên của queue vừa tạo\n queue_name = result.method.queue\n\n # exchange name should be made available as option, maybe advanced\n try:\n #Khởi tạo một exchange mới, nếu tồn tại kiểm tra xem exchange đó có được tạo đúng hay không.\n #http://pika.readthedocs.org/en/latest/modules/channel.html\n channel.exchange_declare(exchange='keystone', type='topic')\n #tạo queue trong một exchange cụ thể\n channel.queue_bind(exchange='keystone', queue=queue_name, routing_key='notifications.info')\n #consumer_callback (method)–The method to callback when consuming with the signature consumer_callback\n # (channel, method, properties,body), where\n # channel: pika.Channel method: pika.spec.Basic.Deliver properties: pika.spec.BasicProperties body: str, unicode, or bytes (python 3.x)\n channel.basic_consume(self.keystone_callback_rabbitmq, queue=queue_name, no_ack=True)\n channel.start_consuming()\n except Exception,e:\n print e",
"def bind_queues(self):\n\n for name, queue in self.queues.items():\n self.queues[name] = queue(self.channel)\n self.queues[name].declare()",
"def create_queue(self, queue: Queue, address: Address, durable: bool=True):\n pass",
"def __on_exchange_declareok(self, _):\n LOGGER.info('Exchange declared')\n LOGGER.info('Declaring queue %s', self.queue)\n self._channel.queue_declare(self.__on_queue_declareok, self.queue)",
"def __setup_kombu_queue(self, config):\n configs = config[u'config']\n for item in configs:\n if item[u'group'] == u'queue':\n value = item[u'value']\n queue = value[u'queue']\n uri = value[u'uri']\n manager = RedisManager(uri)\n manager.server.set(u'_kombu.binding.%s' % queue, value)",
"def setup_queue(self, queue_name):\n logger.debug('Declaring queue %s', queue_name)\n self._channel.queue_declare(queue_name, callback=self.on_queue_declareok,\n durable=self.queue_durable, exclusive=self.queue_exclusive,\n auto_delete=self.queue_auto_delete)",
"def on_queue_declareok(self, method_frame):\n logger.debug('Binding %s to %s with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.queue, self.exchange,\n routing_key=self.routing_key, callback=self.on_bindok)",
"def __init__(self):\n global mq_broker_url\n global params\n self.amqp_url = mq_broker_url\n self._connection = pika.BlockingConnection(params)\n self._channel = self.connection.channel()\n self._knownQueues = []\n self._knownExchanges = []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
publish a message to exchange using routing_key exchange name of exchange routing_key interpretation of routing key depends on exchange type message message content to send
|
def publish(self, exchange, routing_key, message,
            auto_declare=False, persistent=True):
    """Publish a message to an exchange using a routing key.

    exchange     -- name of the exchange
    routing_key  -- interpretation depends on the exchange type
    message      -- message content to send
    auto_declare -- declare the exchange before publishing
    persistent   -- mark the message as persistent (survives broker restart)
    """
    # AMQP delivery modes: 2 = persistent, 1 = transient.
    delivery_mode = 2 if persistent else 1
    sender = Publisher(connection=self.broker,
                       exchange=exchange, routing_key=routing_key,
                       auto_declare=auto_declare)
    sender.send(message, delivery_mode=delivery_mode)
    sender.close()
|
[
"def publish_message(self, exchange: str, *, routing_key: str = \"\", payload: str = \"\",\n payload_encoding: str = \"string\",\n properties: dict = None, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/exchanges/{vhost}/{exchange}/publish\", vhost=vhost, exchange=exchange)\n data = dict(\n routing_key=routing_key,\n payload=payload,\n payload_encoding=payload_encoding,\n properties=properties or {},\n )\n return self.request('post', endpoint, data=data)",
"def _post(self, routing_key: str, message: dict) -> None:\n if not self.is_connected:\n self.connect()\n logger.debug(\n f\"Attempt publish: \"\n f\"exchange={self.publish_exchange}, \"\n f\"routing_key={routing_key}, \"\n f\"body={json.dumps(message)}, \"\n f\"properties={self.publish_message_properties}, \"\n f\"mandatory=True\"\n )\n self.channel.basic_publish(\n exchange=self.publish_exchange,\n routing_key=routing_key,\n body=json.dumps(message),\n properties=self.publish_message_properties,\n mandatory=True,\n )",
"def publish_message(self, message, queue):",
"def mq_send(channel, exchange, routing_key, message):\n return channel.basic_publish(exchange, routing_key, message,\n pika.BasicProperties(content_type='text/plain',\n delivery_mode=1))",
"def publish_message(self,\n exchange,\n routing_key,\n properties,\n body,\n no_serialization=False,\n no_encoding=False,\n channel=None,\n connection=None):\n # Auto-serialize the content if needed\n is_string = (isinstance(body, str) or isinstance(body, bytes)\n or isinstance(body, unicode))\n if not no_serialization and not is_string and \\\n properties.get('content_type'):\n body = self._serialize(\n body, headers.parse_content_type(properties['content_type']))\n\n # Auto-encode the message body if needed\n if not no_encoding and \\\n properties.get('content_encoding') in self._CODEC_MAP.keys():\n body = self._compress(\n body, self._CODEC_MAP[properties['content_encoding']])\n\n return super(SmartConsumer, self).publish_message(\n exchange, routing_key, properties, body, channel or connection)",
"def _push_to_amqp(self, msg):\n StatsClientSingleton().incr('amqp.output', count=1)\n payload = dict()\n path = self.request.uri.split('/')[1:]\n payload['type'] = path[0]\n payload['parser_ver'] = path[1]\n payload['env'] = path[2]\n payload['app'] = path[3]\n payload['message'] = msg\n payload['http_content_length'] = len(msg)\n routing_key = \"{}.{}.{}.{}\".format(payload['type'],\n payload['parser_ver'],\n payload['env'], payload['app'])\n\n self.amqp_con.publish(routing_key, json.dumps(payload))",
"def send_durable_exchange_message(self, exchange_name, body):\n self.connect()\n channel = self.connection.channel()\n # Fanout will send message to multiple subscribers\n channel.exchange_declare(exchange=exchange_name, exchange_type='fanout')\n result = channel.basic_publish(exchange=exchange_name, routing_key='', body=body,\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n ))\n self.close()\n return result",
"def publish(self, topic: str, message: dict):\n pass",
"def publish(self, msg=None):\n if not self.message_queue:\n logger.error('publish message error due to message_queue is None')\n return\n message = json.dumps(msg)\n try:\n self.message_queue.send(message)\n except Exception as e:\n logger.error('publish ipc message to queue fail: {}'.format(e))",
"def publish(self, queue, message):\n\n if queue not in self._queues:\n error_msg = f'Queue with name `{queue}` is not declared.' \\\n f'Please call bootstrap before using publish'\n raise QueueDoesNotExistError(error_msg)\n\n self._channel.open()\n try:\n self._channel.send_message(self._exchange_name, queue, message)\n finally:\n self._channel.close()",
"def publish(self, msg, exchange):\n\n try:\n self._publish(msg, exchange)\n except pika.exceptions.ConnectionClosed:\n logging.info('Reconnecting to queue')\n self.connect(exchange)\n self._publish(msg, exchange)",
"def send_to_rabbit(dialogue: Dialogue):\n global channel_send\n _prepare()\n\n channel_send.basic_publish(\n exchange='',\n routing_key=QUEUE_REQUESTS,\n body=dumps({'id': dialogue.id, 'request': dialogue.request, 'meta': dialogue.meta})\n )",
"def send_durable_message(self, queue_name, body):\n self.connect()\n channel = self.create_channel(queue_name)\n channel.basic_publish(exchange='',\n routing_key=queue_name,\n body=body,\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n ))\n self.close()",
"def send(self, payload):\n self.connection.send_durable_exchange_message(self.exchange_name, payload)\n logging.info(\"Sent message to exchange.\".format(self.exchange_name))",
"def test_publish_message(self):\n pass",
"def publish(self, channel, message):\r\n return self.execute_command(\"PUBLISH\", channel, message)",
"def publish_kwargs(self) -> dict:\n exchange = self.message.exchange or ''\n routing_key = self.message.routing_key or 'default'\n body = self.body()\n return dict(exchange=exchange, routing_key=routing_key, body=body, mandatory=True)",
"def launch(args, message, headers, formatter, position=0):\n credentials = pika.PlainCredentials(args.username, args.password)\n props = pika.BasicProperties(content_type='application/json',\n headers=headers,\n delivery_mode=2)\n connection = pika.BlockingConnection(pika.ConnectionParameters(\n host=args.host,\n port=args.port,\n credentials=credentials))\n channel = connection.channel()\n\n # tqdm the range for pretty metrics\n for i in tqdm(range(args.bunnos), position=position):\n channel.basic_publish(exchange=args.exchange,\n routing_key=args.routing_key,\n properties=props,\n body=formatter.format(message))\n\n connection.close()",
"def mq_send_once(exchange, routing_key, message):\n connection = mq_connect()\n confirmation = mq_send(connection.channel(), exchange, routing_key, message)\n connection.close()\n return confirmation"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
decode message envelope args message_data encoded message envelope (see encode_message) secret secret key to decrypt message (if encrypted) returns (sender, content, timestamp) sender message sender content content string (plaintext) timestamp datetime instance
|
def decode_message(message_data, secret=None):
    """Decode a message envelope (see encode_message).

    Args:
        message_data: encoded message envelope mapping with 'sender',
            'content' (urlsafe base64), 'timestamp-utc' (sequence of
            date/time fields) and 'encrypted' keys.
        secret: secret key used to decrypt the content when the envelope
            is flagged as encrypted.

    Returns:
        (sender, content, timestamp) where ``sender`` is the message
        sender, ``content`` the decoded payload (parsed from JSON) and
        ``timestamp`` a ``datetime`` instance.
    """
    sender = str(message_data['sender'])
    content = base64.urlsafe_b64decode(str(message_data['content']))
    # Coerce each timestamp field to int before expanding into datetime()
    # (fields may arrive as strings).
    timestamp = datetime(*map(int, message_data['timestamp-utc']))
    if message_data['encrypted']:
        # decrypt() is a module-level helper defined elsewhere in this file.
        content = decrypt(content, secret)
    content = json.loads(content)
    return sender, content, timestamp
|
[
"def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n clocks_per_click=msg.clocks_per_click,\n notated_32nd_notes_per_beat=msg.notated_32nd_notes_per_beat)\n else:\n raise TimeSignatureException('not 4/4')\n elif msg.type == 'note_on':\n data = _note_on_update(data, msg)\n elif msg.type == 'note_off':\n data = _note_off_update(data, msg)\n\n return data",
"def decode_message(self, message, is_key=False):\n\n if message is None:\n return None\n\n if len(message) <= 5:\n raise SerializerError(\"message is too small to decode\")\n\n with ContextStringIO(message) as payload:\n magic, schema_id = struct.unpack(\">bI\", payload.read(5))\n if magic != MAGIC_BYTE:\n raise SerializerError(\"message does not start with magic byte\")\n decoder_func = self._get_decoder_func(schema_id, payload, is_key)\n return decoder_func(payload)",
"def decrypt(self, data):\n aes_key, hmac_key = self.keys\n sig = data[-SIG_SIZE:]\n data = data[:-SIG_SIZE]\n if hmac.new(hmac_key, data, hashlib.sha256).digest() != sig:\n return None\n #raise AuthenticationError(\"message authentication failed\")\n iv_bytes = data[:AES_BLOCK_SIZE]\n data = data[AES_BLOCK_SIZE:]\n cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)\n data = cypher.decrypt(data)\n return data[:-ord(data[-1])]",
"def decode_message(data):\n try:\n creator_id, channel_id, message_id = data.split('|')\n except:\n return None, None, None\n\n return creator_id, channel_id, message_id",
"def diff_message_from_exchange(cls,\n msg: Dict[str, any],\n timestamp: Optional[float] = None,\n metadata: Optional[Dict] = None,\n ) -> OrderBookMessage:\n trading_pair = metadata[\"trading_pair\"]\n order_book_id = int(msg[FoxbitOrderBookFields.MDUPDATEID.value])\n prc = '%.10f' % float(msg[FoxbitOrderBookFields.PRICE.value])\n qty = '%.10f' % float(msg[FoxbitOrderBookFields.QUANTITY.value])\n\n if msg[FoxbitOrderBookFields.ACTIONTYPE.value] == FoxbitOrderBookAction.DELETION.value:\n qty = '0'\n\n if msg[FoxbitOrderBookFields.SIDE.value] == FoxbitOrderBookSide.BID.value:\n\n return OrderBookMessage(\n OrderBookMessageType.DIFF, {\n \"trading_pair\": trading_pair,\n \"update_id\": order_book_id,\n \"bids\": [[prc, qty]],\n \"asks\": [],\n }, timestamp=int(msg[FoxbitOrderBookFields.ACTIONDATETIME.value]))\n\n if msg[FoxbitOrderBookFields.SIDE.value] == FoxbitOrderBookSide.ASK.value:\n return OrderBookMessage(\n OrderBookMessageType.DIFF, {\n \"trading_pair\": trading_pair,\n \"update_id\": order_book_id,\n \"bids\": [],\n \"asks\": [[prc, qty]],\n }, timestamp=int(msg[FoxbitOrderBookFields.ACTIONDATETIME.value]))",
"def market_data_decoder(bs):\n order_type = struct.unpack(\">B\", bs[0:1])[0]\n if order_type != FIXTYPE_MARKET_DATA:\n raise MarketSpreadError(\"Wrong Fix message type. Did you connect \"\n \"the senders the wrong way around?\")\n symbol = struct.unpack(\">4s\", bs[1:5])[0]\n transact_time = struct.unpack(\">21s\", bs[5:26])[0]\n bid = struct.unpack(\">d\", bs[26:34])[0]\n offer = struct.unpack(\">d\", bs[34:42])[0]\n return MarketDataMessage(symbol, transact_time, bid, offer)",
"def parse_message(message):\n return {\n \"msg\": message.message,\n \"sender\": message.sender.name,\n \"sent_on\": message.sent_on.strftime(\"%b %d %y - %H:%M\"),\n }",
"def _parse_message(self):\n parts = self.sms_message.split(\",\")\n values = {}\n for part in parts:\n key,value = part.split(\"=\",1)\n values[key] = value\n # grab the parts we want\n if '@' in values:\n self.message_type = int(values['@'])\n elif 'AT' in values:\n self.message_type = int(values['AT'])\n self.msisdn = values['CN']\n self.serialnumber = values['SN']\n self.signalstrength = int(values['S'])\n self.batterystrength = int(values['B'])\n self.puffcount = int(values['PC'])\n self.ussdresponse = values['U']\n self.medicationcompartment = values['M']\n self.ce = values['CE']\n # datetime format=DDMMYYHHMMSS\n t = values['T']\n (dd,month,yy,hh,minute,ss) = [int(x) for x in (t[0:2],t[2:4],t[4:6],t[6:8],t[8:10],t[10:12])]\n yy += 2000 # Y2K!\n self.timestamp = datetime.datetime(yy,month,dd,hh,minute,ss)",
"def decode_payload(message, enc=\"utf-8\"):\n return MessageUtils.decode(message.payload, enc=enc)",
"def unpack_transform_msg(msg, stamped=False):\n if stamped:\n t = msg.transform.translation\n r = msg.transform.rotation\n else:\n t = msg.translation\n r = msg.rotation\n\n return (t.x, t.y, t.z), (r.w, r.x, r.y, r.z)",
"def decrypt(self, data):\n a, b = data\n cipher_text = CipherText(a, b)\n decrypted_message = Elgamal.decrypt(cipher_text, self.private_key)\n\n return decrypted_message.decode('utf-8')",
"def get_message_timestamp(self, message):\n if isinstance(message.get(\"ts\"), str): return message[\"ts\"]\n return None",
"def parse_msg(msg):\n subject = msg.get(\"Subject\")\n return {\n \"subject\": subject,\n \"sender\": msg.get(\"Sender\"),\n \"date\": msg.get(\"Date\"),\n \"size\": len(bytes(msg)),\n }",
"def extract_sub_event(event) -> any:\n return event[\"messagekey\"]",
"def _cms_parse_enveloped_data(cls, ciphertext_for_recipient):\n content_info = cms.ContentInfo.load(ciphertext_for_recipient)\n if content_info.tag != 16:\n raise ValueError('CMS tag is not (16: Sequence)')\n\n if content_info['content_type'].native != 'enveloped_data':\n raise ValueError('CMS content_type is not enveloped_data')\n\n enveloped_data = content_info['content']\n recipient = enveloped_data['recipient_infos'][0].chosen\n encrypted_content_info = enveloped_data['encrypted_content_info']\n cipherkey = recipient['encrypted_key'].native\n\n block_size = encrypted_content_info['content_encryption_algorithm'].encryption_block_size\n init_vector = encrypted_content_info['content_encryption_algorithm'].encryption_iv\n ciphertext = encrypted_content_info['encrypted_content'].native\n\n return cipherkey, init_vector, block_size, ciphertext",
"def crds_decode(msg):\n if isinstance(msg, dict) and \"crds_encoded\" in msg:\n ascii = msg[\"crds_payload\"]\n b64 = ascii.encode(\"ascii\")\n compressed = base64.b64decode(b64)\n utf8 = gzip.decompress(compressed)\n json_str = utf8.decode()\n obj = json.loads(json_str)\n return obj\n else:\n return msg",
"def decode_msg_dict(timestamp, bitlist):\n\n return {'timestamp': timestamp,\n 'msgtype': bitlist.ubits(0,6),\n 'repeat': bitlist.ubits(6,2),\n 'mmsi': bitlist.ubits(8, 30),\n 'status': bitlist.ubits(38, 4),\n 'turn': bitlist.sbits(42, 8),\n 'speed': bitlist.ubits(50, 10),\n 'accuracy': bitlist.ubits(60, 1),\n 'lon': bitlist.sbits(61, 28),\n 'lat': bitlist.sbits(89, 27),\n 'course': bitlist.ubits(116, 12),\n 'heading': bitlist.ubits(128, 9),\n 'second': bitlist.ubits(137, 6),\n 'maneuver': bitlist.ubits(143, 2),\n 'raim': bitlist.ubits(148, 1),\n 'radio': bitlist.ubits(149, 19)}",
"def __decode(self, message):\n message = message.decode(\"UTF-8\")\n try:\n data = json.loads(message)\n except ValueError:\n data = None\n\n if type(data) is dict and 'event' in data:\n return data['event']\n\n return None",
"def deserialize(data):\n #TODO: for efficiency, we should use an index based system for\n #handling left over data to avoid copying messages when we don't have to.\n data = ''.join(data) #string for convenience\n delim = data.index('!')\n expected_len = int(data[0:delim])\n msg = data[delim+1:delim+1+expected_len]\n leftovers = data[delim+1+expected_len:]\n if expected_len != len(msg):\n leftovers = data\n return None, leftovers\n try:\n message = pickle.loads(msg) #strip newline character\n log('good message' + data) \n return message, leftovers\n \n except Exception as ex:\n log('bad' + data)\n return Message('message parsing failed', 'failure', 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A task to display the mosaic image in the dataserver
|
def displayMosaic( fitsfd ):
myDS9 = ds9()
fname = fitsfd.filename()
myDS9.set( "file mosaicimage {}".format(fname) )
myDS9.set("zoom to fit")
return fitsfd
|
[
"def test_raft_image_mosaic(self):\n infiles = sorted(glob.glob(os.path.join(_root_dir, 'S??',\n '*_lambda_flat_1000_*.fits')))\n infiles = OrderedDict([(filename.split('/')[-2], filename)\n for filename in infiles])\n test_files = dict()\n step = 100\n level = step\n for slot, infile in list(infiles.items()):\n outfile = '%s_test_image_%05i.fits' % (slot, level)\n with fits.open(infile) as hdu_list:\n for hdu in hdu_list[1:17]:\n hdu.data = np.ones(hdu.data.shape, dtype=np.float32)*level\n level += step\n fitsWriteto(hdu_list, outfile, overwrite=True)\n test_files[slot] = outfile\n\n raft_mosaic = raftTest.RaftMosaic(test_files, bias_subtract=False)\n raft_mosaic.plot(title='Test pattern')\n plt.savefig(self.outfile)",
"def save_mosaic():\n mosaic_image = Image.new('RGB', (target_image_width, target_image_height), (255, 255, 255))\n for x in range(0, target_image_width, SQUARE_SIZE):\n for y in range(0, target_image_height, SQUARE_SIZE):\n grid = split_image(pixels, (y, x), SQUARE_SIZE)\n avg_color = get_color_average(grid)\n best_match_thumbnail = pythagoras_nearest_rgb(avg_color, img_thumbs_mean_color)\n img_thumb_value = img_thumbnails.get(best_match_thumbnail)\n mosaic_image.paste(img_thumb_value, (x, y))\n\n unique_filename = str(uuid.uuid4()) + \".jpg\"\n path = os.path.dirname(os.getcwd())\n directory = os.path.join(path, \"results\", \"mosaic\")\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n mosaic_filepath = os.path.join(path, \"results\", \"mosaic\", unique_filename)\n mosaic_image.save(mosaic_filepath)\n mosaic_image.show()",
"def zoom(image):",
"def create_mosaic(self, filename, min_size, threshold):\r\n \r\n def _create_mosaic(image, min_size, threshold, database):\r\n \"\"\"Create and store a photomosaic version of image. min_size \r\n determines the smallest height or width of the picture to start \r\n replacing with pictures from database.threshold determines \r\n the lowest colour distance of two images to be considered \r\n 'matching'.\"\"\"\r\n \r\n size = image.size\r\n width, height = size[0], size[1]\r\n if width < min_size or height < min_size:\r\n best_match = self._closest_match(image, database)\r\n return best_match\r\n match = self._matching(image, threshold, database)\r\n if match:\r\n image.paste(match, (0, 0))\r\n else:\r\n q1 = (0, 0, (width / 2), (height / 2))\r\n q2 = ((width / 2), 0, width, (height / 2))\r\n q3 = (0, (height / 2), (width / 2), height)\r\n q4 = ((width / 2), (height / 2), width, height) \r\n image.paste(_create_mosaic(image.crop(q1), min_size, threshold,\r\n database), q1)\r\n image.paste(_create_mosaic(image.crop(q2), min_size, threshold,\r\n database), q2)\r\n image.paste(_create_mosaic(image.crop(q3), min_size, threshold,\r\n database), q3)\r\n image.paste(_create_mosaic(image.crop(q4), min_size, threshold,\r\n database), q4)\r\n return image\r\n \r\n pic = Image.open(filename)\r\n database = self._images_database(self.PICTURE_DATABASE)\r\n self.mosaic = _create_mosaic(pic, min_size, threshold, database)\r\n return self.mosaic",
"def mosaic():\n\n example1 = \"\"\"\n '''\n AB\n CC\n '''\"\"\"\n example2 = \"'AB;CC'\"\n\n for text in [example1, example2]:\n fig = new_slide()\n\n slide_heading(fig, '3.4 Feature: subplot_mosaic shortcut')\n\n fig.text(0.05, 0.8, f'plt.figure().subplot_mosaic({text})', **CODE)\n\n ax_dict = fig.subplot_mosaic(eval(text.lstrip()),\n # Don't overlay title and code.\n gridspec_kw={'left': 0.3, 'top': 0.7,\n 'right': 0.97})\n identify_axes(ax_dict)\n\n annotate_pr_author(fig, 'timhoffm', pr=18763)\n\n yield fig",
"def toMosaic(self, outfile):\n if len(self.detectors) > 1:\n import montage_wrapper as montage\n self._log(\"info\", \"Converting to FITS mosaic\")\n tmp_dir = os.path.join(self.out_path, \"tmp-\"+str(uuid.uuid4()))\n os.makedirs(tmp_dir)\n tmp_out_dir = os.path.join(self.out_path, \"tmp-out-\"+str(uuid.uuid4()))\n tmp_work_dir = os.path.join(self.out_path, \"tmp-work-\"+str(uuid.uuid4()))\n self.toFits(os.path.join(tmp_dir, \"image.fits\"))\n montage.mosaic(tmp_dir, tmp_out_dir, background_match=True, work_dir=tmp_work_dir)\n self._log(\"info\", \"Mosaic finished running\")\n shutil.copy(os.path.join(tmp_out_dir, \"mosaic.fits\"), outfile)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n if os.path.exists(tmp_out_dir):\n shutil.rmtree(tmp_out_dir)\n if os.path.exists(tmp_work_dir):\n shutil.rmtree(tmp_work_dir)\n self._log(\"info\", \"Created FITS file {} and cleaned up\".format(outfile))\n return [outfile]\n else:\n self._log(\"info\", \"Not creating single-detector mosaic\")\n return []",
"def mosaic(clip, num):\r\n clip = clip.resize.Spline36(format=vs.RGB48, matrix_in_s=\"709\")\r\n frames = [clip[int(((clip.num_frames-1)/(num**2 - 1)) * i)].dpid.Dpid(clip.width // num, clip.height // num) for i in range(num**2)]\r\n horizontal = [core.std.StackHorizontal(frames[i:i+num]) for i in range(0, num**2, num)]\r\n return core.std.StackVertical(horizontal)",
"def make_mosaic_and_seg(filt, name):\n\n files = glob.glob('*_flt.fits')\n info = grizli.utils.get_flt_info(files)\n\n group = {'files': list(info['FILE'][info['FILTER'] == filt.upper()]), 'product': name + '-' + filt}\n grizli.prep.drizzle_overlaps([group], scale=0.06, pixfrac=0.8, skysub=False)\n cat = grizli.prep.make_drz_catalog(group['product'], threshold=2)\n \n print('Drizzled mosaic and seg map written under the name: ' + name)",
"def mosaic_header(self, mosaic_shape, block, tile):\n mcomm = \"Set by MosaicAD, v{}\".format(__version__)\n fmat1 = \"[{}:{},{}:{}]\"\n fmat2 = \"[1:{},1:{}]\"\n\n mosaic_hd = self.ad[0].hdr.copy() # ref ext header.\n ref_block = self.geometry.ref_block \n amps_per_block = self._amps_per_block\n\n # ---- update CCDSEC,DETSEC and DATASEC keyword\n # Get keyword names for array_section and data_section\n arr_section_keyw = self.ad._keyword_for('array_section')\n dat_section_keyw = self.ad._keyword_for('data_section')\n det_section_keyw = self.ad._keyword_for('detector_section')\n # Get lowest x1 and y1\n min_x1 = np.min([k[0] for k in self.ad.detector_section()])\n min_y1 = np.min([k[2] for k in self.ad.detector_section()])\n\n # Unbin the mosaic shape\n x_bin, y_bin = (self.ad.detector_x_bin(), self.ad.detector_y_bin())\n if x_bin is None:\n x_bin, y_bin = (1, 1)\n\n unbin_width = mosaic_shape[1] * x_bin\n unbin_height = mosaic_shape[0] * y_bin\n detsec = fmat1.format(min_x1 + 1, min_x1 + unbin_width,\n min_y1 + 1, min_y1 + unbin_height)\n\n mosaic_hd[det_section_keyw] = detsec\n mosaic_hd[arr_section_keyw] = fmat2.format(unbin_width, unbin_height)\n mosaic_hd[dat_section_keyw] = fmat2.format(mosaic_shape[1],mosaic_shape[0])\n\n ccdname = self.ad.detector_name()\n mosaic_hd.set(\"CCDNAME\", ccdname)\n\n # Remove these keywords from the mosaic header.\n remove = ['FRMNAME', 'FRAMEID', 'CCDSIZE', 'BIASSEC', 'DATATYP']\n for kw in remove:\n if kw in mosaic_hd:\n del mosaic_hd[kw]\n\n mosaic_hd.set('EXTVER', 1, comment=mcomm, after='EXTNAME')\n pwcs = wcs.WCS(mosaic_hd)\n\n # Update CRPIX1 and CRPIX2.\n crpix1, crpix2 = self.update_crpix(pwcs, tile)\n mosaic_hd.set(\"CRPIX1\", crpix1, comment=mcomm)\n mosaic_hd.set(\"CRPIX2\", crpix2, comment=mcomm)\n return mosaic_hd",
"def fig_mofa_head(fig,ini_means,mix,k,L,pshape):\n # write some info\n ax = fig.add_subplot(L+2,L,3)\n ax.set_axis_off()\n ax.text(0.0,0.5,'Component = %d' % k,\n transform=ax.transAxes,ha='left',va='center',\n fontsize=35)\n ax = fig.add_subplot(L+2,L,6)\n ax.set_axis_off()\n ax.text(0.0,0.5,'Amp = %0.3f' % mix.amps[k],\n transform=ax.transAxes,ha='left',va='center',\n fontsize=35)\n\n # write mean patches\n fmpatch = mix.means[k].reshape(pshape)\n impatch = ini_means[k].reshape(pshape)\n ax = fig.add_subplot(L+1,L,1)\n ax.imshow(impatch,origin='lower',interpolation='nearest',\n vmin=np.min(impatch)*1.0001,\n vmax=np.max(impatch)*0.9999)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.text(0.5,1.1,'Initial Mean',\n transform=ax.transAxes,ha='center',va='center')\n ax = fig.add_subplot(L+1,L,L)\n ax.imshow(fmpatch,origin='lower',interpolation='nearest',\n vmin=np.min(fmpatch)*1.0001,\n vmax=np.max(fmpatch)*0.9999)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.text(0.5,1.1,'Final Mean',\n transform=ax.transAxes,ha='center',va='center')\n\n # psis\n ax = fig.add_subplot(L+2,L,L+1)\n ax.imshow(mix.psis[k].reshape(pshape),\n origin='lower',interpolation='nearest',\n vmin=np.min(mix.psis[k])*1.0001,\n vmax=np.max(mix.psis[k])*0.9999)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.text(0.5,1.05,'Psi',\n transform=ax.transAxes,ha='center',va='center')\n \n for ii in range(mix.M):\n ax = fig.add_subplot(L+2,L,L+2+ii)\n ax.imshow(mix.lambdas[k,:,ii].reshape(pshape),\n origin='lower',interpolation='nearest',\n vmin=np.min(mix.lambdas[k,:,ii])*1.0001,\n vmax=np.max(mix.lambdas[k,:,ii])*0.9999)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.text(0.5,1.05,'Lambda',\n transform=ax.transAxes,ha='center',va='center')\n return fig",
"def draw_mosaic(name, sequences, D, fi, options): \n class Var: pass #just because handling of global var is ugly otherwise\n Var.doPlot = False #dot and profile plots only when shift key pressed\n si,sj = None,None #start coord. of selection rectangle\n \n def indices(event):\n if event.xdata == None or event.ydata == None: return (None,None)\n return int(event.xdata+0.5), int(event.ydata+0.5)\n def onKeyPressed(event):\n Var.doPlot = event.key == \"control\"\n fig.canvas.mpl_disconnect(kpi)\n def onKeyReleased(event):\n Var.doPlot = False\n global kpi\n kpi = fig.canvas.mpl_connect('key_press_event', onKeyPressed)\n def onButtonPressed(event):\n global si,sj\n if not Var.doPlot: return\n Var.doPlot = False\n si,sj = indices(event)\n def onButtonReleased(event):\n if not Var.doPlot: return\n Var.doPlot = False\n ei,ej = indices(event)\n if (ei,ej) == (None,None): return\n if (ei,ej) == (si,sj):\n figure()\n plot_dot(sequences[ei], sequences[ej], n=options.n, \n c=options.c, bin=options.binary);\n show()\n else:\n i1, i2 = min([ei,ej,si,sj]), max([ei,ej,si,sj])\n figure()\n plot_mprofile(sequences[i1:i2+1], \"%d-%d\"%(i1,i2), options.n)\n show()\n \n if not options.no_mosaic:\n n = len(sequences)\n cd = colordict(sequences)\n fig = figure(figsize=(8.0,8.2))\n matshow(D, fignum=fig.number, cmap=cm.hot, aspect='equal')\n title(\"Mosaic: \"+name)\n fs = options.fontsize\n for i,label in enumerate(s.name() for s in sequences):\n color = \"k\" if options.black_labels else cd[pn(label)]\n text(i, n+0.1, label, size=fs, color=color, rotation='vertical', ha='center', va='top')\n text(n+0.1, i, label, size=fs, color=color, rotation='horizontal', ha='left', va='center')\n fig.canvas.mpl_connect('button_release_event', onButtonReleased)\n fig.canvas.mpl_connect('button_press_event', onButtonPressed)\n fig.canvas.mpl_connect('key_release_event', onKeyReleased)\n kpi = fig.canvas.mpl_connect('key_press_event', onKeyPressed)\n \n if not options.no_fiedler:\n figure()\n plot(fi, 
\"o-b\")\n axis([-1, len(fi), min(fi)*1.1, max(fi)*1.1])\n title(\"Fiedler: \"+name)\n \n if options.flowers:\n show_flowers()",
"def show(self):\n titles = ['Original']\n images = [self.images[0]]\n for i in range(4):\n if self.order[i] == Preprocessing.bw:\n titles.append('Binarise')\n images.append(self.images[i+1])\n elif self.order[i] == Preprocessing.crop_image:\n titles.append('Crop')\n images.append(self.images[i+1])\n elif self.order[i] == Preprocessing.morph_image:\n titles.append('Close')\n images.append(self.images[i+1])\n elif self.order[i] == Preprocessing.blur_image:\n titles.append('Blur')\n images.append(self.images[i+1])\n elif self.order[i] == Preprocessing.return_image:\n continue\n titles.append('Text')\n empty = np.zeros((images[-1].shape[0], images[-1].shape[1], 3), np.uint8)\n empty[:] = (255, 255, 255)\n images.append(empty)\n\n for i in range(len(titles)):\n plt.subplot(1, len(titles), i + 1), plt.imshow(images[i], 'gray')\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([])\n ax = plt.gca()\n plt.text(0.5, 0.5, self.text, horizontalalignment='center',\n verticalalignment='center', transform=ax.transAxes,\n fontsize=13)\n plt.show()",
"def __call__(self, images, boxes):\n if images.shape[0] != 4:\n err_msg = \"Currently Exact 4 Images are supported by Mosaic Aug.\"\n logging.error(err_msg)\n raise Exception(err_msg)\n\n x, y = self._mosaic_divide_points()\n mosaic_sub_images, mosaic_boxes = self._mosaic(\n images, boxes, mosaic_divide_points=(x, y))\n\n upper_stack = tf.concat([mosaic_sub_images[0], mosaic_sub_images[1]],\n axis=0)\n lower_stack = tf.concat([mosaic_sub_images[2], mosaic_sub_images[3]],\n axis=0)\n mosaic_image = tf.concat([upper_stack, lower_stack], axis=1)\n return mosaic_image, mosaic_boxes",
"def visualize_data(lines):\n print('size of data', len(lines))\n n = random.randint(0, len(lines))\n fig = plt.figure(figsize=(20, 200))\n for i in range(3):\n image = cv2.cvtColor(cv2.imread(lines[n][i]), cv2.COLOR_BGR2RGB)\n ax = fig.add_subplot(1, 3, i + 1)\n ax.imshow(image)",
"def default_mosaic(self):\n return None",
"def _create_mosaic(image, min_size, threshold, database):\r\n \r\n size = image.size\r\n width, height = size[0], size[1]\r\n if width < min_size or height < min_size:\r\n best_match = self._closest_match(image, database)\r\n return best_match\r\n match = self._matching(image, threshold, database)\r\n if match:\r\n image.paste(match, (0, 0))\r\n else:\r\n q1 = (0, 0, (width / 2), (height / 2))\r\n q2 = ((width / 2), 0, width, (height / 2))\r\n q3 = (0, (height / 2), (width / 2), height)\r\n q4 = ((width / 2), (height / 2), width, height) \r\n image.paste(_create_mosaic(image.crop(q1), min_size, threshold,\r\n database), q1)\r\n image.paste(_create_mosaic(image.crop(q2), min_size, threshold,\r\n database), q2)\r\n image.paste(_create_mosaic(image.crop(q3), min_size, threshold,\r\n database), q3)\r\n image.paste(_create_mosaic(image.crop(q4), min_size, threshold,\r\n database), q4)\r\n return image",
"def __make_one_mosaic(self, tile_record_list):\n mosaic = MosaicContents(\n tile_record_list,\n self.datacube.tile_type_dict,\n self.dataset_dict['level_name'],\n self.collection.get_temp_tile_directory()\n )\n mosaic.create_record(self.db)\n self.collection.mark_tile_for_creation(mosaic)",
"def __init__ ( self,\n sources = None,\n threshold = None,\n image_rotations = None,\n image_shifts = None,\n image_scales = None,\n image_nicknames = None,\n image_manipulations = None, \n output_file = None,\n n_hdf5 = None ,\n plot_every_n = None,\n accumulate_n = None,\n fignum = \"1\" ):\n\n opt = PyanaOptions() # convert option string to appropriate type\n self.plot_every_n = opt.getOptInteger(plot_every_n)\n self.mpl_num = opt.getOptInteger(fignum)\n\n self.sources = opt.getOptStrings(sources)\n nsources = len(self.sources)\n print \"pyana_image, %d sources: \" % nsources\n for sources in self.sources :\n print \" \", sources\n\n self.image_nicknames = []\n if image_nicknames is None:\n for i in range (0, len(self.sources) ):\n self.image_nicknames.append( \"Im%d\"%(i+1) )\n else :\n self.image_nicknames = image_nicknames.split(\" \")\n\n \n self.image_rotations = None\n if image_rotations is not None:\n if image_rotations == \"\" or image_rotations == \"None\" :\n self.image_rotations = None\n else : \n self.image_rotations = {}\n list_of_rotations = image_rotations.split(\" \")\n if len(list_of_rotations) != nsources: print \"Plz provide rotation angles for *all* images!\"\n i = 0\n for source in self.sources :\n self.image_rotations[source] = float( list_of_rotations[i] )\n i+=1\n \n \n self.image_shifts = None\n if image_shifts is not None:\n if image_shifts == \"\" or image_shifts == \"None\" :\n self.image_shifts = None\n else :\n self.image_shifts = {}\n list_of_shifts = image_shifts.split(\" \") \n if len(list_of_shifts) != nsources: print \"Plz provide shift amount for *all* images!\"\n i = 0\n for source in self.sources :\n shift = list_of_shifts[i].lstrip(\"(\").rstrip(\")\").split(\",\")\n self.image_shifts[source] = (int(shift[0]), int(shift[1]))\n i+=1\n\n self.image_scales = None\n if image_scales is not None:\n if image_scales == \"\" or image_scales == \"None\" :\n self.image_scales = None\n else :\n self.image_scales = {}\n list_of_scales 
= image_scales.split(\" \") \n if len(list_of_scales) != nsources: print \"Plz provide scale factors for *all* images!\"\n i = 0\n for sources in self.image_adresses :\n self.image_scales[source] = float( list_of_scales[i] )\n i+=1\n\n self.image_manipulations = None\n if image_manipulations is not None:\n if image_manipulations == \"\" or image_manipulations == \"None\" :\n self.image_manipulations = None\n else : \n self.image_manipulations = image_manipulations\n\n\n self.output_file = output_file\n if output_file == \"\" or output_file == \"None\" :\n self.output_file = None\n print \"Using output_file: \", self.output_file\n\n threshold_string = opt.getOptStrings(threshold)\n # format: 'value (xlow:xhigh,ylow:yhigh)', only value is required\n \n self.threshold = None\n if len(threshold_string)>0:\n self.threshold = Threshold()\n self.threshold.value = opt.getOptFloat(threshold_string[0])\n print \"Using threshold value \", self.threshold.value\n if len(threshold_string)>1:\n self.threshold.area = np.array([0.,0.,0.,0.])\n \n intervals = threshold_string[1].strip('()').split(',')\n xrange = intervals[0].split(\":\")\n yrange = intervals[1].split(\":\")\n self.threshold.area[0] = float(xrange[0])\n self.threshold.area[1] = float(xrange[1])\n self.threshold.area[2] = float(yrange[0])\n self.threshold.area[3] = float(yrange[1])\n \n print \"Using threshold area \", self.threshold.area\n \n\n self.n_hdf5 = None\n if n_hdf5 is not None :\n if n_hdf5 == \"\" or n_hdf5 == \"None\" :\n self.n_hdf5 = None\n else :\n self.n_hdf5 = int(n_hdf5)\n\n # to keep track\n self.n_shots = None\n\n # averages\n self.sum_good_images = {}\n self.sum_dark_images = {}\n self.n_good = {}\n self.n_dark = {}\n for addr in self.sources :\n self.sum_good_images[addr] = None\n self.sum_dark_images[addr] = None\n self.n_good[addr] = 0\n self.n_dark[addr] = 0\n\n # output file\n self.hdf5file = None\n if self.output_file is not None :\n if \".hdf5\" in self.output_file and self.n_hdf5 is 
None:\n print \"opening %s for writing\" % self.output_file\n self.hdf5file = h5py.File(self.output_file, 'w')\n\n self.plotter = Plotter() \n self.plotter.settings(7,7) # set default frame size",
"def visualize_data(self):\r\n\r\n\t\tx = np.arange(self.tile_width)\r\n\t\ty = np.arange(self.tile_height)\r\n\t\thovertext = []\r\n\r\n\t\tcounts_list = np.zeros(len(self.data_list))\r\n\t\tfiles_list = np.empty(len(self.data_list), dtype = 'object')\r\n\r\n\t\tfor index, item in enumerate(self.data_list):\r\n\t\t\tcounts_list[index] = item[1]\r\n\t\t\tfiles_list[index] = item[0]\r\n\r\n\t\tcounts_list = counts_list.reshape((self.tile_width, self.tile_height))\r\n\t\tfiles_list = files_list.reshape((self.tile_width, self.tile_height))\r\n\r\n\t\tfor yi, yy in enumerate(y):\r\n\t\t\thovertext.append(list())\r\n\t\t\tfor xi, xx in enumerate(x):\r\n\t\t\t\thovertext[-1].append('File name : {}<br />Count: {}'.format(np.flipud(files_list)[self.tile_height -1 -yi][xi], np.flipud(counts_list)[self.tile_height - 1 - yi][xi]))\r\n\r\n\t\ttrace = go.Heatmap(z = counts_list, x = x, y = y, hoverinfo = 'text', text = hovertext, colorscale = 'Greys')\r\n\t\tdata = [trace]\r\n\t\tpy_offline.plot(data, filename = './tiles/tile'+str(self.tile_number)+'.html', auto_open = False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
starts the uploader and shows bounding boxes for a detected face
|
def run(self):
        """Open the default webcam, detect faces, and upload the first detected frame.

        Loops while the camera is open and ``self.ready_to_detect_face`` is set.
        On the first frame containing a face it calls ``self._upload`` and clears
        the flag, which ends the loop; pressing ESC also exits. Side effects:
        shows a preview window and finally calls ``self.stop()``.
        """
        cv2.namedWindow(consts.UPLOADER_WINDOW)
        # TODO : video capture source should be handled by camera.py and /
        # not default 0(webcam)
        self.camera = cv2.VideoCapture(0)
        while self.camera.isOpened() and self.ready_to_detect_face:
            _, frame = self.camera.read()
            face_coords = self._detect_face(frame)
            # draw rectangle bounding box for every face
            for i in face_coords:
                print("found face coords")
                # NOTE(review): the frame is uploaded BEFORE the rectangle is
                # drawn, so the uploaded image carries no bounding box; also,
                # _upload runs once per face found in this frame — confirm both
                # are intended.
                self._upload(frame)
                cv2.rectangle(frame,(i[0], i[1]),(i[2], i[3]),(255,0,0),2)
                print(f"Detected face: uploading as {self.name} .. exiting")
                # clearing the flag stops the outer while-loop after this frame
                self.ready_to_detect_face = False
            key = cv2.waitKey(100)
            cv2.imshow(consts.UPLOADER_WINDOW, frame)
            if key == 27: # exit on ESC
                break
        self.stop()
|
[
"def run(self):\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if ret:\n boxes, face_probs = self.mtcnn.detect(frame)\n if boxes is not None and len(boxes) > 0:\n name_probs = []\n for box in boxes:\n y1, y2, x1, x2 = int(box[1]), int(box[3]), int(box[0]), int(box[2])\n face = frame[y1:y2, x1:x2]\n if face.size > 0:\n pred, probs = self.classify_face(face)\n name_probs.append(probs)\n\n self.draw(frame, boxes, face_probs, name_probs)\n else:\n cv2.putText(frame, \"Couldn't Find Any Faces\", (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n (0, 0, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"Face Detection\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()",
"def face_bounding_boxes(gcs_uri):\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.FACE_DETECTION]\n\n config = videointelligence.types.FaceConfig(\n include_bounding_boxes=True)\n context = videointelligence.types.VideoContext(\n face_detection_config=config)\n\n operation = video_client.annotate_video(\n gcs_uri, features=features, video_context=context)\n print('\\nProcessing video for face annotations:')\n\n result = operation.result(timeout=900)\n print('\\nFinished processing.')\n\n # There is only one result because a single video was processed.\n faces = result.annotation_results[0].face_detection_annotations\n for i, face in enumerate(faces):\n print('Face {}'.format(i))\n\n # Each face_detection_annotation has only one segment.\n segment = face.segments[0]\n start_time = (segment.segment.start_time_offset.seconds +\n segment.segment.start_time_offset.nanos / 1e9)\n end_time = (segment.segment.end_time_offset.seconds +\n segment.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n print('\\tSegment: {}\\n'.format(positions))\n\n # Each detected face may appear in many frames of the video.\n # Here we process only the first frame.\n frame = face.frames[0]\n\n time_offset = (frame.time_offset.seconds +\n frame.time_offset.nanos / 1e9)\n box = frame.attributes[0].normalized_bounding_box\n\n print('First frame time offset: {}s\\n'.format(time_offset))\n\n print('First frame normalized bounding box:')\n print('\\tleft : {}'.format(box.left))\n print('\\ttop : {}'.format(box.top))\n print('\\tright : {}'.format(box.right))\n print('\\tbottom: {}'.format(box.bottom))\n print('\\n')",
"def Training():\r\n \r\n recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n path = 'dataset'\r\n \r\n def getImageWithID(path): #get all pictures saved\r\n imagePaths =[os.path.join(path,f) for f in os.listdir(path)]\r\n faces = []\r\n IDs=[]\r\n for imagePath in imagePaths:\r\n faceImg = Image.open(imagePath).convert('L')\r\n faceNp = np.array(faceImg,'uint8')\r\n ID=int(os.path.split(imagePath)[-1].split('.')[1])\r\n faces.append(faceNp)\r\n print (ID)\r\n IDs.append(ID)\r\n cv2.imshow(\"training\",faceNp)\r\n cv2.waitKey(10)\r\n return IDs,faces\r\n Ids,faces = getImageWithID(path)\r\n recognizer.train(faces,np.array(Ids))\r\n recognizer.save('recognizer/trainingData.yml')\r\n cv2.destroyAllWindows()\r\n \r\n os.system('python3 training.py')",
"def startTrackingNFOV(self):\n # default load and init\n self._initLoad()\n\n # prints just basic guide and info\n print(\"--------------------------------------------------------------------\")\n print(\"OpenCV tracking process with rectilinear improvement has started...\")\n print(\"Tracker : \" + self.tracker_name)\n print(\"Frame #1 : \" + str(self.bbox))\n print(\"Press 'Esc' or 'Q' key to exit\")\n print(\"--------------------------------------------------------------------\")\n\n\n ##########################################################################\n ################## Normal field of view initialization ###################\n ##########################################################################\n # init instance for normal field of view according to rectilinear framework\n nfov_width = int(self.video_width / 2)\n nfov_height = int(self.video_height / 2)\n\n # lets define max rectilinear window size - 720p\n if nfov_width > 1440 or nfov_height > 720:\n whRatio = nfov_width / nfov_height\n nfov_height = 720\n nfov_width = round(whRatio * 720)\n\n # create instance of NFOV\n nfov = NFOV(nfov_height, nfov_width)\n\n # center point of selected bounding box\n center_equi_x = int(self.bbox[0] + self.bbox[2]/2)\n center_equi_y = int(self.bbox[1] + self.bbox[3]/2)\n center_equi_x_normalized = center_equi_x / self.video_width\n center_equi_y_normalized = center_equi_y / self.video_height\n \n # bounding box points left_top and bottom_right\n x1_normalized = self.bbox[0] / self.video_width\n y1_normalized = self.bbox[1] / self.video_height\n x2_normalized = (self.bbox[0] + self.bbox[2]) / self.video_width\n y2_normalized = (self.bbox[1] + self.bbox[3]) / self.video_height\n\n # camera center point (valid range [0,1])\n center_point = np.array([center_equi_x_normalized, center_equi_y_normalized])\n # bounding box points left_top and bottom_right\n nfov.point1_equi = np.array([x1_normalized, y1_normalized])\n nfov.point2_equi = np.array([x2_normalized, 
y2_normalized])\n # remap to normal field of view\n frameRectilinear = nfov.toNFOV(self.frame, center_point, computeRectPoints=True)\n\n # get coordinates of points in rectilinear projection\n x1_rect = int(nfov.point1_rect[0])\n y1_rect = int(nfov.point1_rect[1])\n width_rect = int(nfov.point2_rect[0] - nfov.point1_rect[0])\n height_rect = int(nfov.point2_rect[1] - nfov.point1_rect[1])\n\n\n ##########################################################################\n ################## Tracking process initialization #######################\n ##########################################################################\n # use bounding box representation also in rectilinear\n bbox_rect = (x1_rect, y1_rect, width_rect, height_rect)\n\n # top left in equirectangular\n p1_equi = (int(self.bbox[0]), int(self.bbox[1]))\n # bottom right in equirectangular\n p2_equi = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))\n\n # top left in rectilinear\n p1_rect = (bbox_rect[0], bbox_rect[1])\n # bottom right in rectilinear\n p2_rect = (bbox_rect[0] + bbox_rect[2], bbox_rect[1] + bbox_rect[3])\n\n # display first frame\n cv2.rectangle(self.frame, p1_equi, p2_equi, (0, 255, 0), 2, 1)\n cv2.imshow(self.WINDOW_NAME, self.frame)\n\n cv2.namedWindow(self.WINDOW_NAME_RECTILINEAR)\n cv2.rectangle(frameRectilinear, p1_rect, p2_rect, (255, 255, 0), 2, 1)\n cv2.imshow(self.WINDOW_NAME_RECTILINEAR, frameRectilinear)\n\n # initialize tracker with first frame and bounding box\n ok = self.tracker.init(frameRectilinear, bbox_rect)\n\n # max fps\n videoFPS = 30\n # videoFPS = cap.get(cv2.CAP_PROP_FPS) \n # calculate the interval between frame\n interval = int(1000/videoFPS) \n\n # cv2.waitKey(0)\n\n # empiric constants for shifting/scaling in rectilinear projection - setup by experiments\n SHIFT_SLOW_X_START = 0.45 * nfov_width\n SHIFT_SLOW_Y_START = 0.45 * nfov_height\n SHIFT_FAST_X_START = 0.35 * nfov_width\n SHIFT_FAST_Y_START = 0.35 * nfov_height\n\n SHIFT_FAST_X = 
int(self.video_width / 100)\n SHIFT_FAST_Y = int(self.video_height / 100)\n\n SHIFT_SLOW_X = int(self.video_width / 200)\n SHIFT_SLOW_Y = int(self.video_height / 200)\n\n SCALEDOWN_FOV_SLOW_START_X = 0.66 * nfov_width\n SCALEDOWN_FOV_SLOW_START_Y = 0.66 * nfov_height\n\n SCALEDOWN_FOV_FAST_START_X = 0.8 * nfov_width\n SCALEDOWN_FOV_FAST_START_Y = 0.8 * nfov_height\n\n SCALEUP_FOV_SLOW_START_X = 0.33 * nfov_width\n SCALEUP_FOV_SLOW_START_Y = 0.33 * nfov_height\n \n SCALE_FOV_STEP_SLOW = 0.01\n SCALE_FOV_STEP_FAST = 0.02\n\n while True:\n # Read a new frame\n ok, self.frame = self.video.read()\n if not ok:\n break\n\n # Start timer\n timer = cv2.getTickCount()\n \n # update center point\n if p1_rect and p2_rect and bbox_rect:\n # center of bounding box in rectilinear projection\n center_rect = [bbox_rect[0] + bbox_rect[2]/2, bbox_rect[1] + bbox_rect[3]/2]\n\n # SHIFTS X\n # FAST\n if center_rect[0] < SHIFT_FAST_X_START:\n center_equi_x -= SHIFT_FAST_X\n if center_equi_x < 0:\n center_equi_x = self.video_width + center_equi_x\n else:\n center_equi_x = center_equi_x % self.video_width\n elif center_rect[0] > nfov_width - SHIFT_FAST_X_START:\n center_equi_x += SHIFT_FAST_X\n center_equi_x = center_equi_x % self.video_width\n # SLOW\n elif center_rect[0] < SHIFT_SLOW_X_START:\n center_equi_x -= SHIFT_SLOW_X\n if center_equi_x < 0:\n center_equi_x = self.video_width + center_equi_x\n else:\n center_equi_x = center_equi_x % self.video_width\n elif center_rect[0] > nfov_width - SHIFT_SLOW_X_START:\n center_equi_x += SHIFT_SLOW_X\n center_equi_x = center_equi_x % self.video_width\n \n\n # SHIFTS Y\n # FAST\n if center_rect[1] < SHIFT_FAST_Y_START:\n center_equi_y -= SHIFT_FAST_Y\n if center_equi_y < 0:\n center_equi_y = self.video_height + center_equi_y\n else:\n center_equi_y = center_equi_y % self.video_height\n elif center_rect[1] > nfov_height - SHIFT_FAST_Y_START:\n center_equi_y += SHIFT_FAST_Y\n center_equi_y = center_equi_y % self.video_height\n # SLOW Y\n elif 
center_rect[1] < SHIFT_SLOW_Y_START:\n center_equi_y -= SHIFT_SLOW_Y\n if center_equi_y < 0:\n center_equi_y = self.video_height + center_equi_y\n else:\n center_equi_y = center_equi_y % self.video_height\n elif center_rect[1] > nfov_height - SHIFT_SLOW_Y_START:\n center_equi_y += SHIFT_SLOW_Y\n center_equi_y = center_equi_y % self.video_height\n\n\n # default nfov.FOV is 0.5 == 90°\n # rescale FOV - enable zoom back (further from object)\n # object is close to camera/big -> increase field of view\n # FAST\n if bbox_rect[2] > SCALEDOWN_FOV_FAST_START_X or bbox_rect[3] > SCALEDOWN_FOV_FAST_START_Y:\n # max FOV 0.8 == 144° \n if nfov.FOV[0] < 0.8:\n nfov.FOV = [nfov.FOV[0] + SCALE_FOV_STEP_FAST, nfov.FOV[1] + SCALE_FOV_STEP_FAST]\n # SLOW\n elif bbox_rect[2] > SCALEDOWN_FOV_SLOW_START_X or bbox_rect[3] > SCALEDOWN_FOV_SLOW_START_Y:\n # max FOV 0.8 == 144°\n if nfov.FOV[0] < 0.8:\n nfov.FOV = [nfov.FOV[0] + SCALE_FOV_STEP_SLOW, nfov.FOV[1] + SCALE_FOV_STEP_SLOW]\n # rescale FOV - enable zoom forward (closer from object)\n elif bbox_rect[2] < SCALEUP_FOV_SLOW_START_X and bbox_rect[3] < SCALEUP_FOV_SLOW_START_Y:\n # object is small and field of view is large \n if nfov.FOV[0] > 0.6:\n # decrease field of view\n nfov.FOV = [nfov.FOV[0] - SCALE_FOV_STEP_SLOW, nfov.FOV[1] - SCALE_FOV_STEP_SLOW]\n\n # normalize center point in [0,1]\n center_equi_x_normalized = center_equi_x / self.video_width\n center_equi_y_normalized = center_equi_y / self.video_height\n # camera center point (valid range [0,1])\n center_point = np.array([center_equi_x_normalized, center_equi_y_normalized]) \n\n # new frame to rectilinear/normal field of view\n frameRectilinear = nfov.toNFOV(self.frame, center_point)\n\n # Update tracker\n ok, bbox_rect = self.tracker.update(frameRectilinear)\n\n # calculate Frames per second after tracking (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n\n # handle tracking result\n if ok:\n # rectilinear\n p1_rect = (int(round(bbox_rect[0])), 
int(round(bbox_rect[1])))\n p2_rect = (int(round(bbox_rect[0] + bbox_rect[2])), int(round(bbox_rect[1] + bbox_rect[3])))\n # store points to nfov instance\n nfov.point1_rect = [p1_rect[0], p1_rect[1]]\n nfov.point2_rect = [p2_rect[0], p2_rect[1]]\n # draw bounding box to rectilinear frame\n cv2.rectangle(frameRectilinear, p1_rect, p2_rect, (255, 255, 0), self.RECTANGLE_BORDER_PX, 1)\n\n # equirectangular\n # compute corresponding points of rectilinear bounding box in equirectangular projection \n nfov.computeEquirectangularBbox(bbox_width=round(bbox_rect[2]), bbox_height=round(bbox_rect[3]))\n # bbox points top left and right bottom in equirectangular projection\n p1_equi = (int(round(nfov.point1_equi[0] * self.video_width)), int(round(nfov.point1_equi[1] * self.video_height)))\n p2_equi = (int(round(nfov.point2_equi[0] * self.video_width)), int(round(nfov.point2_equi[1] * self.video_height)))\n\n # in NFOV points.x could be negative\n p1_equi = self._checkBoundsOfPoint(p1_equi)\n p2_equi = self._checkBoundsOfPoint(p2_equi)\n\n # create custom equirectangular bounding box instance\n bb = BoundingBox(p1_equi, p2_equi, self.video_width)\n bb.is_annotated = True\n self.result_bounding_boxes.append(bb)\n\n # draw bounding box to original equirectangular frame\n self._drawBoundingBox(self.video_width, p1_equi, p2_equi, bb, (0, 255, 0), self.RECTANGLE_BORDER_PX)\n else:\n # tracking failure\n cv2.putText(self.frame, \"Tracking failure detected\", self.TEXT_ROW4_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 0, 255), self.FONT_WEIGHT)\n \n # reinit points\n p1_rect = None\n p2_rect = None\n p1_equi = None\n p2_equi = None\n\n # create custom empty equirectangular bounding box instance\n bb = BoundingBox(None, None, self.video_width)\n bb.is_annotated = False\n self.result_bounding_boxes.append(bb)\n \n # Display tracker type on frame\n cv2.putText(self.frame, self.tracker_name + \" Tracker\", self.TEXT_ROW1_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 255, 0), 
self.FONT_WEIGHT)\n # Display FPS on frame\n cv2.putText(self.frame, \"Video FPS : \" + str(videoFPS), self.TEXT_ROW2_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 255, 0), self.FONT_WEIGHT)\n cv2.putText(self.frame, \"Tracker FPS : \" + str(int(fps)), self.TEXT_ROW3_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 255, 0), self.FONT_WEIGHT)\n \n # Display result\n cv2.imshow(self.WINDOW_NAME, self.frame)\n cv2.imshow(self.WINDOW_NAME_RECTILINEAR, frameRectilinear)\n\n\n # waitKey time computing\n # time in ms\n time = int(1000 * (cv2.getTickCount() - timer) / cv2.getTickFrequency())\n\n waitMiliseconds = 1\n if (time >= interval):\n waitMiliseconds = 1\n else:\n waitMiliseconds = interval - time\n \n k = cv2.waitKey(waitMiliseconds) & 0xff\n # Exit if 'Esc' or 'q' key is pressed\n if k == 27 or k == ord(\"q\"): \n break\n \n\n # always save tracker result\n self._saveResults()\n \n # release, destroy\n self.video.release()\n cv2.destroyAllWindows()",
"def faceRecog():\n recognise.main()",
"def classify_face(im):\r\n #get_encoded_faces()\r\n faces = shelve.open('trainingData.yml')\r\n #faces = faces1.read()\r\n #print(faces)\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n \r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n #print(\"face_names\",face_names)\r\n #print(\"faces_encoded\",faces_encoded)\r\n #print(\"known_fac_names:\",known_face_names)\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_COMPLEX_SMALL\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n while True:\r\n\r\n cv2.imshow('Video', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names",
"def face_callback(self,value):",
"def test_valid_bounding_box(self):\n detection = TestFaceDetector.defaultDetector.detectOne(image=VLIMAGE_ONE_FACE)\n self.assertBoundingBox(detection.boundingBox)\n detection = TestFaceDetector.defaultDetector.detect(images=[VLIMAGE_ONE_FACE])[0][0]\n self.assertBoundingBox(detection.boundingBox)",
"def get_face(self, detector, img_queue, box_queue):\n while True:\n image = img_queue.get()\n box = detector.extract_cnn_facebox(image)\n box_queue.put(box)",
"def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)\n\n imageWithOneFace = VLImage.load(\n filename=EXAMPLE_O\n )\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, detection=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(\n filename=EXAMPLE_SEVERAL_FACES\n )\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(detector.redetect(\n images=[ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect\n for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)])]))",
"def display_loading_picture_boxes(self):\n # Necessary collors in rgb\n red_color = (204, 0, 0)\n hov_color = (255, 102, 102)\n # Creating input boxes\n path_input_box = InputBox(700, 245, 400, 40, red_color, hov_color)\n steps_input_box = InputBox(700, 345, 200, 40, red_color, hov_color)\n # Displaying info on screen\n self.display_info_loading_pictures()\n # Gets data from input boxes and returns it\n return self.run_loading_picture_boxes(path_input_box, steps_input_box)",
"def _draw_boxes(\n frame: np.ndarray,\n detections: List[Dict],\n) -> np.ndarray:\n\n # for each detection draw a corresponding labeled bounding box\n for detection in detections:\n\n # draw the bounding box of the face\n frame = \\\n cv2.rectangle(\n frame,\n (detection[\"start_x\"], detection[\"start_y\"]),\n (detection[\"end_x\"], detection[\"end_y\"]),\n _BOX_COLOR,\n _BOX_LINE_WIDTH,\n )\n\n # draw the object's label and probability value\n label = f\"{detection['class']}: {int(float(detection['confidence']) * 100)}%\"\n if detection[\"start_y\"] - 10 > 10:\n y = detection[\"start_y\"] - 10\n else:\n y = detection[\"start_y\"] + 10\n frame = cv2.putText(frame,\n label,\n (detection[\"start_x\"], y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.45,\n _LABEL_COLOR,\n _LABEL_WIDTH)\n\n return frame",
"def show_boxes(self, image, boxes, box_classes, box_scores, file_name):\n for i, box in enumerate(boxes):\n x, y, w, h = int(box[0]), int(box[1]), int(box[2]), int(box[3])\n label = self.class_names[box_classes[i]] + '{:.2f}'.format(\n box_scores[i])\n rect = cv2.rectangle(image, (x, y), (w, h),\n 255, 2, cv2.LINE_AA)\n image = cv2.putText(rect, label, (x-5, y-5),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 1)\n cv2.imshow(file_name, image)\n if not os.path.isdir('./detections/'):\n os.mkdir('./detections/')\n if cv2.waitKey() == 115:\n cv2.imwrite('./detections/{}'.format(file_name), image)\n cv2.destroyAllWindows()",
"def loadInit(self):\n # Read video\n self.video = cv2.VideoCapture(self.path)\n # Exit if video not opened.\n if not self.video.isOpened():\n print(\"Error - Could not open video\")\n sys.exit(-1)\n\n # store video width/height to variables\n self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Read and parse existing groundtruth file\n if not(os.path.exists(self.groundtruth_path)):\n print(\"Error - Could not read a groundtruth file\")\n sys.exit(-1)\n\n # Read and parse existing tracking result file\n if not(os.path.exists(self.result_path)):\n print(\"Error - Could not read a tracking result file\")\n sys.exit(-1)\n\n # list of annotated bounding box objects\n self.gt_bounding_boxes = []\n # list of tracking result bounding box objects\n self.result_bounding_boxes = []\n\n # parsing groundtruth and result files\n self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)\n self.result_bounding_boxes = self.parser.parseGivenDataFile(self.result_path, self.video_width)",
"def recognize(self, image, boxes):\r\n raise NotImplementedError",
"def run_detection(self):\n self.rows = self.result_image.shape[0]\n self.cols = self.result_image.shape[1]\n self.cvNet.setInput(cv2.dnn.blobFromImage(self.input_image, size=self.rsize,\n swapRB=True, crop=False))\n self.cvOut = self.cvNet.forward()\n print(\"[INFO] Inference completed successfully.\")",
"def capture_user_face(self, user):\n\n folder = \"./{}/{}\".format(self.__dataset, user)\n\n # Create a new folder for the new name\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self.__start_camera()\n\n face_detector = cv2.CascadeClassifier(self.__classifier)\n\n img_counter = 0\n while img_counter <= 9:\n key = input(\"Press ENTER to take photo, or q to quit\")\n # if key == \"q\":\n # print(\"Aborting registration\")\n # return\n\n ret, frame = self.__camera.read()\n if not ret:\n break\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n\n if faces == () or faces.size == 0:\n print(\"No face detected, please try again\")\n continue\n\n for (x, y, w, h) in faces:\n img_name = \"{}/{:04}.jpg\".format(folder, img_counter)\n cv2.imwrite(img_name, frame[y: y + h, x: x + w])\n print(\"Photo {}/10 saved!\".format(img_counter + 1))\n img_counter += 1\n\n self.close()",
"def draw_identify(self, result):\n # defaults\n box_width = 100\n box_height = 22\n\n camera_width = self.camera.properties['width']\n camera_height = self.camera.properties['height']\n left = int(camera_width * 0.5 - box_width * 0.5)\n right = int(camera_width * 0.5 + box_width * 0.5)\n top = int(camera_height * 0.5 - box_height * 0.5)\n bottom = int(camera_height * 0.5 + box_height * 0.5)\n\n text = \"Searching...\"\n color = (0, 0, 200)\n text_color = (255, 255, 255)\n\n if result:\n left, top, right, bottom, text, color, text_color = result\n\n # Draw frame\n cv2.rectangle(Data.frame, (left, top), (right, bottom), color, 1)\n\n # Draw a label with a name below the face\n cv2.rectangle(Data.frame, (left, bottom - 20), (right, bottom), color, cv2.FILLED)\n font = cv2.FONT_HERSHEY_PLAIN\n center = int((left + right) * 0.5 * 0.95)\n cv2.putText(Data.frame, text, (center, bottom - 6), font, 1.0, text_color, 1)",
"def photoProcessed(self):\n # set up initial window features\n self.photoProcessedScreen.setWindowTitle(\"Unique Facial Feature Detection\")\n self.photoProcessedScreen.resize(575, 400)\n\n obtainedFeaturesText= QLabel(self.photoProcessedScreen)\n obtainedFeaturesText.setStyleSheet(\"font: 14pt Century Gothic\")\n obtainedFeaturesText.setText(\"Obtained unique features!\")\n obtainedFeaturesText.setGeometry(QRect(30, -10, 500, 200))\n obtainedFeaturesText.setAlignment(Qt.AlignCenter)\n\n global results\n global images\n if saveImage == 1:\n # open cropped image if it exists\n if (str(os.path.isfile(\"./backend/ResizedImages/newCropped.jpeg\")) == True):\n images = Image.open(\"./backend/ResizedImages/newCropped.jpeg\")\n else:\n images = Image.open(path)\n\n # set up button to show unique features\n photoProcessedBtnLayout = QHBoxLayout()\n getFeaturesListBtn = QPushButton(\"Get unique feature's list!\")\n photoProcessedBtnLayout.addWidget(getFeaturesListBtn)\n\n # go to next window if button is clicked\n getFeaturesListBtn.clicked.connect(self.outputtingList)\n\n self.photoProcessedScreen.setLayout(photoProcessedBtnLayout)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
detects the face in the frame
|
def _detect_face(self, frame):
        """Detect faces in a BGR frame and return padded bounding boxes.

        Args:
            frame: BGR image (numpy array) as read from the camera.

        Returns:
            list of (x1, y1, x2, y2) tuples, one per detected face, each
            corner pushed outward by ``consts.PADDING`` pixels.
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # second argument 0 = number of upsampling passes before detection
        rects = self.detector(gray, 0)
        # expand each detection by PADDING so downstream crops keep some context
        return [
            (d.left() - consts.PADDING,
             d.top() - consts.PADDING,
             d.right() + consts.PADDING,
             d.bottom() + consts.PADDING)
            for d in rects
        ]
|
[
"def ffp_detect(self, img):\r\n # convert to gray\r\n if img.ndim > 2:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n # detect face first\r\n bbox = self.face_detect(img).flatten()\r\n num_pts = self.face_lmks_model['num_pts']\r\n norm_width = self.face_lmks_model['norm_width']\r\n num_iter = self.face_lmks_model['num_iter']\r\n if bbox.shape[0] == 0:\r\n pts = np.zeros((num_pts, 2))\r\n return pts, 2, 0\r\n \r\n\r\n # obtain normalized face image and bounding box\r\n face_scale = norm_width/bbox[2]\r\n img = cv2.resize(img, None, fx=face_scale, fy=face_scale, interpolation=cv2.INTER_CUBIC) \r\n bbox_norm = (bbox*face_scale).round().astype(np.uint16)\r\n cut_x1 = max([0, bbox_norm[0] - self.face_lmks_model['margin']])\r\n cut_x2 = min([bbox_norm[0] + bbox_norm[2] + self.face_lmks_model['margin'], img.shape[1]-1])\r\n cut_y1 = max([0, bbox_norm[1] - self.face_lmks_model['margin']])\r\n cut_y2 = min([bbox_norm[1] + bbox_norm[3] + self.face_lmks_model['margin'], img.shape[0]-1])\r\n im_cut = img[cut_y1:cut_y2, cut_x1:cut_x2]\r\n bbox_cut = bbox_norm.copy()\r\n bbox_cut[0] = bbox_cut[0] - cut_x1 + 1\r\n bbox_cut[1] = bbox_cut[1] - cut_y1 + 1\r\n\r\n # detect facial landmarks with cascade framework\r\n for it in np.arange(num_iter):\r\n if it == 0:\r\n x0_norm = np.zeros((num_pts*2))\r\n x0_norm[0::2] = self.face_lmks_model['mm'][0::2] + bbox_cut[0] + bbox_cut[2]/2.0\r\n x0_norm[1::2] = self.face_lmks_model['mm'][1::2] + bbox_cut[1] + bbox_cut[3]/2.0\r\n # compute features\r\n temp = x0_norm.reshape(-1, 2)\r\n tkp = []\r\n for idx in range(temp.shape[0]):\r\n tkp.append(cv2.KeyPoint(temp[idx, 0], temp[idx, 1], 5.2, -1, 1, 0, 1))\r\n tkp, tdp = self.sift_extractor.compute(im_cut, tkp)\r\n tdp = tdp.reshape(1, -1)\r\n tdp = np.append(1, tdp/255.0)\r\n V_diff = np.dot(self.face_lmks_model['para_detect'][it]['R'], tdp)\r\n x0_norm = x0_norm + V_diff\r\n \r\n # confidence, evaluate the quality of facial landmark detection\r\n flag_succ, confidence = 
self.compute_confidence(im_cut, x0_norm.reshape((-1, 2)), \r\n self.face_detector['confidence_SIFT']['descriptor'],\r\n self.face_detector['confidence_SIFT']['thre_detect'])\r\n if flag_succ == 0:\r\n x0_norm = x0_norm.reshape((-1, 2))\r\n x_est = (x0_norm + np.array([cut_x1-1, cut_y1-1]).reshape((-1, 2)))/face_scale \r\n else:\r\n x_est = np.zeros((num_pts, 2))\r\n return x_est.reshape((-1, 2)), flag_succ, confidence",
"def locate_faces(input_image):\n face_cascade = cv2.CascadeClassifier(CASCADE_FILE_PATH)\n gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n # detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.2, 5)\n print(faces)\n return faces",
"def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)\n\n imageWithOneFace = VLImage.load(\n filename=EXAMPLE_O\n )\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, detection=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(\n filename=EXAMPLE_SEVERAL_FACES\n )\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(detector.redetect(\n images=[ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect\n for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)])]))",
"def face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0",
"def face_detect(face_detector, img):\n test_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n grayed_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n face_coordinates = face_detector.detectMultiScale(grayed_img, 1.1, 5)\n return grayed_img, face_coordinates",
"def read(self):\n ret, frame = self.capture.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = self.face_detector.detectMultiScale(gray, 1.1, 5)\n return frame, faces",
"def recognize(self, frame) -> retval:\n ...",
"def test_detect_one_with_image_of_several_faces(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_SEVERAL_FACE)\n self.assertFaceDetection(detection, VLIMAGE_SEVERAL_FACE)",
"def test_detect_one_by_area_with_face(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE, detectArea=GOOD_AREA)\n self.assertFaceDetection(detection, VLIMAGE_ONE_FACE)",
"def run(self):\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if ret:\n boxes, face_probs = self.mtcnn.detect(frame)\n if boxes is not None and len(boxes) > 0:\n name_probs = []\n for box in boxes:\n y1, y2, x1, x2 = int(box[1]), int(box[3]), int(box[0]), int(box[2])\n face = frame[y1:y2, x1:x2]\n if face.size > 0:\n pred, probs = self.classify_face(face)\n name_probs.append(probs)\n\n self.draw(frame, boxes, face_probs, name_probs)\n else:\n cv2.putText(frame, \"Couldn't Find Any Faces\", (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n (0, 0, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"Face Detection\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()",
"def isConnectedToFace(*args, **kwargs):\n \n pass",
"def is_single_face_valid(img) -> int:\n # TODO stub\n return 0",
"def classify_face(im):\r\n #get_encoded_faces()\r\n faces = shelve.open('trainingData.yml')\r\n #faces = faces1.read()\r\n #print(faces)\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n \r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n #print(\"face_names\",face_names)\r\n #print(\"faces_encoded\",faces_encoded)\r\n #print(\"known_fac_names:\",known_face_names)\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_COMPLEX_SMALL\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n while True:\r\n\r\n cv2.imshow('Video', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names",
"def detect_ball(self, frame):\n # Save frame dimensions\n if self.video_width is None:\n self.video_width = frame.shape[1]\n self.video_height = frame.shape[0]\n self.last_frame = self.before_last_frame\n self.before_last_frame = self.current_frame\n self.current_frame = frame.copy()\n\n # detect only in 3 frames were given\n if self.last_frame is not None:\n # combine the frames into 1 input tensor\n frames = combine_three_frames(self.current_frame, self.before_last_frame, self.last_frame,\n self.model_input_width, self.model_input_height)\n frames = (torch.from_numpy(frames) / 255).to(self.device)\n # Inference (forward pass)\n x, y = self.detector.inference(frames)\n if x is not None:\n # Rescale the indices to fit frame dimensions\n x = x * (self.video_width / self.model_input_width)\n y = y * (self.video_height / self.model_input_height)\n\n # Check distance from previous location and remove outliers\n if self.xy_coordinates[-1][0] is not None:\n if np.linalg.norm(np.array([x,y]) - self.xy_coordinates[-1]) > self.threshold_dist:\n x, y = None, None\n self.xy_coordinates = np.append(self.xy_coordinates, np.array([[x, y]]), axis=0)",
"def _detect_facial_points(self, frame):\n # Rectangles of multiple faces\n rect = self._detector(frame, IMAGE_UPSAMPLE_FACTOR)\n\n # Only use the first face image\n if len(rect) > 0:\n facial_points = self._predictor(frame, rect[0])\n facial_points = face_utils.shape_to_np(facial_points)\n return facial_points\n else:\n return None",
"def crop_face(img):\r\n try:\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n face_cascade = cv2.CascadeClassifier('xml/haarcascade_frontalface_alt2.xml') \r\n faces = face_cascade.detectMultiScale(gray, 1.05, 5)\r\n face = np.array(0)\r\n # if face found\r\n if len(faces) > 0:\r\n (x, y, w, h) = faces[0]\r\n \r\n # extend the size of the face detected\r\n ext = int(abs(h-y) * 0.5)\r\n \r\n # test if extension fits on image, if not ext maximum amount\r\n if (y+h+ext) > img.shape[0]:\r\n ext = img.shape[0] - h\r\n face = img[y:y + h + ext, x:x + w]\r\n \r\n # if problem with extracting face, print error and raise FaceNotFound\r\n except Exception as e:\r\n print(\"Error1: \", e)\r\n raise FaceNotFound\r\n \r\n return face",
"def detect_faces_file(file):\n\n return detect_faces(cv2.imread(file))",
"def recognize(self, image, boxes):\r\n raise NotImplementedError",
"def faceRecog():\n recognise.main()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
pickle an object, and zip the pickle before sending it
|
def send_zipped_pickle(socket, obj, flags=0, protocol=2):
p = pickle.dumps(obj, protocol)
# z = zlib.compress(p, 8)
return socket.send(p, flags=flags)
|
[
"def send_zipped_pickle(socket, obj, flags=0, protocol=2):\n p = pickle.dumps(obj, protocol)\n # z = zlib.compress(p, 8)\n return socket.send(p, flags=flags)",
"def save_object(object, filename, protocol = cPickle.HIGHEST_PROTOCOL):\r\n gc.disable()\r\n if filename.endswith('.zip'):\r\n f = gzip.GzipFile(filename, 'wb')\r\n else:\r\n f = open(filename, 'wb')\r\n# cPickle.dump(object, f, protocol)\r\n dill.dump(object, f)\r\n f.close()\r\n gc.enable()",
"def _save_single(object_) -> bytes:\n return lz4.frame.compress(pickle.dumps(object_, protocol=4), store_size=False, compression_level=9)",
"def write(object_data):\n output = pickle.dumps(object_data)\n return output",
"def pickle_this(object_to_pickle, name):\n\tprint \"Pickling \", str(name), '...',\n\toutput = open('pickles/'+str(name)+'.pickle', 'wb')\n\tpickle.dump(object_to_pickle, output)\n\tprint \"done\"",
"def serialize(obj, file):\n\tpickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)",
"def pickle_obj(obj: Any, filename: str) -> None:\n with open(filename, 'wb') as f:\n pickle.dump(obj, f)",
"def pack(self, obj, stream):\n raise NotImplementedError()",
"def save_pickled(self, obj, filename):\n path = os.path.join(pickle_dir, filename)\n with open(path, 'wb+') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def _encode(o):\n return pickle.dumps(o, pickle.HIGHEST_PROTOCOL)",
"def _dumps(obj):\n return zlib.compress(dill.dumps(obj))",
"def pickle_dumps(obj):\n return base64.b64encode(pickle.dumps(obj)).decode(\"ascii\")",
"def tryPickleOnAllContents3(obj):\n with tempfile.TemporaryFile() as output:\n try:\n MyPickler(output).dump(obj)\n except (pickle.PicklingError, TypeError):\n pass",
"def deliver(conn, localobj):\n # bytes-cast needed for IronPython-to-CPython communication, see #251:\n return conn.modules[\"rpyc.lib.compat\"].pickle.loads(\n bytes(pickle.dumps(localobj)))",
"def send_obj_zip(self, file: Union[str, BinaryIO], filename=None):\n self.__send_command(CommandsBytes.SEND_OBJ_ZIP)\n if isinstance(file, str):\n filename = os.path.basename(file)\n with open(file, \"rb\") as file:\n self.__send_file(file, filename)\n else:\n if filename is None:\n raise ValueError(\"filename cannot be None if an open file is provided\")\n self.__send_file(file, filename)\n # After sending the object, we receive the path where the file was stored\n save_path = self.__receive_string()\n return save_path",
"def store_pickle(obj, full_path):\n with open(full_path, 'wb') as f:\n pickle.dump(obj, f)",
"def serialize_to_file(obj, filename):\n Pickle.dump(obj, open(filename, 'wb'), protocol=Pickle.HIGHEST_PROTOCOL)",
"def serialize(param_args, param_kwargs, lockable_objects):\n s_param_args = pickle.dumps(param_args)\n s_param_kwargs = pickle.dumps(param_kwargs)\n s_lockable_objects = pickle.dumps(lockable_objects)\n return s_param_args, s_param_kwargs, s_lockable_objects",
"def exporter(obj, filename):\n start_time = time.time()\n\n # If the file directory does not exist create it.\n file_directory = os.path.dirname(os.path.abspath(filename))\n if not os.path.isdir(file_directory):\n logging.debug(\"Creating pickle export directory \\\"%s\\\".\" % file_directory)\n os.makedirs(file_directory)\n\n logging.info(\"Beginning pickle EXPORT of file: \\\"\" + filename + \"\\\"\")\n # Dump pickle to the file.\n f = open(filename, 'w')\n pickle.dump(obj, f)\n f.close()\n\n logging.info(\"Completed pickle EXPORT to file: \\\"\" + filename + \"\\\"\")\n print_elapsed_time(start_time, \"pickle EXPORT of file: \\\"\" + filename + \"\\\"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the elapsed time display. Invoked once per second
|
def refresh_time(self):
if (self.enabled):
self.elapsed_time += ONE_SECOND
#self.window.set_title("stopwatch %s" % self.elapsed_time)
self.time_counter.set_text(str(self.elapsed_time))
return True
|
[
"def update():\n seconds = 0 if self.start_time == 0 else round(time.time() - self.start_time)\n hours = seconds // 3600\n seconds = seconds % 3600\n minutes = seconds // 60\n seconds = seconds % 60\n cur_time = \"\"\n if hours < 10:\n cur_time += \"0\" + str(hours) + \":\"\n else:\n cur_time += str(hours) + \":\"\n if minutes < 10:\n cur_time += \"0\" + str(minutes) + \":\"\n else:\n cur_time += str(minutes) + \":\"\n if seconds < 10:\n cur_time += \"0\" + str(seconds)\n else:\n cur_time += str(seconds)\n\n self.formatted_time.set(cur_time)\n self.last_after = self.root.after(200, update)",
"def update(self, dt):\n\t\tif self.running:\n\t\t\tself.time += 1\n\t\t\tm, s = divmod(self.time, 60)\n\t\t\tself.label.text = '%02d:%02d' % (m, s)",
"def draw_timing(self):\n\n now = self.current_time()\n\n elapsed_time = now - self.timing_start\n\n seconds = elapsed_time.total_seconds()\n hours = seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60 \n \n time_str = \"%02i:%02i\" % (minutes, seconds)\n\n self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)",
"def update(self, elapsed):\n\n self.elapsed += elapsed\n if self.elapsed > self.update_rate:\n self.text = self.font.render(\"{:.0f}\".format(globals.current_app.clock.get_fps()), True, (255, 255, 255))\n self.text_pos = self.text.get_rect()\n self.text_pos.centery = self.rect.centery\n self.text_pos.left = self.rect.left\n self.elapsed = 0",
"def update_timer(self, time):\n self.timer += time",
"def update_clock(self, _):\n self.clock = utils.get_time_human_readable()",
"def show_elapsed_time(start, end):\n PRINT('Elapsed: %s' % (end - start))",
"def elapsed(start_time):\n current_time = time()\n elapsed_time = current_time - start_time\n print(\"\\n\\n\" + \"~\"*50)\n print(\" End : \" + ctime(current_time))\n print(\" Run Time : \" + strftime(\"%H:%M:%S\", gmtime(elapsed_time)))\n print(\"~\"*50)",
"def draw_time(self):\n\n self.draw_rect(0, 0, 127, 31, self.time_color)\n self.draw_rect(1, 1, 126, 30, self.time_color)\n now = self.current_time()\n time_str = now.strftime(\"%H:%M:%S\")\n self.graphics.DrawText(self.canvas, self.extra_large_font, 6, 28, self.time_color, time_str)",
"def updateLCD(self):\n if self.tick != 0:\n self.tick -= 1\n\n hour = self.tick / 3600\n minute = (self.tick % 3600) / 60\n second = (self.tick % 3600) % 60\n\n self.lcd.display(\"%02d:%02d:%02d\" % (hour, minute, second))\n else:\n self.timer.stop()\n self.btnSet.setEnabled(True)\n self.btnStart.setEnabled(False)\n self.btnReset.setEnabled(True)\n self.btnStart.setText(\"Start\")\n QMessageBox.warning(self,\"Aviso\",\"### ALARM ###\")",
"def displayTTWT(self):\n if self.numProcesses == self.finishedProcesses:\n self.TT = round(self.TT/self.numProcesses,2)\n t = \"Avg. TA Time: \" + str(self.TT) + \" ms\"\n else:\n t = \"TTA Time: \" + str(self.TT) + \" ms\"\n t = pygame.font.SysFont(None, 30).render(t, True, (0,0,255))\n width, height = self.window.get_size()\n x = width - t.get_width() - 10\n y = height - t.get_height() - 40\n self.window.blit(t, (x, y))\n\n if self.algorithm == \"rr\":\n t = \"Time Quantum: \" + str(self.TIME_QUANTUM) + \" ms\"\n t = pygame.font.SysFont(None, 30).render(t, True, (0,0,255))\n width, height = self.window.get_size()\n x = width - t.get_width() - 10\n y = height - t.get_height() - 80\n self.window.blit(t, (x, y))\n \n\n\n if (self.mode == \"step\"):\n t = \"Mode: \" + self.mode\n t = self.font.render(t, True, self.txtColor)\n y -= (t.get_height() + 10)\n self.window.blit(t, (x, y))",
"def timer():\n start = time.time()\n # Send control back to the context block\n yield\n\n end = time.time()\n print('Elapsed: {:.2f}s'.format(end - start))",
"def tick(self):\n self.process_pygame_events()\n self.elapsed = self.clock.tick(self.FPS)",
"def reset_time(self, widget, data=None):\n\t\tself.elapsed_time = datetime.timedelta()\n\t\tself.time_counter.set_text(str(self.elapsed_time))\n\t\treturn",
"def toc():\r\n elapsed = time.time() - startTime_for_tictoc\r\n print(\"Elapsed time: %4.1f s\" % elapsed)",
"def time_handler():\n global time\n time += 1\n format()",
"def __float__(self):\n return self.elapsed",
"def update(self):\n if self._frame_time - self._update_time > self._interval:\n for counter in self._counters.keys():\n self._fps[counter] = (self._counters[counter] /\n (self._frame_time - self._update_time))\n self._counters[counter] = 0\n self._update_time = self._frame_time",
"def calc_elapsed(self):\n now = time()\n elapsed_time = now - self.train_stats[\"timestamp\"]\n try:\n hrs = int(elapsed_time // 3600)\n if hrs < 10:\n hrs = \"{0:02d}\".format(hrs)\n mins = \"{0:02d}\".format((int(elapsed_time % 3600) // 60))\n secs = \"{0:02d}\".format((int(elapsed_time % 3600) % 60))\n except ZeroDivisionError:\n hrs = \"00\"\n mins = \"00\"\n secs = \"00\"\n return \"{}:{}:{}\".format(hrs, mins, secs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reset the elapsed time back to zero. Invoked when the reset button is pressed.
|
def reset_time(self, widget, data=None):
self.elapsed_time = datetime.timedelta()
self.time_counter.set_text(str(self.elapsed_time))
return
|
[
"def OnResetTime(self, event):\n self.modelview.timezero(0)",
"def reset_timing(self):\n\n self.timing_start = self.current_time()",
"def reset_time(self):\n\n self._alive_time = 0 # No need to signal the change, since the view is updated by the value toggle",
"def reset(self) -> None:\n self.time_counted = 0\n self.last_start_time = 0\n self.is_running = False",
"def reset(self):\n self.timer -= self.period",
"def reset(self):\n self.formatted_time.set(\"00:00:00\")",
"def reset_timer(self):\n self.timer = datetime.datetime.now()",
"def reset_timer(self):\r\n self.timer.stop()\r\n BF4TimerGUI.counter = 90\r\n self.timer_label.setStyleSheet(self.timer_label_default_style)",
"def reset_duration(self):\n self.__duration = 0",
"def reset(self):\n self.last_time = time.time()\n self.last_time_fps = time.time()\n self.fps_counter = 0\n self.fps = 0",
"def reset_time(sys_tray_icon):\n ChangeTimeDialog().set_time(0)",
"def replay_reset(self):\r\n self.last_millisec = pygame.time.get_ticks()",
"def reset_game():\n \n # Declare global variables\n global time\n global count\n global total_click\n global message\n global score\n global start_flag\n \n # Resets global variable time, count, total click, \n # and the start_flag\n time = 0\n count = 0\n total_click = 0\n start_flag = 0\n \n # Resets the timer display and score and \n # stops time\n message = '0:00.0'\n score = 'Score: 0 / 0'\n timer.stop()",
"def reset(self):\n\n self.timestep = 0\n self.historyLayer.reset()",
"def reset_remaining(self):\n self.time_remaining = self.lifetime",
"def Reset(self):\n self.progBarRun.setValue(0)\n self.progBarSweep.setValue(0)\n self.startExperimentButton.setText('Start Experiment')",
"def reset(self) -> None:\n self.progress_text.set(\"\")\n self.progress_pct_value.set(0.0)\n self.progress_pct_text.set(\"\")\n self.frame.update()",
"def reset_timer():\r\n global counter, attempts, success\r\n\r\n counter = 0\r\n attempts = 0\r\n success = 0\r\n timer.stop()",
"def reset_duration(self):\n self.remaining_duration = self.duration"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Power (watts) = slope x speed (km/h) + intercept Level 1 2 3 4 5 6 7 8 9 10 Slope 3.73 5.33 6.87 8.27 10.07 11.4 13.13 14.4 15.93 17.73 Intcpt 28.67 36.67 43.33 47.33 66.33 67.00 83.67 82.00 89.67 114.67
|
def calcPower(speed, resistance_level):
satoridata = [
{
'level': 1,
'slope': 3.73,
'intercept': -28.67
},
{
'level': 2,
'slope': 5.33,
'intercept': -36.67
},
{
'level': 3,
'slope': 6.87,
'intercept': -43.33
},
{
'level': 4,
'slope': 8.27,
'intercept': -47.33
},
{
'level': 5,
'slope': 10.07,
'intercept': -66.33
},
{
'level': 6,
'slope': 11.4,
'intercept': -67.00
},
{
'level': 7,
'slope': 13.13,
'intercept': -83.67
},
{
'level': 8,
'slope': 14.4,
'intercept': -82.00
},
{
'level': 9,
'slope': 15.93,
'intercept': -89.67
},
{
'level': 10,
'slope': 17.73,
'intercept': -114.67
}
]
power = satoridata[resistance_level-1]['slope'] * speed + satoridata[resistance_level-1]['intercept']
print(resistance_level, power)
return max((0, round(power)))
|
[
"def measurePower(self,low):\n if math.fabs(low[0]) > 2.0:\n return 100.0\n self._awg.setOffset(self._awgChannel,low[0])\n minimum = self.measureAveragePower()\n print \"Measuring power at %g : %g\" % (low[0],minimum)\n self.d.set(minimum=minimum, offset=low[0])\n self.d.commit()\n linpower = math.pow(10.0,minimum/10.0)/10.0\n return minimum",
"def get_power(self, wsi, level: int) -> float:\n raise ValueError(\n \"Currently, TiffFile does not provide a general API to obtain objective power.\"\n \"Please use `level` (or `mpp`) instead, or try other backends.\"\n )",
"def _get_slope(self):\n return self._slope",
"def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))",
"def measure_power(self, wavelength):\n self.wavelength = wavelength\n return self.power",
"def power_output(windspeeds):\n # Set the cut off wind speeds\n minWS, maxWS = 3, 24.5\n\n # If wind speed is inside the cut off levels\n if windspeeds > minWS and windspeeds < maxWS:\n ws = np.array([windspeeds])\n return round(model.predict([ws/Speed_F])[0][0]*Power_F, 3)\n else:\n #print(\"Error\")\n return 0",
"def slope(p1,p2):\n return (p2[1] - p1[1])/(p2[0] - p1[0])",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Power: {__value}W')\n return __value",
"def _power_fit(ln, lb0, gamm1):\n return lb0 + gamm1 * (ln - 13.6)",
"def linear_slope_fit(w_in, mean, stdev, slope, intercept):\n mean [0] = np.nan\n stdev [0] = np.nan\n slope [0] = np.nan\n intercept[0] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n sum_x = sum_x2 = sum_xy = sum_y = mean[0] = stdev[0] = 0\n isum = len(w_in)\n\n for i in range(0, len(w_in), 1):\n # the mean and standard deviation\n temp = w_in[i] - mean\n mean += temp / (i + 1)\n stdev += temp * (w_in[i] - mean)\n\n # linear regression\n sum_x += i\n sum_x2 += i * i\n sum_xy += (w_in[i] * i)\n sum_y += w_in[i]\n\n stdev /= (isum - 1)\n np.sqrt(stdev, stdev)\n\n slope [0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0]) / isum",
"def parameters(self):\n return (self.slope,)",
"def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))",
"def get_power(self):\n return self.power_total",
"def add_powder(self):",
"def compute_fit_slope(y, x):\n _, m = P.polyfit(x, y, 1)\n return -m",
"def layerSlopeGradient(x,z,win):\n\n # Calculate the Slope (dz/dx) of each line\n slope = np.gradient(z,x,axis=1)\n # create empty arrays for filling\n Dslope = np.array([])\n Derr = np.array([])\n # Calculate the change in slope with depth\n for tr in range(len(z[0])+1-win):\n # grab the data within the window\n Y = slope[:,tr:tr+win]\n X = z[:,tr:tr+win]\n # remove nan values\n idx = ~np.isnan(Y) & ~np.isnan(X)\n Y = Y[idx]\n X = X[idx]\n if len(Y)<5:\n Dslope = np.append(Dslope,np.nan)\n Derr = np.append(Derr,np.nan)\n else:\n # linear fit with depth\n p = np.polyfit(X,Y,1,cov=True)\n Dslope = np.append(Dslope,abs(p[0][0])*1000.) # *1000. for m-1 to km-1\n Derr = np.append(Derr,np.sqrt(p[1][0,0])*1000.) # *1000. for m-1 to km-1\n return Dslope,Derr",
"def slope_graph(self):\n chart = alt.Chart(self.data).mark_line(point=True).encode(\n x=\"Tag\",\n y=\"Weighting\",\n color=\"Character:N\"\n ).properties(\n title=\"Characters Compared\",\n width=350,\n height=350\n ).interactive()\n return chart",
"def get_power(self, wsi, level: int) -> float:\n objective_power = wsi.properties.get(\"openslide.objective-power\")\n if objective_power:\n downsample_ratio = self.get_downsample_ratio(wsi, level)\n return float(objective_power) / downsample_ratio\n\n raise ValueError(\"Objective `power` cannot be obtained for this file. Please use `level` (or `mpp`) instead.\")",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Power: {__value}W')\n return __value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate the whole binary pattern for lengh k
|
def generate(k):
for i in range(int(math.pow(2, k))):
pat = []
count = i
for j in range(k):
pat.append(count%2)
count = count >> 1
wholePattern.append(list(reversed(pat)))
|
[
"def InitializeBinary(k):\n b = np.zeros([k, ], dtype=np.int8)\n\n \"\"\" Primal test, half of b set to 1, and another half is 0 \"\"\"\n choice = np.random.choice(k, k // 2)\n b[choice] = 1\n return b",
"def gen_all_n_length_bitsrings(n):\n for i in range(1 << n):\n yield '{:0{}b}'.format(i, n)",
"def gen_bin(length:int, prefix=\"\"):\n if length == 0:\n print(prefix)\n return\n\n gen_bin(length - 1, prefix + \"0\")\n gen_bin(length - 1, prefix + \"1\")",
"def generate_n_bit_strings(start, n, graph_size, bit_string_length=10):\r\n bit_strings = [\r\n f'{i:0{bit_string_length}b}' for i in range(start, start+n) if i <= 2**graph_size\r\n ]\r\n data = {\r\n 'bit_strings': bit_strings,\r\n 'count': len(bit_strings),\r\n 'graph_size': graph_size,\r\n 'bit_string_length': bit_string_length\r\n }\r\n return data",
"def gen(k):\n n = k * (k - 1) + 1\n\n cards = []\n # First do the first set\n for i in xrange(k):\n start = i*(k-1) + 1\n end = (i+1)*(k-1) + 1\n guys = (0,) + tuple(range(start, end))\n cards.append(guys)\n\n for block in xrange(1, k):\n for row in xrange(1, k):\n guys = (block,)\n for colblock in xrange(1, k):\n pad = (colblock-1)*(block-1)\n loc = (pad + (row-1)) % (k-1)\n offset = (colblock * (k-1)) + 1\n guys += ( loc + offset , )\n cards.append(guys)\n\n return cards",
"def is_k_bit(num, k):\n\n for i in range(1, 2 ** num):\n for j in range(num):\n if num & (1 << j):\n print(\"TRUE\")\n print(\"FALSE\")",
"def convert_number_to_pattern(num, k):\n numalphadict = {0:'A', 1:'C', 2:'G', 3:'T'}\n\n #quotient = len(str(num))\n quotient = num / 4\n remainderlist = []\n\n while (num > 0):\n #quotient = num / 4\n remainder = num % 4\n #print(quotient, remainder)\n remainderlist.append(remainder)\n quotient = num / 4\n num = int(quotient)\n\n #print(len(remainderlist), k)\n\n #while (len(remainderlist) <= k):\n while (len(remainderlist) < k):\n #print(\"Appending A's\")\n remainderlist.append(0)\n\n remainlist = [ str(numalphadict[item]) for item in remainderlist ]\n remainlist.reverse()\n pattern = ''.join(remainlist)\n\n return(pattern)",
"def generate_binary_key(length):\n key = [str(random.randint(0,1)) for x in range(length)]\n return \"\".join(key)",
"def binary_strings(max_length=10):\n yield ''\n for size in range(1, 1+max_length):\n for i in range(2**size):\n yield '{:b}'.format(i).rjust(size, '0')",
"def keysFormation(key_size=10):\n all_keys = numpy.array([''.join(seq) for seq in itertools.product(\"01\", repeat=key_size)])\n\n key_genrator = itertools.product(all_keys)\n\n return key_genrator",
"def giant_bit(n=3, k=2):\n alpha = list(map(str, range(k)))\n\n outcomes = [a * n for a in alpha]\n pmf = [1 / k] * k\n\n return Distribution(outcomes, pmf)",
"def convert_base(n, k):\n while n != 0:\n yield n % k\n n /= k",
"def generate_k_star_system(n, k):\n dag = np.zeros((n,n))\n r = int(math.ceil(n/k))\n host = 0 \n nodes = np.arange(n)\n np.random.shuffle(nodes)\n\n for i in range(n):\n if i%r == 0:\n host = i\n else:\n #start undirected\n dag[nodes[host], nodes[i]] = 1\n\n return dag",
"def hex_ring(h, k=1):\n mv = _cy.ring(_in_scalar(h), k)\n\n return _out_unordered(mv)",
"def rand_bin_array(K, N):\n arr = np.zeros(N)\n arr[:K] = 1\n np.random.shuffle(arr)\n return arr",
"def genK():\n return [frac_bin(p ** (1/3.0)) for p in first_n_primes(64)]",
"def generate_k_subsets(n,k):\n m, h = 0, k\n a = range(k)\n print a\n yield a\n while True:\n if m < n-h: h = 1\n else: h += 1\n m = a[k-h] + 1\n for j in range(h): a[k+j-h] = m + j\n yield a\n if a[0] == n - k: break",
"def freq_kmers(seq, k, d):\n patterns = []\n def def_value(): return 0\n freqMap = defaultdict(def_value)\n n = len(seq)\n for i in range(n-k+1):\n pattern = seq[i:i+k]\n neighborhood = neighbors(pattern, d)\n for n in neighborhood:\n freqMap[n] += 1\n m = max(freqMap.values())\n for p in freqMap:\n if freqMap[p] == m:\n patterns.append(p)\n patterns.sort()\n return \" \".join(patterns)",
"def k_12():\n return np.zeros((1, elements_layout[0] * elements_layout[1]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return package Git URL.
|
def git_url(cls) -> str:
return cls.url() + ".git"
|
[
"def pip_url(self, repo, package, version):\n return 'git+ssh://git@{0}/{1}/{2}.git@{4}#egg={3}-{4}'.format(\n self.host,\n self.vendor,\n repo,\n package,\n version\n )",
"def package_repo_url(self, package_name):\n s = self._repos[package_name]\n if isinstance(s, basestring):\n return s\n else:\n # For packages that have sub-documents, rather than the value\n # as the URL. See repos.yaml for format documentation.\n return s['url']",
"def package_url(self) -> Optional[str]:\n return pulumi.get(self, \"package_url\")",
"def url_repo_http(self):\n return self._url_http_format.format(**self._git)",
"def get_github_repo_url():\n return 'git://github.com/%s/%s.git' % (MOZILLA_GITHUB_ACCOUNT, DEEPSPEECH_GITHUB_PROJ)",
"def clone_url(self, repo):\n return f'git@{self.host}:{self.vendor}/{repo}.git'",
"def get_package_url(self, name: str, version: str):\n package = self.get_package(name, version)\n\n return posixpath.join(self.url, package.filename)",
"def package_url(self):\n return str(\n packageurl.PackageURL(\n type=self.type,\n namespace=self.namespace,\n name=self.name,\n version=self.version,\n subpath=self.subpath,\n qualifiers=self.qualifiers,\n )\n )",
"def _get_git_remote_url(git_repo):\n # if not matching something/something\n # such as a local directory \".\", then\n # simply return this unmodified.\n if not re.match(r\"^[^/]+/[^/]+$\", git_repo):\n return git_repo\n\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if github_token:\n return f\"https://{github_token}@github.com/{git_repo}\"\n return f\"git@github.com:{git_repo}\"",
"def git_clone_url(self) -> str:\n if self.git_clone_url_secret_name:\n return Secret(self.git_clone_url_secret_name).get() # type: ignore\n\n if self.use_ssh:\n return f\"git@{self.repo_host}:{self.repo}.git\"\n return f\"https://{self.git_token_secret}@{self.repo_host}/{self.repo}.git\"",
"def get_canonical_link(self):\n return 'https://{}/plugins/{}/'.format(self.hostname, self.repo)",
"def feedstock_url(fctx: FeedstockContext, protocol: str = \"ssh\") -> str:\n feedstock = fctx.feedstock_name + \"-feedstock\"\n if feedstock.startswith(\"http://github.com/\"):\n return feedstock\n elif feedstock.startswith(\"https://github.com/\"):\n return feedstock\n elif feedstock.startswith(\"git@github.com:\"):\n return feedstock\n protocol = protocol.lower()\n if protocol == \"http\":\n url = \"http://github.com/conda-forge/\" + feedstock + \".git\"\n elif protocol == \"https\":\n url = \"https://github.com/conda-forge/\" + feedstock + \".git\"\n elif protocol == \"ssh\":\n url = \"git@github.com:conda-forge/\" + feedstock + \".git\"\n else:\n msg = \"Unrecognized github protocol {0!r}, must be ssh, http, or https.\"\n raise ValueError(msg.format(protocol))\n return url",
"def _ro(repository):\n return 'git://%s.git' % repository.split('://', 1)[1]",
"def git_remote_url(self) -> str:\n try:\n p = subprocess.run(\n \"git config --get remote.origin.url\".split(),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n encoding=\"utf-8\",\n check=True,\n cwd=self.local_dir,\n )\n url = p.stdout.strip()\n # Strip basic auth info.\n return re.sub(r\"https://.*@\", \"https://\", url)\n except subprocess.CalledProcessError as exc:\n raise EnvironmentError(exc.stderr)",
"def url(self):\n return f\"https://www.python.org/ftp/python/{self.version}/Python-{self.version}.tar.xz\"",
"def get_repo_url():\n default_repo = 's3://gluonnlp-numpy-data'\n repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo)\n if repo_url[-1] != '/':\n repo_url = repo_url + '/'\n return repo_url",
"def _fetch_url_from_pypi(package: str) -> Optional[str]:\n\n # Fetch from PyPI REST API\n header = {\"content-type\": \"application/json\"}\n url = f\"https://pypi.org/pypi/{package}/json\"\n response = requests.get(url, headers=header)\n metadata = response.json()[\"info\"]\n\n if response.status_code != 200:\n # May not available at PyPi\n return None\n\n if metadata.get(\"project_url\"):\n # If there is a specific repo URL\n return metadata[\"project_url\"]\n else:\n # If not, send the PyPI link\n return metadata[\"package_url\"]",
"def release_url(self) -> str | None:\n version = AwesomeVersion(self.latest_version)\n if version.dev:\n return \"https://github.com/home-assistant/core/commits/dev\"\n return f\"https://{'rc' if version.beta else 'www'}.home-assistant.io/latest-release-notes/\"",
"def remote_url(*, host: str, dirname: str):\n return f'git@{host}:{config.org}/{dirname}.git'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return MeCab version the package depends on.
|
def depends_mecab_version(cls) -> str:
return "0.996"
|
[
"def get_version():\n import pkg_resources # part of setuptools\n return pkg_resources.require(\"mbed-ls\")[0].version",
"def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version",
"def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.split(\" = \", 1)\n version = version.replace(\"'\", \"\").strip()\n print(version)\n return version",
"def getMelangeVersion():\n return getAppVersion().split('.', 1)[0]",
"def get_latest_installer_version() -> str:\n FABRIC_INSTALLER_MAVEN_URL = \"https://maven.fabricmc.net/net/fabricmc/fabric-installer/maven-metadata.xml\"\n r = requests.get(FABRIC_INSTALLER_MAVEN_URL, headers={\"user-agent\": get_user_agent()})\n xml_data = minidom.parseString(r.text)\n release = xml_data.getElementsByTagName(\"release\")\n return release.item(0).lastChild.data",
"def get_version(self) -> GoProResp:",
"def get_version():\n version_file = repository_root / f\"{package_root}/{package_name}/__init__.py\"\n initfile_lines = version_file.open(\"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n return \"unknown\"",
"def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version",
"def get_version() -> str:\n return VERSION",
"def get_current_pkg_version():\n current_major_minor = _find_in_file(os.path.join(here, PKG_NAME, '__init__.py'))\n last_jenkins_build_num = get_next_jenkins_build()\n\n full_version = f'{current_major_minor}.{last_jenkins_build_num}'\n\n return full_version",
"def get_version() -> str:\n config = configparser.ConfigParser()\n path = Path(__file__).parent.parent / \"setup.cfg\"\n config.read(path)\n return str(config[\"metadata\"][\"version\"])",
"def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver",
"def extract_version():\n # Regular expression for the version\n _version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n with open(\"pdftools/__init__.py\", \"r\") as f:\n content = f.read()\n\n version_match = _version_re.search(content)\n version = str(ast.literal_eval(version_match.group(1)))\n return version",
"def get_product_build():\n return read_file_value(\"VersionFile.json\", \"version_build\")",
"def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]",
"def get_version_string():\n version = ffi.string(C.blosc_get_version_string())\n if not isinstance(version, str):\n version = version.decode()\n return version",
"def version():\n vfile='/Users/mikemon/mesa/data/version_number'\n f = open(vfile, 'r')\n line=f.readline()\n aa=line.split()\n vers=aa[0]\n print '\\nComputed with MESA version',vers,'\\n'\n return vers",
"def getPackageVersion(package_info):\n\n # Parse for version_number\n package_version = re.search(version_pattern, package_info).group(0) # extract version_number\n\n return package_version",
"def myst_version():\n return 0.13"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return list of absolute paths to documentation files.
|
def misc_docs(cls) -> List[str]:
path_to_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mecab-ipadic-neologd')
return [
os.path.join(path_to_root, 'README.md'),
os.path.join(path_to_root, 'README.ja.md'),
os.path.join(path_to_root, cls.changelog_file()),
os.path.join(path_to_root, 'COPYING'),
]
|
[
"def get_documentation_files ():\n installpath = os.path.join (\"share\", \"doc\", \"ocempgui\")\n docpaths = get_directory_list (\"doc\")\n\n # Traverse all the directories in the docpath an get the needed files.\n # Every file installed from the docs will have a suffix.\n filedict = {}\n for path in docpaths:\n files = glob.glob (os.path.join (path, \"*.*\"))\n if files:\n filedict[path] = files\n return get_installation_files (\"doc\", installpath, filedict)",
"def all_wiki_docs(wikipath):\n out = []\n for root, dirs, fileshere in os.walk(wikipath):\n path_elements = root.split(os.sep)\n if \".svn\" in path_elements:\n continue\n\n for fn in fileshere:\n if fn.endswith(\".wiki\"):\n whole_pathname = os.path.join(root, fn)\n out.append(whole_pathname)\n return out",
"def load_docs(docs_dirname):\n docs = []\n ...\n return docs",
"def get_reference_files( self ):\n files_pattern = \"<ReferenceFiles>.*</ReferenceFiles>\"\n path_pattern = \"<Path>(.*)</Path>\"\n results = re.search( files_pattern, self.__file_contents, re.S )\n results = re.findall( path_pattern, results.group() )\n reffiles = []\n # Ensure all paths are absolute\n for reffile in results:\n # If the path is not absolute make it absolute\n reffiles.append( self.abs_path( reffile ) )\n return reffiles",
"def document_paths(book):\n \n sections = book.get('sections')\n if sections is None: return []\n docs_set = set()\n docs_list = list()\n for section in sections:\n href = section.get('href')\n if href:\n path = href.split('#', 1)[0]\n if path not in docs_set:\n docs_set.add(path)\n docs_list.append(path)\n subsections = section.get('sections')\n if subsections:\n subsection_docs = document_paths(section)\n for path in subsection_docs:\n if path not in docs_set:\n docs_set.add(path)\n docs_list.append(path)\n return docs_list",
"def document_files(self):\n return self.files(FileList.DOCUMENT)",
"def urls(self) -> List[str]:\n return [file_.path for file_ in self.files.all()]",
"def py_files(self):\n base = dirname(self.fnam) # Paths are relative to config.\n return [abspath(join(base, c.py_path)) for c in self.conversions]",
"def get_list_of_docs_files(folder_path: str) -> List[str]:\n try:\n list_of_files = os.listdir(path=folder_path)\n except Exception as e:\n print(e)\n return\n doc_list: List[str] = []\n for file in list_of_files:\n if file.endswith('.txt'):\n doc_list.append(file)\n return doc_list",
"def find_docusaurus_refs(dir: str) -> List[str]:\n linked_files: Set[str] = set()\n pattern: str = (\n r\"\\`\\`\\`[a-zA-Z]+ file\" # Format of internal links used by Docusaurus\n )\n\n for doc in glob.glob(f\"{dir}/**/*.md\", recursive=True):\n for line in open(doc):\n if re.search(pattern, line):\n file: str = _parse_file_from_docusaurus_link(line)\n path: str = os.path.join(os.path.dirname(doc), file)\n linked_files.add(path)\n\n return [file for file in linked_files]",
"def reference_files(self) -> List[str]:\n try:\n return self.attr_getter(\"_reference_files\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the input collection of reference design files yet\")",
"def define_files():\n paths = []\n files = os.listdir(PATH_TO_INPUT_DIR)\n for file in files:\n path = os.path.join(PATH_TO_INPUT_DIR, file)\n paths.append(path)\n return paths",
"def get_link_docs(self):\n\n\t\treturn self.__link_docs",
"def get_markdown_files():\n # TODO: Make it so you can specify the starting location.\n filenames = glob(\"**/*.md\", recursive=True)\n return filenames",
"def files(self):\n return [File(self, p) for p in fileList(self.paths['build'], relative=True)]",
"def find_data_files():\n\n if \"freebsd\" in sys.platform:\n manpagebase = pjoin('man', 'man1')\n else:\n manpagebase = pjoin('share', 'man', 'man1')\n\n # Simple file lists can be made by hand\n manpages = [f for f in glob(pjoin('docs','man','*.1.gz')) if isfile(f)]\n if not manpages:\n # When running from a source tree, the manpages aren't gzipped\n manpages = [f for f in glob(pjoin('docs','man','*.1')) if isfile(f)]\n\n # And assemble the entire output list\n data_files = [ (manpagebase, manpages) ]\n\n return data_files",
"def filelist(pwd):\n fully_qualified_list = []\n for root, dirs, files in sorted(os.walk(pwd)):\n for filename in files:\n filepath = os.path.join(root, filename)\n fully_qualified_list.append(filepath)\n return fully_qualified_list",
"def getDocDir():\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep",
"def _get_default_paths(basedir: Path) -> List[Path]:\n most_files = sorted(x for x in get_known_sources(basedir)\n if x.suffix not in {'.js'})\n\n # All files in js/*.js excluding generated files.\n # Use relpath for nicer default output.\n # Sort to ensure lib.js comes before lib_array.js, etc.\n # Filter out the generated libdot.js/libdot.min.js/etc...\n js_files = (\n list((libdot.DIR / 'html').glob('*.html')) +\n list((libdot.DIR / 'js').glob('*.js')) +\n list((libdot.DIR / 'third_party').glob('*/*.js')))\n js_files = sorted(x for x in js_files\n if not x.name.startswith('libdot.'))\n\n return [os.path.relpath(x) for x in most_files + js_files]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Hardlink dictionary files from input directory and documentation to target directory.
|
def _link_dictionaries_and_docs(cls, input_dir: str, lib_dir: str, doc_dir: str, config: PackageConfig) -> None:
cls._mkdir_p(lib_dir)
cls._mkdir_p(doc_dir)
logging.info('Linking MeCab files to library directory...')
for filename in os.listdir(input_dir):
full_filename = os.path.join(input_dir, filename)
if os.path.isfile(full_filename):
os.link(full_filename, os.path.join(lib_dir, filename))
logging.info('Linking documentation files to documentation directory...')
for doc_file_path in config.misc_docs():
os.link(doc_file_path, os.path.join(doc_dir, os.path.basename(doc_file_path)))
|
[
"def linkFileDict(fileDict, interactive):\n for f in fileDict:\n target = buildDotfilesPath(f)\n linkName = buildHomePath(fileDict[f])\n linkFile(target, linkName, interactive)",
"def create_reference_files(self):\n params = self.params\n params.ref_paths = {}\n\n for (lang1, lang2), v in self.data['para'].items():\n\n assert lang1 < lang2\n lang1_id = params.lang2id[lang1]\n lang2_id = params.lang2id[lang2]\n\n for data_type in ['valid', 'test']:\n\n lang1_path = os.path.join(params.dump_path, 'ref.{0}-{1}.{2}.txt'.format(lang2, lang1, data_type))\n lang2_path = os.path.join(params.dump_path, 'ref.{0}-{1}.{2}.txt'.format(lang1, lang2, data_type))\n\n lang1_txt = []\n lang2_txt = []\n\n # convert to text\n for (sent1, len1), (sent2, len2) in self.get_iterator(data_type, lang1, lang2):\n lang1_txt.extend(convert_to_text(sent1, len1, self.dico[lang1], lang1_id, params))\n lang2_txt.extend(convert_to_text(sent2, len2, self.dico[lang2], lang2_id, params))\n\n # replace <unk> by <<unk>> as these tokens cannot be counted in BLEU\n lang1_txt = [x.replace('<unk>', '<<unk>>') for x in lang1_txt]\n lang2_txt = [x.replace('<unk>', '<<unk>>') for x in lang2_txt]\n\n # export hypothesis\n with open(lang1_path, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(lang1_txt) + '\\n')\n with open(lang2_path, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(lang2_txt) + '\\n')\n\n # restore original segmentation\n restore_segmentation(lang1_path)\n restore_segmentation(lang2_path)\n\n # store data paths\n params.ref_paths[(lang2, lang1, data_type)] = lang1_path\n params.ref_paths[(lang1, lang2, data_type)] = lang2_path",
"def link_files(self):\n\n for package in self.packages:\n package.link_files()\n\n for _file in self.files:\n if _file.create_link:\n _file.link()",
"def learn_dictionary(word_list_file_name, en_data_file_name,\n foreign_data_file_name, output_file_name):\n en_data = open(en_data_file_name).readlines()\n fr_data = open(foreign_data_file_name).readlines()\n data = open(word_list_file_name)\n test_list = []\n for line in data:\n test_list.append(line.strip('\\n'))\n word_tran_dict = {}\n for w in test_list:\n word_tran_dict[w] = collections.Counter()\n en_nlp = spacy.load('en', disable=['parser', 'ner', 'tagger'])\n fr_nlp = spacy.load('fr', disable=['parser', 'ner', 'tagger'])\n for eng_sen, fr_sen in zip(en_data, fr_data):\n en_token = en_nlp(eng_sen)\n fr_token = fr_nlp(fr_sen)\n for en_w in en_token:\n for fr_w in fr_token:\n if str(fr_w) in word_tran_dict:\n word_tran_dict[str(fr_w)][str(en_w)] += 1\n process_dict_and_write(word_tran_dict, output_file_name)",
"def update_docs_urls(self) -> \"ProductionPrep\":\n\n to_ignore = [\"they-use-d-it.rst\"]\n\n self.update_urls(\n os.path.join(PyFunceble.storage.CONFIG_DIRECTORY, \"README.rst\")\n )\n\n for root, _, files in os.walk(\n os.path.join(PyFunceble.storage.CONFIG_DIRECTORY, \"docs\")\n ):\n for file in files:\n if not file.endswith(\".rst\"):\n continue\n\n full_path = os.path.join(root, file)\n\n if any(x in full_path for x in to_ignore):\n continue\n\n self.update_urls(os.path.join(root, file))",
"def link (self,\r\n target_desc,\r\n objects,\r\n output_filename,\r\n output_dir=None,\r\n libraries=None,\r\n library_dirs=None,\r\n runtime_library_dirs=None,\r\n export_symbols=None,\r\n debug=0,\r\n extra_preargs=None,\r\n extra_postargs=None,\r\n build_temp=None,\r\n target_lang=None):\r\n raise NotImplementedError",
"def soft_link_files(origin, target):\n\n if file_is_empty(target):\n\n # rename as full paths\n origin = get_fullpath(origin)\n target = get_fullpath(target)\n\n # check that the origin exists\n if file_is_empty(origin): raise ValueError(\"The origin %s should exist\"%origin)\n\n # remove previous lisqnk\n try: run_cmd(\"rm %s > /dev/null 2>&1\"%target)\n except: pass\n\n soft_linking_std = \"%s.softlinking.std\"%(target)\n print_if_verbose(\"softlinking. The std is in %s\"%soft_linking_std)\n run_cmd(\"ln -s %s %s > %s 2>&1\"%(origin, target, soft_linking_std))\n remove_file(soft_linking_std)\n\n # check that it worked\n if file_is_empty(target): raise ValueError(\"The target %s should exist\"%target)",
"def link_workspace_document(workspace_doc_uid):",
"def update_mkdocs_meta(path: pathlib.Path, module_list: List[Any]) -> None:\n yaml_structure = yaml.load(path.open('r'), yaml.FullLoader)\n nav = yaml_structure['nav']\n for index in range(len(nav)):\n if 'Reference' in nav[index].keys():\n nav[index]['Reference'] = []\n nav[index]['Reference'].append({'Overview': 'api_overview.md'})\n nav[index]['Reference'].append({'trestle': module_list})\n\n yaml_structure['nav'] = nav\n yaml.dump(yaml_structure, path.open('w'))",
"def scandeps(\n base: str,\n doc: Union[CWLObjectType, MutableSequence[CWLObjectType]],\n reffields: Set[str],\n urlfields: Set[str],\n loadref: Callable[[str, str], Union[CommentedMap, CommentedSeq, str, None]],\n urljoin: Callable[[str, str], str] = urllib.parse.urljoin,\n nestdirs: bool = True,\n) -> MutableSequence[CWLObjectType]:\n r: MutableSequence[CWLObjectType] = []\n if isinstance(doc, MutableMapping):\n if \"id\" in doc:\n if cast(str, doc[\"id\"]).startswith(\"file://\"):\n df, _ = urllib.parse.urldefrag(cast(str, doc[\"id\"]))\n if base != df:\n r.append({\"class\": \"File\", \"location\": df, \"format\": CWL_IANA})\n base = df\n\n if doc.get(\"class\") in (\"File\", \"Directory\") and \"location\" in urlfields:\n u = cast(Optional[str], doc.get(\"location\", doc.get(\"path\")))\n if u and not u.startswith(\"_:\"):\n deps: CWLObjectType = {\n \"class\": doc[\"class\"],\n \"location\": urljoin(base, u),\n }\n if \"basename\" in doc:\n deps[\"basename\"] = doc[\"basename\"]\n if doc[\"class\"] == \"Directory\" and \"listing\" in doc:\n deps[\"listing\"] = doc[\"listing\"]\n if doc[\"class\"] == \"File\" and \"secondaryFiles\" in doc:\n deps[\"secondaryFiles\"] = cast(\n CWLOutputAtomType,\n scandeps(\n base,\n cast(\n Union[CWLObjectType, MutableSequence[CWLObjectType]],\n doc[\"secondaryFiles\"],\n ),\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n ),\n )\n if nestdirs:\n deps = nestdir(base, deps)\n r.append(deps)\n else:\n if doc[\"class\"] == \"Directory\" and \"listing\" in doc:\n r.extend(\n scandeps(\n base,\n cast(MutableSequence[CWLObjectType], doc[\"listing\"]),\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n )\n elif doc[\"class\"] == \"File\" and \"secondaryFiles\" in doc:\n r.extend(\n scandeps(\n base,\n cast(MutableSequence[CWLObjectType], doc[\"secondaryFiles\"]),\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n )\n\n for k, v in 
doc.items():\n if k in reffields:\n for u2 in aslist(v):\n if isinstance(u2, MutableMapping):\n r.extend(\n scandeps(\n base,\n u2,\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n )\n else:\n subid = urljoin(base, u2)\n basedf, _ = urllib.parse.urldefrag(base)\n subiddf, _ = urllib.parse.urldefrag(subid)\n if basedf == subiddf:\n continue\n sub = cast(\n Union[MutableSequence[CWLObjectType], CWLObjectType],\n loadref(base, u2),\n )\n deps2: CWLObjectType = {\n \"class\": \"File\",\n \"location\": subid,\n \"format\": CWL_IANA,\n }\n sf = scandeps(\n subid,\n sub,\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n if sf:\n deps2[\"secondaryFiles\"] = cast(\n MutableSequence[CWLOutputAtomType], mergedirs(sf)\n )\n if nestdirs:\n deps2 = nestdir(base, deps2)\n r.append(deps2)\n elif k in urlfields and k != \"location\":\n for u3 in aslist(v):\n deps = {\"class\": \"File\", \"location\": urljoin(base, u3)}\n if nestdirs:\n deps = nestdir(base, deps)\n r.append(deps)\n elif doc.get(\"class\") in (\"File\", \"Directory\") and k in (\n \"listing\",\n \"secondaryFiles\",\n ):\n # should be handled earlier.\n pass\n else:\n r.extend(\n scandeps(\n base,\n cast(Union[MutableSequence[CWLObjectType], CWLObjectType], v),\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n )\n elif isinstance(doc, MutableSequence):\n for d in doc:\n r.extend(\n scandeps(\n base,\n d,\n reffields,\n urlfields,\n loadref,\n urljoin=urljoin,\n nestdirs=nestdirs,\n )\n )\n\n if r:\n normalizeFilesDirs(r)\n\n return r",
"def maprefstofiles (self, ofpath) :\n with open (ofpath, \"w\") as ofp :\n for fid, fpath in self.audiocorpus.items () :\n if fid in self.references :\n trans = \"{0}\\t{1}\".format (fid, self.references [fid])\n print (trans, file=ofp)\n self.valid += 1\n\n print (\n \"Wrote: {0} potential targets to {1}.\".format (self.valid, ofpath),\n file=sys.stderr\n )\n\n return",
"def collect_ref_data(app, doctree):\n filename = doctree.attributes[\"source\"]\n\n # this needs to happen to make this work with sphinx-multiversion\n metadata = app.config.smv_metadata or {}\n current_version = app.config.smv_current_version\n if metadata and current_version:\n sourcedir = metadata.get(current_version, {}).get(\"sourcedir\")\n if sourcedir and filename.startswith(sourcedir):\n filename = filename[len(sourcedir) :]\n\n # otherwise lets just split off the current directory (not sphinx multiversion)\n filename = filename.replace(docs_dir, \"\").lstrip(\"/\")\n docname = filename.replace(\".md\", \"\")\n\n anchors = []\n references = []\n\n for node in doctree.traverse(nodes.raw):\n if \"name=\" in node.rawsource:\n match = re.search(r'name=\"([^\\\"]+)', node.rawsource)\n if match:\n anchors.append(match.group(1))\n elif \"id=\" in node.rawsource:\n match = re.search(r'id=\"([^\\\"]+)', node.rawsource)\n if match:\n anchors.append(match.group(1))\n\n for node in doctree.traverse(nodes.section):\n for target in frozenset(node.attributes.get(\"ids\", [])):\n anchors.append(target)\n\n for node in doctree.traverse(nodes.reference):\n uri = node.get(\"refuri\")\n if uri and not uri.startswith((\"http://\", \"https://\")):\n ref = to_reference(uri, basedoc=docname)\n references.append(ref)\n\n app.env.metadata[docname][\"anchors\"] = anchors\n app.env.metadata[docname][\"references\"] = references",
"def _build_link_tree(\n link_map: Dict[str, str], output_directory: Path, buck_root: Path\n) -> None:\n shutil.rmtree(output_directory, ignore_errors=True)\n output_directory.mkdir(parents=True)\n for destination, source in link_map.items():\n source_path = buck_root / source\n assert source_path.exists(), source_path\n destination_path = output_directory / destination\n destination_path.parent.mkdir(parents=True, exist_ok=True)\n destination_path.symlink_to(source_path)",
"def write_documentation(fh, mappings):\n usedin, examples = read_examples()\n fh.write(\"<html>\\n<body>\\n\")\n fh.write(\"<h1>Bibliotek-o to BIBFRAME conversion documentation</h1>\\n\\n\")\n bf_uris = set()\n bteko_uris = set()\n for mapping in mappings:\n bf_uris.add(contract_uri(mapping.bf_uri))\n bteko_uris.add(contract_uri(mapping.bteko_uri))\n bteko_uris.remove('')\n fh.write(\"<h2>Index by terms</h2>\\n\\n\")\n fh.write(\"<table>\\n\")\n fh.write(\"<tr><th>BIBFRAME terms</th><th>Bibliotek-o terms</th></tr>\\n\")\n for bf_uri, bteko_uri in zip_longest(sorted(bf_uris), sorted(bteko_uris)):\n fh.write('<tr><td>' + linked_term(bf_uri) + '</td><td>' + linked_term(bteko_uri) + '</td></tr>\\n')\n fh.write(\"</table>\\m\\n\")\n fh.write(\"<h2>Mappings by term involved</h2>\\n\\n\")\n for term in sorted(bf_uris):\n fh.write('<h3 id=\"%s\">%s</h3>\\n\\n' % (mkid(term), term))\n if (term in usedin):\n j = usedin[term]\n fh.write(\"<table>\\n\")\n fh.write(\"<tr><th>BIBFRAME</th><th>Bibliotek-o equivalent</th></td>\\n\")\n fh.write(\"<tr>\\n<td><pre>%s</pre></td>\\n\" % (html.escape(examples[j][0])))\n fh.write(\"<td><pre>%s</pre></td>\\n</tr>\\n\" % (html.escape(examples[j][1])))\n fh.write(\"</table>\\n\\n\")\n fh.write(\"</body>\\n</html>\\n\")",
"def Link_IDL_Files (Source_Path, Target_Path):\n global Files_Copied_Or_Linked\n global Files_Processed\n Debug_Print (\"Link_IDL_Files (\" + Source_Path + \", \" + Target_Path + \")\")\n \n for Entry in os.listdir (Source_Path):\n Debug_Print (\"Processing '\" + Entry + \"' - Entry [-4:] is '\" + Entry [-4:] + \"'\")\n Source_Entry_Path = os.path.join (Source_Path, Entry)\n # Don't process any directories:\n if not os.path.isdir(Source_Entry_Path):\n if Entry [-4:] == IDL_Suffix:\n Shadow_Entry_Path = os.path.join (Target_Path, Entry)\n Copy_Or_Link_A_File (Source_Entry_Path, Shadow_Entry_Path)\n Link_A_File (Shadow_Entry_Path, Target_IDL_Links_Path + \"/\" + Entry)\n Files_Copied_Or_Linked = Files_Copied_Or_Linked + 1\n Files_Processed = Files_Processed + 1",
"def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):\n # TODO Deal with xref so that they keep the proper path. Atm it'll just strip the path and leave only the id\n file_to_id_map = info['file_to_id_map']\n current_dir = cwd or os.path.dirname(src_file)\n cleaned_content = remove_conditional_content(content, info, tag=tag)\n links = LINKS_RE.finditer(cleaned_content)\n\n for link in links:\n link_text = link.group(0)\n link_file = link.group(1)\n link_anchor = link.group(2)\n link_title = link.group(3)\n\n if link_file is not None:\n fixed_link_file = link_file.replace(\".html\", \".adoc\")\n fixed_link_file_abs = os.path.abspath(os.path.join(current_dir, fixed_link_file))\n if fixed_link_file_abs in file_to_id_map:\n if fixed_link_file_abs.startswith(book_dir + os.sep) or fixed_link_file_abs == src_file:\n # We are dealing with a cross reference within the same book here\n if link_anchor is None:\n # Cross reference to the top of a topic, without an id being specified\n link_anchor = \"#\" + file_to_id_map[fixed_link_file_abs]\n\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n else:\n # We are dealing with a cross reference to another book here\n external_link = EXTERNAL_LINK_RE.search(link_file)\n book_dir_name = external_link.group(1)\n\n # Find the book name\n book_name = book_dir_name\n for book in info['data']:\n if check_node_distro_matches(book, info['distro']) and book['Dir'] == book_dir_name:\n book_name = book['Name']\n break\n\n fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)\n\n if link_anchor is None:\n fixed_link = \"link:\" + fixed_link_file + \"#\" + file_to_id_map[fixed_link_file_abs] + link_title\n else:\n fixed_link = \"link:\" + fixed_link_file + link_anchor + link_title\n else:\n # Cross reference or link that isn't in the docs suite\n fixed_link = link_text\n if EXTERNAL_LINK_RE.search(link_file) is not None:\n rel_src_file = src_file.replace(os.path.dirname(book_dir) + 
\"/\", \"\")\n has_errors = True\n log.error(\"ERROR (%s): \\\"%s\\\" appears to try to reference a file not included in the \\\"%s\\\" distro\", rel_src_file, link_text.replace(\"\\n\", \"\"), info['distro'])\n sys.exit(-1)\n else:\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n\n content = content.replace(link_text, fixed_link)\n\n return content",
"def update_markdown_links(markdown_dir, src_dir, dest_dir, file_mapping, strip_full_path=True, dry_run=False):\n # Compile a regular expression to match image links in markdown files, making sure https:// and http:// links are not matched\n image_link_regex = re.compile(rf\"!\\[\\]\\((?!https?://){src_dir}/(.+?)\\)\", re.IGNORECASE)\n\n # Iterate through the files in markdown_dir and its subdirectories\n for dirpath, _, filenames in os.walk(markdown_dir):\n for filename in filenames:\n # Check if the file is a markdown file\n if not filename.endswith(\".md\"):\n continue\n\n # Read the contents of the file\n file_path = os.path.join(dirpath, filename)\n with open(file_path, \"r\") as f:\n contents = f.read()\n\n # Replace image links using the file mapping\n new_contents = image_link_regex.sub(\n lambda match: f\", match.group(1)))})\",\n contents\n )\n\n # Strip any old directory names from the image links (e.g. \"Images/BearImages\" -> \"Images\")\n if strip_full_path:\n new_contents = image_link_regex.sub(\n lambda match: f\"))})\",\n new_contents\n )\n\n\n # Write the updated contents back to the file (if not doing a dry run)\n if not dry_run:\n with open(file_path, \"w\") as f:\n f.write(new_contents)\n\n if dry_run:\n print(f\"Would have updated {file_path}.\")\n print(f\"Old contents: {contents}\")\n print(f\"New contents: {new_contents}\")",
"def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n \n # get pathlist of documents folder\n pathlist = os.listdir(in_dir)\n \n # initialize variables\n termID = 1\n termdic = {} # format {term:termID}\n \n ps = PorterStemmer()\n\n # First create term-termID mapping dic\n for doc in pathlist:\n # open each document in folder\n f = open(os.path.join(in_dir, doc), 'r')\n print(\"doc: \"+doc)\n for line in f:\n # casefolding\n line = line.lower()\n \n # tokenize\n sent_line = nltk.sent_tokenize(line)\n for sent_tokens in sent_line:\n word_tokens = nltk.word_tokenize(sent_tokens)\n\n stemmed_tokens=[]\n for token in word_tokens:\n # stem tokens\n stemmed_word = ps.stem(token)\n # remove punctuations\n if stemmed_word not in list(string.punctuation):\n stemmed_tokens.append(stemmed_word)\n\n for stemmed_token in stemmed_tokens:\n if stemmed_token not in termdic.keys():\n termdic[stemmed_token] = termID\n termID += 1\n \n \n # blkSize = 10000\n # blkCount=1\n # pointer=1\n dic={} # format {term: docfreq,pointer}\n postings={} # format {term: postinglist}\n \n\n for doc in pathlist:\n f = open(os.path.join(in_dir, doc), 'r')\n print(\"doc: \"+doc)\n for line in f:\n # casefolding\n line = line.lower()\n \n # tokenize\n sent_line = nltk.sent_tokenize(line)\n for sent_tokens in sent_line:\n word_tokens = nltk.word_tokenize(sent_tokens)\n\n stemmed_tokens=[]\n for token in word_tokens:\n # stem tokens\n stemmed_word = ps.stem(token)\n # remove punctuations\n if stemmed_word not in list(string.punctuation):\n stemmed_tokens.append(stemmed_word)\n \n # update doc frequency and add posting to list\n for stemmed_token in stemmed_tokens:\n if termdic[stemmed_token] not in dic.keys():\n dic[termdic[stemmed_token]] = 1\n postings[termdic[stemmed_token]] = [int(doc)]\n if termdic[stemmed_token] in dic.keys() and int(doc) not in postings[termdic[stemmed_token]]:\n dic[termdic[stemmed_token]] +=1\n postings[termdic[stemmed_token]].append(int(doc))\n \n newdic={} 
# format {term: (docfreq,pointer)}\n \n # list of termdic keys -> terms\n termdiclist = list(termdic.keys())\n\n # dictionary to store in dictionary.txt\n for item in termdiclist:\n newdic[item] = (dic[termdic[item]],termdic[item])\n # print(newdic)\n with open (out_dict,'wb+') as fp:\n # for item in dic:\n # fp.write(str(termdiclist[item-1])+\" \"+str(dic[item])) \n # fp.write(\"\\n\")\n pickle.dump(newdic,fp)\n fp.close()\n \n # write out postings to postings file\n # if posting has skip pointer/ is tuple, separate by ','\n with open (out_postings,'w+') as fp:\n for posting in postings:\n postings[posting].sort()\n addSkipPointer(postings[posting])\n for item in postings[posting]:\n if type(item) is tuple:\n fp.write(str(item[0])+\",\"+str(item[1])+\" \")\n else:\n fp.write(str(item)+\" \")\n fp.write(\"\\n\")\n fp.close()\n\n # print(\"dic : \",dic)\n # print(\"postings : \",postings)\n \n return (dic,postings)",
"def SymLinkRel( fromFN, toFN, getio = None ):\n\n if getio: return dict( depends_on = toFN, creates = fromFN,\n attrs = dict( piperun_short = True ) )\n \n os.symlink( os.path.relpath( toFN, os.path.dirname( fromFN ) ),\n fromFN )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return package type to use for arguments.
|
def package_type(cls) -> str:
raise NotImplemented("Abstract method.")
|
[
"def package_type(self):\n ret = self._get_attr(\"packageType\")\n return ret",
"def get_pkg_type():\n plt = get_os_name()\n if plt in PACK_TYPES:\n return PACK_TYPES[plt]\n raise UnsupportedOsError(f'No supported Package type for platform \"{plt}\"')",
"def type(self):\n return self.kwargs.get(\"type\", str)",
"def get_arg_types(self):\n return None",
"def arg_types(self) -> List[ast.Type]:",
"def predefined_package_type_id(self):\n return self._predefined_package_type_id",
"def python_type(self):",
"def argpack(**kwargs):\n return taichi.lang.argpack.ArgPackType(**kwargs)",
"def get_arg_type(arg):\n # log.info(\"Module: {} Function: {}\".format(__name__, sys._getframe().f_code.co_name))\n try:\n type = int(arg)\n return \"int\"\n except:\n return \"str\"",
"def get_arg_type(t):\n return ADDRESS if is_address(t) else LITERAL",
"def _get_package_class(self):\n class_map = {\n 'ubuntu': package.UbuntuPackage()\n }\n p = package.Package()\n platform = p._get_platform()\n return class_map.get(platform)",
"def resolve_argument_type(self, val):\n if isinstance(val, utils.INT_TYPES):\n # Force all integers to be 64-bit\n return types.int64\n elif numpy_support.is_array(val):\n dtype = numpy_support.from_dtype(val.dtype)\n layout = numpy_support.map_layout(val)\n return types.Array(dtype, val.ndim, layout)\n\n tp = self.resolve_data_type(val)\n if tp is None:\n tp = getattr(val, \"_numba_type_\", types.pyobject)\n return tp",
"def optGetTypeName(*args):\n return _optcc.optGetTypeName(*args)",
"def return_type(self) -> ast.Type:",
"def argtypes(self):\n if self.dimension is not None:\n result = []\n if \"in\" in self.direction:\n #The only complication here is that the 'known' dimensionality could actually\n #be a function like \"size\" that needs information about other variables.\n #If we choose to ignore known shapes, we lose the error checking for the passed\n #in variables from the python side.\n if self.direction == \"(inout)\" and \":\" not in self.dimension:\n wstr = \", writeable\"\n else:\n wstr = \"\"\n\n if \":\" in self.dimension or \"size\" in self.dimension:\n template = 'ndpointer(dtype={}, ndim={}, flags=\"F{}\")'\n result.append(template.format(self.pytype, self.D, wstr))\n else:\n template = 'ndpointer(dtype={}, ndim={}, shape=({}), flags=\"F{}\")'\n sdim = self.dimension + (\"\" if self.D > 1 else \",\")\n result.append(template.format(self.pytype, self.D, sdim, wstr))\n elif self.direction == \"(out)\":\n result.append(\"c_void_p\")\n\n if self.D > 0 and \":\" in self.dimension:\n result.extend([\"c_int_p\" for i in range(self.D)])\n if (self.direction == \"(inout)\" and \":\" in self.dimension and\n (\"allocatable\" in self.modifiers or \"pointer\" in self.modifiers)):\n result.append(\"c_void_p\")\n return result\n else:\n ctype = self.ctype\n if ctype is not None:\n return [\"{}_p\".format(ctype.lower())]",
"def svn_fs_type(*args) -> \"char const **\":\n return _fs.svn_fs_type(*args)",
"def getTypeInfo():",
"def shell_type(self):\n return get_kind(type(self))",
"def manifest_type(self) -> str:\n return self._manifest_type"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate and return Bintray descriptor JSON for a package.
|
def bintray_descriptor_json(bintray_repository_name: str,
bintray_subject: str,
version: str,
revision: str,
version_tag: str,
package_path: str,
config: PackageConfig) -> str:
package_dir = os.path.dirname(package_path)
package_filename = os.path.basename(package_path)
include_pattern = '%s/(%s)' % (package_dir, package_filename,)
descriptor = {
"package": {
"name": config.name(),
"repo": bintray_repository_name,
"subject": bintray_subject,
"desc": config.summary(),
"website_url": config.url(),
"vcs_url": config.git_url(),
"github_use_tag_release_notes": True,
"github_release_notes_file": config.changelog_file(),
"licenses": [
config.license(),
],
"labels": config.tags(),
"public_download_numbers": True,
"public_stats": True,
},
"version": {
"name": '%s-%s' % (version, revision,),
"desc": "%s (%s)" % (version, revision,),
"released": datetime.datetime.today().strftime('%Y-%m-%d'),
"vcs_tag": version_tag,
"gpgSign": True,
},
"files": [
{
"includePattern": include_pattern,
"uploadPattern": "$1",
"matrixParams": {
"override": 1,
# Used for .deb files only
"deb_distribution": 'stable',
"deb_component": 'main',
"deb_architecture": 'all',
}
}
],
"publish": True,
}
return json.dumps(descriptor)
|
[
"def package_json(context: Context):\n context.write_template('package.json')",
"def create_datadescript(input_dir):\n print(f\"Creating a simple dataset_description.json in {input_dir}... \")\n name = Path(input_dir).stem\n vers = bids.__version__\n out = dict(Name=name, BIDSVersion=vers)\n with open(input_dir + \"/dataset_description.json\", \"w\") as f:\n json.dump(out, f)",
"def get_package_data(self) -> dict:\n return self.pack_data",
"def dataset_description_file(BIDS_DIR, XNAT, project):\n\n BIDSVERSION = \"1.0.1\"\n dataset_description = dict()\n dataset_description['BIDSVersion'] = BIDSVERSION\n dataset_description['Name'] = project\n dataset_description['DatasetDOI'] = XNAT.host\n project_info = XNAT.select('/project/' + project).get()\n project_info = ET.fromstring(project_info)\n PI_element = project_info.findall('{http://nrg.wustl.edu/xnat}PI')\n if len(PI_element) > 0:\n dataset_description['Author'] = PI_element[0][1].text, PI_element[0][0].text\n else:\n dataset_description['Author'] = \"No Author defined on XNAT\"\n dd_file = os.path.join(BIDS_DIR, project)\n if not os.path.exists(dd_file):\n os.makedirs(dd_file)\n with open(os.path.join(dd_file, 'dataset_description.json'), 'w+') as f:\n json.dump(dataset_description, f, indent=2)",
"def pd(self):\n d = dict()\n d[\"descriptor_extension\"] = \"yml\"\n d[\"version\"] = \"0.5\"\n p = dict()\n p[\"description\"] = self.manifest.get(\"description\")\n p[\"maintainer\"] = self.manifest.get(\"maintainer\")\n p[\"name\"] = self.manifest.get(\"name\")\n p[\"vendor\"] = self.manifest.get(\"vendor\")\n p[\"version\"] = self.manifest.get(\"version\")\n d[\"package\"] = p\n return d",
"def pkg_info_json(folder=None):\r\n # ---- Checks\r\n if not folder:\r\n folder = sys.prefix + \"\\\\conda-meta\"\r\n folder = Path(folder)\r\n if not folder.is_dir():\r\n print(\"\\nInvalid path... {}\".format(folder))\r\n return\r\n files = list(folder.glob(\"*.json\"))\r\n if not files:\r\n print(\"{} doesn't have any json files\".format(folder))\r\n return\r\n #\r\n # --- Package, Filename, Dependencies\r\n packages = []\r\n m0 = m1 = m2 = 0\r\n for f in files:\r\n ret = parse_json(f, key=\"depends\") # ---- look at dependencies only\r\n nme = str(f.name).rsplit(\"-\", 2)[0] # ---- split off the last two\r\n if len(ret) == 1:\r\n ret = ret[0]\r\n elif len(ret) > 1:\r\n srted = sorted(ret)\r\n ret = \"; \".join([i for i in srted if \"py\" not in i]) # `; ` used\r\n else:\r\n ret = \"None\"\r\n m0 = max(m0, len(nme))\r\n m1 = max(m1, len(str(f.name)))\r\n m2 = max(m2, len(ret))\r\n packages.append((nme, f.name, ret))\r\n dt1 = [(\"Package\", \"<U{}\".format(m0)), (\"Filename\", \"<U{}\".format(m1)),\r\n (\"Dependencies\", \"<U{}\".format(m2))]\r\n packages = np.asarray(packages, dtype=dt1)\r\n #\r\n # ---- Dependency, Counts\r\n z = []\r\n for dep in packages['Dependencies']:\r\n if dep not in (\"\", \" \"):\r\n z += dep.split(\"; \") # split on `; ` delimiter\r\n z = np.asarray(z)\r\n uniq, idx, cnts = np.unique(z, return_index=True, return_counts=True)\r\n uniq2 = [[u, u.split(\" \")[0]][\" \" in u] for u in uniq if u != \"\"]\r\n m0 = max(np.char.str_len(uniq2))\r\n m1 = np.max(np.char.str_len(uniq2)) + 5\r\n dt2 = [(\"Full_name\", \"<U{}\".format(m0)), (\"Counts\", \"i8\"),\r\n (\"Simple_name\", \"<U{}\".format(m1))]\r\n dep_counts = np.asarray(list(zip(uniq, cnts, uniq2)), dtype=dt2)\r\n #\r\n # ---- Package, Required_by\r\n required_by = []\r\n names = packages['Package']\r\n depends = packages['Dependencies']\r\n max_len = 0\r\n for nme in names:\r\n if nme in ('py', 'python'):\r\n required_by.append([nme, \"many\"])\r\n continue\r\n w = names[[nme in i 
for i in depends]]\r\n if np.size(w) > 0:\r\n v = w.tolist()\r\n v0 = \"; \".join([i.split(\"; \")[0] for i in v])\r\n max_len = max(max_len, len(v0))\r\n required_by.append([nme, v0])\r\n else:\r\n required_by.append([nme, \"None\"])\r\n r_dt = \"<U{}\".format(max_len)\r\n dt = np.dtype([('Package', '<U30'), ('Required_by', r_dt)])\r\n required_by = uts(np.asarray(required_by), dtype=dt)\r\n return packages, dep_counts, required_by",
"def _generate_swagger_json(self, app):\n self._paths.extract_from_app(app)\n\n swagger_object = {\n \"swagger\": self.swagger_version,\n \"info\": {\n \"title\": self._title,\n \"version\": self._version\n },\n \"paths\": {}\n }\n self._paths.add_to_spec(swagger_object)\n self._definitions.add_to_spec(swagger_object)\n\n return swagger_object",
"def genpkg(zipfile,\n application,\n application_version,\n bootloader,\n dev_revision,\n dev_type,\n dfu_ver,\n sd_req,\n softdevice,\n key_file):\n zipfile_path = zipfile\n\n if application_version == 'none':\n application_version = None\n\n if dev_revision == 'none':\n dev_revision = None\n\n if dev_type == 'none':\n dev_type = None\n\n sd_req_list = None\n\n if sd_req.lower() == 'none':\n sd_req_list = []\n elif sd_req:\n try:\n # This will parse any string starting with 0x as base 16.\n sd_req_list = sd_req.split(',')\n sd_req_list = list(map(int_as_text_to_int, sd_req_list))\n except ValueError:\n raise nRFException(\"Could not parse value for --sd-req. \"\n \"Hex values should be prefixed with 0x.\")\n\n if key_file and dfu_ver < 0.8:\n click.echo(\"Key file was given, setting DFU version to 0.8\")\n\n package = Package(dev_type,\n dev_revision,\n application_version,\n sd_req_list,\n application,\n bootloader,\n softdevice,\n dfu_ver,\n key_file)\n\n package.generate_package(zipfile_path)\n\n log_message = \"Zip created at {0}\".format(zipfile_path)\n try:\n click.echo(log_message)\n except OSError:\n print(log_message)",
"def pack(self):\n package = {}\n package['code_name'] = self.code_name\n package['location_time'] = self.location_time\n locations_identified = []\n for location in self.locations_identified:\n locations_identified.append(location.to_json())\n package['locations_identified'] = locations_identified\n return json.dumps(package)",
"def _create_project_manifest(self, napdr):\n # base structure\n pm = {\n \"descriptor_extension\": \"yml\",\n \"version\": \"0.5\",\n \"package\": {\n \"vendor\": napdr.vendor,\n \"name\": napdr.name,\n \"version\": napdr.version,\n \"maintainer\": napdr.maintainer,\n \"description\": napdr.description\n },\n \"files\": []\n }\n self.sources = []\n self.destinations = []\n # add entries for artifacts\n for pc in napdr.package_content:\n tmp = pc.copy()\n # remove checksum information\n del tmp[\"algorithm\"]\n del tmp[\"hash\"]\n # re-write path (source -> path)\n tmp[\"path\"] = os.path.join(self.remove_Definitions(tmp[\"source\"]))\n self.sources.append(tmp[\"source\"])\n self.destinations.append(tmp[\"path\"])\n del tmp[\"source\"]\n # re-write content type (content-type -> type)\n tmp[\"type\"] = tmp[\"content-type\"]\n del tmp[\"content-type\"]\n # add to pm\n pm.get(\"files\").append(tmp)\n return pm",
"def format_desc(self):\n return '\\nDescription:\\n{}\\n'.format(\n C(\n FormatBlock(get_pkg_description(self.package)).format(\n width=76,\n newlines=True,\n prepend=' '\n ),\n fore='green'\n )\n )",
"def json(self):\n with open(self.manifest_path, encoding='utf-8') as f:\n manifest = Manifest(f)\n job_json = manifest.json\n\n # Insert git branch information\n for pkg_doc in job_json['packages']:\n if self._probe_git:\n pkg_doc['git_branch'] = self.package_branch(pkg_doc['name'])\n else:\n pkg_doc['git_branch'] = 'unknown'\n\n # Insert git repo URLs\n for pkg_doc in job_json['packages']:\n pkg_doc['git_url'] = self.package_repo_url(pkg_doc['name'])\n\n return job_json",
"def dump_br_version(package, project, extra_tag='', git_dir=None):\n normalized = get_version(package, project, extra_tag, git_dir)\n sha = subprocess.check_output(\n ['git', 'rev-parse', 'HEAD'], cwd=CWD).strip().decode('utf-8')\n branch = _ref_from_sha(sha)\n pref = package_entries[package].br_version_prefix\n return json.dumps({pref+'_version': normalized,\n pref+'_sha': sha,\n pref+'_branch': branch})",
"def get_package_json(path):\n with open(os.path.join(path, \"elm-package.json\")) as p:\n return json.loads(p.read())",
"def generate_package_report(pkg):\n\n SOUP.find('div', {'id': 'description'}).contents = get_description(pkg)\n\n load_scripts(pkg)\n\n if exists(pkg + \"/Bom\"):\n get_file_list(pkg, \"\")\n\n for f in os.listdir(pkg):\n if splitext(f)[1] == '.pkg':\n get_file_list(pkg, f)",
"def make_def_file(self):\n dependencyData = []\n for dataset in self.dependencies:\n data = dataset.defFile.get_def_file_data()\n data['chkFileChecksum'] = dataset.chkFile.get_checksum()\n dependencyData.append(data)\n defData = {\n 'dataType': self.dataset.dataType.__name__,\n 'project': self.dataset.project,\n 'branch': self.dataset.branch,\n 'name': self.dataset.name,\n 'chunkCount': self.dataset.chunkCount,\n 'creationTime': datetime.now().isoformat(),\n 'dependencies': dependencyData,\n 'repo': self.dataset.repoData.to_data()\n }\n self.dataset.directory.mkdir(parents=True, exist_ok=True)\n with open(self.defFilename, 'wt') as defFile:\n defFile.write(json.dumps(defData, sort_keys=True, indent=4))",
"def schema() -> Dict:\n from pkg_resources import resource_string\n import json\n\n data = resource_string(\"ceeder.schemas\", \"cdr-v5.json\")\n return json.loads(data)",
"def createBinaryDescriptor() -> retval:\n ...",
"def build_package(package_data):\n name = package_data.get('name')\n # FIXME: having no name may not be a problem See #1514\n if not name:\n return\n\n description = package_data.get('description')\n version = package_data.get('version')\n declared_license = package_data.get('license')\n if declared_license:\n if isinstance(declared_license, str):\n declared_license = [declared_license]\n elif isinstance(declared_license, (list, tuple)):\n declared_license = [l for l in declared_license if l and l.strip()]\n else:\n declared_license = [repr(declared_license)]\n\n keywords = package_data.get('keywords') or []\n\n parties = []\n\n authors = package_data.get('authors') or []\n for author in authors:\n if isinstance(author, dict):\n name = author.get('name')\n email = author.get('email')\n url = author.get('homepage')\n party = models.Party(name=name, role='author', email=email, url=url)\n parties.append(party)\n elif isinstance(author, str):\n parties.append(models.Party(name=author, role='author'))\n else:\n parties.append(models.Party(name=repr(author), role='author'))\n\n homepage_url = package_data.get('homepage')\n\n repository = package_data.get('repository') or {}\n repo_type = repository.get('type')\n repo_url = repository.get('url')\n\n vcs_url = None\n if repo_type and repo_url:\n vcs_url = '{}+{}'.format(repo_type, repo_url)\n\n deps = package_data.get('dependencies') or {}\n dependencies = []\n for dep_name, requirement in deps.items():\n dependencies.append(\n models.DependentPackage(\n purl=PackageURL(type='bower', name=dep_name).to_string(),\n scope='dependencies',\n requirement=requirement,\n is_runtime=True,\n is_optional=False,\n )\n )\n\n dev_dependencies = package_data.get('devDependencies') or {}\n for dep_name, requirement in dev_dependencies.items():\n dependencies.append(\n models.DependentPackage(\n purl=PackageURL(type='bower', name=dep_name).to_string(),\n scope='devDependencies',\n requirement=requirement,\n is_runtime=False,\n 
is_optional=True,\n )\n )\n\n return BowerPackage(\n name=name,\n description=description,\n version=version,\n declared_license=declared_license,\n keywords=keywords,\n parties=parties,\n homepage_url=homepage_url,\n vcs_url=vcs_url,\n dependencies=dependencies\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
generate column with missing pid for subclasses, append label, concat the three properties, shuffle data
|
def get_testing_set(subclass_test, types_test, negatives_test):
    """Build a single shuffled, labelled testing set from three frames.

    The subclass frame is missing one property column (columns[7:] hold the
    property columns); that column is inserted filled with zeros at the same
    position it occupies in the types frame. Each frame then gets a class
    label (0 = subclass, 1 = type, 2 = negative), the negatives' '_id' column
    is moved/renamed to line up with 'id' in the types frame, column names are
    unified, and the three frames are concatenated and shuffled.

    Parameters
    ----------
    subclass_test, types_test, negatives_test : pd.DataFrame
        Input frames; none of them is modified (copies are taken).

    Returns
    -------
    pd.DataFrame
        The concatenated, shuffled testing set with a 'label' column.

    Raises
    ------
    ValueError
        If the property columns of the negatives and subclass frames do not
        differ by exactly one column.
    """
    # Find the property column present in negatives but absent from subclass.
    # (The original code stringified the set and stripped braces/quotes, which
    # corrupts names containing those characters and breaks silently when more
    # than one column differs — extract the single element directly instead.)
    missing = set(negatives_test.columns[7:]).difference(subclass_test.columns[7:])
    if len(missing) != 1:
        raise ValueError(
            "expected exactly one missing property column, got: %r" % (missing,)
        )
    difference_column = missing.pop()
    # Position the new column where it lives in the types frame.
    column_position = types_test.columns.get_loc(difference_column)
    subclasses_new = subclass_test.copy()
    subclasses_new.insert(
        loc=column_position,
        column=difference_column,
        value=[0] * len(subclass_test),
    )
    # Copies so the callers' frames stay untouched.
    types_new = types_test.copy()
    negatives_new = negatives_test.copy()
    # Class labels: 0 = subclass, 1 = type, 2 = negative.
    subclasses_new['label'] = [0] * len(subclass_test)
    types_new['label'] = [1] * len(types_test)
    negatives_new['label'] = [2] * len(negatives_test)
    # Move the negatives' '_id' column to where 'id' sits in the types frame.
    id_negative = negatives_new['_id']
    del negatives_new['_id']
    negatives_new.insert(loc=types_new.columns.get_loc('id'), column='id', value=id_negative)
    # Unify column names so the frames stack positionally.
    subclasses_new.columns = types_new.columns
    negatives_new.columns = types_new.columns
    # Stack all rows and reset the index before shuffling.
    all_testing_data = pd.concat([subclasses_new, types_new, negatives_new]).reset_index(drop=True)
    # Shuffle rows with pandas itself (replaces the external sklearn shuffle).
    all_testing_data = all_testing_data.sample(frac=1)
    return all_testing_data
|
[
"def process_pclass(combined_data):\n # encoding into 3 categories:\n pclass_dummies = pd.get_dummies(combined_data['Pclass'], prefix=\"Pclass\")\n\n # adding dummy variables\n combined_data = pd.concat([combined_data, pclass_dummies], axis=1)\n\n # removing \"Pclass\" since it's no loner needed\n combined_data.drop('Pclass', axis=1, inplace=True)\n\n status('pclass')\n return combined_data",
"def _transform_non_hierarchical(self):\n if self.non_hierarchical_cols is None:\n return\n \n for col in self.non_hierarchical_cols:\n if is_numeric_dtype(self.data[col]):\n self.data[col] = self.data[col].astype(str)\n \n main_values = self.data[col].value_counts()[:self.max_non_hierarchical_classes].index\n self.data.loc[~self.data[col].isin(main_values), col] = \"others\"\n \n self.data[col] = self.data[col].astype(str)\n self.data[col] = self.data[col].str.lower()\n self.data[col] = self.data[col].str.strip()\n\n for value in self.data[col].unique():\n new_name = f\"{col}_{value}\"\n self.data[new_name] = 0\n self.data.loc[self.data[col] == value, new_name] = 1\n \n self.data = self.data.drop(col, axis=1)",
"def build_precinct_txt(self):\r\n\r\n self.base_df['address_direction'] = self.base_df.apply(\r\n lambda row: self.get_address_direction(row['vf_reg_cass_pre_directional']), axis=1)\r\n\r\n self.base_df['city'] = self.base_df.apply(\r\n lambda row: self.get_city(row['index'], row['vf_reg_cass_city']), axis=1)\r\n\r\n self.base_df['includes_all_addresses'] = self.base_df.apply(\r\n lambda row: self.includes_all_addresses(row['vf_reg_cass_street_name'], row['vf_reg_cass_city']), axis=1)\r\n\r\n self.base_df['includes_all_streets'] = self.base_df.apply(\r\n lambda row: self.includes_all_streets(), axis=1)\r\n\r\n self.base_df['odd_even_both'] = self.base_df.apply(\r\n lambda row: self.odd_even_both(row['index'], row['vf_reg_cass_street_num']), axis=1)\r\n\r\n self.base_df['precinct_id'] = self.base_df.apply(\r\n lambda row: self.get_precinct_id(row['van_precinctid']), axis=1) # could also use 'merge_key\"\r\n\r\n self.base_df['start_house_number'] = self.base_df.apply(\r\n lambda row: self.get_start_house_number(row['vf_reg_cass_street_num']), axis=1)\r\n\r\n self.base_df['end_house_number'] = self.base_df.apply(\r\n lambda row: self.get_end_house_number(row['vf_reg_cass_street_num']), axis=1)\r\n\r\n self.base_df['state'] = self.base_df.apply(\r\n lambda row: self.get_state(row['vf_reg_cass_state']), axis=1)\r\n\r\n self.base_df['street_direction'] = self.base_df.apply(\r\n lambda row: self.get_street_direction(row['vf_reg_cass_pre_directional']), axis=1)\r\n\r\n self.base_df['street_name'] = self.base_df.apply(\r\n lambda row: self.get_street_name(row['vf_reg_cass_street_name']), axis=1)\r\n\r\n self.base_df['street_suffix'] = self.base_df.apply(\r\n lambda row: self.get_street_suffix(row['vf_reg_cass_street_suffix']), axis=1)\r\n\r\n self.base_df['unit_number'] = self.base_df.apply(\r\n lambda row: self.get_unit_number(), axis=1)\r\n\r\n self.base_df['zip'] = self.base_df.apply(\r\n lambda row: self.get_zip(row['vf_reg_cass_zip']), axis=1)\r\n\r\n 
self.base_df['id'] = self.base_df.apply(\r\n lambda row: self.create_id(row['index']), axis=1)\r\n\r\n return self.base_df",
"def create_label(df, class_col, label_col, classes):\n\n # Integer labels\n labels = list(map(str, range(len(classes))))\n\n # Replace only works for same data type\n return (\n df.replace(classes, labels, subset=class_col)\n .withColumn(label_col, col(class_col).cast(\"integer\"))\n .drop(class_col)\n )",
"def _create_batch_split(self, df:pd.DataFrame,\n batch_size:int, ild_extra_rows:int,\n sub_folder:str, summary_str:str, shuffle:bool):\n summary = pd.DataFrame({'filename':[]})\n\n data = pd.DataFrame()\n \n if shuffle:\n #give each label a representation in ild \n for category in df[self.targetcol].unique():\n shuffled = df.loc[lambda x: df[self.targetcol] == category, :].sample(frac=1)\n data = data.append(shuffled[:1])\n df.drop(index=shuffled.index[0], inplace=True)\n data = self._replace_with_missing(df=data, index=data.shape[0])\n \n #shuffle all data\n df = df.sample(frac=1) \n \n df = self._replace_with_missing(df=df, index=ild_extra_rows)\n\n #add extra data to ild\n data = data.append(df[:ild_extra_rows])\n #check if first batch actually contains all labels\n if set(df[self.targetcol]) != set(data[self.targetcol]):\n raise ValueError(\"The initial data must contain all possible labels.\")\n \n df.drop(df.index[:ild_extra_rows], inplace=True)\n summary = self._save_df(data, summary, 'labeled_set.pkl.gzip', sub_folder)\n\n #create batch files\n for i in range(0,df.shape[0],batch_size):\n summary = self._save_df(df[i:i+batch_size], \n summary, 'data{0}.pkl.gzip'.format(i),\n sub_folder)\n\n summary.reset_index(inplace=True,drop=True)\n pd.to_pickle(summary, summary_str)",
"def get_labels_for_properties(self, pids: list, replace_qids: bool):\n cols = self._pids_to_df_cols(pids)\n\n # make list of qids to get labels for\n qids_getlabels = []\n for idx, row in self.doc_df.iterrows():\n for col in cols:\n if isinstance(row[col], str) and is_qid(row[col]):\n qids_getlabels.append(row[col])\n\n elif isinstance(row[col], list):\n [qids_getlabels.append(val) for val in row[col] if is_qid(val)]\n\n # get labels if the list is not empty\n if len(qids_getlabels) > 0:\n qids_getlabels = list(set(qids_getlabels))\n self.qid_label_mapping = self.ge.get_labels(\n qids_getlabels, timeout=self.timeout\n )\n\n if replace_qids:\n for col in cols:\n self.doc_df[col] = self.doc_df[col].map(\n lambda i: self._replace_qid_with_label(\n i, return_v_if_missing=True\n )\n )\n else:\n for col in cols:\n self.doc_df[col + \"Label\"] = self.doc_df[col].map(\n lambda i: self._replace_qid_with_label(\n i, return_v_if_missing=False\n )\n )",
"def generateMasterCSV(data, output_path, bad_list):\n # Begin labeling data\n print(\"Generating new master CSV...\")\n count = 250000\n data['img_num'] = []\n data['img_name'] = []\n data['subset'] = []\n for i in range(len(data['img_id'])):\n data['img_num'].append(count)\n data['img_name'].append(num2base62(count))\n # default label is excluded, all used data will be rewritten\n data['subset'].append('exclude')\n count += 1\n\n # Establish valid categories and number of images for testing subset\n print(\"Generating subset size...\", end=\" \")\n subdata = getSubsetSize(data, bad_list)\n print(\"Done!\\nLabeling subset indices...\")\n indicesdata = sortIndicesByCategory(data, subdata)\n\n # Split index lists into learning and testing sets, then label\n for i in indicesdata:\n print(\"Splitting and labeling class: \"+ i + \"...\", end=' ')\n learn, test = getSubLists(indicesdata[i], subdata['testsize'][subdata['valid'].index(i)])\n for j in learn:\n data['subset'][j] = 'learn'\n for j in test:\n data['subset'][j] = 'test'\n print('Done!')\n print(\"Classes labeled, removing excess data & writing to CSV...\", end=\" \")\n # Strip Unnecessary Data and create mastercsv\n data.pop('person')\n data.pop('hierarchy')\n data.pop('status')\n writeCSV(os.path.join(output_path, 'master.csv'), data)\n print(\"Done!\")\n return data",
"def _dumify_categorical_features(df):\n prepped = pd.DataFrame(index=df.index)\n for feature in df.columns:\n # print feature, df.dtypes[feature]\n if df.dtypes[feature] == 'object':\n dummied = _dummy_text_feature(df, feature)\n prepped = prepped.join(dummied)\n else:\n prepped = prepped.join(df[feature])\n return prepped",
"def getClassData(file_path=None, new_file_path=None):\n\n\tdf = pd.read_csv(file_path)\n\tprint(df.label.unique())\n\tlabel_list = df.label.unique()\n\tprint(df.shape)\n\tfor label in label_list:\n\t\tdf1 = df[df.label == label]\n\t\tdf1.drop(['label'], inplace=True, axis=1)\n\t\tfile_name = label + '.csv'\n\t\tnew_file_name = os.path.join(new_file_path, file_name)\n\n\t\tdf1.to_csv(new_file_name, index=False)",
"def __init__(self,ss,genotypeDataset, sample = 1.0):\n\n self.sample = sample\n new_col1 = when((col(\"alleles\")[0] == u'REF') & (col(\"alleles\")[1] == u'ALT'),1) \\\n .otherwise(when( (col(\"alleles\")[0] == u'ALT') & (col(\"alleles\")[1] == u'ALT'),2))\n\n genocounts = genotypeDataset.toDF().sample(False, self.sample) \\\n .withColumn(\"hethomclass\", new_col1) \\\n .groupBy(\"sampleid\", \"hethomclass\").count().collect()\n\n data_het = {}\n data_hom = {}\n for row in genocounts:\n curr = row.asDict()\n if(curr['hethomclass'] == 1):\n data_het[curr['sampleid']] = curr['count']\n if(curr['hethomclass'] == 2):\n data_hom[curr['sampleid']] = curr['count']\n\n self.hetHomRatio = []\n for sampleid in data_hom.keys():\n if sampleid in data_het.keys():\n self.hetHomRatio.append(float(data_het[sampleid])/float(data_hom[sampleid]))",
"def multiclass_dataset(train_files,test_files,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_midi\"):\n # read all features\n features = []\n targets = []\n feature = []\n target = []\n for file in train_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n feature = []\n target = []\n for file in test_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n \n # check how many pitch classes we have\n all_keys = []\n for el in targets[0]:\n all_keys += el.tolist()\n for el in targets[1]:\n all_keys += el.tolist()\n classes = list(set(all_keys))\n classes.sort()\n print \"classes:\", classes\n print \"nr classes:\",len(classes)\n \n # make (binary) target data\n cl_targets = []\n targ = []\n for piece in targets[0]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n targ = []\n for piece in targets[1]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n \n # make train and test data\n trainin = features[0]\n testin = features[1]\n trainout = cl_targets[0]\n testout = cl_targets[1]\n\n return trainin, trainout, testin, testout",
"def _generate_classes(self,variant='joint',show=False):\r\n #self.positive_combinations = flatten([flatten([[tuple([ps[i],p1]) for p1 in ps[i+1:]] for i in range(len(ps)-1)]) for ps in self.links])\r\n self.positive_combinations = []\r\n for traj in self.links:\r\n self.positive_combinations += [(v,v2) for v,v2 in zip(traj[:-1],traj[1:])]\r\n self.idx_positive = [iv for iv,v in enumerate(self.combinations) if v in self.positive_combinations]\r\n self.idx_negative = list(set(range(len(self.combinations))).difference(self.idx_positive))\r\n self.negative_combinations = [self.combinations[v] for v in self.idx_negative]\r\n self.num_positive = len(self.idx_positive)\r\n self.num_negative = len(self.idx_negative)\r\n \r\n self.positive_examples = np.zeros((self.num_positive,self.num_info_types))\r\n for i,typ in enumerate(self.implemented_info):\r\n self.positive_examples[:,i] = [self.combi_info[typ][v] for v in self.idx_positive]\r\n\r\n self.negative_examples = np.zeros((self.num_negative,self.num_info_types))\r\n for i,typ in enumerate(self.implemented_info):\r\n self.negative_examples[:,i] = [self.combi_info[typ][v] for v in self.idx_negative]\r\n\r\n #bins = [np.linspace(0,max(self.combi_info[v])*1.5,self.num_bins[v]) for v in self.implemented_info]\r\n ranges = [[0,max(self.combi_info[v])] for v in self.implemented_info]\r\n bins = [int(r[1]/float(self.bin_spec[v]))+1 for r,v in zip(ranges,self.implemented_info)]\r\n \r\n p_positive, edges = np.histogramdd(self.positive_examples,bins=bins,range=ranges,normed=True)\r\n p_negative, edges = np.histogramdd(self.negative_examples,bins=bins,range=ranges,normed=True)\r\n\r\n self.probability_distributions = {'positive':p_positive,'negative':p_negative}\r\n self.edges = [.5*(v[1:]+v[:-1]) for v in edges] \r\n if show:\r\n fig = plt.figure()\r\n X, Y = np.meshgrid(*self.edges,indexing='ij')\r\n\r\n ax0 = plt.subplot(121,projection='3d')\r\n ax0.plot_surface(X,Y,p_positive)\r\n plt.title('Positive distribution')\r\n\r\n 
ax1 = plt.subplot(122,projection='3d')\r\n ax1.plot_surface(X,Y,p_negative)\r\n plt.title('Negative distribution')\r\n plt.show()",
"def add_dataframe(self, df):\n class_columns = [col for col in list(df) if col.startswith('Label')]\n if len(class_columns) == 0:\n df = pd.concat([df, pd.get_dummies(df['Class'], prefix='Label')], axis=1)\n self._df = self._df.append(df, ignore_index=True)\n self._df.fillna(value=0, inplace=True)\n self._shape = self._df.shape",
"def __init__(self, name='dataset', parent_datasets=None, pk_type=np.int, rng=None):\n self._columns = []\n self._name = name\n\n if parent_datasets is None:\n parent_datasets = []\n elif isinstance(parent_datasets, AbstractDataSet):\n parent_datasets = [parent_datasets]\n elif not isinstance(parent_datasets, list):\n parent_datasets = list(parent_datasets)\n\n self._parents = parent_datasets\n self._pk = DSColumn('pk', (), pk_type, self)\n\n self._sample = None\n if rng is None and self.parent_dataset is not None:\n self.rng = self.parent_dataset.rng\n else:\n self.rng = rng",
"def generate_stats(self):\n\n if self.test_set == 'test':\n print 'Error: test set has no labels'\n return\n\n lbbs_not_in_pbbs_df = pd.DataFrame(columns=['pid','z','y','x','d'])\n if self.classifier_pred_path is None:\n pbbs_df = pd.DataFrame(columns=['pid','prob','z','y','x','d','nod'])\n else:\n pbbs_df = pd.DataFrame(columns=['pid','prob','z','y','x','d','nod','c_prob'])\n\n n_annot = 0\n for name in self.filenames:\n #print name\n pbb = np.load(os.path.join(self.bbox_dir, name+'_pbb.npy'))\n # add nod\n pbb = np.concatenate([pbb, np.zeros((pbb.shape[0],1))], axis=1)\n\n # Include classifier scores\n # Use nan for patients that got pbbs but not classifier predictions\n # eg blacklist\n if self.classifier_pred_path is not None:\n pred_fname = os.path.join(self.classifier_pred_path, name+'_pred.npy')\n if os.path.exists(pred_fname):\n cl_scores = np.load(pred_fname)\n else:\n cl_scores = np.empty((pbb.shape[0],1))\n cl_scores[:] = np.nan\n\n pbb = np.concatenate([pbb,cl_scores], axis=1)\n pbb = pbb[pbb[:,0].argsort()][::-1]\n lbb = np.load(os.path.join(self.bbox_dir, name+'_lbb.npy'))\n n_annot += len(lbb)\n lab_hits = np.zeros(len(lbb))\n\n # determine ground truth label of pbb\n # exclude relevant pbbs that are redundant for purposes of FROC\n\n #print 'pbb len', len(pbb)\n it = range(len(pbb)) if self.topk is None else range(min(len(pbb),self.topk))\n for i in it:\n\n if self.pbb_cutoff is not None and pbb[i,0] < self.pbb_cutoff:\n break\n\n lbb_match = False\n redundant_hit = False\n for j in range(len(lbb)):\n if is_hit(pbb[i][1:4], lbb[j][:3], lbb[j][3]):\n if lab_hits[j] > 0:\n redundant_hit = True\n #print 'redundant tp!'\n #print name, 'pbb', pbb[i], 'lbb', lbb[j]\n #tp.append(pbb[i])\n lab_hits[j] += 1\n lbb_match = True\n break\n if lbb_match:\n pbb[i,5] = 1\n else:\n pbb[i,5] = 0\n\n if not redundant_hit:\n pbbs_df.loc[len(pbbs_df)] = [name] + list(pbb[i])\n missed = pd.DataFrame(columns=list('zyxd'), data = lbb[lab_hits == 
0].reshape(-1,len(list('zyxd'))))\n missed['pid'] = name\n missed = missed[['pid','z','y','x','d']]\n lbbs_not_in_pbbs_df = pd.concat([lbbs_not_in_pbbs_df,missed], ignore_index=True)\n\n\n # convert scores to probabilities\n pbbs_probs = s_to_p(np.array(pbbs_df['prob']))\n pbbs_df['prob'] = pbbs_probs\n\n if self.classifier_pred_path is not None:\n pbbs_cprobs = s_to_p(np.array(pbbs_df['c_prob']))\n pbbs_df['c_prob'] = pbbs_cprobs\n\n # ensemble\n pbbs_df['ensemble'] = (pbbs_df['prob'] + pbbs_df['c_prob'])/2.0\n\n\n\n self.n_annot = n_annot\n self.pbbs = pbbs_df\n self.rel = pbbs_df[pbbs_df['nod']==1]\n self.irr = pbbs_df[pbbs_df['nod']==0]\n self.lbbs_not_in_pbbs = lbbs_not_in_pbbs_df\n print 'loaded {} pbbs'.format(len(pbbs_df))\n if self.test_set == 'train' or self.test_set == 'val':\n print 'saved pbbs missed {} out of {} annotations ({:.2%})'.format(len(lbbs_not_in_pbbs_df),\n n_annot,\n 1.0 * len(lbbs_not_in_pbbs_df)/n_annot)",
"def _build_table(self):\n output_data = self.data[self.type]\n if self.type == 'counts':\n logger.info(\"combining counts data\")\n for idx, (sample_id, sample_data) in enumerate(output_data.items()):\n data = sample_data[0]['htseq']\n data = data.rename(index=str, columns={'count': sample_id})\n if idx == 0:\n table_data = data\n else:\n table_data = pd.merge(table_data, data, on='geneName', sort=True)\n else:\n logger.info(\"combining non-counts data\")\n table_data = []\n # need to retrieve all possible header fields, in case of missing data\n header = []\n for sample_id, sample_data in list(output_data.items()):\n tmp_header = [field for source in sample_data\n for name, data in list(source.items())\n for field in list(data.keys())]\n header = sorted(list(set(header) | set(tmp_header)))\n logger.debug(\"header row: {}\".format(header))\n \n # now pull out each library's data\n for sample_id, sample_data in list(output_data.items()):\n # initialize with empty default vals\n curr_row_data = {k:'' for k in header} \n # fill in with available data\n for curr_data in sample_data:\n for data_source, results in list(curr_data.items()):\n for curr_name, curr_result in list(results.items()):\n curr_row_data[curr_name] = curr_result\n\n logger.debug(\"values: {}\".format(curr_row_data.values()))\n\n if not len(table_data):\n table_data.append(['libId'] + sorted(header))\n logger.debug(\"added header row: {}\".format(table_data[-1]))\n\n table_data.append(\n [sample_id] + [curr_row_data[h] for h in header]\n )\n logger.debug(\"added values row: {}\".format(table_data[-1]))\n return table_data",
"def generate_on_columns(self, columns):\n print(columns)\n for i in columns:\n self.field[i[0]][i[1]] = -1\n for i in range(len(self.field)):\n if -1 in self.field[i]:\n amount = self.field[i].count(-1)\n for j in range(len(self.field[i])):\n if self.field[i][j] == -1:\n for k in range(j, -1, -1):\n self.field[i][k] = self.field[i][k - 1] \n for k in range(amount):\n self.field[i][k] = random.randint(0, 5)\n # print(*self.field, sep='\\n') # for debug purposes\n self.all_sprites = pygame.sprite.Group()\n for i in range(len(self.field)):\n for j in range(len(self.field[i])):\n gem = pygame.sprite.Sprite(self.all_sprites)\n gem.image = self.load_image(self.colors[self.field[i][j]])\n gem.image = pygame.transform.scale(gem.image, (self.cell_size, self.cell_size - 5))\n gem.rect = gem.image.get_rect()\n gem.rect.x = i * 55 + 5\n gem.rect.y = j * 55 + 60",
"def compute_class_df(self):\n \n if ((self.the_class) and (isinstance(self.the_class, int))):\n\n # Create the bins from the classes\n self.data['the_class'] = LogReg.create_the_class(self, self.data.iloc[:,0])\n \n # Compute the probability\n the_sum = self.data.iloc[:,1:].groupby('the_class').sum()\n the_count = self.data.iloc[:,1:].groupby('the_class').count()\n self.class_prob = (the_sum / the_count).reset_index()\n \n # Remove classes from the main dataframe\n self.data.drop('the_class', axis=1, inplace=True)\n \n else:\n self.class_prob = None",
"def input_dataframe_generator(data_dir, test_data_dir ,classes, representing_channel):\n \n df = pd.DataFrame(columns= [\"file\" ,\"label\", \"class\", \"set\",\"uncertainty\" ,\"prediction\"] )\n if test_data_dir != []:\n data_directory = {\"train\" : data_dir, \"test\" : test_data_dir}\n else:\n data_directory = {\"train\" : data_dir}\n \n for dd in data_directory:\n train_data_path = data_directory[dd]\n for tdp in train_data_path:\n label = 0\n for cl in classes:\n df_dummy = pd.DataFrame(columns= [\"file\" ,\"label\", \"class\", \"set\",\"prediction\"] )\n df_dummy[\"file\"] = glob.glob(os.path.join(tdp , cl, \"*_\" + representing_channel + \"*\") ) \n df_dummy[\"label\"] = label\n df_dummy[\"class\"] = cl\n df_dummy[\"uncertainty\"] = -1.\n df_dummy[\"prediction\"] = -1\n df_dummy[\"set\"] = dd\n df = df.append(df_dummy, ignore_index=True)\n label = label + 1\n for cl in classes:\n df[cl+\"_probability\"] = -1.\n df_dummy[\"prediction\"] = df_dummy[\"prediction\"].astype(int)\n return df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check which zodiac sign
|
def zodiac_sign(sign_num):
    """Print the zodiac sign matching *sign_num* (month*100 + day encoding).

    Anything above the Sagittarius cutoff wraps back to Capricorn. When the
    terminal is wide enough, the sign's ASCII art is rendered as well.
    """
    global COLUMNS
    # Upper bound (inclusive) of each sign's date window, in ascending order.
    cutoffs = [
        (119, "CAPRICORN"),
        (218, "AQUARIUS"),
        (320, "PISCES"),
        (419, "ARIES"),
        (520, "TAURUS"),
        (620, "GEMINI"),
        (722, "CANCER"),
        (822, "LEO"),
        (922, "VIRGO"),
        (1022, "LIBRA"),
        (1121, "SCORPIO"),
        (1221, "SAGGITARIUS"),
    ]
    # Default covers the late-December tail (sign_num > 1221).
    sign = "CAPRICORN"
    for bound, candidate in cutoffs:
        if sign_num <= bound:
            sign = candidate
            break
    print("You are a " + sign + ". That is a good sign!")
    if int(COLUMNS) > 140:
        ascii_art(sign.lower())
|
[
"def get_zodiac_sign(day, month):\n if month == 3: #March\n if day <= 20:\n return 'Pisces'\n else:\n return 'Aries'\n if month == 4: #April\n if day <=19:\n return 'Aries'\n else:\n return 'Taurus'\n if month == 5: #May\n if day <=20:\n return 'Taurus'\n else:\n return 'Gemini'\n if month == 6: #June\n if day <= 20:\n return 'Gemini'\n else:\n return 'Cancer'\n if month == 7: #July\n if day <= 22:\n return 'Cancer'\n else:\n return 'Leo'\n if month == 8: #August\n if day <= 22:\n return 'Leo'\n else:\n return 'Virgo'\n if month == 9: #September\n if day <= 22:\n return 'Virgo'\n else:\n return 'Libra'\n if month == 10: #October\n if day <= 22:\n return 'Libra'\n else:\n return 'Scorpio'\n if month == 11: #November\n if day <= 21:\n return 'Scorpio'\n else:\n return 'Sagittarius'\n if month == 12: #December\n if day <= 21:\n return 'Sagittarius'\n else:\n return 'Capricorn'\n if month == 1: #January\n if day <= 19:\n return 'Capricorn'\n else:\n return 'Aquarius'\n if month == 2: #Feb\n if day <= 18:\n return 'Aquarius'\n else:\n return 'Pisces'",
"def could_sym_z(self):\n return symdata.could_sym(self._bz, symdata.EPS)",
"def is_sign_reversing(func):\r\n\tfor i in func.domain():\r\n\t\tif func(i).get_sign() != -i.get_sign():\r\n\t\t\treturn False\r\n\treturn True",
"def test_return_signif_code_Z():\n res = _return_signif_code_Z(\n Z_val, uncor_alpha=uncor_alpha, fdr_alpha=cor_alpha,\n bon_alpha=cor_alpha)\n\n assert all(res == np.array([4]*3+[3]+[2]+[1]*58+[0]*37)), (\"Error in the \\\n significance code vector\")",
"def _invert_signs(signs):\n return signs[0] < 0",
"def checkPowerSign(self,sign):\n\n if sign == \"^\":\n return True\n else:\n return False",
"def _sign(x):\n if _copysign(1.0, x) == -1.0:\n return \"-\"\n else:\n return \"+\"",
"def ZA( self, ZA, suffix = \"\" ) :\n\n sZA = endlmisc.strZASuffix( ZA, suffix )\n for z in self.zas :\n if ( z.sZA == sZA ) : return z \n return None",
"def test_z_trans(self):\n assert False",
"def test_is_number_sign_identified(self):\n negative_num, _ = PrettifyUtil.negativeNumber('0.1234')\n self.assertFalse(negative_num, msg=\"Number sign identified correctly\")\n negative_num, _ = PrettifyUtil.negativeNumber('-0.1234')\n self.assertTrue(negative_num, msg=\"Number sign identified correctly\")\n negative_num, _ = PrettifyUtil.negativeNumber('+0.1234')\n self.assertFalse(negative_num, msg=\"Number sign identified correctly\")",
"def sym_z(self):\n return self._sym_z",
"def signe(x):\n if x > 0 : return 1\n elif x < 0 : return -1\n else : return 0",
"def check_parity(parity_hitran,parity_exo):\n\n if parity_hitran == parity_exo:\n return True\n else:\n return False",
"def endl_ZSymbolToZ( symbol ) :\n\n try :\n return( chemicalElementMiscModule.ZFromSymbol[symbol] )\n except :\n return None",
"def _is_phase(phase):\n return phase in [\"+1\", \"-1\"]",
"def is_sign_preserving(func):\r\n\tfor i in func.domain():\r\n\t\tif func(i).get_sign() != i.get_sign():\r\n\t\t\treturn False\r\n\treturn True",
"def is_asian(char):\n\n # 0x3000 is ideographic space (i.e. double-byte space)\n # Anything over is an Asian character\n return ord(char) > IDEOGRAPHIC_SPACE",
"def __adjust_sign(self,hemi):\n pos = ['N','E']\n neg = ['S','W']\n if hemi:\n if hemi.upper() in neg:\n self.degrees = -abs(self.degrees)\n elif hemi.upper() in pos:\n self.degrees = abs(self.degrees)\n else:\n raise ValueError( \"Hemisphere should be N, S, E, or W. Why are you giving me %s?\" % (hemi,) )",
"def endl_ZSymbol( Z ) :\n try :\n return( chemicalElementMiscModule.symbolFromZ[Z] )\n except :\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets list of blocked (blacklisted) keys that should not be accepted. Typically, this functionality is used to block keys for refunded purchases or pirated keys so that they aren't recognized as valid anymore. The data bytearray contains SHA1 hashes of blocked keys (20 bytes per hash). The hashes are computed from license keys (not names) by first decoding the user-entered key as base32 and then calculating the SHA1 hash of the decoded data.
|
def set_blocked_keys(self, data):
    """Install the list of blocked (blacklisted) license-key hashes.

    Parameters
    ----------
    data : bytes
        Concatenated SHA1 hashes of blocked license keys (20 bytes per
        hash), handed straight to the native library.
    """
    # must be kept around, because el_set_blocked_keys() doesn't make a copy
    self._blocked_keys_data = create_string_buffer(data)
    # NOTE(review): create_string_buffer appends a trailing NUL byte, so the
    # explicit len(data) keeps the native call scoped to the hash payload.
    _impl.el_set_blocked_keys(byref(self._blocked_keys_data), len(data))
|
[
"def test_blacklisted_key(self):\n\n key = \"1QCC5-W30DP-FGFRG-K1JEF-QUDLP\"\n KeyValidator.add_key_to_blacklist(key)\n key_status = KeyValidator.check_key(key)\n self.assertEqual(KeyStatus.BLACKLISTED, key_status)",
"def black_list_checking(self,meta):\n variable_length_messages = meta[1].value.split('bitxx')\n pubkey = variable_length_messages[1]\n \n if self.blacklist == None:\n pass\n #TODO: Turn on blacklisting\n elif self.blacklist(pubkey,int(meta[3].value)):\n raise Exception('Black listed')\n else:\n pass",
"async def fill_blacklist(self):\n query = 'SELECT * FROM (SELECT guild_id AS snowflake_id, blacklisted FROM guild_config UNION ALL SELECT user_id AS snowflake_id, blacklisted FROM users_data) WHERE blacklisted=\"TRUE\"'\n cur = await self.db.execute(query)\n data = await cur.fetchall()\n self.blacklist = {r[0] for r in data} or set()",
"def testApplicationBlocker( self ):\r\n\r\n #assert sys.argv[-1] == ASADMIN, \"BlockedList must be run as administrator to function\"\r\n\r\n # test to see if running as admin \r\n is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0\r\n if not is_admin:\r\n self.fail(\"BlockedList must be run as admin to function\")\r\n \r\n blockListA = BlockedList()\r\n blockListA.appDict[\"appA\"] = BLOCKED\r\n blockListA.appDict[\"appB\"] = BLOCKED\r\n blockListA.appDict[\"appC\"] = BLOCKED\r\n blockListA.appDict[\"appD\"] = UNBLOCKED\r\n\r\n blockListA.updateRegistry()\r\n\r\n assert blockListA.keyIsPresent( \"appA\" ), \"disallowRun registry not being correctly set\"\r\n assert blockListA.keyIsPresent( \"appB\" ), \"disallowRun registry not being correctly set\"\r\n assert blockListA.keyIsPresent( \"appC\" ), \"disallowRun registry not being correctly set\"\r\n assert blockListA.keyIsPresent( \"appD\" ) == False, \"disallowRun registry not being correctly set\"\r\n \r\n blockListA.disallowApps( True )\r\n\r\n assert blockListA.appsAreBlocked(), \"disallowApps not correctly modifying explorer registry\"\r\n\r\n blockListA.disallowApps( False )\r\n\r\n assert blockListA.appsAreBlocked() == False, \"disallowApps not correctly modifying explorer registry\"",
"def blocked_items(self, blocked_items):\n\n self._blocked_items = blocked_items",
"def _blacklist_cache_key(t):\n key_data = 'blacklist%(s_data)s' % {\n 's_data': t\n }\n if six.PY3:\n key_data = key_data.encode('utf-8')\n\n key = hashlib.sha1()\n key.update(key_data)\n return key.hexdigest()",
"def get_blacklisted_sense_keys(freqs):\n discarded = []\n msg.info('collecting blacklisted sense keys')\n for key, freq in freqs.items():\n try:\n term, sense = split_key(key)\n except ValueError:\n continue\n if sense and sense not in sense_whitelist:\n discarded.append(key)\n return discarded",
"def missing(self,keylist):\r\n\t\tmissing=[]\r\n\t\tfor key in keylist:\r\n\t\t\tif not self.raw.has_key(key):\r\n\t\t\t\tmissing.append(key)\r\n\t\treturn missing",
"def drop_password_key(data):\n if not isinstance(data, dict):\n return\n\n for key in data.keys():\n if key in ENCRYPT_LIST:\n del data[key]\n elif data[key] and isinstance(data[key], dict):\n drop_password_key(data[key])",
"async def blacklist_view(self, ctx: commands.Context):\r\n blacklisted = await self.config.blacklisted() or [\"None\"]\r\n await ctx.author.send(\r\n f\"The following IP addresses are blocked: {humanize_list(blacklisted)}\"\r\n )",
"def blacklist(self):\n self.blacklisted = True\n self.save()",
"def blacklist(_):\n raise CmdException(\"The `!!/blacklist` command has been deprecated. \"\n \"Please use `!!/blacklist-website`, `!!/blacklist-username`, \"\n \"`!!/blacklist-keyword`, or perhaps `!!/watch-keyword`. \"\n \"Remember to escape dots in URLs using \\\\.\")",
"def blacklist(self, peer, query):\n self.checkstat(\"blacklist\")",
"def add_black_hash(black_hash):\n global blacklist_hashes\n if not blacklist_hashes:\n load_hashes()\n blacklist_hashes.append(black_hash.hexdigest())\n save_hashes()",
"async def list(self, ctx):\n keylist = []\n try:\n for key in data[ctx.message.server.id].keys():\n keylist.append(key)\n keylist = ', '.join(keylist)\n await self.Aya.say('Blacklisted words: \\n`' + keylist + '`')\n except KeyError:\n await self.Aya.say('You must add a word to the blacklist before invoking this command.')",
"def testUntrustedBlockableWithImproperGlobalWhitelistRules(self):\n santa_blockable = test_utils.CreateSantaBlockable()\n santa_blockable.state = constants.STATE.UNTRUSTED\n santa_blockable.put()\n\n test_rule = rule_models.SantaRule(\n parent=santa_blockable.key,\n rule_type=constants.RULE_TYPE.BINARY,\n policy=constants.RULE_POLICY.WHITELIST,\n in_effect=True)\n test_rule.put()\n\n ballot_box = api.SantaBallotBox(santa_blockable.key.id())\n ballot_box.blockable = santa_blockable\n\n ballot_box._CheckRules()\n\n rule_query = rule_models.SantaRule.query()\n\n self.assertEqual(rule_query.count(), 1)\n\n rule = rule_query.get()\n\n self.assertFalse(rule.in_effect)",
"def _without_keys(self, data, exclude_keys):\n if type(data) == list:\n return [self._without_keys(entry, exclude_keys) for entry in data]\n\n if type(data) == dict:\n return {\n key: self._without_keys(value, exclude_keys)\n if type(value) in [list, dict]\n else value\n for key, value in data.items()\n if key not in exclude_keys\n }\n\n return data",
"def unblock_numbers():\n print 'Adding some blocked numbers'\n client = create_client()\n # block some numbers\n recipients = [\"+61412345678\", \"+61412345676\", \"+61412345675\"]\n result = client.block_numbers(recipients)\n\n print 'Blocked: %d, failed: %d' % (result._blocked, result._failed)\n\n # unblock the first item only\n recipients = recipients[:1]\n print 'Unblocking:', recipients\n result = client.unblock_numbers(recipients)\n\n print 'Unblocked: %d, failed: %d' % (result._unblocked, result._failed)\n\n # check the remaining blocked numbers\n blocked = client.get_blocked_numbers()\n for recipient in blocked:\n print 'Remaining blocked:', recipient.value",
"def load_hashes():\n global blacklist_hashes\n blacklist_hashes = json.load(open(bl_file_path, 'r'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies that the license key associated with 'name' is valid.
|
def verify_license_key(self, key, name):
    """Return True if the license *key* is valid for *name*.

    Both arguments may be str or bytes; str values are UTF-8 encoded
    before being passed to the native library.
    """
    key_bytes = key.encode('utf-8') if isinstance(key, str) else key
    name_bytes = name.encode('utf-8') if isinstance(name, str) else name
    result = _impl.el_verify_license_key(self.ctxt, key_bytes, name_bytes)
    return bool(result)
|
[
"def check_license(self, name):\n [license_] = self._license(name).licenses\n if not _license_is_active(license_):\n if (\n license_.expiration\n and datetime.strptime(license_.expiration, \"%Y-%m-%d\").date()\n < date.today()\n ):\n raise ExpiredLicenseError(name)\n raise MissingLicenseError(name)",
"def generate_license_key(self, name):\n if isinstance(name, str):\n name = name.encode('utf-8')\n size = _impl.el_generate_license_key(self.ctxt, name, None)\n buf = create_string_buffer(size)\n if _impl.el_generate_license_key(self.ctxt, name, byref(buf)) == -1:\n raise RuntimeError('error generating license key')\n return buf.value.decode('utf-8')",
"def test_license(self):\n for font in self.fonts:\n self.assertEqual(\n font.customParameters['license'],\n LICENSE,\n (\"License is incorrect.\\n\\n\"\n \"font.customParameters['license'] must be '%s'\") % LICENSE\n )",
"def test_validate_on_invalid_name(self):\n args = (enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n kwargs = {'name': 0}\n\n self.assertRaises(TypeError, SymmetricKey, *args, **kwargs)",
"def valid_license(element):\n license = element['image_license']\n return license in [\"Attribution License\", \"No known copyright restrictions\"]",
"def verifylicense(ctx):\n res = run(\"find . -name '*.go' | grep -v dev-env\", hide=\"out\")\n no_license = False\n for file in res.stdout.splitlines():\n res = run(\"grep -q License {}\".format(file), warn=True)\n if not res.ok:\n no_license = True\n print(\"{} is missing license\".format(file))\n if no_license:\n raise Exit(message=\"#### Files with no license found.\\n#### Please run \"\"inv bumplicense\"\" to add the license header\")",
"def test_licenses_schema(od_licenses_json):\n for key in od_licenses_json:\n license_validator.validate(od_licenses_json[key])",
"def license_missing_error(self, license_name):\n return any(\n self.filtered_errors(\n lambda error: error[\"message\"]\n == self.license_missing_error_format.format(\n license_name,\n ),\n )\n )",
"def check_match(self, **kwargs: Any) -> bool:\n name = safe_name(kwargs['name']).lower()\n if name not in self.safety_db.keys():\n return False\n\n version = kwargs['version']\n try:\n version = Version(version)\n except InvalidVersion: # pragma: no cover\n try:\n version = LegacyVersion(version)\n logger.debug(f'Package {name}=={version} is not a valid PEP 440 version, trying Legacy versioning')\n except InvalidVersion:\n logger.debug(f\"Package {name}=={version} has an invalid version\")\n return False\n\n for requirement in self.safety_db[name]:\n if version in requirement.specifier:\n logger.debug(f\"Safety DB MATCH: Release {name}=={version} matches specifier {requirement.specifier}\")\n return True\n return False",
"def _verify_incredibuild_licence(licence_name, platform_name):\t\n\ttry:\n\t\tresult = subprocess.check_output(['xgconsole.exe', '/QUERYLICENSE'])\n\texcept:\n\t\terror = '[ERROR] Incredibuild not found on system'\n\t\treturn False, \"\", error\n\t\t\n\tif not licence_name in result:\n\t\terror = '[ERROR] Incredibuild on \"%s\" Disabled - Missing IB licence: \"%s\"' % (platform_name, licence_name)\n\t\treturn False, \"\", error\n\t\t\n\treturn True,\"\", \"\"",
"def license_expired_error(self, license_name):\n return any(\n self.filtered_errors(\n lambda error: error[\"message\"]\n == self.license_expired_error_format.format(\n license_name,\n ),\n )\n )",
"def check_name(self,name) :\n return self.name == name",
"def test_keys_same_name(self):\n self.assertEqual(\"Multiple TSIG keys with name 'test.key.'\",\n tsig_keys.check({'keys':\n ['test.key:QklORCAxMCBpcyBjb29sCg==',\n 'test.key:b3RoZXIK']}))",
"def test_invalid_client_contact_name():\n party = copy.deepcopy(CLIENT_PARTY)\n party['contact']['name'] = 'Name too long XXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n\n is_valid, errors = validate(party, 'clientParty', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"def test_keys_valid(self):\n self.assertEqual(None, tsig_keys.check({'keys':\n ['testkey:QklORCAxMCBpcyBjb29sCg==',\n 'test.key:QklORCAxMCBpcyBjb29sCg==:hmac-sha1']}))",
"def test_get_license_passes(self):\n response = self.client.get(self.single_licence_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"name\"], TEST_LICENSE_NAME)\n self.assertEqual(response_body[\"description\"], TEST_LICENSE_DESCRIPTION)\n self.assertEqual(response_body[\"plainText\"], TEST_LICENSE_PLAINTEXT)",
"def storage_name_valid(name: str) -> bool:\n if re.match('[a-z0-9]{3,24}$', name):\n return True\n return False",
"def verify_unit_name(self, name):\n return self.student.student_class.units.filter(name=name).exists()",
"def test_has_license(self, repo, license_key, expected):\n test = GithubOrgClient('birdsAREreal')\n self.assertEqual(test.has_license(repo, license_key), expected)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a new license key for 'name'.
|
def generate_license_key(self, name):
    """Generate a new license key for *name*.

    Parameters
    ----------
    name : str or bytes
        The license holder's name; str is UTF-8 encoded first.

    Returns
    -------
    str
        The generated license key.

    Raises
    ------
    RuntimeError
        If the native library reports an error (-1) during generation.
    """
    if isinstance(name, str):
        name = name.encode('utf-8')
    # First call with a NULL output buffer asks the library for the
    # required buffer size.
    size = _impl.el_generate_license_key(self.ctxt, name, None)
    # Bug fix: the sizing call's error return was previously unchecked,
    # so -1 flowed into create_string_buffer() and raised a confusing
    # ValueError instead of the intended RuntimeError.
    if size == -1:
        raise RuntimeError('error generating license key')
    buf = create_string_buffer(size)
    if _impl.el_generate_license_key(self.ctxt, name, byref(buf)) == -1:
        raise RuntimeError('error generating license key')
    return buf.value.decode('utf-8')
|
[
"def generate_key(ctx, name):\n click.echo(f\"Generating key file {name}.key...\")\n\n # key generation\n key = Fernet.generate_key()\n\n # string the key in a file\n with open(f'{name}.key', 'wb') as file_key:\n file_key.write(key)\n\n click.echo(f\"Key file {name}.key successfully generated!\")\n click.echo(\"Save {name}.key somewhere secure, you will not be able to recover files encrypted using this key \"\n \"without it.\")",
"def create_key_pair(self, name):\r\n raise NotImplementedError(\r\n 'create_key_pair not implemented for this driver')",
"def _gen_key(version):\n priv = keys.generate_sign_key()\n pub = keys.public_sign_key(priv)\n return trcs.Key(version=version, priv_key=priv, pub_key=pub)",
"def generate_key(name, key_name='DEFAULT'):\n\n\t# TODO: Backward compatibility\n\tif isinstance(key_name, bytes):\n\t\tkey_name = key_name.decode()\n\n\tkey_name = key_name.lower()\n\n\t# TODO: Create a batter Exception type\n\tassert key_name == 'default', 'For now, everything will be encrypted using it'\n\n\t# First step: Generate the encryption key\n\t# We don't need more than 256, so we take the highest under 512 bits\n\t# Key sizes are sorted by default, so we just have to take the last one\n\tkey_length = ENCRYPTION_KEY_SIZE\n\n\tprint('Generating a %s bits key...' % (key_length * 8), flush=True, end='')\n\tkey = os.urandom(key_length)\n\tprint('done', flush=True)\n\n\ttry:\n\t\tos.chdir(name)\n\texcept FileNotFoundError:\n\t\t# TODO: Create a batter Exception type\n\t\traise\n\telse:\n\t\ttry:\n\t\t\tos.makedirs('keyring', exist_ok=True)\n\t\t\tos.chdir('keyring')\n\t\t\ttry:\n\t\t\t\t# !!! WARNING !!! RACE CONDITION\n\t\t\t\t# Process-based and Thread-bases\n\t\t\t\ttry:\n\t\t\t\t\topen(key_name, 'rb').close()\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tpassword, confirmation = '01'\n\n\t\t\t\t\tmsg = msg0 = 'Please enter a password to encrypt it:'\n\t\t\t\t\tmsg1 = 'Passwords mismatch, Please re enter a password to encrypt it:'\n\n\t\t\t\t\twhile password != confirmation:\n\t\t\t\t\t\t# TODO: Restrict password to secure ones\n\t\t\t\t\t\tpassword = getpass.getpass(msg)\n\t\t\t\t\t\tconfirmation = getpass.getpass('Please confirm your password to encrypt it:')\n\t\t\t\t\t\t# TODO: Create a special type of password: PIN code\n\t\t\t\t\t\tif password != confirmation:\n\t\t\t\t\t\t\tmsg = msg1\n\t\t\t\t\t\telif len(password) > ENCRYPTION_KEY_SIZE // 8:\n\t\t\t\t\t\t\tmsg = 'Password too long' + msg0\n\t\t\t\t\t\t\tpassword, confirmation = '01'\n\n\t\t\t\t\t# A way to check the key, maybe I can do better\n\t\t\t\t\tkey = encrypt_data(password.encode().zfill(ENCRYPTION_KEY_SIZE // 8), key + b' is the right key')\n\t\t\t\t\twith open(key_name, 'wb') as 
f:\n\t\t\t\t\t\tf.write(key)\n\n\t\t\t\telse:\n\t\t\t\t\traise RuntimeError(\n\t\t\t\t\t\t'Key already defined'\n\t\t\t\t\t)\n\t\t\tfinally:\n\t\t\t\tos.chdir('..')\n\t\tfinally:\n\t\t\tos.chdir('..')",
"def generate_keyname():\n return str(uuid.uuid1())",
"def _create_key(self):\n return uuid.uuid4().hex",
"def create_key(event):\n\tevent.key = event.name.lower(), event.start_date.year\n\treturn event.key",
"def make_keys(self, path_name='~/.nanopub/id'):\n self._run_command(f'{NANOPUB_JAVA_SCRIPT} mkkeys -a RSA -f {path_name}')",
"def make_key(self, key, version=None):\n if version is None:\n version = self.version\n\n new_key = self.key_func(self.key_prefix, key, version)\n return new_key",
"def create_key(self, sp, creator, email):\n import hashlib\n import random\n\n from dateutil.relativedelta import relativedelta\n\n date = timezone.now() + relativedelta(months=1)\n activation_key = hashlib.sha1(str(random.random()).encode(\"utf-8\")).hexdigest()\n key = self.create(sp=sp, creator=creator, activation_key=activation_key, email=email, valid_until=date)\n return key",
"def CreateKey(*, session, name):\n ec2conn = session.connect_to(\"ec2\")\n return ec2conn.create_key_pair(key_name=name)",
"def generate_key():\n return pyelliptic.ECC(curve=__CURVE)",
"def make_key(self):\n\t\tif self.key:\n\t\t\tif not os.path.isfile(os.path.join(self.root, self.key + \".biprivatekey\")):\n\t\t\t\tprint_green(\"\\nRequested key does not exist.\")\n\t\t\t\tret = subprocess.call([self.dscreatekey, self.key], stdout = subprocess.DEVNULL if self.quiet else None, stderr = subprocess.DEVNULL if self.quiet else None) # Created in root\n\t\t\t\tif ret == 0:\n\t\t\t\t\tprint_blue(\"Created: \" + os.path.join(self.root, self.key + \".biprivatekey\"))\n\t\t\t\telse:\n\t\t\t\t\tprint_error(\"Failed to create key!\")\n\n\t\t\t\ttry:\n\t\t\t\t\tprint_blue(\"Copying public key to release directory.\\n\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.join(self.release_dir, \"Keys\"))\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tshutil.copyfile(os.path.join(self.root, self.key + \".bikey\"), os.path.join(self.release_dir, \"Keys\", self.key + \".bikey\"))\n\n\t\t\t\texcept:\n\t\t\t\t\tprint_error(\"Could not copy key to release directory.\\n\")\n\t\t\t\t\traise\n\n\t\t\telse:\n\t\t\t\tprint_green(\"\\nNOTE: Using key \" + os.path.join(self.root, self.key + \".biprivatekey\\n\"))\n\n\t\t\tself.key = os.path.join(self.root, self.key + \".biprivatekey\")",
"def create_ssh_key(name):\n instance = get_instance(name)\n with settings(host_string=instance.public_dns_name):\n run('ssh-keygen -C \"caguilar@dwdandsolutions.com\" -t rsa')\n print \"Authorize this on github \\n\"\n run(\"cat ~/.ssh/id_rsa.pub\")",
"def __regenerate_key(args):\n print(\"\\nRegeneration key...\")\n\n acs_client = __get_communication_management_client()\n\n key_type = {\"key_type\": args.type}\n key = acs_client.communication_service.regenerate_key(args.resource_group_name, args.resource_name, RegenerateKeyParameters(**key_type))\n print(key)",
"def key(self, name, secret):\n return self._send_command(\"key %s %s\" % (name, secret))",
"def gen_api_key():\n m = hashlib.sha256()\n m.update(get_random_word(12))\n return unicode(m.hexdigest()[:12])",
"def generate_self_id_key() -> SigningKey:\n k = key.generate_signing_key(\"x\")\n k.version = encode_pubkey(k)\n return k",
"def license_name(self, value):\n self.logger.warn(\n \"Setting values on license_name will NOT update the remote Canvas instance.\"\n )\n self._license_name = value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collapse the feature table to the user-specified taxonomic level (ASV by default).
|
def collapse_taxa(feature_table_artifact, taxonomy_artifact, collapse_level="asv"):
    """Collapse a feature table to the requested taxonomic level.

    The default level, "asv", skips QIIME 2 collapsing and instead joins
    the raw per-ASV counts with their taxonomy assignments.

    Returns a pandas DataFrame with one row per feature/taxon and one
    count column per sample, plus a "Taxon" column.
    """
    level = collapse_level.lower()
    if level not in VALID_COLLAPSE_LEVELS:
        raise AXIOME3Error("Specified collapse level, {collapse_level}, is NOT valid!".format(collapse_level=level))

    if level == "asv":
        # Feature table comes out as samples x ASVs; transpose so ASV ids
        # index the rows, matching the taxonomy dataframe's index.
        asv_table = feature_table_artifact.view(pd.DataFrame).T
        taxonomy_df = taxonomy_artifact.view(pd.DataFrame)
        merged = asv_table.join(taxonomy_df)
        # Confidence scores are not needed downstream; use a numeric index.
        return merged.drop(["Confidence"], axis="columns").reset_index(drop=True)

    collapsed = collapse(
        table=feature_table_artifact,
        taxonomy=taxonomy_artifact,
        level=VALID_COLLAPSE_LEVELS[level],
    )
    # Collapsed table is samples x taxa; transpose so taxa index the rows,
    # then expose the taxon labels as an ordinary column.
    taxa_table = collapsed.collapsed_table.view(pd.DataFrame).T
    taxa_table["Taxon"] = taxa_table.index
    return taxa_table.reset_index(drop=True)
|
[
"def collapse_tax(self):\n try:\n for level in self.inputs['levels']:\n if level != 'otu':\n for x in list(self.levels['otu']):\n self.levels[level][x] = _data_bin(self.otu[x], self.n[level], level + '_' + x)\n self.write_bioms()\n except TypeError:\n logger.error(\"Could not collapse taxonomy\", exc_info=True)",
"def select_taxa_from_summary(df, taxa):\n df['filter'] = df.taxa.apply(\n lambda x: True if int(taxa) in x else False)\n return df[df['filter'] == True]",
"def collapse(item, progs=True):\n item.setParam('Tree', 'collapse')\n\n if progs and item.Type() == robolink.ITEM_TYPE_PROGRAM:\n item.ShowInstructions(False)",
"def feature_collapsing(features):\n new_features = []\n group_number = 0\n mzrtgroup = []\n for feature in features:\n if feature.mzrtgroup == group_number:\n mzrtgroup.append(feature)\n else:\n # assert feature.mzrtgroup == group_number + 1 # to do: there are a case, when borders are empty\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n mzrtgroup = [feature]\n group_number = feature.mzrtgroup\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n return new_features",
"def _taxonomy_tree_from_features(self, features):\n feature_taxons = self._features.loc[features]\n tree_data = ((i, [taxon.lstrip() for taxon in lineage.split(';')])\n for i, lineage in feature_taxons['Taxon'].items())\n return skbio.TreeNode.from_taxonomy(tree_data)",
"def veal_tax(self):\r\n keys = list(creds_rtypes[\"tax\"].keys())\r\n vals = list(creds_rtypes[\"tax\"].values())\r\n if self.rev_tax.cget('text') == 'Reveal Tax':\r\n self.tax_lbl.configure(text=f'Tax(in %)\\n{keys[0]} {chr(129130)} {vals[0]}\\n{keys[1]} {chr(129130)}'\r\n f' {vals[1]}\\n{keys[2]} {chr(129130)} '\r\n f'{vals[2]}', justify=LEFT)\r\n self.rev_tax.config(text='Hide Tax')\r\n else:\r\n self.tax_lbl.configure(text='Tax')\r\n self.rev_tax.config(text='Reveal Tax')",
"def collapse_items(self):\n self.populate_selected_tree()",
"def make_en_masse(level=1, fcols=None, fcols_abbrev=None, force_cols=None):\n # NOTE: the rollup code will try to make a combined indexe on all the\n # features --- in the test table. That's great, but it won't work\n # if some features are in test_aux. So comment that out in the\n # create_rollup code for now.\n if force_cols is None:\n force_cols = []\n all_cols = [c for c in ABBREVS_F['abtrain'].keys() if c not in ('block31', 'user_id','aux_dia','orig_destination_distance')]\n if fcols is not None:\n all_cols = fcols\n if fcols_abbrev is not None:\n all_cols = [ABBREVS_R['abtrain'] for c in fcols_abbrev if c not in ('blk', 'uid','adia','odis')]\n todo = list()\n\n # Remove any forced columns\n if force_cols != []:\n all_cols = [ c for c in all_cols if c not in force_cols ]\n \n all_cols.sort()\n N = len(all_cols)\n\n # TODO: consider making a recursive version of this to handle\n # levels without hardcoding... but realistically, with ~30\n # columns, even doing two levels with 30*29=870 pairs is too much,\n # and I don't expect to do three levels with 30*29*28 tuples,\n # much less four levels.\n if level == 0:\n todo.append([])\n elif level == 1:\n for i in range(N):\n todo.append(force_cols + [all_cols[i]])\n elif level == 2:\n for i in range(N):\n for j in range(i+1, N):\n todo.append(force_cols + [all_cols[i], all_cols[j]])\n elif level == 3:\n for i in range(N):\n for j in range(i+1, N):\n for k in range(j+1, N):\n todo.append(force_cols + [all_cols[i], all_cols[j], all_cols[k]])\n\n for cols in todo:\n tblname = 'abtrain'\n #if all([c in ABBREVS_F['abtrain'] for c in cols]):\n # tblname = 'btrain'\n START = time.time()\n print('{start}> start of processing {cols}'.format(cols=repr(cols), start=time.strftime('%Y-%m-%dT%H:%M:%S')))\n stmt = create_rollup(fcols=cols, tblname=tblname)\n exec(stmt)\n END = time.time()\n elapsed = END-START\n print('{end}> {elapsed:0.0f} sec ({emin:0.1f} min)) processing {cols}'.format(cols=repr(cols), 
end=time.strftime('%Y-%m-%dT%H:%M:%S'), elapsed=elapsed, emin=elapsed/60.0))",
"def make_taxa_table(samples, tool_name):\n taxa_tbl = {}\n for sample in samples:\n try:\n taxa_tbl[sample['name']] = sample[tool_name]['taxa']\n except KeyError:\n pass\n\n taxa_tbl = pd.DataFrame.from_dict(taxa_tbl, orient='index')\n taxa_tbl = taxa_tbl.apply(lambda col: col / col.sum(), axis=0)\n\n return taxa_tbl",
"def remove_taxa_aln_tre(self, taxon_label):\n tax = self.aln.taxon_namespace.get_taxon(taxon_label)\n tax2 = self.tre.taxon_namespace.get_taxon(taxon_label)\n if tax:\n self.aln.remove_sequences([tax])\n self.aln.taxon_namespace.remove_taxon_label(taxon_label) # raises an error if label not found\n # the first prune does not remove it sometimes...\n self.tre.prune_taxa([tax2])\n self.tre.prune_taxa_with_labels([taxon_label])\n self.tre.prune_taxa_with_labels([tax2])\n self.otu_dict[tax.label]['^physcraper:status'] = \"deleted\"\n else:\n self.otu_dict[taxon_label]['^physcraper:status'] = \"deleted, updated otu_dict but was never in tre or aln!\"",
"def drop_features_without_taxa(\n self, **kwargs: Any\n ) -> Optional[AnyGenericIdentifier]:\n ids_to_drop = self.find_features_without_taxa()\n return self._remove_features_by_id(ids_to_drop, **kwargs)",
"def test_summarize_taxonomic_agreement_standard(self):\n exp = ['A\\t3\\t\\'1\\',\\'2\\',\\'3\\'\\t100.00%\\t66.67%\\t33.33%\\tA\\tB,Z\\t'\n 'C,D,T\\n']\n obs = summarize_taxonomic_agreement(self.otu_map1, self.tax_map1, 3)\n self.assertEqual(obs, exp)",
"def format_summarize_taxa(summary, header, delimiter=';'):\n yield \"%s\\n\" % '\\t'.join(header)\n for row in summary:\n # taxon is tuple, join together for foo;bar;foobar\n taxon = row[0]\n line = [delimiter.join(taxon)]\n\n # add on otu counts\n line.extend(map(str, row[1:]))\n\n yield \"%s\\n\" % '\\t'.join(line)",
"def toggle_subtotal(self, tax=\"%\", discounts=False, shipping=False):\n self.fields = {\"tax\": tax,\n \"discounts\": discounts, \"shipping\": shipping}",
"def Convert_AvidaSpop_To_StdPhylogeny(input_fpath, output_fpath=None, output_format=\"csv\", minimal_output=False):\n # Is input_fpath a valid file?\n if (not os.path.isfile(input_fpath)):\n raise ValueError(\"Failed to find provided input file ({})\".format(input_fpath))\n\n # Is output_format valid?\n if (not output_format in VALID_OUT_FORMATS):\n raise ValueError(\"Invalid output format provided ({}). Valid arguments include: {}\".format(output_format, VALID_OUT_FORMATS))\n\n output_fpath = output_fpath if (output_fpath != None) else input_fpath.replace(\".spop\", \"_standard-phylogeny.{}\".format(output_format))\n\n # -- surgery to get this to work on output of analyze mode genotype detail file --\n dat_file_contents = read_avida_dat_file(input_fpath)\n avida_data = {field:[] for field in dat_file_contents[0].keys()}\n for line in dat_file_contents:\n for field in line:\n avida_data[field].append(line[field])\n\n # Clean up avida data to play with standard.\n # avida_data[\"ancestor_list\"] = [list([\"none\" if anc == \"(none)\" else anc for anc in anc_lst]) for anc_lst in avida_data.pop(\"parents\")]\n avida_data[\"ancestor_list\"] = [[anc_list] for anc_list in avida_data[\"parent_id\"]]\n avida_data[\"origin_time\"] = copy.deepcopy(avida_data[\"update_born\"])\n avida_data[\"id\"] = list(avida_data[\"genotype_id\"])\n # -- end surgery --\n\n # Are all IDs unique?\n id_set = set(avida_data[\"id\"])\n if (len(avida_data[\"id\"]) != len(id_set)):\n raise ValueError(\"Avida organism IDs must be unique!\")\n\n # Convert Avida data into pandas data frame.\n df = pd.DataFrame(data = avida_data)\n\n # Drop any fields we want to delete.\n del_fields = []\n if minimal_output:\n # What fields should we delete (if we're doing minimal output)?\n min_fields = [\"id\", \"ancestor_list\", \"origin_time\"]\n del_fields = [field for field in avida_data if not field in min_fields]\n df.drop(del_fields, axis=1, inplace=True)\n\n # Adjust the header so that standard fields are 
up front.\n stds_hd = [\"id\", \"ancestor_list\", \"origin_time\"]\n new_header = stds_hd + [field for field in avida_data if (not field in stds_hd) and (not field in del_fields)]\n # Write output in requested format.\n\n # print(len(df.id.unique()))\n df.set_index(\"id\", inplace=True, drop=False)\n\n if (output_format == \"csv\"):\n with open(output_fpath, \"w\"):\n df.to_csv(output_fpath, sep=\",\", columns=new_header, index=False, index_label=False)\n elif (output_format == \"json\"):\n with open(output_fpath, \"w\"):\n df.to_json(output_fpath, orient=\"index\")\n\n return True",
"def test_generate_taxonomic_agreement_summary_ref_only(self):\n exp = {'A': [2, ['1', '2'], [100.0, 100.0, 50.0], [['A'], ['B'],\n ['C', 'D']]],\n 'B': [1, ['3'], [100.0, 100.0, 100.0], [['A'], ['Z'], ['T']]]}\n obs = _generate_taxonomic_agreement_summary(self.otu_map2,\n self.tax_map1, 3)\n self.assertFloatEqual(obs, exp)",
"def __subset_to_useful_features(self):\n #expand features / subset features here!\n self.input_df = self.input_df[['Snippet', 'Full Name',\n 'Avatar', 'Professions',\n 'Gender']]",
"def _to_ufo_features(\n font: GSFont,\n ufo: Font | None = None,\n generate_GDEF: bool = False,\n master: GSFontMaster | None = None,\n expand_includes: bool = False,\n) -> str:\n if not master:\n expander = PassThruExpander()\n else:\n expander = TokenExpander(font, master)\n\n prefixes = []\n for prefix in font.featurePrefixes:\n strings = []\n if prefix.name != ANONYMOUS_FEATURE_PREFIX_NAME:\n strings.append(\"# Prefix: %s\\n\" % prefix.name)\n strings.append(autostr(prefix.automatic))\n strings.append(expander.expand(prefix.code))\n prefixes.append(\"\".join(strings))\n\n prefix_str = \"\\n\\n\".join(prefixes)\n\n class_defs = []\n for class_ in font.classes:\n prefix = \"@\" if not class_.name.startswith(\"@\") else \"\"\n name = prefix + class_.name\n class_defs.append(\n \"{}{} = [ {}\\n];\".format(\n autostr(class_.automatic), name, expander.expand(class_.code)\n )\n )\n class_str = \"\\n\\n\".join(class_defs)\n\n feature_defs = []\n for feature in font.features:\n code = expander.expand(feature.code)\n lines = [\"feature %s {\" % feature.name]\n notes = feature.notes\n feature_names = None\n if font.format_version == 2 and notes:\n m = re.search(\"(featureNames {.+};)\", notes, flags=re.DOTALL)\n if m:\n name = m.groups()[0]\n # Remove the name from the note\n notes = notes.replace(name, \"\").strip()\n feature_names = name.splitlines()\n else:\n m = re.search(r\"^(Name: (.+))\", notes)\n if m:\n line, name = m.groups()\n # Remove the name from the note\n notes = notes.replace(line, \"\").strip()\n # Replace special chars backslash and doublequote for AFDKO syntax\n name = name.replace(\"\\\\\", r\"\\005c\").replace('\"', r\"\\0022\")\n feature_names = [\"featureNames {\", f' name \"{name}\";', \"};\"]\n elif font.format_version == 3 and feature.labels:\n feature_names = []\n feature_names.append(\"featureNames {\")\n for label in feature.labels:\n langID = _to_name_langID(label[\"language\"])\n name = label[\"value\"]\n name = name.replace(\"\\\\\", 
r\"\\005c\").replace('\"', r\"\\0022\")\n if langID is None:\n feature_names.append(f' name \"{name}\";')\n else:\n feature_names.append(f' name 3 1 0x{langID:X} \"{name}\";')\n feature_names.append(\"};\")\n if notes:\n lines.append(\"# notes:\")\n lines.extend(\"# \" + line for line in notes.splitlines())\n if feature_names:\n lines.extend(feature_names)\n if feature.automatic:\n lines.append(\"# automatic\")\n if feature.disabled:\n lines.append(\"# disabled\")\n lines.extend(\"#\" + line for line in code.splitlines())\n else:\n lines.append(code)\n\n # Manual kern features in glyphs also have the automatic code added after them\n # We make sure it gets added with an \"Automatic Code...\" marker if it doesn't\n # already have one.\n if _is_manual_kern_feature(feature) and not re.search(\n INSERT_FEATURE_MARKER_RE, code\n ):\n lines.append(INSERT_FEATURE_MARKER_COMMENT)\n\n lines.append(\"} %s;\" % feature.name)\n feature_defs.append(\"\\n\".join(lines))\n fea_str = \"\\n\\n\".join(feature_defs)\n\n if generate_GDEF:\n assert ufo is not None\n regenerate_opentype_categories(font, ufo)\n\n full_text = \"\\n\\n\".join(filter(None, [class_str, prefix_str, fea_str])) + \"\\n\"\n full_text = full_text if full_text.strip() else \"\"\n\n if not full_text or not expand_includes:\n return full_text\n\n # use feaLib Parser to resolve include statements relative to the GSFont\n # fontpath, and inline them in the output features text.\n feature_file = StringIO(full_text)\n include_dir = os.path.dirname(font.filepath) if font.filepath else None\n fea_parser = parser.Parser(\n feature_file,\n glyphNames={glyph.name for glyph in font.glyphs},\n includeDir=include_dir,\n followIncludes=expand_includes,\n )\n doc = fea_parser.parse()\n return doc.asFea()",
"def summarise_feature_table(data):\n np.seterr(all='ignore')\n median_features = pd.DataFrame(np.array(np.median(data,axis=0))).T\n median_features.columns = ['median_' + str(col) for col in data]\n\n min_features = pd.DataFrame(np.array(np.min(data,axis=0))).T\n min_features.columns = ['min_' + str(col) for col in data]\n max_features = pd.DataFrame(np.array(np.max(data,axis=0))).T\n max_features.columns = ['max_' + str(col) for col in data]\n\n SD_features = pd.DataFrame(np.array(np.std(data,axis=0))).T\n SD_features.columns = ['std_' + str(col) for col in data]\n CV_features = pd.DataFrame(np.array(np.std(data,axis=0))/np.array(np.nanmedian(data,axis=0))).T\n CV_features.columns = ['CV_' + str(col) for col in data]\n CD_features = pd.DataFrame(np.array(np.var(data,axis=0))/np.array(np.nanmedian(data,axis=0))).T\n CD_features.columns = ['CD_' + str(col) for col in data]\n IQR_features = pd.DataFrame(np.array(np.subtract(*np.nanpercentile(data, [75, 25],axis=0)))).T\n IQR_features.columns = ['IQR_' + str(col) for col in data]\n QCD_features = pd.DataFrame(np.array(np.subtract(*np.nanpercentile(data, [75,25],axis=0)))/np.array(np.add(*np.nanpercentile(data, [75, 25],axis=0)))).T\n QCD_features.columns = ['QCD_' + str(col) for col in data]\n\n\n all_features = pd.concat([median_features.reset_index(drop=True),\n min_features.reset_index(drop=True),\n max_features.reset_index(drop=True),\n SD_features.reset_index(drop=True),\n CV_features.reset_index(drop=True),\n CD_features.reset_index(drop=True),\n IQR_features.reset_index(drop=True),\n QCD_features.reset_index(drop=True)], axis=1)\n return all_features"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter dataframe by a specified column by abundance
|
def filter_by_abundance(df, abundance_col, cutoff=0.2):
if(abundance_col not in df.columns):
raise AXIOME3Error("Column {col} does not exist in the dataframe".format(col=abundance_col))
filtered_df = df[df[abundance_col] >= cutoff]
if(filtered_df.shape[0] == 0):
raise AXIOME3Error("No entries left with {cutoff} abundance threshold".format(cutoff=cutoff))
return filtered_df
|
[
"def filter(self, df):\n pass",
"def filter_df(df, filter_column, filter_value):\n return df[df[filter_column] == filter_value]",
"def select_taxa_from_summary(df, taxa):\n df['filter'] = df.taxa.apply(\n lambda x: True if int(taxa) in x else False)\n return df[df['filter'] == True]",
"def filtercountry(df, country):\n mask = df.country.eq(country)\n return df[mask]",
"def _filter(self, df, filters):\n df = df.loc[(df[list(filters)] == pd.Series(filters)).all(axis=1)]\n return df.reset_index(drop=True).copy()",
"def filter_data(df, column_name,column_value,index_drop):\n df = df.loc[df[column_name] == column_value].reset_index(drop = index_drop)\n return df #returning the data structure ",
"def _apply_attr_filters(self, df):\n mask = pd.Series([True] * len(df))\n for degree_substr in self.attr_filters['final_major']:\n mask = mask & ~df['final_major'].str.contains(f'{degree_substr}')\n return df[mask]",
"def filter_loans(df, exclude_by_country):\n loans_mask = False\n for country,loans in exclude_by_country.iteritems():\n loans_mask |= ((df.dwh_country_id==country) & ~(df.fk_loan.isin(loans)))\n return df[loans_mask]",
"def apply_fact_filt(self, df):\n if self.fact_filt is not None:\n df = df.iloc[:, self.fact_filt]\n\n return df",
"def dataWNfilter(df):\n\t\n\t#WIDE\n\tdf = wide_filter(df,'combinado')\n\n\t#NARROW\n\tdf = narrow_filter(df,\"mb_total_qt\")\n\tdf = narrow_filter(df,\"arpu_negocio_promedio\")\n\n\treturn df",
"def fuzzy_filter(df, col, val, return_verbose=False):\n values = set(df[col].tolist())\n matched_value, fuzzy_match_ratio = process.extractOne(val, values)\n if return_verbose:\n return df.loc[df[col] == matched_value], matched_value, fuzzy_match_ratio\n else:\n return df.loc[df[col] == matched_value]",
"def filter_categories_below_threshold(data_df, display_threshold):\n\n return data_df[data_df.iloc[:, 0] > display_threshold]",
"def filter_expr(self):\n return lambda df: reduce(and_, [(b.filter_expr()(df)) for b in self.bins])",
"def _filter_df_by_dict(self, df, filter_dict):\n return df.loc[\n (df[list(filter_dict)] == pd.Series(filter_dict)).all(axis=1)]",
"def df_filter(self, df, features={}, attributes={}, mode='overlap'):\n # Build query.\n query = []; feature_ranges = []\n for f, r in features.items():\n feature_ranges.append(r)\n # Filter by features.\n if mode == 'overlap':\n # Determine whether two ranges overlap:\n # https://stackoverflow.com/questions/325933/determine-whether-two-date-ranges-overlap/325964#325964\n query.append(f'`{f} <`>={r[0]}')\n query.append(f'`{f} >`<={r[1]}')\n elif mode == 'contain':\n query.append(f'`{f} >`>={r[0]}')\n query.append(f'`{f} <`<={r[1]}')\n for attr, r in attributes.items():\n # Filter by attributes.\n query.append(f'{attr}>={r[0]} & {attr}<={r[1]}')\n # Filter dataframe.\n df = df.query(' & '.join(query))\n if features != {}:\n # If using features, compute overlap proportions,\n # and store this in a new column of the dataframe.\n # There's a lot of NumPy wizardry going on here!\n feature_ranges = np.array(feature_ranges)\n node_ranges = np.dstack((df[[f'{f} >' for f in features]].values,\n df[[f'{f} <' for f in features]].values))\n overlap = np.maximum(0, np.minimum(node_ranges[:,:,1], feature_ranges[:,1]) \n - np.maximum(node_ranges[:,:,0], feature_ranges[:,0]))\n df['overlap'] = np.prod(overlap / (node_ranges[:,:,1] - node_ranges[:,:,0]), axis=1) \n return df",
"def remove_outliers(df):\n final_df = df[(np.abs(stats.zscore(df.iloc[:, 1:len(df.columns)])) < 3).all(axis=1)]\n\n return final_df",
"def filter(self):\n\n fc_filt = self.df[self.l2fc_name].abs() < self._log2_fc\n fdr_filt = self.df[\"padj\"] > self._alpha\n outliers = self.df[\"padj\"].isna()\n\n filt_df = self.df.copy()\n filt_df[fc_filt.values | fdr_filt.values | outliers] = np.NaN\n return filt_df",
"def filter_row_by_data_type_audf(col_name, data_type):\n\n data_type = parse_python_dtypes(data_type)\n return abstract_udf(col_name, filter_row_by_data_type, \"boolean\", data_type)",
"def filter_df(self):\n df = self.fusion_df()\n df1 = df.loc[df['Price per month (£)'] >= self.price_min]\n df2 = df1.loc[df1['Price per month (£)'] <= self.price_max]\n df3 = df2.loc[df2['Bedrooms']>=self.bedroom]\n df4 = df3.loc[df3['Bathrooms']>=self.bathroom]\n \n return df4"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Alphabetically sort dataframe by a given column Input;
|
def alphabetical_sort_df(df, cols):
for col in cols:
if(col not in df.columns):
raise AXIOME3Error("Column {col} does not exist in the dataframe".format(col=col))
sorted_df = df.sort_values(by=cols)
return sorted_df
|
[
"def sort_by(df, column_name): # reformat date so the sorting starts from year-month-date\n\n df = df.sort_values(by=[column_name]) # sort the reformat date value\n return df",
"def order_by_col(df: DataFrame, column: str) -> DataFrame:\n df = df.sort(col(column).asc())\n return df",
"def sort_df(df, sort_column, ascending=True):\n return df.sort_values(by=sort_column, ascending=ascending)",
"def sort_by(column, table):\n \n return sorted(table, key = lambda row: row[column])",
"def sort_data_frame(df, column_name_list, is_sorted_ascending=True):\n return df.sort_values(by=column_name_list, ascending=is_sorted_ascending)",
"def sort_colums_for_csv(column_name):\n\n if column_name in column_sort_dict:\n return column_sort_dict[column_name]\n else:\n return ord(column_name[0]) + 99",
"def sort_columns(dataframe):\n return dataframe.reindex_axis(sorted(dataframe.columns), axis=1)",
"def sortCaseInsensitive():\n pass",
"def dataFrameOrdenado(dataFrame,columna = None): \n dataOrdenada = pd.read_csv(dataFrame)\n \n if columna != None:\n try: \n dataOrdenada = dataOrdenada.sort_values(columna)\n except KeyError:\n print(\"No es correcto el nombre de la columna\")\n \n return dataOrdenada",
"def sort(self, col=None):\n if hasattr(self, 'fyear') and col is None:\n i = np.argsort(self.fyear)\n self.fyear = self.fyear[i]\n self.data = self.data[i,:]\n print 'using column `fyear` to sort data'\n else:\n # 0 is default if `col` not specified\n if col is None: \n col = 0\n i = np.argsort(self.data[:,col])\n self.data = self.data[i,:]\n print 'using column `%d` to sort data' % col",
"def sortby(tree, col, descending):\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, \\\n int(not descending)))",
"def sort_table(table, cols):\r\n for col in reversed(cols):\r\n table = sorted(table, key=operator.itemgetter(col))\r\n return table",
"def organize_cols(df, cols):\n # Generate a list of all the columns in the dataframe that are not\n # included in cols\n data_cols = [c for c in df.columns.tolist() if c not in cols]\n data_cols.sort()\n organized_cols = cols + data_cols\n return(df[organized_cols])",
"def clickon(self, event):\n self._sort_by(self.columns.index(event.widget['text']))",
"def sort(self,column,order = Qt.AscendingOrder):\r\n super().sort(column,order)\r\n\r\n # set to false to display the underlying data model row index\r\n # in the vertical header\r\n self._enable_stable_index = True",
"def csvsort(inputfile: str, outputfile: str, columnchoice: str) -> None:\n fileread = readfile(inputfile)\n sorteddata = sortdata(fileread, columnchoice)\n writefile(sorteddata, outputfile)",
"def sort_dataframe_columns(dframe, required_columns):\n extra_cols = dframe.columns.difference(required_columns)\n sorted_cols = list(required_columns) + extra_cols.sort_values().tolist()\n return dframe.loc[:, sorted_cols]",
"def sort_method(sort_column):\n # Default to sorting by last name\n method_to_use = sort_by_last_name\n if sort_column == 1:\n method_to_use = sort_by_first_name\n elif sort_column == 2:\n method_to_use = sort_by_nee\n elif sort_column == 3:\n method_to_use = sort_by_last_name\n elif sort_column == 4:\n method_to_use = sort_by_first_year\n elif sort_column == 5:\n method_to_use = sort_by_last_year\n elif sort_column == 6:\n method_to_use = sort_by_email\n elif sort_column == 7:\n method_to_use = sort_by_instrument\n return method_to_use",
"def sortdata(inputdata: list, columnchoice: str) -> list:\n if len(inputdata) == 0:\n raise ValueError(\"This list is empty\")\n return [inputdata[0]] + sorted(\n inputdata[1:], key=lambda a: a[inputdata[0].index(columnchoice)]\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Chopping dna into small (150 bp) peices
|
def chop_dna(dna):
read_len = 150
max_ovl = 50
min_coverage = 5
out = []
dna_len = len(dna)
base_id = dna.id
starts = []
start = 0
read_n = math.floor((dna_len - max_ovl)/(read_len - max_ovl))
if read_n > 1:
ovl_len = (read_len * read_n - dna_len)/(read_n - 1)
else:
ovl_len = max_ovl
cnt = 0
for i in range(read_n):
for ii in range(min_coverage):
if i == read_n - 1:
out_seq = dna[int(start) : ]
else:
out_seq = dna[int(start) : int(start + read_len)]
out_seq.id = base_id + "_" + str(cnt)
out_seq.letter_annotations["phred_quality"] = [40] * len(out_seq)
out.append(out_seq)
cnt += 1
start += (read_len - ovl_len)
return out
|
[
"def flower(pen, n, size):\n for i in range(n):\n petal(pen, size)\n pen.right(360/n)",
"def get_seedling_objects(image, min_size=4000, max_size=80000):\n\n filled = erode(image)\n labelled = label_objects(filled)\n big_objs = keep_objects_in_bracket(labelled, min_size, max_size)\n plants, _ = ndimage.label(big_objs)\n return plants",
"def crop(a, n):\n start = random.randint(0, len(a) - n - 1)\n return a[start:start+n]",
"def Pentadendrite(self, depth=5,size=200):\n\n t.penup()\n t.setposition(size/(2*math.tan(math.pi/5)),-size/2)\n t.pendown()\n for _ in range(5):\n self.McWortersPentigree(depth,size)\n t.left(360//5)",
"def add_ailerons(profile, percentage, hinge_depth):\n chord = column_max(profile, 0) - column_min(profile, 0) \n aileron_chord = percentage * chord / 100\n cut = column_max(profile, 0) - aileron_chord\n halfway = int(len(profile)/2)\n cut_end_index = index_closest_to(cut, [x[0] for x in profile[:halfway]])\n\n ## depth includes 2*kerf at this point: hinge_depth needs to account for\n ## that, plus the thickness of the hinge remaining\n top = min(profile[:halfway], key = lambda x: abs(x[0]-profile[cut_end_index][0]))[1]\n bottom = min(profile[halfway:], key = lambda x: abs(x[0]-profile[cut_end_index][0]))[1]\n depth = top - bottom - hinge_depth\n travel = depth * 0.3 ## hardcoded for now -- how wide the cut\n cut_start_index = index_closest_to(cut + travel, [x[0] for x in profile[:halfway]])\n\n if DEBUG: \n print(\"aileron cutout:\", cut_start_index, cut_end_index)\n if cut_end_index - cut_start_index > 1:\n ## linear interpolation along the cut to keep # points constant\n for i in range(cut_start_index + 1, cut_end_index):\n x, y = profile.pop(i)\n ym = linear_interpolate(profile[cut_start_index][0], profile[cut_start_index][1], \n profile[cut_end_index][0], (bottom+hinge_depth), x)\n profile.insert(i, [x, ym])\n ## add in hinge bottom\n profile.pop(cut_end_index)\n profile.insert(cut_end_index, [profile[cut_end_index][0], (bottom+hinge_depth)] )\n \n return profile",
"def cut_slice(pizza, r1, c1, r2, c2):\n for i in range(r1, r2+1):\n for j in range(c1, c2+1):\n pizza[i][j] = 'x'",
"def population_from_dna(dna_tools, dna: tyDna, pop_size: int,\n num_of_mutations: int, img)-> tyPop:\n dnas = [dna] # the original is always part of population\n # reseed population by mutating dna\n for _ in range(pop_size-1):\n mut_dna = dna_tools.mutate_dna(dna)\n for _ in range(num_of_mutations-1):\n mut_dna = dna_tools.mutate_dna(mut_dna)\n dnas.append(mut_dna)\n # add_scores\n pop = [(dna_tools.evaluate_dna(dna, img), dna) for dna in dnas]\n return pop",
"def padantadva(self):\n # PMS: don\"t want to do it if anmanosca just applied\n [self.Isthana1,self.Iyatna1] = identify(self.Linary[self.Index - 1])\n if (self.Linary[self.Index + 1] == sktch) and (self.Iyatna1 == idirgha):\n self.insertary(sktt, self.Index)\n self.Index = self.Index + 1",
"def reduce_branch_length(size):\n\n factor = round(random.uniform(0.6, 0.95), 2)\n return size * factor",
"def generate_sons(state):\n child = []\n\n for i in range(0, 7):\n for j in range(0, 7):\n\n # Check possible move up\n if (filled_valid_position(state, i, j)\n and filled_valid_position(state, i - 1, j)\n and empty_valid_position(state, i - 2, j)):\n\n new_state = copy.deepcopy(state) # Copy the current state\n new_state[i][j] = 0 # Remove pin from current position\n new_state[i - 1][j] = 0 # Remove jumped pin\n new_state[i - 2][j] = 1 # Set new pin position\n child.append((new_state, [(i, j), (i - 2, j)]))\n\n # Check possible move down\n if (filled_valid_position(state, i, j)\n and filled_valid_position(state, i + 1, j)\n and empty_valid_position(state, i + 2, j)):\n\n new_state = copy.deepcopy(state) # Copy the current state\n new_state[i][j] = 0 # Remove pin from current position\n new_state[i + 1][j] = 0 # Remove jumped pin\n new_state[i + 2][j] = 1 # Set new pin position\n child.append((new_state, [(i, j), (i + 2, j)]))\n\n # Check possible move left\n if (filled_valid_position(state, i, j)\n and filled_valid_position(state, i, j - 1)\n and empty_valid_position(state, i, j - 2)):\n\n new_state = copy.deepcopy(state) # Copy the current state\n new_state[i][j] = 0 # Remove pin from current position\n new_state[i][j - 1] = 0 # Remove jumped pin\n new_state[i][j - 2] = 1 # Set new pin position\n child.append((new_state, [(i, j), (i, j - 2)]))\n\n # Check possible move right\n if (filled_valid_position(state, i, j)\n and filled_valid_position(state, i, j + 1)\n and empty_valid_position(state, i, j + 2)):\n\n new_state = copy.deepcopy(state) # Copy the current state\n new_state[i][j] = 0 # Remove pin from current position\n new_state[i][j + 1] = 0 # Remove jumped pin\n new_state[i][j + 2] = 1 # Set new pin position\n child.append((new_state, [(i, j), (i, j + 2)]))\n\n return child",
"def pickplant(self):\n for tree in self.plants:\n if (self.gimme_distance((tree[0],tree[1])) <\n self.gimme_distance(self.myplant[:2])):\n\n self.myplant = (tree[0],tree[1],tree[2])",
"def monty_shows(doors, prize_pos, choice):\n # FIXME: nasty code, to make it cleaner\n doors_clone = list(doors)\n doors_clone[prize_pos] = 'x' # kill the prize position\n doors_clone[choice] = 'x' # kill the participant's choice\n\n return doors_clone.index('g')",
"def denoise(feature, n_pos):\n new_feature = []\n for i in range(len(feature)):\n f_vec = []\n for j in range(1900):\n if j not in n_pos:\n f_vec.append(feature[i][1][j])\n new_feature.append((feature[i][0], f_vec))\n return new_feature",
"def random_walk(size=(480, 640), num_dots=300, offset=40, progression=15, dtype=np.uint8):\n frame = np.zeros(size, dtype=dtype)\n fill = dtype(-1)\n dilator = DILATION_FUNCTIONS[dtype]\n selem = morphology.disk(3, dtype=dtype)\n\n # start at the center of the frame\n walk = [[size[0]/2, size[1]/2]]\n\n # do this imperatively\n for n in range(num_dots):\n last = walk[n]\n # print(last)\n node = [\n last[0] + np.random.randint(-offset, offset+1),\n last[1] + np.random.randint(-offset, offset+1)]\n # print(node)\n walk.append(node)\n\n walk = np.array(walk, dtype=np.int)\n\n # print(walk)\n # TODO: hahahahhahaha vectorize it numbnuts\n while True:\n for n in range(num_dots-1):\n new = np.random.randint(-progression, progression+1, size=2)\n biggest_step = max(np.abs(new))\n for off in range(biggest_step):\n # print(off)\n # print(off/biggest_step)\n walk[n:] += (new * (off / biggest_step)).astype(np.int)\n # walk[n:] += new\n walk = walk % np.asarray(size)\n frame.fill(0)\n frame[walk.T.tolist()] = fill\n\n yield dilator(frame, selem=selem)",
"def _make_pockets(self, depth = None):\n depth = depth or self.z_pocket\n # Pockets\n pockets = self._get_segments(self.s.cols, self.s.rows, checker=self.s.is_pocket)\n # print \"Pockets\", pockets\n\n direction = 1\n for row in range(self.s.rows):\n if not pockets[row]:\n continue\n if direction==-1:\n pocket_cols = list(reversed([[end, begin] for begin, end in pockets[row]]))\n else:\n pocket_cols = pockets[row]\n for a, b in pocket_cols:\n self._make_pocket(row, a, b, depth=depth)\n direction = -direction\n self._set_cutting(False)",
"def drawDucks(duckSize):\n pass #TODO drawduck ",
"def add_life(self):\n #print('Adding Plants and Animals')\n from noise import pnoise2\n import random\n random.seed()\n octaves = (random.random() * 0.5) + 0.5\n freq = 16.0 * octaves\n for y in range(self.world.grd.grid_height - 1):\n for x in range(self.world.grd.grid_width - 1):\n pixel = self.world.grd.get_tile(y,x)\n if pixel == 'X': # denoise blocks of mountains\n n = int(pnoise2(x/freq, y / freq, 1)*12+3)\n if n < 1 and random.randint(1,10) > 7:\n self.world.grd.set_tile(y, x, 'A')",
"def nicepos(mask,validr=[250,600],step=[25,25]):\n #check the pilmask,it should be the set all the valid pixel value as 1\n #and blind pixel value as zero.\n #In principle, the beam position should be in the detector for the saxs.\n #herein, we use (x,y) stands for the beam position.\n #the (0,0) point is in the top-left-most corner\n #i,j=0,0\n #write good positions into an ascii file\n fidw=open('mirror1m.dat','w')\n fidw.write('#step,[25,25]\\n')\n pilmask=deepcopy(mask)\n #determine the size of pilmask\n pily,pilx=mask['map'].shape\n #get the position in the loop\n for j in np.arange(validr[0],validr[1],step[0]):\n for i in np.arange(validr[0],validr[1],step[1]):\n #determine the size of array constructed\n print 'position of beam(x,y) ',j,' ',i\n tmpmask=np.zeros((pily*2,pilx*2),dtype=np.int)\n #print tmpmask.shape\n #put pilmask into the center of tmpmask\n tmpmask[pily-i:pily-i+pily,pilx-j:pilx-j+pilx]=mask['map']\n pilmask['map']=tmpmask\n res=flipharmony(pilmask)\n #set the filename of res\n res['filename']='BeamPos1M'+'_'+str(j)+'_'+str(i)\n #show the image\n sf_show(res,win=1,auto=1)\n yn=raw_input('Input y or n to save pos: ')\n if yn == 'y':\n mmax=pilmask['map'].max()\n msum=np.sum(pilmask['map'])\n print 'Mask: max, ',mmax,' sum, ',msum\n fidw.write(str(j)+' '+str(i)+'\\n')\n sf_show(res,win=1,auto=1,svg=1)\n pklwrite(res,res['filename'])\n fidw.close()",
"def drop_bed(height):\n g.write('G0 Z%d F1200' %height)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply automatic hysteresis thresholding. Apply automatic hysteresis thresholding by automatically choosing the high and low thresholds of standard hysteresis threshold. low_prop is the proportion of edge pixels which are above the low threshold and high_prop is the proportion of pixels above the high threshold.
|
def hyst_thresh_auto(edges_in: np.array, low_prop: float, high_prop: float) -> np.array:
######################################################
# calculate thresholds based on the proportion values
l_thres = np.percentile(edges_in, (1.-low_prop)*100)
h_thres = np.percentile(edges_in, (1.-high_prop)*100)
# call the hysteresis threshold function
hyst_out = hyst_thresh(edges_in, l_thres, h_thres)
######################################################
return hyst_out
|
[
"def hysteresis_threshold(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def hysteresis_threshold(image, low_threshold, high_threshold):\r\n high_mask = image > high_threshold\r\n low_mask = image > low_threshold\r\n return ndimage.binary_dilation(high_mask, mask=low_mask, iterations=-1)",
"def setThresholds(self,low, high):\n\n self.thrsh[0] = low\n\n self.thrsh[1] = high\n\n conn = sqlite3.connect(self.getHub().getDBFile())\n\n # update the state data for the current actuator\n conn.execute(\"UPDATE SENSORS SET LOWLIMIT = '%f', UPPERLIMIT = '%f' WHERE NODEID = '%s' AND INSTID = '%s'\" % (low,high,self.getNode().nodeID,self.instID))\n conn.commit()\n\n conn.close()",
"def hysteresis():\n print(\">>> Running hysteresis simulation\")\n region = df.Region(p1=(-50e-9, -50e-9, -50e-9), p2=(50e-9, 50e-9, 50e-9))\n mesh = df.Mesh(region=region, cell=(5e-9, 5e-9, 5e-9))\n\n system = mm.System(name=\"hysteresis\")\n system.energy = (\n mm.Exchange(A=1e-12)\n + mm.UniaxialAnisotropy(K=4e5, u=(0, 0, 1))\n + mm.DMI(D=1e-3, crystalclass=\"T\")\n )\n\n def Ms_fun(point):\n x, y, z = point\n if x**2 + y**2 + z**2 <= 50e-9**2:\n return 1e6\n else:\n return 0\n\n system.m = df.Field(mesh, dim=3, value=(0, 0, -1), norm=Ms_fun)\n\n Hmin = (0, 0, -1 / mm.consts.mu0)\n Hmax = (0, 0, 1 / mm.consts.mu0)\n\n hd = oc.HysteresisDriver()\n hd.drive(system, Hmin=Hmin, Hmax=Hmax, n=21, dirname=dirname)",
"def define_energy_threshold(self, method_lo_threshold='area_max', **kwargs):\n # TODO: define method for the high energy threshold\n\n # It is important to update the low and high threshold for ON and OFF\n # vector, otherwise Sherpa will not understand the files\n for obs in self.observations:\n if method_lo_threshold == 'area_max':\n aeff_thres = kwargs['percent'] / 100 * obs.aeff.max_area\n thres = obs.aeff.find_energy(aeff_thres)\n obs.on_vector.lo_threshold = thres\n obs.off_vector.lo_threshold = thres\n else:\n raise ValueError('Undefine method for low threshold: {}'.format(\n method_lo_threshold))",
"def hystLow(img, img_gauss, sd=0, mean=0, diff=40, init_low=0.05, gen_high=0.8, mode='memb'):\n if mode == 'memb':\n masks = {'2sd': ma.masked_greater_equal(img, 2*sd), # values greater then 2 noise sd \n 'mean': ma.masked_greater(img, mean)} # values greater then mean cytoplasm intensity\n elif mode == 'cell':\n masks = {'2sd': ma.masked_greater_equal(img, 2*sd)}\n\n logging.info('masks: {}'.format(masks.keys()))\n\n low_val = {}\n control_diff = False\n for mask_name in masks:\n mask_img = masks[mask_name]\n\n logging.info('Mask {} lower treshold fitting in progress'.format(mask_name))\n\n mask_hyst = filters.apply_hysteresis_threshold(img_gauss,\n low=init_low*np.max(img_gauss),\n high=gen_high*np.max(img_gauss))\n diff_mask = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)\n\n if diff_mask < diff:\n raise ValueError('Initial lower threshold is too low!')\n logging.info('Initial masks difference {}'.format(diff_mask))\n\n low = init_low\n\n i = 0\n control_diff = 1\n while diff_mask >= diff:\n mask_hyst = filters.apply_hysteresis_threshold(img_gauss,\n low=low*np.max(img_gauss),\n high=gen_high*np.max(img_gauss))\n diff_mask = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)\n\n low += 0.01\n\n i += 1\n # is cytoplasm mean mask at initial lower threshold value closed? 
prevent infinit cycle\n if i == 75:\n logging.fatal('Lower treshold for {} mask {:.2f}, control difference {}px'.format(mask_name, low, control_diff))\n raise RuntimeError('Membrane in mean mask doesn`t detected at initial lower threshold value!')\n \n\n # is cytoplasm mask at setted up difference value closed?\n if mask_name == 'mean':\n control_diff = np.all((segmentation.flood(mask_hyst, (0, 0)) + mask_hyst))\n if control_diff == True:\n logging.fatal('Lower treshold for {} mask {:.2f}, masks difference {}px'.format(mask_name, low, diff_mask))\n raise ValueError('Membrane in {} mask doesn`t closed, mebrane unlocated at this diff value (too low)!'.format(mask_name))\n\n low_val.update({mask_name : low})\n logging.info('Lower tresholds {}\\n'.format(low_val))\n\n return low_val",
"def calculate_thresholds(minimum, maximum, low_thresh_ratio, high_thresh_ratio):\n analog_range = maximum - minimum\n return Thresholds(low_thresh = analog_range * low_thresh_ratio, \\\n high_thresh = analog_range * high_thresh_ratio, \\\n middle = analog_range / 2)",
"def setMeasureThreshold(self, low, high=255, dwell=4000, measureStep=8):\n\t\t\t\tself._measureLow = low\n\t\t\t\tif self._searchLow < low:\n\t\t\t\t\tself._searchLow = low\n\t\t\t\tif self._searchHigh > high:\n\t\t\t\t\tself._searchHigh = high\n\t\t\t\tself._measureHigh = high\n\t\t\t\tself._measureDwell = dwell\n\t\t\t\tself._measureStep = measureStep",
"def imageThresholding(img, s_thresh=(100, 255), h_thresh=(15, 100),\n sx_thresh=(20, 100), sy_thresh=(100, 100)):\n img = np.copy(img)\n # Convert to HLS color space and separate the V channel\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)\n h_channel = hsv[:,:,0]\n s_channel = hsv[:,:,2]\n l_channel = hsv[:,:,1]\n \n # Sobel x\n # Take the derivative in x\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) \n # Absolute x derivative to accentuate lines away from horizontal\n abs_sobelx = np.absolute(sobelx) \n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n \n # Threshold x gradient\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1\n \n # Sobel y #\n sobely = cv2.Sobel(l_channel, cv2.CV_64F, 0, 1)\n # Absolute y derivative to accentuate lines away from vertical\n abs_sobely = np.absolute(sobely)\n scaled_sobely = np.uint8(255*abs_sobely/np.max((abs_sobely)))\n \n sybinary = np.zeros_like(scaled_sobely)\n sybinary[(scaled_sobely >= sy_thresh[0]) & (scaled_sobely <= sy_thresh[1])] = 1\n\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n \n h_binary = np.zeros_like(h_channel)\n h_binary[(h_channel >= h_thresh[0]) & (h_channel <= h_thresh[1])] = 1\n \n # Stack each channel\n # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might\n # be beneficial to replace this channel with something else.\n combined_binary = np.zeros_like(sxbinary)\n #combined_binary[(s_binary == 1) | (sxbinary == 1) | (sybinary == 1)] = 1\n combined_binary[((s_binary == 1) & (h_binary == 1))| (sxbinary == 1) | (sybinary == 1) ] = 1\n return combined_binary",
"def apply_threshold(self, threshold):\n \n self.data['model'] = self.probabilities.apply(self.threshold_decision, args=(threshold,))",
"def onSetThreshold(self, obj, event, lower, upper):\n\t\tn = self.histograms.index(obj)\n\t\tdataUnit = self.dataUnits[n]\n\t\tminval, maxval = dataUnit.getDataSource().getOriginalScalarRange()\n\t\t#print \"Original scalar range=\",minval,maxval\n\t\tif lower == 0 and upper == 255:\n\t\t\tself.shift = 0\n\t\t\tself.scale = 255.0 / maxval\n\t\telse:\n\t\t\tupper = upper * (maxval / 255.0)\n\t\t\tlower = lower * (maxval / 255.0)\n\t\t\tself.shift = -int(lower)\n\t\t\tself.scale = 255.0 / ((upper - lower))\n\n\t\tdataUnit.getDataSource().setIntensityScale(self.shift, self.scale)\n\t\tdataUnit.resetColorTransferFunction()\n\t\tself.preview.updatePreview(1)",
"def double_threshold(image, upper, lower):\n\n # create lists for pixel identification\n strong = []\n weak = []\n suppressed = []\n\n # https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.where.html \n # get the index values where the edge is greater than the upper bound\n strong = np.where(image > upper)\n\n # get the index values where the edge is greater than the lower bound but\n # less than the upper bound\n weak = np.where((image >= lower) & (image < upper))\n\n # get the index values where the edge is lower than the lower bound\n suppressed = np.where(image < lower)\n\n # set the suppressed index values to 0\n image[suppressed[0], suppressed[1]] = 0\n\n # set the weak values to lower bound of 45\n image[weak[0], weak[1]] = 45\n\n # set the weak values to upper bound of 45\n image[strong[0], strong[1]] = 255\n\n # write output to file\n out = OUT_FOLDER+\"/threshold.jpg\"\n cv2.imwrite(out, image)\n\n # return the matrix of edges, and indexes of strong and weak edges\n return image, weak, strong",
"def setSaturatedThreshold(self, lower, upper) -> None:\n ...",
"def thresholding(self, thval=130):\n self.thval = thval\n self.temp_img[self.temp_img < thval] = thval",
"def set_critical_low(self):\r\n set_to = 20\r\n count = 0\r\n self.clickbtn(\"ClickCriticalLow\")\r\n xpath_slider = self.util.read_xpath_list_from_xml(self.object_repo, \"SetCriticalLow\",\r\n self.my_object)\r\n value = int(self.object.element_get_property(self.util.client, xpath_slider[0]['zone'],\r\n xpath_slider[0]['xpath'],\r\n xpath_slider[0]['index'],\r\n \"text\", self.logger_name))\r\n\r\n if value == 20:\r\n self.clickbtn(\"ClickBackBtn\")\r\n return\r\n while value > set_to:\r\n if count == 20:\r\n break\r\n self.object.drag(self.util.client, xpath_slider[0]['zone'], xpath_slider[0]['xpath'],\r\n xpath_slider[0]['index'], xpath_slider[0]['comment'], 0, 100,\r\n self.logger_name)\r\n value = int(self.object.element_get_property(self.util.client, xpath_slider[0]['zone'],\r\n xpath_slider[0]['xpath'],\r\n xpath_slider[0]['index'], \"text\",\r\n self.logger_name))\r\n\r\n self.clickbtn(\"SliderOK\")\r\n self.clickbtn(\"ClickBackBtn\")",
"def __calc_threshold(img, max_thresh_val=85):\n min_val = np.min(img)\n thresh = min(min_val + 0.3 * (255 - min_val), max_thresh_val)\n _, img_thresh = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = np.ones((2, 2), np.uint8)\n img_thresh = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)\n kernel = np.ones((2, 2), np.uint8)\n img_thresh = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)\n if debug:\n cv2.imshow('Improved Thresh', img_thresh)\n return img_thresh",
"def HardThresholding(data,thresh):\n thresh_data = np.copy(data)\n thresh_data[np.abs(thresh_data) < thresh] = 0.\n return thresh_data",
"def _apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def update_thresh_gui_elements( self ):\n self.thresh_slider.SetThumbPosition( 255. - self.GetThresholdScrollbar() )\n self.thresh_low_slider.SetThumbPosition( 255. - self.GetThresholdLowScrollbar() )\n self.thresh_textinput.SetValue( str(params.n_bg_std_thresh) )\n self.thresh_low_textinput.SetValue( str(params.n_bg_std_thresh_low) )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write out the extracted spectrum to a text file. If the file already exists, this will not overwrite it. The first For each spectrum in ap_list, it will add a columns onto the output file so that the first column is always wavelength, the second column is flux, and the third column is sigma, and then repeat the flux and sigma columns
|
def write_extract_text(ofile, ap_list, clobber=False):
if os.path.isfile(ofile) and not clobber: return
#open the file
dout=saltio.openascii(ofile, 'w')
#first extract warr, assume it is the same for all frames
warr=ap_list[0].wave
#write out the spectrum
for i in range(len(warr)):
outstr='%7.3f '% warr[i]
for ap in ap_list:
flux=ap.ldata[i]
try:
fvar=abs(ap.lvar[i])**0.5
except:
fvar=1
outstr+="%7.3f %7.3f " % (flux, fvar)
outstr+='\n'
dout.write(outstr)
dout.close()
return
|
[
"def write_extract_fits(ofile, ap_list, clobber=False):\n #delete the file\n if os.path.isfile(ofile) and clobber: saltio.delete(ofile)\n\n #create the primary array\n hdu = pyfits.PrimaryHDU()\n hdulist = pyfits.HDUList([hdu])\n\n #create the columns and the \n for ap in ap_list:\n fvar=abs(ap.lvar)**0.5\n #create the columns\n col1=pyfits.Column(name='wavelength', format='D', unit='Angstroms', array=ap.wave)\n col2=pyfits.Column(name='counts', format='D', unit='Counts', array=ap.ldata)\n col3=pyfits.Column(name='counts_err', format='D', array=ap.lvar)\n\n #add to the table\n tbhdu=pyfits.new_table([col1, col2, col3])\n hdulist.append(tbhdu) \n\n #write it out\n hdulist.writeto(ofile)\n return",
"def output_spectrum(self, spectrum_id, filepath):\n data = self.dict(\"SELECT * FROM spectra WHERE id={}\".format(spectrum_id)).fetchone()\n if data:\n import csv\n fn, header = '{}{}.txt'.format(filepath, data['filename'] or spectrum_id), repr([repr(r) for r in data['header'].ascardlist()]) if data['header'] else None\n if data['header']:\n with open(fn, 'w' ) as f:\n keys, vals, coms = zip(*[eval(l) for l in eval(header)])\n klen, vlen, clen = [len(max(['1' if isinstance(j,bool) and j else '0' if isinstance(j,bool) else str(j) for j in i], key=len)) for i in [keys,vals,coms]]\n for k,v,c in zip(keys,vals,coms): csv.writer(f, delimiter='\\t').writerow(['# {!s:{}}'.format(k,klen)+'= {!s:{}}'.format(v,vlen)+' / {!s:{}}'.format(c,clen)])\n csv.writer(f, delimiter='\\t').writerow([' '])\n u.dict2txt({str(w):{'flux [{}]'.format(data['flux_units']):str(f), 'unc [{}]'.format(data['flux_units']):str(e)} for w,f,e in zip(data['wavelength'],data['flux'],data['unc'])}, fn, column1='# wavelength [{}]'.format(data['wavelength_units']), append=True)\n else: print \"No spectrum found with id {}\".format(spectrum_id)",
"def write_file(output_name, parsed_xQTL_list):\n with open(output_name, \"w\") as thefile:\n thefile.write(\"metabolite\\tchr\\tpeak_mb\\tinf_mb\\tsup_mb\\tlod\\n\")\n for xQTL in parsed_xQTL_list:\n xQTL = [str(element) for element in xQTL]\n line = \"\\t\".join(xQTL)\n thefile.write(line + \"\\n\")",
"def WriteSparky(self, fileName):\n print 'writing a .list file', fileName\n chemhandle = TextFile.TextFile(fileName, 'w')\n \n chemhandle.write('%s %s %s %s %s %s' %('Group','Atom','Nuc',\\\n 'Shift','Sdev',\\\n 'Assignments'))\n chemhandle.write('\\n')\n chemhandle.write('\\n')\n \n for EACH in self.atomlist:\n #those with 999.000 don't have an assignment:\n if EACH.shift and EACH.shift != '999.000':\n a=AminoAcid.AminoAcid(EACH.aminoacid)[0]+str(EACH.residuenumber)\n az = len(a)\n ak = 5 - az\n \n b=EACH.atomname[0]\n bz=len(b)\n bk = 7 - bz\n \n c=EACH.atomtype\n cz=len(c)\n ck = 5 - cz\n \n d=str(EACH.shift)\n dz=len(d)\n dk = 9 - dz\n \n e=str(EACH.shifterror)\n ez=len(e)\n ek = 7 - ez\n \n f='1'\n fz=len(f)\n fk = 7 - fz\n\n first = (ak) * ' '\n second= (bk) * ' '\n third = (ck) * ' '\n fourth = (dk) * ' '\n fifth = (ek) * ' '\n sixth = (fk) * ' '\n \n first=first+a\n second=second+b\n third=third+c\n fourth=fourth+d\n fifth=fifth+e\n sixth=sixth+f\n \n chemhandle.write(first+second+third+fourth+fifth+sixth+'\\n')\n\n\n chemhandle.write('\\n')\n chemhandle.close()",
"def write_result(result_list):\n with open('FG_EXPLANATIONS.txt', 'w') as file:\n for element in result_list:\n file.write(element)\n file.write('\\n')",
"def write_output(Count_trigram, Count_bigram, input_file, output_name):\n output_file = file(output_name, \"w\")\n input_file.seek(0)\n l = input_file.readline()\n while l:\n line = l.strip()\n fields = line.split(\" \")\n assert len(fields)==3\n log_pr = cal_trigram_param(Count_trigram, Count_bigram, fields) # Calculate using naive estimator.\n l = line + \" \" + str(log_pr) + \"\\n\"\n output_file.write(l)\n l = input_file.readline()\n output_file.close()",
"def export_data(self):\n folder = os.path.dirname(self.single_spec_filename[0])\n filename_ext = os.path.basename(self.single_spec_filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()",
"def write_spectrum_dat(self, f, scanpoint=0):\n if self.processed_spectrum is None:\n return False\n\n m_lambda = self.processed_spectrum[\"m_lambda\"][scanpoint]\n m_spec = self.processed_spectrum[\"m_spec\"][scanpoint]\n m_spec_sd = self.processed_spectrum[\"m_spec_sd\"][scanpoint]\n m_lambda_fwhm = self.processed_spectrum[\"m_lambda_fwhm\"][scanpoint]\n\n stacked_data = np.c_[m_lambda, m_spec, m_spec_sd, m_lambda_fwhm]\n np.savetxt(f, stacked_data, delimiter=\"\\t\")\n\n return True",
"def write_to_file(englist):\n\n # Output to text file\n text_file = open(\"engraving_output.txt\", \"w\")\n count = 0\n\n # Write to the text file\n text_file.write(\"_ = empty line if present.\")\n text_file.write(\"\\n\")\n text_file.write(\"\\n\")\n for x in englist:\n text_file.write(\"ITEM \" + str(count + 1) + \"\\n\")\n for y in x:\n if not y:\n text_file.write(\"_\")\n else:\n text_file.write(y + \"\\n\")\n text_file.write(\"\\n\")\n text_file.write(\"\\n\")\n count += 1\n\n # Close text file\n text_file.close()",
"def write_spectrum_xml(self, f, scanpoint=0):\n if self.processed_spectrum is None:\n return\n\n s = string.Template(spectrum_template)\n d = dict()\n d[\"title\"] = self.cat.sample_name\n d[\"time\"] = strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())\n\n m_lambda = self.processed_spectrum[\"m_lambda\"]\n m_spec = self.processed_spectrum[\"m_spec\"]\n m_spec_sd = self.processed_spectrum[\"m_spec_sd\"]\n m_lambda_fwhm = self.processed_spectrum[\"m_lambda_fwhm\"]\n\n # sort the data\n sorted = np.argsort(self.m_lambda[0])\n\n r = m_spec[:, sorted]\n lmda = m_lambda[:, sorted]\n dlmda = m_lambda_fwhm[:, sorted]\n dr = m_spec_sd[:, sorted]\n d[\"n_spectra\"] = self.processed_spectrum[\"n_spectra\"]\n d[\"runnumber\"] = \"PLP{:07d}\".format(self.cat.datafile_number)\n\n d[\"r\"] = repr(r[scanpoint].tolist()).strip(\",[]\")\n d[\"dr\"] = repr(dr[scanpoint].tolist()).strip(\",[]\")\n d[\"lmda\"] = repr(lmda[scanpoint].tolist()).strip(\",[]\")\n d[\"dlmda\"] = repr(dlmda[scanpoint].tolist()).strip(\",[]\")\n thefile = s.safe_substitute(d)\n\n with possibly_open_file(f, \"wb\") as g:\n if \"b\" in g.mode:\n thefile = thefile.encode(\"utf-8\")\n\n g.write(thefile)\n g.truncate()\n\n return True",
"def write_to_file(file, sweetberry, inas):\n\n with open(file, 'w') as pyfile:\n\n pyfile.write('inas = [\\n')\n\n for rec in inas:\n if rec['sweetberry'] != sweetberry:\n continue\n\n # EX : ('sweetberry', 0x40, 'SB_FW_CAM_2P8', 5.0, 1.000, 3, False),\n channel, i2c_addr = Spower.CHMAP[rec['channel']]\n record = (\" ('sweetberry', 0x%02x, '%s', 5.0, %f, %d, 'True')\"\n \",\\n\" % (i2c_addr, rec['name'], rec['rs'], channel))\n pyfile.write(record)\n\n pyfile.write(']\\n')",
"def write_freq_out_file(results):\n with open(\"freq.out\", \"w\") as output:\n for i in results['Frequencies [cm-1]']:\n output.write(f\"{i:.3f}\\n\")",
"def writeAnalysis():\n perf_file = open(\"performance.txt\",\"a\")\n perf_file.write(\"Analysis: The download URL's choosen were done so with the intent of limiting the number of bytes \\n\"\n + \"that are shown for each individual table. I found that the easist means to accomplish this was to \\n\"\n + \"utilize the filtration of North America for parts A, B, and B-nested. This could be done using the orderBy and equalTo methods. \\n\"\n + \"Additionally, for part C I found that I could filter on GNP by starting at the threshold value of 10000. \\n\"\n + \"This was accomplished by using the orderBy and startAt methods.\")\n perf_file.close()",
"def write_to_file(self):\n print('Writing to a file')\n file_out = open('../output/report.csv', 'w')\n file_out.write('Border,Date,Measure,Value,Average\\n')\n for timestamp, border_measures in self.report_dict.items():\n for border_measure, attributes in border_measures.items():\n file_out.write(border_measure[0] + ',')\n file_out.write(timestamp.strftime(\"%d/%m/%Y %I:%M:%S %p\") + ',')\n file_out.write(str(border_measure[1]) + ',')\n file_out.write(str(attributes['sum']) + ',')\n file_out.write(str(attributes['running_total']))\n file_out.write('\\n')",
"def writeWv(self, FileName): # Verified 2020.0115\n\n #check if self.iqData is complex\n if isinstance(self.iqData[0], complex):\n self.__complex2iqiq__()\n\n self.NumberOfSamples = len(self.iqiqList) // 2\n\n #Find maximum magnitude and scale for max to be FullScale (1.0)\n power = []\n for n in range(self.NumberOfSamples):\n power.append(abs(self.iqiqList[2*n]**2 + self.iqiqList[2*n+1]**2))\n scaling = math.sqrt(max(power))\n\n self.iqiqList = [iq / scaling for iq in self.iqiqList] # normalize to magnitude 1\n rms = math.sqrt(sum(power)/self.NumberOfSamples)/scaling # calculate rms in dB (below full scale)\n rms = abs(20*math.log10(rms)) # Convert to dB\n self.iqiqList = [math.floor(iq * 32767 +.5) for iq in self.iqiqList] # Convert to int16\n\n try:\n file = open(FileName, \"wb\")\n file.write(\"{TYPE: SMU-WV,0}\".encode(\"ASCII\"))\n file.write(\"{COMMENT: R&S WaveForm, TheAE-RA}\".encode(\"ASCII\"))\n file.write((\"{DATE: \" + str(datetime.today())+ \"}\").encode(\"ASCII\"))\n file.write((\"{CLOCK:\" +str(self.fSamplingRate) + \"}\").encode(\"ASCII\"))\n file.write((\"{LEVEL OFFS:\" + \"{:2.4f}\".format(rms) + \",0}\").encode(\"ASCII\"))\n file.write((\"{SAMPLES:\" + str(self.NumberOfSamples) + \"}\").encode(\"ASCII\"))\n # if(m1start > 0 && m1stop > 0)\n # %Control Length only needed for markers\n # fprintf(file_id,'%s',['{CONTROL LENGTH:' num2str(data_length) '}']);\n # fprintf(file_id,'%s',['{CLOCK MARKER:' num2str(fSamplingRate) '}']);\n # fprintf(file_id,'%s',['{MARKER LIST 1: ' num2str(m1start) ':1;' num2str(m1stop) ':0}']);\n # end\n file.write((\"{WAVEFORM-\" + str(4*self.NumberOfSamples+1) + \": #\").encode(\"ASCII\"))\n file.write(struct.pack(\"h\"*len(self.iqiqList),*self.iqiqList))\n file.write(\"}\".encode(\"ASCII\"))\n file.close()\n except:\n print(\"File (\" + FileName +\") write error!\")\n return 0\n return self.NumberOfSamples",
"def add_spectra(speclist: list, outfile: str, **kwargs):\n if hsp is None:\n raise ImportError('write_pha_spec depends on heasoftpy. Install it first')\n\n nmax = kwargs.get('nmax', 10)\n nspec = len(speclist)\n nbatch = nspec//nmax\n\n if nbatch > 1:\n batches = [speclist[i:i+nbatch] for i in range(0, nspec, nbatch)]\n # combine spectra in the batches\n spec = [add_spectra(slist, f'{outfile}_{idx}', **kwargs)\n for idx,slist in enumerate(batches)]\n\n # combine the batches to produce one file\n out = add_spectra(spec, outfile, **kwargs)\n os.system(f'rm -rf {\" \".join(spec)} '\n f'{\" \".join([s.replace(\"pha\", \"rsp\") for s in spec])}')\n return out\n\n with open('tmp.add', 'w', encoding='utf8') as filep:\n filep.write('\\n'.join(speclist))\n if len(glob.glob(f'{outfile}.???')) != 0:\n os.system(f'rm {outfile}.???')\n qaddrmf = kwargs.get('qaddrmf', 'no')\n qsubback = kwargs.get('qsubback', 'no')\n clobber = kwargs.get('clobber', 'yes')\n out = hsp.addspec( # pylint: disable=no-member\n infil='tmp.add', outfil=outfile,\n qaddrmf=qaddrmf, qsubback=qsubback, clobber=clobber)\n\n if out.returncode != 0:\n logfile = 'add_spectra.log'\n with open(logfile, 'w', encoding='utf8') as filep:\n filep.write(str(out))\n raise RuntimeError(f'ERROR in addspec; Writing log to {logfile}')\n\n return f'{outfile}.pha'",
"def write_EMinfos(data, A_band, F_75_2000):\n filename = 'infos_recoveredEMsources_sim20160929.dat'\n lines = []\n \n GPStime = data['GPStime']\n date = data['date']\n m1 = data['mass1']\n m2 = data['mass2']\n dist = data['distances']\n snr = data['SNR']\n RA = data['RA']\n dec = data['dec']\n iota = data['inclination']\n skymaps = data['file_skymap']\n egw = data['Egw']\n\n print 'Creating EM infos data file {}...'.format(filename)\n \n \n with open(filename, 'a') as datafile:\n\n print >> datafile, '# GPStime date mass1 mass2 distance SNR RA dec inclination skymap Egw A_band F_75_2000'\n print >> datafile, '# s Msun Msun Mpc rad rad rad erg ph/s/cm2/keV erg/s/cm2'\n\n for gps_, date_, m1_, m2_, d_, snr_, RA_, dec_, iota_, sky_, egw_, aband_, f_ in zip(GPStime, date, m1, m2, dist, snr, RA, dec, iota, skymaps, egw, A_band, F_75_2000):\n\n print >> datafile, gps_+' '+date_+' '+str(m1_)+' '+str(m2_)+' '+str(d_)+' '+str(snr_)+' '+str(RA_)+' '+str(dec_)+' '+str(iota_)+' '+sky_+' '+str(egw_)+' '+str(aband_)+' '+str(f_)",
"def write_spectrum(self, specfile, emin, emax, nchan, overwrite=False):\n spec = np.zeros(nchan)\n ebins = np.linspace(emin, emax, nchan + 1, endpoint=True)\n emid = 0.5 * (ebins[1:] + ebins[:-1])\n\n for fn in self.filenames:\n with h5py.File(fn, \"r\") as f:\n d = f[\"data\"]\n spec += np.histogram(d[\"eobs\"][:], bins=ebins)[0]\n\n col1 = fits.Column(\n name=\"CHANNEL\", format=\"1J\", array=np.arange(nchan).astype(\"int32\") + 1\n )\n col2 = fits.Column(name=\"ENERGY\", format=\"1D\", array=emid.astype(\"float64\"))\n col3 = fits.Column(name=\"COUNTS\", format=\"1J\", array=spec.astype(\"int32\"))\n col4 = fits.Column(\n name=\"COUNT_RATE\", format=\"1D\", array=spec / self.parameters[\"exp_time\"]\n )\n\n coldefs = fits.ColDefs([col1, col2, col3, col4])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.name = \"SPECTRUM\"\n\n tbhdu.header[\"DETCHANS\"] = spec.shape[0]\n tbhdu.header[\"TOTCTS\"] = spec.sum()\n tbhdu.header[\"EXPOSURE\"] = self.parameters[\"exp_time\"]\n tbhdu.header[\"LIVETIME\"] = self.parameters[\"exp_time\"]\n tbhdu.header[\"CONTENT\"] = \"pi\"\n tbhdu.header[\"HDUCLASS\"] = \"OGIP\"\n tbhdu.header[\"HDUCLAS1\"] = \"SPECTRUM\"\n tbhdu.header[\"HDUCLAS2\"] = \"TOTAL\"\n tbhdu.header[\"HDUCLAS3\"] = \"TYPE:I\"\n tbhdu.header[\"HDUCLAS4\"] = \"COUNT\"\n tbhdu.header[\"HDUVERS\"] = \"1.1.0\"\n tbhdu.header[\"HDUVERS1\"] = \"1.1.0\"\n tbhdu.header[\"CHANTYPE\"] = \"pi\"\n tbhdu.header[\"BACKFILE\"] = \"none\"\n tbhdu.header[\"CORRFILE\"] = \"none\"\n tbhdu.header[\"POISSERR\"] = True\n tbhdu.header[\"RESPFILE\"] = \"none\"\n tbhdu.header[\"ANCRFILE\"] = \"none\"\n tbhdu.header[\"MISSION\"] = \"none\"\n tbhdu.header[\"TELESCOP\"] = \"none\"\n tbhdu.header[\"INSTRUME\"] = \"none\"\n tbhdu.header[\"AREASCAL\"] = 1.0\n tbhdu.header[\"CORRSCAL\"] = 0.0\n tbhdu.header[\"BACKSCAL\"] = 1.0\n\n hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu])\n\n hdulist.writeto(specfile, overwrite=overwrite)",
"def write(self):\n\n # Write lines according to qst3 requirements for gaussian\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n\n # Mol coords have to specified r -> p -> ts, otherwise gaussian will complain\n for coords, name in zip(self.mol_coords, ('reactant', 'product', 'ts')):\n file.write(self.molecule_name + ' {}\\n\\n'.format(name))\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in coords))\n file.write('\\n')\n\n file.write('\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write out the extracted spectrum to a FITS table. If the file already exists, this will not overwrite it. For each spectrum in ap_list, it will add another extension to the fits file. Each extension will have the first column as wavelength, the second column as counts, and the third column as sigma on the counts.
|
def write_extract_fits(ofile, ap_list, clobber=False):
    """Write extracted spectra to a multi-extension FITS table.

    Each aperture in ``ap_list`` becomes one binary-table extension with
    three columns: wavelength (Angstroms), counts, and counts_err (the
    sigma on the counts).

    Parameters
    ----------
    ofile : str
        Name of the output FITS file.
    ap_list : list
        Aperture objects providing ``wave``, ``ldata`` and ``lvar``
        arrays (wavelength, counts, and variance on the counts).
    clobber : bool
        If True, delete a pre-existing output file first; if False and
        the file exists, ``writeto`` will raise.
    """
    #delete the file if overwriting was requested
    if os.path.isfile(ofile) and clobber:
        saltio.delete(ofile)
    #create the primary array
    hdu = pyfits.PrimaryHDU()
    hdulist = pyfits.HDUList([hdu])
    #create one table extension per aperture
    for ap in ap_list:
        # sigma on the counts: square root of the (absolute) variance
        fvar = abs(ap.lvar)**0.5
        #create the columns
        col1 = pyfits.Column(name='wavelength', format='D', unit='Angstroms', array=ap.wave)
        col2 = pyfits.Column(name='counts', format='D', unit='Counts', array=ap.ldata)
        # BUG FIX: the original wrote the raw variance (ap.lvar) into the
        # 'counts_err' column even though fvar (the sigma) was computed;
        # write the standard deviation as documented.
        col3 = pyfits.Column(name='counts_err', format='D', array=fvar)
        #add to the table
        tbhdu = pyfits.new_table([col1, col2, col3])
        hdulist.append(tbhdu)
    #write it out
    hdulist.writeto(ofile)
    return
|
[
"def new_fits(outfile, **kwargs):\n # Fake data\n sci_data = numpy.arange(10000, dtype='float').reshape(100,100)\n err_data = numpy.sqrt(sci_data) # Poisson error\n dq_data = numpy.zeros(sci_data.shape, dtype='int16') # No bad pixel\n\n # Create individual extensions\n hdu_hdr = pyfits.PrimaryHDU()\n hdu_sci = pyfits.ImageHDU(sci_data)\n hdu_err = pyfits.ImageHDU(err_data)\n hdu_dq = pyfits.ImageHDU(dq_data)\n\n # Modify headers\n \n hdu_hdr.header['FILENAME'] = outfile\n hdu_hdr.header['NEXTEND'] = 3\n \n hdu_sci.header['BUNIT'] = 'COUNTS'\n hdu_sci.header['EXTNAME'] = 'SCI'\n hdu_sci.header['EXTVER'] = 1\n\n hdu_err.header['BUNIT'] = 'COUNTS'\n hdu_err.header['EXTNAME'] = 'ERR'\n hdu_err.header['EXTVER'] = 1\n\n hdu_dq.header['BUNIT'] = 'UNITLESS'\n hdu_dq.header['EXTNAME'] = 'DQ'\n hdu_dq.header['EXTVER'] = 1\n\n # Create multi-extension FITS\n hduList = pyfits.HDUList([hdu_hdr])\n hduList.append(hdu_sci)\n hduList.append(hdu_err)\n hduList.append(hdu_dq)\n\n # Write to file\n hduList.writeto(outfile, **kwargs)",
"def fits_write(hdulist, outfile, comment=None, history=None, overwrite=False):\n hdulist.writeto(outfile, checksum=True, overwrite=overwrite)",
"def to_ogip_files(self, outdir=None, use_sherpa=False, overwrite=False):\n # TODO: refactor and reduce amount of code duplication\n outdir = Path.cwd() if outdir is None else make_path(outdir)\n outdir.mkdir(exist_ok=True, parents=True)\n\n phafile = f\"pha_obs{self.name}.fits\"\n\n bkgfile = phafile.replace(\"pha\", \"bkg\")\n arffile = phafile.replace(\"pha\", \"arf\")\n rmffile = phafile.replace(\"pha\", \"rmf\")\n\n counts_table = self.counts.to_table()\n counts_table[\"QUALITY\"] = np.logical_not(self.mask_safe)\n counts_table[\"BACKSCAL\"] = self.acceptance\n counts_table[\"AREASCAL\"] = np.ones(self.acceptance.size)\n meta = self._ogip_meta()\n\n meta[\"respfile\"] = rmffile\n meta[\"backfile\"] = bkgfile\n meta[\"ancrfile\"] = arffile\n meta[\"hduclas2\"] = \"TOTAL\"\n counts_table.meta = meta\n\n name = counts_table.meta[\"name\"]\n hdu = fits.BinTableHDU(counts_table, name=name)\n hdulist = fits.HDUList([fits.PrimaryHDU(), hdu, self._ebounds_hdu(use_sherpa)])\n\n if self.gti is not None:\n gti_hdu = self.gti.to_hdulist()\n hdulist += gti_hdu\n\n hdulist.writeto(str(outdir / phafile), overwrite=overwrite)\n\n self.aeff.write(outdir / arffile, overwrite=overwrite, use_sherpa=use_sherpa)\n\n if self.counts_off is not None:\n counts_off_table = self.counts_off.to_table()\n counts_off_table[\"QUALITY\"] = np.logical_not(self.mask_safe)\n counts_off_table[\"BACKSCAL\"] = self.acceptance_off\n counts_off_table[\"AREASCAL\"] = np.ones(self.acceptance.size)\n meta = self._ogip_meta()\n meta[\"hduclas2\"] = \"BKG\"\n\n counts_off_table.meta = meta\n name = counts_off_table.meta[\"name\"]\n hdu = fits.BinTableHDU(counts_off_table, name=name)\n hdulist = fits.HDUList(\n [fits.PrimaryHDU(), hdu, self._ebounds_hdu(use_sherpa)]\n )\n hdulist.writeto(str(outdir / bkgfile), overwrite=overwrite)\n\n if self.edisp is not None:\n self.edisp.write(\n str(outdir / rmffile), overwrite=overwrite, use_sherpa=use_sherpa\n )",
"def writeFits(sOutFileName_p, data_p,header=None):\n data_p=np.rollaxis(data_p,2,0)\n if header==None:\n afits.writeto(sOutFileName_p,data_p,clobber=True)\n else:\n hdu=afits.PrimaryHDU(data=data_p,header=header,uint=True)\n hduList=afits.HDUList([hdu])\n hduList.writeto(sOutFileName_p,clobber=True)",
"def fits_out(out_file, in_file, meta_dict, freq, fracrms_power, fracrms_err,\n leahy_power, file_desc):\n\n ## Check that the output file name has FITS file extension\n assert out_file[-4:].lower() == \"fits\", \"ERROR: Output file must have \"\\\n \"extension '.fits'.\"\n\n print(\"\\nOutput file: %s\" % out_file)\n\n out_table = Table()\n out_table.add_column(Column(data=freq, name='FREQ', unit='Hz'))\n out_table.add_column(Column(data=fracrms_power, name='POWER'))\n out_table.add_column(Column(data=fracrms_err, name='ERROR'))\n out_table.add_column(Column(data=leahy_power, name='LEAHY'))\n\n out_table.meta['TYPE'] = file_desc\n out_table.meta['DATE'] = str(datetime.now())\n out_table.meta['EVTLIST'] = in_file\n out_table.meta['DT'] = np.mean(meta_dict['dt'])\n out_table.meta['N_BINS'] = meta_dict['n_bins']\n out_table.meta['SEGMENTS'] = meta_dict['n_seg']\n out_table.meta['SEC_SEG'] = meta_dict['n_seconds']\n out_table.meta['EXPOSURE'] = meta_dict['exposure']\n out_table.meta['DETCHANS'] = meta_dict['detchans']\n out_table.meta['MEANRATE'] = meta_dict['mean_rate']\n out_table.meta['RMS_REF'] = meta_dict['rms']\n out_table.meta['NYQUIST'] = meta_dict['nyquist']\n out_table.meta['DF'] = np.mean(meta_dict['df'])\n out_table.meta['ADJUST'] = \"%s\" % str(meta_dict['adjust_seg'])\n\n out_table.write(out_file, overwrite=True)",
"def write_spectrum(self, specfile, emin, emax, nchan, overwrite=False):\n spec = np.zeros(nchan)\n ebins = np.linspace(emin, emax, nchan + 1, endpoint=True)\n emid = 0.5 * (ebins[1:] + ebins[:-1])\n\n for fn in self.filenames:\n with h5py.File(fn, \"r\") as f:\n d = f[\"data\"]\n spec += np.histogram(d[\"eobs\"][:], bins=ebins)[0]\n\n col1 = fits.Column(\n name=\"CHANNEL\", format=\"1J\", array=np.arange(nchan).astype(\"int32\") + 1\n )\n col2 = fits.Column(name=\"ENERGY\", format=\"1D\", array=emid.astype(\"float64\"))\n col3 = fits.Column(name=\"COUNTS\", format=\"1J\", array=spec.astype(\"int32\"))\n col4 = fits.Column(\n name=\"COUNT_RATE\", format=\"1D\", array=spec / self.parameters[\"exp_time\"]\n )\n\n coldefs = fits.ColDefs([col1, col2, col3, col4])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.name = \"SPECTRUM\"\n\n tbhdu.header[\"DETCHANS\"] = spec.shape[0]\n tbhdu.header[\"TOTCTS\"] = spec.sum()\n tbhdu.header[\"EXPOSURE\"] = self.parameters[\"exp_time\"]\n tbhdu.header[\"LIVETIME\"] = self.parameters[\"exp_time\"]\n tbhdu.header[\"CONTENT\"] = \"pi\"\n tbhdu.header[\"HDUCLASS\"] = \"OGIP\"\n tbhdu.header[\"HDUCLAS1\"] = \"SPECTRUM\"\n tbhdu.header[\"HDUCLAS2\"] = \"TOTAL\"\n tbhdu.header[\"HDUCLAS3\"] = \"TYPE:I\"\n tbhdu.header[\"HDUCLAS4\"] = \"COUNT\"\n tbhdu.header[\"HDUVERS\"] = \"1.1.0\"\n tbhdu.header[\"HDUVERS1\"] = \"1.1.0\"\n tbhdu.header[\"CHANTYPE\"] = \"pi\"\n tbhdu.header[\"BACKFILE\"] = \"none\"\n tbhdu.header[\"CORRFILE\"] = \"none\"\n tbhdu.header[\"POISSERR\"] = True\n tbhdu.header[\"RESPFILE\"] = \"none\"\n tbhdu.header[\"ANCRFILE\"] = \"none\"\n tbhdu.header[\"MISSION\"] = \"none\"\n tbhdu.header[\"TELESCOP\"] = \"none\"\n tbhdu.header[\"INSTRUME\"] = \"none\"\n tbhdu.header[\"AREASCAL\"] = 1.0\n tbhdu.header[\"CORRSCAL\"] = 0.0\n tbhdu.header[\"BACKSCAL\"] = 1.0\n\n hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu])\n\n hdulist.writeto(specfile, overwrite=overwrite)",
"def documentFreqSets(HTs,item_list,min_sup,filename):\n f = open(\"Results/Freq_Items_\" + filename + \"_sup:\" + str(min_sup) + \".txt\",'w')\n\n for HT in HTs:\n\n writeSetsToFile(HT.root,item_list,f)\n\n f.close()",
"def write_extensions_all(self, all_exten_dir=None):\n \n if type(all_exten_dir) == type(None):\n # all_exten_dir encodes the instrument and date \n all_exten_dir = f'{os.path.abspath(self.data_dir+\"/..\")}/dets_ALL_'\n all_exten_dir = f'{all_exten_dir}{self.instrument}_{self.date}' \n run(f\"mkdir -p {all_exten_dir}\", shell=True) # make all_exten_dir\n\n for fi in self.files:\n for n in range(self.nextend):\n if n < 10:\n det_name = f\"0{n}\"\n else:\n det_name = str(n)\n exten = self.__get_extension(fi, n+1)\n\n new_f = fi.replace(\".fits.fz\", f\"_det{det_name}.fits\")\n exten.writeto(f\"{all_exten_dir}/{new_f}\", overwrite=True, \n output_verify=\"ignore\") # write them\n \n print(\"Extracted headers/images for all detectors of \"+\n f\"{self.instrument} on {self.date}\", flush=True)\n print(f\"Written to new .fits files in {all_exten_dir}\", flush=True)",
"def write(self, file_name) :\n\n # Add the data\n Col = pyfits.Column(name='DATA', format=self.data_format, \n array=self.data)\n columns = [Col,]\n \n # Add all the other stored fields.\n for field_name in self.field.iterkeys() :\n Col = pyfits.Column(name=field_name,\n format=self.formats[field_name],\n array=self.field[field_name])\n columns.append(Col)\n coldefs = pyfits.ColDefs(columns)\n # Creat fits header data units, one for the table and the mandatory\n # primary.\n tbhdu = pyfits.new_table(coldefs)\n prihdu = pyfits.PrimaryHDU()\n # Add the write history.\n fname_abbr = ku.abbreviate_file_path(file_name)\n self.history.add('Written to file.', ('File name: ' + fname_abbr,))\n # Add the history to the header.\n bf.write_history_header(prihdu.header, self.history)\n\n # Combine the HDUs and write to file.\n hdulist = pyfits.HDUList([prihdu, tbhdu])\n hdulist.writeto(file_name, clobber=True)\n if self.feedback > 0 :\n print 'Wrote data to file: ' + fname_abbr",
"def write_extract_text(ofile, ap_list, clobber=False):\n if os.path.isfile(ofile) and not clobber: return\n \n #open the file\n dout=saltio.openascii(ofile, 'w')\n\n #first extract warr, assume it is the same for all frames\n warr=ap_list[0].wave\n\n #write out the spectrum\n for i in range(len(warr)):\n outstr='%7.3f '% warr[i]\n for ap in ap_list:\n flux=ap.ldata[i]\n try:\n fvar=abs(ap.lvar[i])**0.5\n except:\n fvar=1\n outstr+=\"%7.3f %7.3f \" % (flux, fvar)\n outstr+='\\n'\n dout.write(outstr)\n dout.close()\n return",
"def write(self):\n \n hdulist = fits.HDUList()\n\n level0 = self.get_level0()\n hdulist.append(level0)\n \n level1 = self.get_level1()\n hdulist.append(level1)\n \n level2 = self.get_level2()\n hdulist.append(level2)\n \n level3 = self.get_level3()\n hdulist.append(level3)\n \n level4 = self.get_level4()\n hdulist.append(level4)\n \n hdulist.writeto(self.metadata_file,clobber=True)\n print('Output metadata to '+self.metadata_file)",
"def export_data(self):\n folder = os.path.dirname(self.single_spec_filename[0])\n filename_ext = os.path.basename(self.single_spec_filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()",
"def write_freq_out_file(results):\n with open(\"freq.out\", \"w\") as output:\n for i in results['Frequencies [cm-1]']:\n output.write(f\"{i:.3f}\\n\")",
"def add_spectra(speclist: list, outfile: str, **kwargs):\n if hsp is None:\n raise ImportError('write_pha_spec depends on heasoftpy. Install it first')\n\n nmax = kwargs.get('nmax', 10)\n nspec = len(speclist)\n nbatch = nspec//nmax\n\n if nbatch > 1:\n batches = [speclist[i:i+nbatch] for i in range(0, nspec, nbatch)]\n # combine spectra in the batches\n spec = [add_spectra(slist, f'{outfile}_{idx}', **kwargs)\n for idx,slist in enumerate(batches)]\n\n # combine the batches to produce one file\n out = add_spectra(spec, outfile, **kwargs)\n os.system(f'rm -rf {\" \".join(spec)} '\n f'{\" \".join([s.replace(\"pha\", \"rsp\") for s in spec])}')\n return out\n\n with open('tmp.add', 'w', encoding='utf8') as filep:\n filep.write('\\n'.join(speclist))\n if len(glob.glob(f'{outfile}.???')) != 0:\n os.system(f'rm {outfile}.???')\n qaddrmf = kwargs.get('qaddrmf', 'no')\n qsubback = kwargs.get('qsubback', 'no')\n clobber = kwargs.get('clobber', 'yes')\n out = hsp.addspec( # pylint: disable=no-member\n infil='tmp.add', outfil=outfile,\n qaddrmf=qaddrmf, qsubback=qsubback, clobber=clobber)\n\n if out.returncode != 0:\n logfile = 'add_spectra.log'\n with open(logfile, 'w', encoding='utf8') as filep:\n filep.write(str(out))\n raise RuntimeError(f'ERROR in addspec; Writing log to {logfile}')\n\n return f'{outfile}.pha'",
"def write_bintables_mef(cat, zps, sky, exp, outdir):\n\n print('Attempting to write binary tables as multi-extension FITS')\n\n # for now assume that cat, zps, sky tables all exist\n\n assert(os.path.exists(outdir))\n\n outname = (os.path.split(exp.fname_im))[-1]\n\n outname = outname.replace('.fits', '-summary.fits')\n\n outname = os.path.join(outdir, outname)\n\n outname_tmp = outname + '.tmp'\n\n assert(not os.path.exists(outname))\n assert(not os.path.exists(outname_tmp))\n\n hdul = fits.HDUList(hdus=[fits.PrimaryHDU(header=exp.header),\n fits.BinTableHDU(data=cat, header=exp.header),\n fits.BinTableHDU(data=zps, header=exp.header),\n fits.BinTableHDU(data=sky, header=exp.header)])\n\n hdul[1].header['EXTNAME'] = 'CATALOG'\n hdul[2].header['EXTNAME'] = 'ZEROPOINTS'\n hdul[3].header['EXTNAME'] = 'SKY'\n\n hdul.writeto(outname_tmp)\n\n os.rename(outname_tmp, outname)",
"def to_fits(self, filename, **kwargs):\n kwargs['flux_col'] = 'THROUGHPUT'\n kwargs['flux_unit'] = units.THROUGHPUT\n\n # There are some standard keywords that should be added\n # to the extension header.\n bkeys = {'expr': (str(self), 'synphot expression'),\n 'tdisp1': 'G15.7',\n 'tdisp2': 'G15.7'}\n\n if 'ext_header' in kwargs:\n kwargs['ext_header'].update(bkeys)\n else:\n kwargs['ext_header'] = bkeys\n\n specio.write_fits_spec(filename, self.wave, self.thru, **kwargs)",
"def _write_healpix_filename(filename, hdr, output_struct):\n hdu_list = fits.HDUList()\n\n hdu = fits.BinTableHDU(data=output_struct, header=fits.Header())\n\n for n in hdr:\n if n not in FITS_RESERVED:\n hdu.header[n] = hdr[n]\n hdu_list.append(hdu)\n\n hdu_list.writeto(filename, overwrite=True)",
"def create_data_tables(table):\n data = mcf.read_data_file(table)\n\n for ent in data:\n if mcf.is_neumeric(ent):\n obsid = ent.strip()\n else:\n atemp = re.split('\\s+', ent)\n obsid = atemp[0]\n\n if mcf.is_neumeric(obsid) == False:\n continue\n\n print(str(obsid))\n\n fits = hcf.run_arc5gl(0, 0, obsid=obsid, level='2', filetype='evt2')\n\n if fits == False:\n write_on_skip_file(obsid)\n print(\"Data is not extracted\")\n continue\n#\n#--- if there are multiple output, use only first one\n#\n if isinstance(fits, list):\n fits = fits[0]\n\n xxx = 999\n #if xxx == 999:\n try:\n out = extract_count_stats(fits)\n #else:\n except:\n cmd = 'rm -f ' + fits + '*'\n os.system(cmd)\n write_on_skip_file(obsid)\n print(\"Analysis Failed\")\n continue\n\n if out[-1] <0:\n cmd = 'rm -f ' + fits + '*'\n os.system(cmd)\n write_on_skip_file(obsid)\n print(\"No Output\")\n continue\n\n line = str(obsid) + '\\t'\n\n if float(obsid) < 1000:\n line = line + '\\t'\n\n line = line + str(fits) + '\\t'\n line = line + out[7] + '\\t'\n line = line + '%2.1f' % round(out[6],1) + '\\t'\n line = line + '%2.2f' % round(out[5],2) + '\\t'\n line = line + '%2.2f' % round(out[8],2) + '\\t'\n line = line + '%2.4f' % round(out[9],4) + '\\n'\n\n if out[-1] == 0:\n outfile = data_dir + 'hrc_s_0_results'\n if out[-1] == 1:\n outfile = data_dir + 'hrc_s_10_results'\n if out[-1] == 2:\n outfile = data_dir + 'hrc_s_25_results'\n if out[-1] == 3:\n outfile = data_dir + 'hrc_s_m10_results'\n if out[-1] == 4:\n outfile = data_dir + 'hrc_s_m25_results'\n\n if out[-1] == 10:\n outfile = data_dir + 'hrc_i_0_results'\n if out[-1] == 11:\n outfile = data_dir + 'hrc_i_10_results'\n if out[-1] == 12:\n outfile = data_dir + 'hrc_i_25_results'\n if out[-1] == 13:\n outfile = data_dir + 'hrc_i_m10_results'\n if out[-1] == 14:\n outfile = data_dir + 'hrc_i_m25_results'\n\n with open(outfile, 'a') as fo:\n fo.write(line)\n\n cmd = 'rm -f *fits*'\n os.system(cmd)",
"def toFits(self, outfile):\n self._log(\"info\", \"Converting to FITS file\")\n hdus = [fits.PrimaryHDU()]\n for detector in self.detectors:\n self._log(\"info\", \"Converting detector {} to FITS extension\".format(detector.name))\n hdus.append(detector.imageHdu)\n hdulist = fits.HDUList(hdus)\n hdulist.writeto(outfile, overwrite=True)\n self._log(\"info\", \"Created FITS file {}\".format(outfile))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the folder containing THIS script, compatible with frozen (compiled/bundled) executables.
|
def __get_this_folder():
    """Return the directory that contains this script (frozen-safe)."""
    script_path = os.path.abspath(os.path.realpath(__get_this_filename()))
    return os.path.dirname(script_path)
|
[
"def scripts_folder(self):\n return ScriptUtils.defaultScriptsFolder()",
"def lock_directory(self):\n from os.path import join, basename\n return join(self._parent_directory, \".\" + basename(self.filename) + \"-pylada_lockdir\")",
"def get_frozen_version(self):\n # todo: put this to use\n basedir = os.path.dirname(sys.executable)\n versions = []\n for dname in os.listdir(basedir):\n dirname = op.join(basedir, dname)\n if op.isdir(dirname) and dname.startswith(self.get_name() + '_'):\n versions.append((dname.split('_')[-1], dirname))\n versions.sort(key=lambda x: versionstring(x[0]))\n if versions:\n return versions[-1]\n return None, None",
"def get_result_folder():\n return _result_folder",
"def get_renat_path():\n return _folder",
"def get_sys_scripts_folder() -> str:\n return os.path.join(get_sys_folder(), 'Scripts')",
"def get_artella_python_folder():\n\n return None",
"def prefabs_directory(self):\n return self.get_directory(PREFABS)",
"def _get_folder(self) -> \"std::string\" :\n return _core.FolderDialog__get_folder(self)",
"def get_script_dir():\n return os.path.dirname(os.path.realpath(__file__)) + '/'",
"def current_python_script_directory() -> str:\n return os.path.dirname(os.path.realpath(__file__))",
"def get_exe_dir():\n if getattr(sys, 'frozen', False):\n bdir = os.path.dirname(sys.executable)\n elif 'SWAK_EXE_DIR' in os.environ:\n return os.environ['SWAK_EXE_DIR']\n else:\n bdir = os.path.dirname(os.path.abspath(__file__))\n return bdir",
"def get_build_base_folder(self):\n return os.path.join(self.folder, \"build\")",
"def _GetToolsParentDir():\n return os.path.abspath(os.path.join(*expand_owners.DIR_ABOVE_TOOLS))",
"def get_output_folder(self):\n return self.output_folder",
"def get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Syllabus.SYLLABUS_FILES_LOCATION,\n str(self.unique_id)[0:2])",
"def _get_resourceFolder(self) -> \"std::string\" :\n return _core.Workspace__get_resourceFolder(self)",
"def base_directory():\n return os.path.dirname(os.path.realpath(__file__)) + os.path.sep",
"def world_path(self) -> str:\n return self._directory",
"def get_target_folder():\n default_folder = os.path.join(settings.BASE_DIR, '..', 'dumps', 'localhost')\n folder = os.path.realpath(os.path.expanduser(\n getattr(settings, 'DUMP_LOCAL_DATA_TARGET_FOLDER', default_folder)\n ))\n return folder"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.