query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (sequence, 19–20 items) | metadata (dict) |
---|---|---|---|
Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result in ``PageMiddleware._installed`` so this check only runs once. | def installed(cls):
try:
return cls._installed
except AttributeError:
name = "mezzanine.pages.middleware.PageMiddleware"
installed = middlewares_or_subclasses_installed([name])
setattr(cls, "_installed", installed)
return installed | [
"def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)",
"def test_deferred_page_classes_are_not_registered(self):\n list(SimplePage.objects.defer(\"content\"))\n simplepage_subclasses = [\n cls for cls in get_page_models() if issubclass(cls, SimplePage)\n ]\n self.assertEqual(simplepage_subclasses, [SimplePage])",
"def test_installed_handlers__no_installed_apps(self):\r\n self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER]\r\n self._check_get_handlers()",
"def _install(cls):\n if not cls._hook:\n cls._hook = cls()\n cls._insert_hook()",
"def pre_installation(self):\n pass",
"def autodiscover():\n from django.utils.importlib import import_module\n global LOADED\n if LOADED:\n return\n LOADED = True\n for app in settings.INSTALLED_APPS:\n try:\n import_module(\"%s.page_widgets\" % app)\n except ImportError, e:\n if \"WidgetModel\" in \"%s\" % e:\n traceback.print_exc(file=sys.stdout)\n pass",
"def install(self, page):\n self.page = page\n self.page.route(\"**\", self.router)",
"def is_perm_middleware_installed():\n return 'coop_cms.middleware.PermissionsMiddleware' in django_settings.MIDDLEWARE",
"def _is_plugin_installed(self):",
"def test_installed_handlers__installed_apps(self):\r\n self.settings['INSTALLED_APPS'] = [self.ECHO_APP]\r\n self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER,\r\n self.ECHO_HANDLER]\r\n self._check_get_handlers(PingHandler, EchoHandler)",
"def install(self):\r\n if not self.installed:\r\n sys.settrace(self.tracer)\r\n self.installed = True",
"def __bool__(self):\n return self.installed",
"def page_setup(self):\n return self.container['page_setup']",
"def set_installed(self):\n self._installed = True",
"def _register_middleware(self) -> None:\n pass",
"def is_installed(cls):\n return find_spec_or_loader(cls.module) is not None",
"def process_request(self, request): # pylint: disable=R0201\n\n error = (\"The Django CAS middleware requires authentication \"\n \"middleware to be installed. Edit your MIDDLEWARE_CLASSES \"\n \"setting to insert 'django.contrib.auth.middleware.\"\n \"AuthenticationMiddleware'.\")\n assert hasattr(request, 'user'), error",
"def update_installed(self):\r\n self.installed = sorted(PluginLoader.list_plugins().values(), key=lambda x:x.name)",
"def _install_content_handlers(self):\n @self.app.before_first_request\n def init_content():\n self.content.initialize(self.app.config)\n self.content.load()\n\n if self.freezing:\n return\n\n @self.app.before_request\n def auto_update_content():\n # avoid reloading on static files:\n if request.endpoint == 'static':\n return\n\n # reload on explicit view requests only (e.g. not favicons):\n if request.endpoint in self.app.view_functions:\n self.content.load()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
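Each row above pairs one query with one positive document and a list of negative documents, and its metadata declares a triplet objective over ("query", "document", "negatives"). As a minimal sketch of how such a row could be expanded into training triplets (plain Python, no particular training framework assumed; the row values below are hypothetical placeholders, not records from the dataset):

```python
# Minimal sketch: expand one (query, document, negatives) row into
# (anchor, positive, negative) triplets, per the "triplet" objective
# declared in the metadata column. The row below is hypothetical.
row = {
    "query": "Checks whether a given middleware class is installed.",
    "document": "def installed(cls): ...",
    "negatives": ["def install(self): ...", "def is_installed(cls): ..."],
}

def row_to_triplets(row):
    """Yield one (anchor, positive, negative) tuple per negative."""
    for negative in row["negatives"]:
        yield (row["query"], row["document"], negative)

triplets = list(row_to_triplets(row))
print(len(triplets))  # one triplet per listed negative
```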
Checks if all the arguments it receives are numeric (according to ``is_numeric``). | def are_numeric(*values):
for value in values:
if not is_numeric(value):
return False
return True | [
"def isnumeric(self): # real signature unknown; restored from __doc__\n return False",
"def isNumeric(self) -> bool:\n ...",
"def check_value(self, iterable):\n allnumeric = True\n for item in iterable:\n if type(item) not in [int, float]:\n allnumeric = False\n return allnumeric",
"def check_for_float_and_int(check):",
"def param_is_numeric(self, param):\n return isinstance(self.param(param), (float, int))",
"def number_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Number):\n name = type(var).__name__\n raise DigitError(\n 'Function {} expected number, {} got instead.'.format(func, name))",
"def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass",
"def isNumeric(value):\n return isinstance(value, int) or isinstance(value, float) or isinstance(value, long)",
"def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False",
"def isNumeric(obj):\n return isinstance(obj, (int, float, bool))",
"def hasCorrectNumberArguments(self, *args):\n return _libsbml.ASTBasePlugin_hasCorrectNumberArguments(self, *args)",
"def is_numeric(self) -> bool:\n return False",
"def isnumeric(object):\n return isinstance(object, (int, float, decimal.Decimal, np.number))",
"def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)",
"def isnumeric(self):\n return isnumeric(self)",
"def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)",
"def _is_numeric(some_num):\n try:\n float(some_num)\n return True\n except:\n return False",
"def numeric(*args):",
"def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
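The ``are_numeric`` document above delegates each check to an ``is_numeric`` helper that is not defined in the row. A short sketch, assuming a simple int/float definition for that helper (an assumption for illustration only):

```python
# Illustrative sketch: `is_numeric` is assumed to accept ints and floats;
# the helper actually referenced by the row is not shown there.
def is_numeric(value):
    return isinstance(value, (int, float))

def are_numeric(*values):
    for value in values:
        if not is_numeric(value):
            return False
    return True

print(are_numeric(1, 2.5, 3))    # True
print(are_numeric(1, "2", 3.0))  # False
```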
Return the unit-aware definition corresponding to ``attrname``. | def _get_wavelength_attrs_with_units(self, attrname, units='AA'):
attr = self._lick[attrname]
if self.wavelength_unit is not None:
if units is None:
return attr * unit[self.wavelength_unit]
else:
return (attr * unit[self.wavelength_unit]).to(units)
else:
return attr | [
"def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)).to(units)\n else:\n return attr",
"def getFieldAttrByName(self,fieldname,attr):\n field = self.getFieldByName(fieldname)\n if attr == \"type\" or attr == \"fieldtype\":\n return field.getType()\n elif attr == \"unit\":\n return field.getUnit()\n raise MyDataException(\"The fields do not have an attribute of the type \" + str(attr) + \".\")",
"def get_attr_spec(self, **kwargs):\n attr_name = getargs('attr_name', kwargs)\n return self.__attr2spec.get(attr_name)",
"def _parse_unit_attr(attr: str) -> str:\n parts = attr.split('_', maxsplit=1)\n valid_attr = len(parts) == 2 and parts[0] == \"unit\"\n if not valid_attr:\n raise ValueError(\"{0} is not a valid unit attribute.\".format(attr))\n return parts[1]",
"def attribute(category_name):",
"def __getattr__(self, name):\n if name in self.__units:\n return self.__units[name]\n if len(unit_to_type) == 0:\n evalunitdict()\n try:\n v = self.__data[name]\n except KeyError:\n raise AttributeError\n if isinstance(v, dict):\n U = Units(self.__data[name], name)\n else:\n U = str_to_unit(name)\n self.__units[name] = U\n return U",
"def mineral_attr(attribute):\n return attribute[0]",
"def get_unit_dict(self, attr_names: list = [], min_ratio: float = None, outfile='') -> dict:\n\n return self.unit_extractor.get_unit_dict(attr_names, min_ratio, outfile)",
"def unit_of_measurement(self):\n return self._def[ATTR_UNIT]",
"def test_unit_as_attribute():\n meter = Unit.m\n assert meter.__class__.__name__ == \"Quantity\" # Quantity class is not easily available in pint\n assert meter.magnitude == 1\n assert str(meter.units) == \"meter\"",
"def attrName(self, longName=False, includeNode=False):\n \n pass",
"def getUnitDefinition(self, *args):\n return _libsbml.Model_getUnitDefinition(self, *args)",
"def attr_case_name(self, name):\n\n lower_name = name.lower()\n for i in self.attrs():\n if lower_name == i.lower():\n return i\n # check if attribute present in higher order structures\n for key in self.keys_nD():\n for i in self[key].children.attrs():\n if lower_name == i.lower():\n return i\n # nothing was found if still here\n # pass name back, free to be whatever\n return name",
"def get_unit(self,tag):",
"def _attr_name(n: int) -> str:\n return f'attr{n:03d}'",
"def getFieldAttrByName(self,fieldname,attr):\n return self.fields.getFieldAttrByName(fieldname,attr)",
"def getAttribute(self, name):\n return self.attributes[name]",
"def attributeDecl(self, elem, name, type, defi, defaultValue, nameList):\n pass",
"def AttributeString(self) -> str:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
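The ``_get_wavelength_attrs_with_units`` document attaches the instance's wavelength unit to a bare value and then converts it with ``.to(units)``; the ``unit`` mapping it indexes comes from an unspecified library. A rough sketch of that attach-and-convert step, using ``astropy.units`` purely as a stand-in (an assumption, not the library the row actually uses):

```python
# Stand-in illustration of the unit-attach-and-convert step using
# astropy.units; the row's own `unit` mapping is from another library.
import astropy.units as u

attr = 6563.0                    # hypothetical wavelength stored without units
with_units = attr * u.AA         # attach the stored wavelength unit (Angstrom here)
converted = with_units.to(u.nm)  # convert to the requested target units
print(converted)                 # 656.3 nm
```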
Scan for independent loops and set up dictionaries. | def main(self, verbose=0):
indepdict=self.scan_for_loop(self.indeploop)
pegdict1 = self.scan_for_loop(self.pegloop1)
pegdict2 = self.scan_for_loop(self.pegloop2)
if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:
return dict()
alldict = dict(indepdict)
alldict.update(pegdict1)
alldict.update(pegdict2)
indepcomb=self.get_combo_list(indepdict, 0)
pegcomb1=self.get_combo_list(pegdict1, 1)
pegcomb2=self.get_combo_list(pegdict2, 1)
allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)
datasets = self.prepare_looped_datasets(alldict, allcombs)
createdfiles = self.create_input_files(datasets)
if verbose == 1:
self.print_list(indepcomb)
self.print_list(pegcomb1)
self.print_list(pegcomb2)
self.print_list(allcombs)
for datakey in datasets:
self.print_list(datasets[datakey])
return createdfiles | [
"def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict",
"def find_loop_nest_with_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result = {}\n\n from loopy.kernel.data import ConcurrentTag, IlpBaseTag\n\n all_nonpar_inames = {\n iname for iname in kernel.all_inames()\n if not kernel.iname_tags_of_type(iname,\n (ConcurrentTag, IlpBaseTag))}\n\n iname_to_insns = kernel.iname_to_insns()\n\n for iname in all_nonpar_inames:\n result[iname] = {other_iname\n for insn in iname_to_insns[iname]\n for other_iname in kernel.insn_inames(insn) & all_nonpar_inames}\n\n return result",
"def prepare_iterative_solution(self):",
"def createAllDictionaries(self):\n self.makeSentenceLengths()\n self.makeWords()\n self.makeStems()\n self.makeWordLengths()",
"def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen",
"def do_loop():\n\n for i_val in i_vals:\n for j_val in j_vals:\n # Build a dictionary of replacements for the placeholders\n replacements = dict(i=i_val, j=j_val)\n\n # Start a thread to execute CCMD when ready.\n exe = ThreadCounter(replacements)\n exe.start()",
"def find_loop_nest_around_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result: Dict[str, Set[str]] = {}\n\n all_inames = kernel.all_inames()\n\n iname_to_insns = kernel.iname_to_insns()\n\n # examine pairs of all inames--O(n**2), I know.\n from loopy.kernel.data import IlpBaseTag\n for inner_iname in all_inames:\n result[inner_iname] = set()\n for outer_iname in all_inames:\n if inner_iname == outer_iname:\n continue\n\n if kernel.iname_tags_of_type(outer_iname, IlpBaseTag):\n # ILP tags are special because they are parallel tags\n # and therefore 'in principle' nest around everything.\n # But they're realized by the scheduler as a loop\n # at the innermost level, so we'll cut them some\n # slack here.\n continue\n\n if iname_to_insns[inner_iname] < iname_to_insns[outer_iname]:\n result[inner_iname].add(outer_iname)\n\n for dom in kernel.domains:\n for outer_iname in dom.get_var_names(isl.dim_type.param):\n if outer_iname not in all_inames:\n continue\n\n for inner_iname in dom.get_var_names(isl.dim_type.set):\n result[inner_iname].add(outer_iname)\n\n return result",
"def construct_dicts(self):\n self.construct_data_dict(train=True)\n self.construct_data_dict()\n self.construct_data_dict(test=True)\n\n\n self.construct_label_dict(train=True)\n\n\n self.construct_label_dict()\n self.construct_span_dict(train=True)\n self.construct_span_dict()\n\n for k, v in self.span_dict.items():\n self.span_dict[k] = Counter(v)\n self.construct_label_desc_dict()",
"def algorithm_loop(self):",
"def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info",
"def initialize(self): \n \n \n fixed_counts = {}\n partial_counts = {}\n \n self.ctx.static = []\n self.ctx.partial = {}\n self.ctx.select = []\n \n self.ctx.compositions = []\n self.ctx.sites_refactored = {}\n \n # We collect the sites into their composition and calculate the theoretical number of occupied sites\n for site in self.ctx.structure:\n if self.__is_partial(site):\n self.ctx.sites_refactored.setdefault(site.species_and_occu, [])\n if site.species_and_occu not in self.ctx.compositions:\n self.ctx.compositions.append(site.species_and_occu)\n \n self.ctx.partial.setdefault(site.species_and_occu, [[]])\n self.ctx.partial.get(site.species_and_occu)[0].append(site)\n \n partial_counts.setdefault(site.species_and_occu, [[0, 0] for s in site.species_and_occu])\n \n for i, element in enumerate(site.species_and_occu):\n partial_counts[site.species_and_occu][i][0] += site.species_and_occu.get(element)\n partial_counts[site.species_and_occu][i][1] += site.species_and_occu.get(element)\n else:\n self.ctx.static.append(PeriodicSite(site.specie, site.coords, site.lattice, True, True))\n fixed_counts.setdefault(site.specie, 0)\n fixed_counts[site.specie] += 1\n \n # If all sites are static, then no need to do anything.\n if len(self.ctx.static) == len(self.ctx.structure):\n self.ctx.do_break = 0\n self.out('structures.%s' % self.inputs.structure.uuid, self.inputs.structure)\n return\n \n # We compile the number of occupied site for each partial composition while not going over the theoretical number\n for comp in partial_counts:\n self.ctx.rs.shuffle(self.ctx.partial.get(comp)[0])\n for i, sp in enumerate(comp):\n partial_counts[comp][i][0] = np.floor(partial_counts[comp][i][0])\n \n # Calculation of the departure from the composition. \n error = {\n el: self.ctx.structure.composition.get(el) - fixed_counts.get(el, 0)\n for el in self.ctx.structure.composition\n }\n\n for comp in partial_counts:\n for i, sp in enumerate(comp):\n error[sp] -= partial_counts.get(comp)[i][0]\n\n # Adding ions to sites with the highest departure from theoretical number as long as the error\n # is greater than 0.5.\n for element in error:\n while error[element] > 0.5:\n if error[element] > 0:\n max_error = (None, 0)\n for i, comp in enumerate(partial_counts):\n if element in comp:\n for j, sp in enumerate(comp):\n if sp == element:\n err = (partial_counts.get(comp)[j][1] - partial_counts.get(comp)[j][0]) ** 2\n if err > max_error[1]:\n max_error = ((comp, j), err)\n partial_counts.get(max_error[0][0])[max_error[0][1]][0] += 1\n error[element] -= 1\n \n self.ctx.configurations = tuple()\n self.ctx.configuration_hashes = tuple()\n self.ctx.configuration_steps = tuple()\n self.ctx.configuration_energies = tuple()\n \n for comp in partial_counts:\n # For each site, calculate log10 of the multinomial factor,\n # it will be used to scale the probability of each site to \n # be used for a swap.\n n = 0\n for i in range(len(self.ctx.partial.get(comp)[-1])):\n n += np.log10(i + 1)\n \n for i, sp in enumerate(comp):\n for j in range(int(partial_counts.get(comp)[i][0])):\n n -= np.log10(j + 1)\n \n for _ in range(int(partial_counts.get(comp)[i][0])):\n site = self.ctx.partial.get(comp)[-1].pop(0)\n self.ctx.partial.get(comp).insert(0, PeriodicSite(Specie(sp, self.ctx.charges.get(sp.value, 0)), \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(sp)\n leftovers = self.ctx.partial.get(comp).pop()\n \n for j in range(len(leftovers)):\n n -= np.log10(j + 1)\n \n for site in leftovers:\n 
self.ctx.partial.get(comp).insert(0, PeriodicSite(self.ctx.vacancy, \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(self.ctx.vacancy.element)\n\n for _ in range(np.ceil(n).astype(int)):\n self.ctx.select.append(comp)\n \n for sites_refactored in self.ctx.sites_refactored.values():\n self.ctx.rs.shuffle(sites_refactored)\n \n self.ctx.idxes = [idx for idx in range(len(self.ctx.select))]\n self.ctx.sites = self.ctx.partial\n del self.ctx.partial\n \n self.ctx.partial_refactored = []\n # (site #, element) -> particle #\n self.ctx.indices = {}\n i = 0\n \n for site in self.ctx.structure:\n if self.__is_partial(site):\n for element in site.species_and_occu.keys():\n self.ctx.indices[(i, element)] = len(self.ctx.partial_refactored)\n self.ctx.partial_refactored.append(PeriodicSite(Specie(element, self.ctx.charges.get(element.value)), site.coords, site.lattice, True, True))\n i += 1\n \n self.ctx.all_indices = set(range(len(self.ctx.partial_refactored)))\n structure = Structure.from_sites(self.ctx.partial_refactored)\n self.ctx.ewald = EwaldSummation(structure)\n\n self.ctx.energy = self.__ewald(self.ctx.sites_refactored) * np.ones(1)\n self.ctx.tested = np.empty(0, dtype=np.float)\n self.ctx.accepted = np.empty(0, dtype=np.float)\n\n if self.inputs.verbose:\n self.report('Starting structure: E = %f' % self.ctx.energy[-1])",
"def iterate_results(results, extract_fn):\n outputs = {}\n for environment, environment_results in results.items():\n if environment not in outputs:\n outputs[environment] = {}\n for experimental_setting, setting_results in environment_results.items():\n outputs[environment][experimental_setting] = []\n for config, seeds_results in setting_results.items():\n for seed, actual_results in seeds_results.items():\n output = extract_fn(actual_results)\n outputs[environment][experimental_setting].append(output)\n outputs[environment][experimental_setting] = np.array(outputs[environment][experimental_setting])\n return outputs",
"def __staticLoopBoundScanning(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # generate the lower and upper values of each inter-tile loop\n val_table = {}\n for iname in outer_loop_inames:\n _, _, _, st_exp, _ = loop_info_table[iname]\n lval = ast.IdentExp(self.__getTileIterName(iname, tile_level))\n t = ast.BinOpExp(\n ast.IdentExp(self.__getTileSizeName(iname, tile_level)),\n ast.ParenthExp(st_exp.replicate()),\n ast.BinOpExp.SUB,\n )\n uval = ast.BinOpExp(lval.replicate(), ast.ParenthExp(t), ast.BinOpExp.ADD)\n val_table[iname] = (lval, uval)\n\n # iterate over each statement to determine loop bounds that are affine functions\n # of outer loop iterators\n lb_exps_table = {}\n ub_exps_table = {}\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # determine the value of the new lower loop bound\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_prolog:\n t = self.__findMinMaxVal(\n \"max\", lb_exp.replicate(), lb_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # determine the value of the new upper loop bound\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_epilog:\n t = self.__findMinMaxVal(\n \"min\", ub_exp.replicate(), ub_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n 
scan_stmts.append(ast.ExpStmt(a))\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def startLoop():\n running_loop = True\n while running_loop:\n if vars.kill:\n # Kill if user wants\n running_loop = False\n return\n symbols = list(vars.symbols.keys()) # ticker symbols being tracked\n # interating over them to scrape articles\n for symbol in symbols:\n # Grabbing data for ticker symbol data\n sym_contents = summarizer(symbol,\n depth=vars.depth,\n threads=vars.threads,\n top_n=vars.top_n,\n url_ignore=vars.url_ignore\n )\n # Reformatting for this scripts purpose\n for ind, content in enumerate(sym_contents):\n summary = \". \".join(content['ranked_sentences'])\n # Keeping only relevant data\n results_dict = {\"Title\": content['title'],\n 'URL': content['url'],\n \"Summary\": summary}\n # making index for article\n article_count = len(vars.symbols) + ind\n # adding article to the vars class\n vars.symbols[symbol][str(article_count)] = results_dict\n # adding url used to url_ignore to prevent scraping twice\n vars.url_ignore += [content['url']]\n # Making an Overall summary for all articles pertaining to a ticker symbol\n overall_summary = \"\"\n for key in vars.symbols[symbol].keys():\n if key.isdigit(): # Dont want to add the Overall summary in the overall summary\n overall_summary = \"{} {}\".format(overall_summary, vars.symbols[symbol][key]['Summary'])\n # joining top_n ranked sentences for Overall Summary\n vars.symbols[symbol]['OverallSummary'] = \". \".join(rank(overall_summary)[:vars.top_n])\n results = vars",
"def _build_loops_map(self):\n line_index, stack = 0, []\n\n while line_index < len(self._script):\n for position, instruction in enumerate(self._script[line_index][1]):\n line_no = self._script[line_index][0]\n instruction_position = (line_index, position)\n\n if \"[\" == instruction:\n stack.append((line_no, instruction_position))\n elif \"]\" == instruction:\n if 0 == len(stack):\n raise BrainfuckException(\n \"[Syntax error] Unexpected closing bracket in line %s at position %s\" % (\n line_no + 1, position + 1))\n\n start = stack.pop()\n self._loops_map[start[1]] = instruction_position\n self._loops_map[instruction_position] = start[1]\n\n line_index += 1\n\n if len(stack) > 0:\n bracket_position = stack.pop()\n\n raise BrainfuckException(\"[Syntax error] Unclosed bracket in line %s at position %s\" % (\n bracket_position[0] + 1, bracket_position[1][1] + 1))",
"def crunch_numbers(self):\n self.num_unique_loops = len(self.list_loops)\n self.lens_unique_loops = [len(loop) for loop in self.list_loops]\n lens_pre_loops = [len(run['pre_loop']) for run in\n self.list_of_runs]\n\n self.avg_len_pre_loop = sum(lens_pre_loops)/len(self.lens_pre_loops)",
"def substitute_self_loops(self) -> Dict[Variable, Variable]:\n # dev note: we can cythonize this for better performance\n\n mapping: Dict[Variable, Variable] = dict()\n\n self._substitute_self_loops_from_model(self.objective, mapping)\n\n for comparison in self.constraints.values():\n self._substitute_self_loops_from_model(comparison.lhs, mapping)\n\n # finally add the constraints for the variables\n for v, new in mapping.items():\n self.add_constraint([(v, 1), (new, -1)], rhs=0, sense='==', label=new)\n\n return mapping",
"def _init_dictionaries(self):\n\t\t# Dictionary contatining all actionPotential\n\t\tself.actionPotentials = {}\n\t\t# Dictionary containing all cells id.\n\t\t# Cells id are used by neuron to communicate synapses between different cells in different hosts. Ids (gids) can be any integer, they just need to be unique.\n\t\tself.cellsId = {}\n\t\t# Dictionary containing all cells\n\t\tself.cells = {}\n\n\t\tself._nMuscles = len(self._infoMuscles)\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\t# Create sub-dictionaries for all DoF\n\t\t\tself.actionPotentials[muscle]={}\n\t\t\tself.cellsId[muscle]={}\n\t\t\tself.cells[muscle]={}\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\t# add lists containing cell ids/cells/ap\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tself.cellsId[muscle][cellName]=[]\n\t\t\t\tself.cells[muscle][cellName]=[]\n\t\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\n\t\t# Add special cells (specifc for some muscles or not muscle related)\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tif not groupOrMuscle in self.cellsId.keys():\n\t\t\t\tself.actionPotentials[groupOrMuscle]={}\n\t\t\t\tself.cellsId[groupOrMuscle]={}\n\t\t\t\tself.cells[groupOrMuscle]={}\n\n\t\t\tself.cellsId[groupOrMuscle][cellName]=[]\n\t\t\tself.cells[groupOrMuscle][cellName]=[]\n\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
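The ``main`` document above scans for independent and pegged loops, turns each dictionary into a combination list, and merges the lists before preparing datasets. Those helpers are not shown in the row; the sketch below uses ``itertools.product`` over hypothetical per-line value lists to suggest how independent-loop combinations of the "lineindex-valueindex" form consumed later (see ``prepare_looped_lines`` below) might be enumerated. This is an assumption about the helpers' behavior, not their implementation.

```python
# Hypothetical sketch of enumerating independent-loop combinations.
# Keys are line indices; values are the looped alternatives for that line.
import itertools

indepdict = {3: ["300", "400"], 7: ["fcc", "bcc"]}  # hypothetical data

combos = [
    ["%d-%d" % (lidx, vidx) for lidx, vidx in zip(indepdict, choice)]
    for choice in itertools.product(*(range(len(v)) for v in indepdict.values()))
]
print(combos)
# [['3-0', '7-0'], ['3-0', '7-1'], ['3-1', '7-0'], ['3-1', '7-1']]
```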
Prepare looped lines from the looping dictionary. | def prepare_looped_lines(self, alldict, comblist):
loopline_dict=dict()
for stridx in comblist:
lidx = int(stridx.split('-')[0])
loopidx = int(stridx.split('-')[1])
loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\n'
return loopline_dict | [
"def _processLines(self):\n self.nlines = len(self.lines)\n self.params = {}\n self._pline = {}\n for i,line in enumerate(self.lines):\n if (line[0] is not '#') & (line.strip() is not ''):\n spl = line.split()\n self.params[spl[0]] = ' '.join(spl[1:])\n self._pline[spl[0]] = i\n self.nkeys = self.params.keys().__len__()",
"def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict",
"def prepare_section_lines_data(self):\n for _hd in self.hour_data:\n if not _hd.state: # если state равен 0 - то это неконтролируемое сечение\n continue\n for _ld in _hd.line_data:\n if _ld.skip:\n continue\n for _line in _ld.lines:\n if _line.get_line_hour_state(_hd.hour):\n self.section_lines_data.append((\n _hd.hour, _line.parallel_num, _ld.node_from_code, _ld.node_to_code,\n _ld.div, self.code\n ))",
"def duplicateOneliners(self, key, line):\n # line = line.split(\"\\t\")[1].strip(\"\\\"\")\n reps = self.options.bootreps2run\n while reps != 0:\n yield reps, line\n reps -= 1",
"def populateLines(self):\n self.mainLines = [self.inputName + \": \" + self.inputLine]",
"def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict",
"def prepare_section_lines_impex_data(self):\n if (not self.dpgs) or (not self.type):\n # если нет ГТП или тип равен 0 - то это не импорт/экспорт\n return\n for _hd in self.hour_data:\n for _ld in _hd.line_data:\n if _ld.skip:\n continue\n for _line in _ld.lines:\n if _line.get_line_hour_state(_hd.hour):\n self.section_lines_impex_data.append((\n _hd.hour, _line.parallel_num, _ld.node_from_code, _ld.node_to_code,\n _ld.div, self.code\n ))",
"def _prepareLines(self, header):\n self.Lines = []\n Header = \"\"\"# This file was created by gnuplotFile.py version %s on %s. \n# This file is written with the intent of being used to plot the data with\n# Gnuplot. It contains no information regarding how the plot should be\n# formatted.\n# =============================================================================\n#\n# %s$ %s\n# %s\n# =============================================================================\n# \"\"\" %(__version__, \n time.asctime(), os.uname()[1], string.join(sys.argv), header)\n self.Lines.append(Header)\n \n if not isinstance(self.data, dict):\n raise TypeError, \"Data must be in dictionary type.\"\n \n keys = self.data.keys()\n keys.sort(cmp=self.sortKeys)\n datalines = []\n nline = '# '\n tline = '# '\n cline = '# '\n counter = 1\n\n # Prepare Column Headers\n for key in keys:\n if len(self.data[key]) == 3:\n nline = nline + ' %-12i %-12i %-12i' %(counter, counter+1,\n counter+2)\n tline = tline + \"|%s|\" %self.pad(key)\n cline = cline + ' %-12s %-12s %-12s ' %('x-coord', 'y-coord', 'error')\n counter +=3\n elif len(self.data[key]) == 2:\n nline = nline + ' %-12i %-12i' %(counter, counter+1)\n tline = tline + \"|%s|\" %self.pad(key)\n cline = cline + ' %-12s %-12s ' %('x-coord', 'y-coord')\n counter +=2\n else:\n raise TypeError(\"Data must be tuples of length 2 or 3.\")\n\n # Prepare Data lines\n for i in xrange(self.Longest()):\n line = \" \"\n for key in keys:\n if len(self.data[key][0]) > i:\n if len(self.data[key]) == 3:\n line += ' %-12.5G %-12.5G %-12.5G ' %(\n self.data[key][0][i], self.data[key][1][i], self.data[key][2][i])\n elif len(self.data[key]) == 2:\n line += ' %-12.5G %-12.5G ' %(\n self.data[key][0][i], self.data[key][1][i])\n else: # Data list doesn't contain this many elements\n line += ' '*(12*len(self.data[key]) + len(self.data[key]) + 1)\n datalines.append(line)\n\n\n self.Lines.append(nline)\n self.Lines.append(tline)\n self.Lines.append(cline)\n self.Lines.extend(datalines)",
"def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))",
"def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))",
"def create_dictionary(lines):\n\n main_dict = {}\n\n # For each conversation we read in\n # (conversation defined as a grouping of sentences, \n # each conversation delimited by an extra newline)\n print(\"Creating main dictionary...\")\n\n from tqdm import tqdm \n pbar = tqdm(total=len(lines))\n for convo in lines:\n\n convo = convo.split('\\n')\n if len(convo) < 2:\n continue\n\n statement = \"\"\n response = \"\"\n\n # Since not every 'statement' may be on the same line, this function\n # operates by building upon a string until the next thing it sees\n # is clearly a new line (beginning with a capital letter).\n have_statement = False\n have_response = False\n building_line = False\n current_line = \"\"\n line_num = 0\n my_name = \"\"\n while line_num < len(convo):\n build_count = 0\n current_line = convo[line_num]\n\n if line_num >= len(convo):\n break\n\n name_match = re.match(r'^\\s*\\D\\.\\s*', convo[line_num])\n\n line_num+=1\n if name_match:\n my_name = name_match.group(0)\n\n while line_num < len(convo) and re.match(r'^\\s+', convo[line_num]):\n current_line = current_line + \" \" + convo[line_num].lstrip()\n line_num+=1\n build_count+=1\n building_line = True\n\n if not have_statement:\n current_line = re.sub(r'^A*B*C*D*E*\\.\\s+(\\.*\\(.*\\))?\\s*', '', current_line)\n statement = current_line\n have_statement = True\n\n elif not have_response:\n current_line = re.sub(r'^A*B*C*D*E*\\.\\s+(\\.*\\(.*\\))?\\s*', '', current_line)\n response = current_line\n stmt_obj = NlpObj(statement)\n resp_obj = NlpObj(response)\n\n main_dict[stmt_obj] = resp_obj\n statement = response # once the current line is stored, \n # the current response is used as the new statement\n pbar.update(1)\n pbar.close()\n return main_dict",
"def build_other_lines(self, starting_words, line_number):\n syllables = 0\n line = \"\"\n syllable_num = 6\n rhyme = self.rhyme_a\n if line_number == 4:\n syllable_num = 4\n rhyme = self.rhyme_b\n while syllables < syllable_num:\n next_word = self.n_gram.retreive_next_word(starting_words)\n syllables += self.get_syllables(next_word)\n line += next_word + \" \"\n starting_words = (starting_words[1], next_word)\n next_word = self.n_gram.retreive_ryming_word(starting_words, rhyme)\n line += next_word\n starting_words = (starting_words[1], next_word)\n if line_number == 2:\n self.second_line = line\n elif line_number == 4:\n self.fourth_line = line\n else:\n self.fifth_line = line\n return starting_words",
"def _assignPars(self):\n import numpy as np\n for key in self.params.keys():\n param = self.params[key]\n if type(param) is not type('xxx'):\n param = np.str(param)\n \n #### New parameter?\n if self._pline.has_key(key):\n self.lines[self._pline[key]] = key + ' ' + param +'\\n'\n else:\n self.lines.append(key + ' ' + param +'\\n')\n \n self._processLines()",
"def scriptfilePrep(prepdict):\r\n #print the dictionary going into combining files\r\n \r\n# pkeys=prepdict.keys()\r\n# for key in pkeys.sort():\r\n# print key+': '+str(prepdict[key])\r\n \r\n station=prepdict['station']\r\n if len(prepdict['rrstation'].split(';'))>1: \r\n rrstation=prepdict['rrstation'].split(';')\r\n else:\r\n rrstation=[prepdict['rrstation'],prepdict['rrstation'],\r\n prepdict['rrstation']]\r\n\r\n try:\r\n fdictc=prepdict['fdict']\r\n except KeyError:\r\n fdictc=None\r\n #cacherate\r\n cacherate=prepdict['cacherate']\r\n \r\n #list of components to combine\r\n combcomplst=prepdict['elecori'].split(',')+prepdict['magori'].split(',')\r\n complstp=prepdict['elecori'].split(',')+prepdict['magori'].split(',')\r\n combcomplstr=prepdict['rrmagori'].split(',')\r\n complstpr=prepdict['rrmagori'].split(',')\r\n dec=int(prepdict['dec'])\r\n if dec==0:\r\n dec=1\r\n \r\n if len(prepdict['day'].split(';'))>1 or len(prepdict['day'].split(','))>1:\r\n \r\n dayslst=prepdict['day'].split(';')\r\n dstartlst=prepdict['start'].split(';')\r\n dstoplst=prepdict['stop'].split(';')\r\n drrstartlst=prepdict['rrstart'].split(';')\r\n drrstoplst=prepdict['rrstop'].split(';')\r\n cfilelst=[]\r\n rrcfilelst=[]\r\n nread=[]\r\n nskipr=[]\r\n \r\n for ii,days in enumerate(dayslst):\r\n combcomplst=prepdict['elecori'].split(',')+prepdict['magori'].split(',')\r\n combcomplstr=prepdict['rrmagori'].split(',')\r\n if len(days.split(','))>1:\r\n daylst=days.split(',')\r\n startlst=dstartlst[ii].split(',')\r\n stoplst=dstoplst[ii].split(',')\r\n dlst=[]\r\n nreadi=0\r\n for dd in range(len(daylst)):\r\n ddict={}\r\n ddict['day']=daylst[dd]\r\n ddict['start']=startlst[dd]\r\n ddict['stop']=stoplst[dd]\r\n try:\r\n ddict['filt']=prepdict['filtered']\r\n except KeyError:\r\n pass\r\n dlst.append(ddict)\r\n nreadi+=float(ddict['stop'][0:2])-float(ddict['start'][0:2])\r\n nreadi=int(nreadi*3600*float(prepdict['df'])/float(dec))\r\n cfilelsti,fileslsti=mt.combineFiles(prepdict['dirpath'],\r\n prepdict['station'],\r\n dlst,cacherate,\r\n complst=combcomplst,\r\n dec=dec,fdict=fdictc)\r\n #remote reference\r\n if rrstation[ii]!=station:\r\n rrstartlst=drrstartlst[ii].split(',')\r\n rrstoplst=drrstoplst[ii].split(',')\r\n rrdlst=[]\r\n #nreadr=0\r\n for dd in range(len(daylst)):\r\n rrddict={}\r\n rrddict['day']=daylst[dd]\r\n rrddict['start']=rrstartlst[dd]\r\n rrddict['stop']=rrstoplst[dd]\r\n try:\r\n rrddict['filt']=prepdict['rrfiltered']\r\n except KeyError:\r\n pass\r\n rrdlst.append(rrddict)\r\n \r\n rrcfilelsti,rrfileslsti=mt.combineFiles(prepdict['dirpath'],\r\n rrstation[ii],\r\n rrdlst,\r\n cacherate,\r\n complst=combcomplstr,\r\n dec=dec,\r\n fdict=fdictc)\r\n #get number of points to skip for remote reference\r\n nskipri=int((float(dlst[0]['start'][0:2])-\\\r\n float(rrdlst[0]['start'][0:2]))*\\\r\n (float(prepdict['df'])/float(dec)))\r\n else:\r\n rrcfilelsti=[]\r\n for cfile in cfilelsti:\r\n for rcomp in combcomplstr:\r\n if cfile.find(rcomp)>=0:\r\n rrcfilelsti.append(cfile)\r\n nskipri=0 \r\n #if multiple files but not continuous. 
\r\n else:\r\n day=days\r\n start=dstartlst[ii]\r\n stop=dstoplst[ii]\r\n try:\r\n filt=prepdict['filtered']\r\n except KeyError:\r\n filt=''\r\n \r\n #get number of points to read\r\n nreadi=int((float(stop[0:2])-float(start[0:2]))*3600*\\\r\n float(prepdict['df'])/dec)\r\n \r\n #make a directory path\r\n if filt!='':\r\n cdirpath=os.path.join(prepdict['dirpath'],station,day,filt)\r\n else:\r\n cdirpath=os.path.join(prepdict['dirpath'],station,day)\r\n #combine files\r\n #print 'combining ',cdirpath,start,stop,cacherate,combcomplst,dec\r\n cfilelsti,fileslsti=mt.combineFewFiles(cdirpath,\r\n station,\r\n start,stop,cacherate,\r\n complst=combcomplst,\r\n d=dec,fdict=fdictc)\r\n\r\n #remote reference\r\n if rrstation[ii]!=station:\r\n rrday=days\r\n rrstart=drrstartlst[ii]\r\n rrstop=drrstoplst[ii]\r\n try:\r\n filt=prepdict['rrfiltered']\r\n except KeyError:\r\n filt=''\r\n if filt!='':\r\n rrcdirpath=os.path.join(prepdict['dirpath'],\r\n rrstation[ii],rrday,filt)\r\n else:\r\n rrcdirpath=os.path.join(prepdict['dirpath'],\r\n rrstation[ii],rrday) \r\n \r\n rrcfilelsti,rrfileslsti=mt.combineFewFiles(rrcdirpath,\r\n rrstation[ii],\r\n rrstart,rrstop,\r\n cacherate,\r\n complst=\r\n combcomplstr,\r\n d=dec,\r\n fdict=fdictc)\r\n #get number of points to skip for remote reference\r\n nskipri=(int((float(dstartlst[ii][0:2])-\\\r\n float(drrstartlst[ii][0:2]))*\\\r\n (float(prepdict['df'])/float(dec))))\r\n else:\r\n rrcfilelsti=[]\r\n for cfile in cfilelsti:\r\n for rcomp in combcomplstr:\r\n if cfile.find(rcomp)>=0:\r\n rrcfilelsti.append(cfile)\r\n nskipri=0 \r\n #append files to a list\r\n cfilelst.append(cfilelsti)\r\n rrcfilelst.append(rrcfilelsti)\r\n nread.append(nreadi)\r\n nskipr.append(nskipri) \r\n #else normal input for one day \r\n else: \r\n #dirpath for timeseries\r\n try:\r\n cdirpath=prepdict['cdirpath']\r\n except KeyError:\r\n try:\r\n cdirpath=os.path.join(prepdict['dirpath'],prepdict['station'],\r\n prepdict['day'],prepdict['filtered'])\r\n except KeyError:\r\n cdirpath=os.path.join(prepdict['dirpath'],prepdict['station'],\r\n prepdict['day'])\r\n #dirpath for remote reference station \r\n try:\r\n cdirpathr=prepdict['cdirpathr']\r\n except KeyError:\r\n try:\r\n cdirpathr=os.path.join(prepdict['dirpath'],rrstation[0],\r\n prepdict['day'],prepdict['rrfiltered'])\r\n except KeyError:\r\n cdirpathr=os.path.join(prepdict['dirpath'],rrstation[0],\r\n prepdict['day'])\r\n \r\n #start and stop time for both data and remote reference time series\r\n stime=prepdict['start']\r\n etime=prepdict['stop']\r\n \r\n stimer=prepdict['rrstart']\r\n etimer=prepdict['rrstop']\r\n \r\n #Combine time series files\r\n cfilelst,fileslst=mt.combineFewFiles(cdirpath,station,stime,etime,\r\n cacherate,complst=combcomplst,\r\n d=dec,fdict=fdictc)\r\n \r\n #combine remote reference files\r\n if rrstation[0]!=station:\r\n rrcfilelst,rrfileslst=mt.combineFewFiles(cdirpathr,rrstation[0],\r\n stimer,etimer,cacherate,\r\n complst=combcomplstr,\r\n d=dec,fdict=fdictc)\r\n #get number of points to skip for remote reference\r\n if float(dec)==float(0):\r\n dec=1\r\n else:\r\n pass \r\n \r\n nskipr=int((float(stime[0:2])-float(stimer[0:2]))*\\\r\n (float(prepdict['df'])/float(dec))) \r\n else:\r\n rrcfilelst=[]\r\n for cfile in cfilelst:\r\n for rcomp in combcomplstr:\r\n if cfile.find(rcomp)>=0:\r\n rrcfilelst.append(cfile)\r\n nskipr=0\r\n \r\n \r\n #get number of points to read, length of fft and maximum decimations\r\n if float(dec)==float(0):\r\n dec=1\r\n else:\r\n pass \r\n 
nread=int(3600*(float(prepdict['df'])/float(dec))*\\\r\n (float(etime[0:2])-float(stime[0:2])))\r\n \r\n\r\n #sampling frequency\r\n if float(prepdict['dec'])==0.0:\r\n prepdict['dec']=str(1)\r\n deltat='-'+str(float(prepdict['df'])/float(prepdict['dec'])) \r\n \r\n try:\r\n nfft=prepdict['nfft']\r\n except KeyError:\r\n #get max length of time window\r\n npow=np.floor(np.log2(float(max(nread)))-16)\r\n if npow>6:\r\n nfftpow=17\r\n elif npow>=2 and npow<=6:\r\n nfftpow=16\r\n elif npow>=-2 and npow<2:\r\n nfftpow=15\r\n elif npow>=-6 and npow<-2:\r\n nfftpow=14\r\n elif npow>=-6 and npow<-2:\r\n nfftpow=13\r\n nfft=2**nfftpow\r\n try:\r\n nsctmax=prepdict['nsctmax']\r\n except KeyError:\r\n #get max length of time window\r\n npow=np.floor(np.log2(float(max(nread)))-16)\r\n #get max number of windows\r\n nsctmax=nfftpow-4\r\n \r\n \r\n #make sure the cfilelst is in order according to complst\r\n cfarray=np.array(cfilelst)\r\n try:\r\n nds,ndf=cfarray.shape\r\n except ValueError:\r\n nds=0\r\n \r\n if nds==0:\r\n cflst=[]\r\n for comp in complstp:\r\n for cfile in cfarray:\r\n if fnmatch.fnmatch(cfile,'*.'+comp):\r\n cflst.append(cfile)\r\n else:\r\n cflst=[]\r\n for ii in range(nds):\r\n cfnlst=[]\r\n for comp in complstp:\r\n for cfile in cfarray[ii]: \r\n if fnmatch.fnmatch(cfile,'*.'+comp):\r\n cfnlst.append(cfile)\r\n cflst.append(cfnlst)\r\n \r\n #make sure the rrcfilelst is in order according to complst\r\n rrcfarray=np.array(rrcfilelst)\r\n try:\r\n rrnds,rrndf=rrcfarray.shape\r\n except ValueError:\r\n rrnds=0\r\n \r\n if rrnds==0:\r\n rrcflst=[]\r\n for compr in complstpr:\r\n for rcfile in rrcfarray:\r\n if fnmatch.fnmatch(rcfile,'*.'+compr):\r\n rrcflst.append(rcfile)\r\n else:\r\n rrcflst=[]\r\n for ii in range(rrnds):\r\n rrcfnlst=[]\r\n for compr in complstpr:\r\n for rcfile in rrcfarray[ii]: \r\n if fnmatch.fnmatch(rcfile,'*.'+compr):\r\n rrcfnlst.append(rcfile)\r\n rrcflst.append(rrcfnlst) \r\n \r\n processingdict={}\r\n processingdict['cfilelst']=cflst\r\n processingdict['rrcfilelst']=rrcflst\r\n processingdict['station']=station\r\n processingdict['deltat']=deltat\r\n processingdict['nfft']=nfft\r\n processingdict['nsctmax']=nsctmax\r\n processingdict['nread']=nread\r\n \r\n for pkey in prepdict.keys():\r\n processingdict[pkey]=prepdict[pkey]\r\n try:\r\n processingdict['nskipr']=prepdict['nskipr']\r\n except KeyError:\r\n try:\r\n if len(nskipr)!=len(processingdict['cfilelst']):\r\n nskipr=[0 for ii in range(len(processingdict['cfilelst']))]\r\n except TypeError:\r\n nskipr=nskipr\r\n processingdict['nskipr']=nskipr\r\n\r\n return processingdict",
"def parse (self, lines, itertools_cycle_iterator):\n get_func = GetFunc(itertools_cycle_iterator)\n for line, self.actual_numline in zip(lines, itertools.count(1)):\n yield \"%s\\n\" % get_func(line)",
"def preprocess_belscript(lines):\n\n set_flag = False\n for line in lines:\n if set_flag is False and re.match(\"SET\", line):\n set_flag = True\n set_line = [line.rstrip()]\n # SET following SET\n elif set_flag and re.match(\"SET\", line):\n yield f\"{' '.join(set_line)}\\n\"\n set_line = [line.rstrip()]\n # Blank line following SET yields single line SET\n elif set_flag and re.match(\"\\s+$\", line):\n yield f\"{' '.join(set_line)}\\n\"\n yield line\n set_flag = False\n\n # Append second, third, ... lines to SET\n elif set_flag:\n set_line.append(line.rstrip())\n else:\n yield line",
"def refresh_lines(self):\n for line_data in self._data_lines:\n line = BasketLine.from_dict(self, line_data)\n pricing_context = PricingContext(shop=self.shop, customer=self.customer, supplier=line.supplier)\n line.cache_info(pricing_context)\n self._add_or_replace_line(line)",
"def convertLineData(self):\n rows = []\n currentRoute = None\n\n for line in self.tfp.lines:\n # Each line is a 3-tuple: key, value, list-of-children.\n\n # Add comments as simple strings\n if line[0] == 'smcw':\n cmt = line[1].strip()\n if not cmt==';;<<Trnbuild>>;;':\n rows.append(cmt)\n continue\n\n # Handle Line attributes\n if line[0] == 'lin_attr':\n key = None\n value = None\n comment = None\n # Pay attention only to the children of lin_attr elements\n kids = line[2]\n for child in kids:\n if child[0]=='lin_attr_name': key=child[1]\n if child[0]=='attr_value': value=child[1]\n if child[0]=='semicolon_comment': comment=child[1].strip()\n\n # If this is a NAME attribute, we need to start a new TransitLine!\n if key=='NAME':\n if currentRoute:\n rows.append(currentRoute)\n currentRoute = TransitLine(name=value)\n else:\n currentRoute[key] = value # Just store all other attributes\n\n # And save line comment if there is one\n if comment: currentRoute.comment = comment\n continue\n\n # Handle Node list\n if line[0] == \"lin_node\":\n # Pay attention only to the children of lin_attr elements\n kids = line[2]\n node = None\n for child in kids:\n if child[0]=='nodenum':\n node = Node(child[1])\n if child[0]=='lin_nodeattr':\n key = None\n value = None\n for nodechild in child[2]:\n if nodechild[0]=='lin_nodeattr_name': key = nodechild[1]\n if nodechild[0]=='attr_value': value = nodechild[1]\n if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip()\n node[key] = value\n if comment: node.comment = comment\n currentRoute.n.append(node)\n continue\n\n # Got something other than lin_node, lin_attr, or smcw:\n WranglerLogger.critical(\"** SHOULD NOT BE HERE: %s (%s)\" % (line[0], line[1]))\n\n # End of tree; store final route and return\n if currentRoute: rows.append(currentRoute)\n return rows",
"def get_unidecode_lines(self, lines_dict):\n for line in lines_dict:\n line['parnter_name'] = unicode(line['partner_name'])\n return lines_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
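Given the ``prepare_looped_lines`` document above, a small worked example may help: each comblist entry of the form "lineindex-loopindex" selects one alternative from that line's ``looplist`` and wraps it with the stored ``prepend``/``append`` text. The dictionary below is hypothetical.

```python
# Worked example with hypothetical data, following prepare_looped_lines:
# "3-1" means line index 3, loop value index 1.
alldict = {
    3: {"prepend": "temperature ", "looplist": ["300 ", "400 "], "append": "K"},
}
comblist = ["3-1"]

loopline_dict = {}
for stridx in comblist:
    lidx = int(stridx.split("-")[0])
    loopidx = int(stridx.split("-")[1])
    loopline_dict[lidx] = (
        alldict[lidx]["prepend"]
        + alldict[lidx]["looplist"][loopidx].strip()
        + alldict[lidx]["append"]
        + "\n"
    )

print(loopline_dict)  # {3: 'temperature 400K\n'}
```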
Prepare looped datasets from the looped lines. | def prepare_looped_datasets(self, alldict, allcombs):
datasets_dict=dict()
numcombs = len(allcombs)
combct = 0
while combct < numcombs:
newdata = list(self.baseinput.data)
loopedlines = dict()
loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])
for lvalidx in loopedlines.keys():
newdata[lvalidx] = loopedlines[lvalidx]
datasets_dict[combct] = newdata
combct = combct + 1
return datasets_dict | [
"def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)",
"def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader",
"def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data",
"def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator",
"def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))",
"def make_adcDatasets(self):\n for run in self.runs:\n dataset_list = self.find_dataset_file(run)\n if len(dataset_list) == 0:\n adc_dataset = adcDataset(run, self.powers, self.bin_power)\n adc_dataset.fill_dataset()\n adc_dataset.save(self.saveto)\n else:\n print(f'ADC file {dataset_list[0]} exists')\n continue",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()",
"def _create_datasets(self):\n\n datasets = {}\n\n for l in self.layers:\n for s in l.sublayers:\n for c in s.connections:\n if c in datasets:\n datasets[c].append((l.level, s.sublevel))\n else:\n datasets[c] = [(l.level, s.sublevel)]\n\n set_datasets = set()\n\n for key, value in datasets.items():\n set_datasets.add(tuple(value))\n\n for ds in set_datasets:\n self.datasets[ds] = Data()\n\n for key, value in datasets.items():\n self.layers[key[0]]\\\n .sublayers[key[1]]\\\n .src_dataset = self.datasets[tuple(value)]\n\n for s in self.sublayers():\n # Fill destination datasets\n for c in s.connections:\n s.dst_datasets.add(self.layers[c[0]]\n .sublayers[c[1]]\n .src_dataset)\n # Fill sublayers without destination dataset\n if not s.connections:\n s.dst_datasets.add(self.output_data)\n\n # Fill input data\n for data, sublayers in self.input_data.items():\n for sl in sublayers:\n self.layers[sl[0]].sublayers[sl[1]].src_dataset = data",
"def prepare_dataset(fpath):\n raise NotImplementedError",
"def dynamic_dataset(iterated):\n if not isinstance(iterated, dataset_ops.Dataset):\n return iterated\n\n def epoch_dataset_number_helper(i):\n return dataset_ops.Dataset.zip(\n (dataset_ops.Dataset.from_tensors(i).repeat(), iterated))\n\n epoch_numbers = dataset_ops.Dataset.range(2)\n return epoch_numbers.flat_map(epoch_dataset_number_helper)",
"def prepare_data(self, *args, **kwargs) -> None:\n if PROCESSED_DATA_DIRNAME.exists():\n return\n print(\"ChuNomSyntheticPages.prepare_data: preparing ChuNom lines for synthetic ChuNom pages creation...\")\n print(\"Getting ChuNom lines and loading labels...\")\n\n all_crops, all_labels = get_all_crops_and_labels()\n train_crops, train_labels, train_names = get_patch_crops_and_labels_by_group(all_crops, all_labels, \"train\")\n val_crops, val_labels, val_names = get_patch_crops_and_labels_by_group(all_crops, all_labels, \"val\")\n test_crops, test_labels, test_names = get_patch_crops_and_labels_by_group(all_crops, all_labels, \"test\")\n\n print(f\"Saving images and labels at {PROCESSED_DATA_DIRNAME}...\")\n save_images_and_labels(crops=train_crops, labels=train_labels, names=train_names, split=\"train\",\n data_dirname=PROCESSED_DATA_DIRNAME)\n save_images_and_labels(crops=val_crops, labels=val_labels, names=val_names, split=\"val\", data_dirname=PROCESSED_DATA_DIRNAME)\n save_images_and_labels(crops=test_crops, labels=test_labels, names=test_names, split=\"test\", data_dirname=PROCESSED_DATA_DIRNAME)",
"def train_chunk_wise(self, clf, d, current_epoch):\n \n for dataset in d[\"datasets\"]:\n # Loading the dataset\n print(\"Loading data for \",dataset, \" dataset\") \n for building in d[\"datasets\"][dataset]['buildings']:\n # Loading the building\n train=DataSet(d[\"datasets\"][dataset]['path'])\n print(\"Loading building ... \",building)\n train.set_window(start=d[\"datasets\"][dataset]['buildings'][building]['start_time'],end=d[\"datasets\"][dataset]['buildings'][building]['end_time'])\n mains_iterator = train.buildings[building].elec.mains().load(chunksize = self.chunk_size, physical_quantity='power', ac_type = self.power['mains'], sample_period=self.sample_period)\n appliance_iterators = [train.buildings[building].elec[app_name].load(chunksize = self.chunk_size, physical_quantity='power', ac_type=self.power['appliance'], sample_period=self.sample_period) for app_name in self.appliances]\n print(train.buildings[building].elec.mains())\n for chunk_num,chunk in enumerate (train.buildings[building].elec.mains().load(chunksize = self.chunk_size, physical_quantity='power', ac_type = self.power['mains'], sample_period=self.sample_period)):\n # Loading the chunk for the specifeid building\n #Dummry loop for executing on outer level. Just for looping till end of a chunk\n print(\"Starting enumeration..........\")\n train_df = next(mains_iterator)\n appliance_readings = []\n for i in appliance_iterators:\n try:\n appliance_df = next(i)\n except StopIteration:\n appliance_df = pd.DataFrame()\n appliance_readings.append(appliance_df)\n\n if self.DROP_ALL_NANS:\n train_df, appliance_readings = self.dropna(train_df, appliance_readings)\n \n if self.artificial_aggregate:\n print (\"Creating an Artificial Aggregate\")\n train_df = pd.DataFrame(np.zeros(appliance_readings[0].shape),index = appliance_readings[0].index,columns=appliance_readings[0].columns)\n for app_reading in appliance_readings:\n train_df+=app_reading\n train_appliances = []\n\n for cnt,i in enumerate(appliance_readings):\n train_appliances.append((self.appliances[cnt],[i]))\n\n self.train_mains = [train_df]\n self.train_submeters = train_appliances\n clf.partial_fit(self.train_mains, self.train_submeters, current_epoch)\n \n\n print(\"...............Finished the Training Process ...................\")",
"def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 2000:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])\n\n print('Finished preprocessing the CelebA dataset...')",
"def _prepare_dataset(self):\n loads = pd.concat(ul.total_experiment_load())\n return [ul.add_temperatures(loads, period) \n for period in ul.experiment_periods()]",
"def collectLineups(self) -> None:\r\n for dataset in self._lineups_datasets:\r\n self._lineup_values += (self.readCSV(dataset))",
"def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData",
"def build_data():\n i = 0\n while i < N_SAMPLE :\n sys.stdout.write(\"\\rLoading file \" + str(i+1) + \"/\" + str(N_SAMPLE))\n sys.stdout.flush()\n # There might be errors when sampling files (because file is too big\n # or because of a git clone error or because there are no c files in \n # the repo). If an error is encountered, we just\n # go to the next iteration and don't download the file.\n try: \n file_sampler()\n except:\n i = i - 1\n i = i + 1",
"def _generate_dataset(self, X, Y, i, i_end):\n set_size = 0\n limit_list = list()\n for sent in Y[i:i_end]:\n # Edited below to use integer value of EOS symbol\n EOS = self.output_dictionary[\"stopseq\"]\n limit = np.where(sent==EOS)[0][0] # the position of the symbol EOS\n set_size += limit + 1\n limit_list.append(limit)\n\n # Generate blank arrays for the set\n I_1 = np.zeros((set_size, self.encoder_seq_length))\n I_2 = np.zeros((set_size, self.decoder_seq_length))\n # This below is a big array\n Y_set = np.zeros((set_size, self.num_decoder_tokens))\n count = 0\n for l in range(0, (i_end - i)):\n limit = limit_list[l]\n # We only need to create examples up to the length of the title\n for m in range(1, limit+1):\n # Generate our one-hot y out\n one_hot_out = np.zeros((1, self.num_decoder_tokens))\n # This builds our one-hot generation into our training loop\n # The l and m respectively iterate through the samples and the output sequence elements\n one_hot_out[0, Y[l+i][m]] = 1\n # Create a blank row/array for a partial input for our summary model - this is fed into the decoder\n partial_input = np.zeros((1, self.decoder_seq_length))\n partial_input[0, -m:] = Y[l+i][0:m]\n # This fills in each sample of the training data, i.e. count increments up to set size\n I_1[count, :] = X[l+i]\n I_2[count, :] = partial_input\n Y_set[count, :] = one_hot_out\n count += 1\n\n # Shuffle the I_1, I_2 and Y_set vectors for better training - trick from RL\n # - see here - np.take(X,np.random.permutation(X.shape[0]),axis=0,out=X);\n indices = np.random.permutation(I_1.shape[0])\n np.take(I_1, indices, axis=0, out=I_1)\n np.take(I_2, indices, axis=0, out=I_2)\n np.take(Y_set, indices, axis=0, out=Y_set)\n return ([I_1, I_2], Y_set)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create independently looped input files. | def create_input_files(self, datasets_dict):
ifname = self.keywords['inputfile']
dirstem = os.path.dirname(ifname)
basename = os.path.basename(ifname).split('.')[0]
createdfiles=list()
if dirstem == "":
dirstem = os.getcwd()
dkeys = datasets_dict.keys()
dkeys.sort()
dct=1
for didx in dkeys:
newfile = MASTFile()
newfile.data = list(datasets_dict[didx])
newname="%s/loop_%s_%s.inp" % (dirstem, basename, str(dct).zfill(2))
newfile.to_file(newname)
#createdfiles.append(os.path.basename(newname))
createdfiles.append(newname)
dct=dct+1
return createdfiles | [
"def create_temp_files(self):\n for input_file in os.listdir(self.input_path):\n full_filename = self.input_path + input_file\n print(full_filename)\n if self.format == \"xml\":\n data_loader = loader.AbstractsXmlLoader(full_filename, config=Config(None))\n else:\n data_loader = loader.AbstractsTextLoader(full_filename, input_parser.AbstractsParser())\n loaded_data = data_loader.load_(as_=\"dict\")\n output_file = self.output_path + \"pubmed_tempfile\" + str(self.filepart_index)\n with open(output_file, 'wb') as filehandle:\n pickle.dump(loaded_data, filehandle)\n self.filepart_index += 1",
"def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)",
"def createFiles(self):\n open('testes/1.txt', 'a').close()\n sleep(0.01)\n open('testes/2.rar', 'a').close()\n sleep(0.01)\n open('testes/3.txt', 'a').close()\n self.files = os.listdir('testes/')",
"def processInputFolder(self):\n for file in os.listdir(self.config[\"inputPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateImages(file)",
"def make_input_files(path_to_inputfiles, db_result, nr_assays = 4000, nr_files = 100):\n\n # The number 4000 is arbitary, there are no published limits from the NLM API side, apart from the advice that it's more efficient to submit many lines per file than few. \n \n def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk\n\n # Create inputfiles directory and remove old contents if there were any\n \n if not os.path.exists(path_to_inputfiles):\n os.makedirs(path_to_inputfiles)\n os.system('rm -rf {}/*'.format(path_to_inputfiles))\n \n # Create inputfiles\n filenumber = 0\n for chunk in grouper(nr_assays, db_result):\n filenumber += 1\n\n directory = '000'+str(int(1+filenumber/nr_files)) \n if not os.path.exists(path_to_inputfiles+'/'+directory):\n os.makedirs(path_to_inputfiles+'/'+directory)\n\n filepath = path_to_inputfiles+'/'+directory+'/input' + str(filenumber) + '.txt'\n \n file = open(filepath, 'w')\n for item in chunk:\n \n file.write('\"'+str(item[0])+'\"|'+item[1].replace('prostrate', 'prostate').replace('Prostrate', 'Prostate')+'\\n') # typo in ChEMBL_21 and ChEMBL_22. Should be fixed in 23... \n \n file.close()",
"def create_files(self):\n for n, file_name, output in zip(self.square_dimensions, self.file_names, self.outputs):\n with open(self.examples_folder + file_name, 'w') as f:\n f.write('c ' + file_name + ' : ' + ' '.join(self.prefix.split('_')).capitalize() + ' CNF file python generated.\\n')\n f.write(output)\n self._created_files = True",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))",
"def file_generator(tmpdir):\n if not \"test_output_1.h5\" in [f.basename for f in tmpdir.listdir()]:\n writer1 = File(str(tmpdir.join('test_output_1.h5')), mode='w',\n write_particles=True, write_triggers=False,\n write_antenna_triggers=False, write_rays=False,\n write_noise=False, write_waveforms=False,\n require_trigger=False)\n writer2 = File(str(tmpdir.join('test_output_2.h5')), mode='w',\n write_particles=True, write_triggers=False,\n write_antenna_triggers=False, write_rays=False,\n write_noise=False, write_waveforms=False,\n require_trigger=False)\n writer1.open()\n writer2.open()\n np.random.seed(SEED)\n gen = CylindricalGenerator(dr=5000, dz=3000, energy=1e9)\n for _ in range(10):\n writer1.add(gen.create_event())\n for _ in range(10):\n writer2.add(gen.create_event())\n writer1.close()\n writer2.close()\n return FileGenerator([str(tmpdir.join('test_output_1.h5')),\n str(tmpdir.join('test_output_2.h5'))])",
"def build_input_files(filename, base_path = 'input_files', out = sys.stdout):\n calling_dir = os.getcwd()\n \n # I'm doing this because I need it later\n file_path, file_name = os.path.split(filename)\n \n with open(filename, 'r') as f:\n txt = f.read()\n \n ## First Parse the FDS file\n param_dict, IOoutput = FDSa_parser(txt, file_name, out)\n # param_dict, sweep_param_dict, prms_in_axis = calculate_params(param_dict, axes)\n\n for key_ in param_dict.keys():\n txt = txt.replace(param_dict[key_][0], key_)\n formatted_trials, logfile, IOoutput = eval_parsed_FDS(param_dict, out)\n \n print(\"formatted_trials\", formatted_trials[0])\n ## Make input files and directories\n for i, value_set in enumerate(formatted_trials):\n print(i,value_set)\n tmp_txt = txt\n # make a directory\n case_name = 'case_'+int2base(i, 26)\n # FDS uses uppercase reseved keywords, and so will we\n value_set['TITLE'] = case_name\n input_directory_builder(case_name, base_path)\n # populate the input file\n print(tmp_txt.count(list(value_set.keys())[1]))\n print(value_set)\n with open('tmp_txt', 'w') as f:\n f.write(str(tmp_txt))\n \n tmp_txt = tmp_txt.format(**value_set) ## The format command doesn't like : or . because it things its a float format\n # create the file name\n fname = os.path.join(calling_dir, base_path, \n case_name, case_name + '.fds')\n # write the input file to the directory\n with open(fname, 'w') as f:\n f.write(str(tmp_txt))\n \n log_path_name = os.path.join(calling_dir, base_path, file_name[:-4] + '.log')\n \n # write the augmented fds log file\n\n with open(log_path_name, 'a') as f:\n f.write(logfile)\n \n return IOoutput",
"def make_dummy_files(paths):\n for p in paths:\n make_dummy_file(p)",
"def multi_file_manager(files, mode='rt'):\n files = [open(file, mode) for file in files]\n yield files\n for file in files:\n file.close()",
"def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)",
"def _create_files(file_dir_list):\n # exact_Ture = True\n # intermediate_address = keyword_dict['intermediate_dir']\n # plot_output_address = keyword_dict['plot_output_address']\n # source_address = keyword_dict['source_address']\n # source_address = keyword_dict['source_address']\n\n \"\"\"create all dir for files\"\"\"\n for i in file_dir_list:\n if not os.path.isdir(i):\n os.mkdir(i)\n print(\"created\", i)",
"def stage_input_file(workdir_path, files):\n if not isinstance(files, list):\n files = [files]\n\n for file_dict in files:\n location = urlparse(file_dict['location'])\n if 'basename' in file_dict:\n dest_path = os.path.join(workdir_path, file_dict['basename'])\n else:\n dest_path = os.path.join(workdir_path, os.path.basename(location.path))\n shutil.copy(location.path, dest_path)\n file_dict['path'] = dest_path\n\n for i, secondary_file in enumerate(file_dict.get('secondaryFiles', [])):\n stage_input_file(workdir_path, file_dict['secondaryFiles'][i])",
"def gen_input_files( A, yV, fname_common = 'ann'):\n # in file\n no_of_set = A.shape[0]\n no_of_input = A.shape[1]\n const_no_of_output = 1 # Now, only 1 output is considerd.\n with open(\"{}_in.data\".format( fname_common), \"w\") as f:\n f.write( \"%d %d %d\\n\" % (no_of_set, no_of_input, const_no_of_output))\n for ix in range( no_of_set):\n for iy in range( no_of_input):\n f.write( \"{} \".format(A[ix,iy]))\n f.write( \"\\n{}\\n\".format( yV[ix,0]))\n print((\"{}_in.data is saved for trainig.\".format( fname_common)))\n\n # run file \n with open(\"{}_run.data\".format( fname_common), \"w\") as f:\n #In 2015-4-9, the following line is modified since it should not be \n #the same to the associated line in ann_in data but it does not include the output length. \n f.write( \"%d %d\\n\" % (no_of_set, no_of_input))\n for ix in range( no_of_set):\n for iy in range( no_of_input):\n f.write( \"{} \".format(A[ix,iy]))\n f.write( \"\\n\") \n print((\"{}_run.data is saved for testing.\".format( fname_common)))",
"def _create_files(self):\n try:\n yield (File(\n self.piece_length,\n self.info_dict['info']['name'],\n self.info_dict['info']['length']))\n self.logger.info('Appended file {} of length {}'.format(\n self.info_dict['info']['name'], self.info_dict['info']['length']))\n except KeyError:\n for f in self.info_dict['info']['files']:\n self.logger.info('Appending file {} of length {}'.format(\n f['path'][len(f['path'])-1], f['length']))\n yield (File(self.piece_length, f['path'], f['length']))",
"def file_generator(self):\n for root, sub_dir, files in os.walk(self.input_dir):\n for file in files:\n yield os.path.join(root, file)",
"def MakeFiles(self, NUM):\r\n\t\tfile_names = []\r\n\t\tfor i in xrange(2):\r\n\t\t\tself.file_seph.acquire()\r\n\t\t\tpref = 'temp_' + str(NUM) + '_'\r\n\t\t\tnewNamedFile = tempfile.NamedTemporaryFile(prefix = pref, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdir = self.scratch_dir)\r\n\t\t\tfile_names.append(newNamedFile.name)\r\n\t\t\tnewNamedFile.close()\r\n\t\treturn file_names"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract constant names from sybdb.h to use as python constants | def extract_constants(freetds_include="sybdb.h", constants_file="bcp_constants.py"):
fileno, source_file = mkstemp(suffix=".c", text=True)
write(fileno, "#include <{}>".format(freetds_include).encode())
close(fileno)
fileno, include_directives = mkstemp(suffix=".txt")
close(fileno)
if ON_WINDOWS:
cmd_template = "cl /E {includes} {source} > {output}"
else:
cmd_template = "cpp {includes} '{source}' > '{output}'"
cmd = cmd_template.format(
output=normpath(include_directives),
source=normpath(source_file),
includes=" ".join(
"-I{}".format(normpath(_include)) for _include in include_dirs
)
)
fifo = Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)
fifo.communicate()
fifo.wait()
remove(source_file)
if fifo.returncode < 0:
raise Exception("Cannot run preprocessor step")
row_regex = re.compile('[\r\n]+')
field_regex = re.compile('[\s]+')
with open(include_directives, "r") as fd:
include_paths = list(
_filename
for contents in [fd.read()]
for _row in row_regex.split(contents) if _row.find(freetds_include) > -1
for _index, _word in enumerate(field_regex.split(_row)) if _index == 2
for _filename in [_word.strip('"')] if exists(_filename)
)
remove(include_directives)
for include_file in include_paths:
with open(include_file, "r") as fd:
definition_pairs = [
(_values[1], int(_values[2]))
for contents in [fd.read()]
for _row in row_regex.split(contents)
for _values in [field_regex.split(_row)] if len(_values) == 3 and _values[0] == "#define" and _values[2].isdigit()
]
if len(definition_pairs):
with open(constants_file, "w") as output_fd:
output_fd.write("\n".join("%s=%d" % _row for _row in definition_pairs))
break
else:
raise Exception("Couldn't find a freetds include file") | [
"def getConstants():\n \n out = []\n \n api = __import__('api')\n\n for constant in dir(api):\n if constant[0].isupper():\n id = getattr(api, constant)\n if type(id).__name__ not in [\"function\", \"type\"]:\n out.append(constant)\n\n return out",
"def _constants(self):",
"def available_symbologies():\n consts = [d[8:] for d in dir(zint) if d.startswith('BARCODE_')]\n\n return [d for d in consts if d not in IGNORE_ZINT_CONSTS]",
"def GetAllConstantNames(self):\n callResult = self._Call(\"GetAllConstantNames\", )\n\n if callResult is None:\n return None\n\n return callResult",
"def _parseKeyNames(lib):\n _keyNames = {}\n for attr in dir(lib): # from the modules variables\n if attr[:6] == 'TCODK_': # get the K_* constants\n _keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs\n return _keyNames",
"def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts",
"def parse_constants(header):\n exclude = [re.compile(expr) for expr in ['FREEIMAGE_.*',\n 'DLL_.*',\n 'GCC.*',\n '_WINDOWS_.*',\n 'SEEK.*',\n 'FI_RGBA.*',\n 'FI_DEFAULT.*',\n 'FI_ENUM.*',\n 'FI_STRUCT.*',\n 'PLUGINS',\n 'FI_COLOR_PALETTE_SEARCH_MASK']]\n\n constants = OrderedDict()\n pattern = re.compile('\\s*#define\\s*(\\S+)\\s*([^,\\n\\s]*)')\n for line in header:\n result = pattern.match(line)\n if (result is not None and\n all(p.match(result.group(1)) is None for p in exclude)):\n key, value = result.group(1), result.group(2)\n try:\n value = int(value, 0)\n except ValueError:\n value = constants[value]\n constants[key] = value\n return constants",
"def get_all_macros(filename_wchar):\n # print(wchar_string)\n filename_str = ffi.string(filename_wchar)\n print(\"filename: {}\".format(filename_str))\n ovba = VBA_Parser(filename_str)\n # TODO: check if there are macros\n vba_code_str = ovba.get_vba_code_all_modules()\n # convert python str to C string\n return ffi.new(\"wchar_t[]\", vba_code_str)",
"def show():\n from clu.naming import qualified_name\n from clu.scripts import ansicolors as colors\n from clu.repl.ansi import (print_ansi_centered as center,\n print_ansi_name_value as keyval)\n import os\n \n # Mapping interface to the consts module:\n C = ModuleMap(consts)\n \n # Header + footer:\n length = len(C)\n mdname = qualified_name(consts)\n header = f'CONSTS MODULE ({length} consts defined)'\n footer = f'Module: {mdname} » {length} definitions'\n \n # Print header:\n center(filler='–', color=colors.gray)\n center(header, color=colors.yellow)\n print()\n \n # Calculate the longest constant name,\n # used to align multi-line name-value pairs:\n most = C.most()\n rest = consts.SEPARATOR_WIDTH - (most + 8)\n \n # Calculate the item separator for multi-item consts:\n SEP = \",\\n\" + (\" \" * (most + 5))\n \n # Inline function to truncate long members:\n def truncate(iterable): # pragma: no cover\n for item in (str(itx) for itx in iterable):\n if len(item) > rest:\n yield f\"“{item[:rest]}…”\"\n else:\n yield f\"“{item}”\"\n \n # Main printing loop:\n for const_name in C:\n const_value = C[const_name]\n \n if isnormative(const_value):\n \n if isbytes(const_value):\n const_value = str(const_value, encoding=consts.ENCODING)\n elif ispath(const_value):\n const_value = os.fspath(const_value)\n \n if const_name.endswith('PATH') and os.pathsep in const_value:\n keyval(const_name, SEP.join(truncate(const_value.split(os.pathsep))),\n most=most)\n else:\n keyval(const_name, f\"“{const_value}”\",\n most=most)\n \n elif isiterable(const_value):\n keyval(const_name, SEP.join(truncate(const_value)),\n most=most)\n \n elif isnumber(const_value):\n keyval(const_name, f\"«{const_value!r}»\",\n most=most)\n \n else:\n keyval(const_name, const_value,\n most=most)\n \n # Print footer:\n print()\n center(footer, color=colors.cyan)\n center(filler='–', color=colors.gray)",
"def get_codegen_names():\n return list(REGISTERED_CODEGEN.keys())",
"def get_constants(prefix):\n return {getattr(socket, name): name \n for name in dir(socket) if name.startswith(prefix)}",
"def ParseConstBinding(self):",
"def find_const_pointers(exportsFile):\n\n CONSTPTR_RE = re.compile(r'\\w* \\* const') # \"type * const\"\n with open(exportsFile) as f:\n for lnum, line in enumerate(f):\n match = CONSTPTR_RE.search(line)\n if match is not None:\n cpexport[lnum] = line\n for _,ln in enumerate(cpexport):\n match = MANGLING_RE.search(cpexport[ln])\n if match is not None:\n sig = match.group('signature')\n nameStart = sig.rfind('::')\n if nameStart < 0:\n continue\n nameStart = nameStart + 2\n nameEnd = sig.index('(')\n cpexportNames.append(sig[nameStart:nameEnd])",
"def parse_defines(self):\n for line in self.header.splitlines():\n if line.lower().startswith(\"#define\"):\n _, line = line.strip().split(None, 1) # remove #define\n if \" \" in line:\n symbol, value = line.split(None, 1)\n if value.isdigit():\n value = int(value)\n elif value.startswith(\"0x\"):\n value = int(value, 16)\n elif value in self.types:\n self.types[symbol] = self.types[value]\n else:\n symbol = line\n value = \"\"\n self.constants[symbol] = value\n return self.constants",
"def _get_global_constants(self) -> None:\n pass",
"def get_constants(prefix):\n return dict( (getattr(socket, n), n)\n for n in dir(socket)\n if n.startswith(prefix)\n )",
"def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s",
"def package_macros(self):\n from re import sub\n NAME = sub(r'[\\.\\-\\s]', '_', self.name.upper())\n return [('HAVE_' + NAME, '1')]",
"def _const2seqc(self):\n \n #if no constants are defined, return an empty string\n if not bool(self.constants.__dict__):\n return \"\"\n\n seqc = \"//Constants definition\\n\"\n \n for name, value in self.constants.__dict__.items():\n seqc += f\"const {name:s} = {value};\\n\"\n\n seqc += '\\n'\n return seqc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get open accounts. Returns array with active account numbers | async def get_open_accounts(self):
result = []
URL = API_HOST + "/api/resources/header"
async with async_timeout.timeout(TIMEOUT):
response = await self.session.get(URL)
json_data = await response.json()
accounts = json_data["data"]["accounts"]["data"]["data"]
for account in accounts:
if account["statusCategory"] == STATUS_CATEGORY_OPEN:
result.append(account["accountNumber"])
return result | [
"def active_accounts(self):\n # TODO: Figure out what accounts are active based on memberships.\n return self.accounts.all()",
"def user_open_orders(self):\n response = self.query('user_open_orders')\n return response",
"def list_accounts(self):\n pass",
"def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)",
"def getAccounts(self):\n query = (\"SELECT account from %s \" % (self.__tablename__), )\n results = self.sql_fetchall(query)\n return [x[0] for x in results]",
"def get_accounts(self):\r\n return self._accounts",
"def list_active_customers():\n active_count = Customer.select().where(Customer.status).count()\n LOGGER.info(\"Number of active customers retrieved\")\n return active_count",
"def fetch_accounts(self):\n return self.fetch('/accounts')",
"def returnOpenOrders(self, account=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n\n orders = self.dpay.rpc.get_open_orders(account, limit=1000)\n return orders",
"def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts",
"def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active",
"def get_users(self, next_openid=None):\n result = self.get_json(USERS_URL, params=USERS_QUERY_PARAMS,\n access_token=self.get_access_token(), next_openid=next_openid)\n return result.get('data', {}).get('openid', []), \\\n result['total'], result['count'], result.get('next_openid', None)",
"def user_accounts(user):\n return Account.active.filter(primary_user=user)",
"def get_accounts():\n with db_connect() as db:\n results = db.execute(select([accounts]).where(\n accounts.c.customer_id == active_user.id)).fetchall()\n accts = {i[0]: getattr(sys.modules[__name__], i.account_type)(\n i.customer_id, i.id, i.balance) for i in results}\n for k, v in accts.items():\n print(\n f\"Account ID: {k} | Account Balance: {v.balance} | Account Type: {v.account_type}\")\n return accts",
"def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)",
"def list_active_customers():\n LOGGER.info('Getting active customers')\n return cm.Customer.select().where(cm.Customer.customer_status).count()",
"def display_accounts(cls):\n return cls.account_list",
"def getIncidentList(self, open=False):\n if self.verbose is True: print \"Getting list of ALL incidents\"\n data = {\n \"sort_by\":\"created_on:desc\",\n 'limit': self.buffersize\n }\n if open is True: data['status'] = 'triggered,acknowledged'\n return self.request(\"%s/incidents\" % self.baseurl, data, token=True, method=\"GETARG\")",
"def get_accounts(self) -> list:\n response = self.TradeAPI.makeRequest(\"GET\", \"account/list\")\n response = response.json()\n response = response[\"results\"]\n return response"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get budget billing data | async def __getBBL_async(self, account, projectedBillData) -> dict:
_LOGGER.info("Getting budget billing data")
data = {}
try:
async with async_timeout.timeout(TIMEOUT):
response = await self.session.get(
URL_BUDGET_BILLING_PREMISE_DETAILS.format(account=account)
)
if response.status == 200:
r = (await response.json())["data"]
dataList = r["graphData"]
# startIndex = len(dataList) - 1
billingCharge = 0
budgetBillDeferBalance = r["defAmt"]
projectedBill = projectedBillData["projected_bill"]
asOfDays = projectedBillData["as_of_days"]
for det in dataList:
billingCharge += det["actuallBillAmt"]
calc1 = (projectedBill + billingCharge) / 12
calc2 = (1 / 12) * (budgetBillDeferBalance)
projectedBudgetBill = round(calc1 + calc2, 2)
bbDailyAvg = round(projectedBudgetBill / 30, 2)
bbAsOfDateAmt = round(projectedBudgetBill / 30 * asOfDays, 2)
data["budget_billing_daily_avg"] = bbDailyAvg
data["budget_billing_bill_to_date"] = bbAsOfDateAmt
data["budget_billing_projected_bill"] = float(projectedBudgetBill)
async with async_timeout.timeout(TIMEOUT):
response = await self.session.get(
URL_BUDGET_BILLING_GRAPH.format(account=account)
)
if response.status == 200:
r = (await response.json())["data"]
data["bill_to_date"] = float(r["eleAmt"])
data["defered_amount"] = float(r["defAmt"])
except Exception as e:
_LOGGER.error(e)
return data | [
"def billing(self) -> pulumi.Output['outputs.BucketBillingResponse']:\n return pulumi.get(self, \"billing\")",
"def GetCampaignBudget(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def bill(self):\n if self.last_billed >= timezone.now().date():\n return Bill.objects.filter(period_end=timezone.now().date()).get()\n\n if self.next_billing > timezone.now().date():\n raise ValueError(\"Can not bill before the end of billing period - close it instead.\")\n\n contractor = defaults.vendor()\n bill = Bill.objects.create(\n vendor=self.vendor, logo=contractor.logo.path if contractor.logo else None,\n subscriber=self.vendor.address,\n period_start=self.last_billed, period_end=self.next_billing,\n contractor=contractor.address, contractor_bank=contractor.bank_account)\n\n months, rest = monthmod(self.last_billed, self.next_billing)\n # tranform date to datetime for billing useng StatisticsManager\n last_billed = datetime(self.last_billed.year, self.last_billed.month, self.last_billed.day)\n # bill by months (because of Discounts and better visibility on the bill)\n for month in range(months.months):\n total = Decimal(\"0.00\")\n for tariff, price in Statistics.objects.bill(self.vendor,\n last_billed + monthdelta(month),\n last_billed + monthdelta(month + 1)):\n bill.add_item(tariff, price, settings.TAX)\n total += price\n\n discount = Discount.objects.cut_the_price(self.vendor, total)\n if discount is not None:\n bill.add_item(*discount)\n\n # bill the remaining time (if there is some)\n if rest.days > 1:\n total = Decimal(\"0.00\")\n for tariff, price in Statistics.objects.bill(self.vendor,\n last_billed + months,\n last_billed + months + rest):\n bill.add_item(tariff, price, settings.TAX)\n total += price\n\n discount = Discount.objects.cut_the_price(self.vendor, total)\n if discount is not None:\n bill.add_item(*discount)\n\n if bill.total < 0:\n bill.add_item(_(\"Rounding price to zero\"), -1 * bill.total)\n\n self.last_billed = self.next_billing\n\n self.save() # periods change is taken care of in .save() method\n bill.save()\n bill.send()\n return bill",
"def budget(self):\n return self._budget",
"def test_billing_data(self):\n try:\n self.client.connect()\n now = datetime.datetime.now()\n expected = [\n 'data_volume',\n 'total',\n 'start',\n 'end',\n 'http_heavy',\n 'http_light',\n 'bill_type'\n ]\n # Test using year and month\n year = now.year\n month = now.month\n data = self.client.bill(year, month)\n self.assertTrue(all(x in expected for x in data.keys()))\n #Test without year and month arguments\n del data\n data = self.client.bill()\n self.assertTrue(all(x in expected for x in data.keys()))\n finally:\n self.client.disconnect()",
"def billing(self) -> Optional[pulumi.Input['BucketBillingArgs']]:\n return pulumi.get(self, \"billing\")",
"def budget_details(self):\n print \"Our budget to buy bikes wholesale is now {}.\".format(str(self.budget))",
"def get_billing_info(self, project_id):\n\n try:\n name = self.repository.projects.get_name(project_id)\n results = self.repository.projects.get_billing_info(name)\n LOGGER.debug('Getting the billing information for a project,'\n ' project_id = %s, results = %s', project_id, results)\n return results\n except (errors.HttpError, HttpLib2Error) as e:\n if isinstance(e, errors.HttpError) and e.resp.status == 404:\n LOGGER.warning(e)\n return {}\n api_exception = api_errors.ApiExecutionError(\n 'billing_info', e, 'project_id', project_id)\n LOGGER.exception(api_exception)\n raise api_exception",
"def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")",
"def budget_filter(self) -> 'outputs.GoogleCloudBillingBudgetsV1beta1FilterResponse':\n return pulumi.get(self, \"budget_filter\")",
"def get_budgets(self) -> list:\n return self.budget_manager.get_budgets()",
"def get_budgets(self) -> list:\n return list(self.budgets.values())",
"def test_companies_company_id_data_bill_payments_get(self):\n pass",
"def billing_info(self):\n return self._billing_info",
"def get_campaign_budget(client, customer_id, resource_name):\n ga_service = client.get_service('GoogleAdsService', version='v2')\n query = ('SELECT campaign_budget.id, campaign_budget.name, '\n 'campaign_budget.resource_name FROM campaign_budget WHERE '\n 'campaign_budget.resource_name = \"{}\"'.format(resource_name))\n response = ga_service.search(customer_id, query, PAGE_SIZE)\n budget = list(response)[0].campaign_budget\n return budget",
"def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)",
"def getBillings(self):\n return self.session.query(Billing).all() or []",
"def budget():\n pass",
"def project_budget_funding(self):\n return self._project_budget_funding"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get data from appliance usage | async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict:
_LOGGER.info("Getting appliance usage data")
JSON = {"startDate": str(lastBilledDate.strftime("%m%d%Y"))}
data = {}
try:
async with async_timeout.timeout(TIMEOUT):
response = await self.session.post(
URL_APPLIANCE_USAGE.format(account=account), json=JSON
)
if response.status == 200:
electric = (await response.json())["data"]["electric"]
full = 100
for e in electric:
rr = round(float(e["percentageDollar"]))
if rr < full:
full = full - rr
else:
rr = full
data[e["category"].replace(" ", "_")] = rr
except Exception as e:
_LOGGER.error(e)
return {"energy_percent_by_applicance": data} | [
"def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/ApiUsage/{applicationId}/\"))",
"def get_usage_data(username, password):\n usage_req = XfinityUsage(username, password, browser_name=\"firefox-headless\")\n return usage_req.run()",
"def get_account_usage_request(self):\n\n self.init_login()\n response = self._session.get('https://broadband.vodafone.'\n 'ie/myaccount/usage',\n headers=self.get_headers(),\n cookies=self.get_cookies()\n )\n\n log.info(\"'Your Broadband Usage' in result? {}\".format(\n \"Your Broadband Usage\" in response.text))\n\n if \"Error Occurred\" in response.text:\n log.error(\"‼️ 'Error Occurred' in response.\")\n if \"Your Broadband Usage\" in response.text:\n log.info(\"✅ Looking good. 'Your Broadband Usage' in result.\")\n bill_period = self.get_xpath_value(\n response, XP_USAGE_PERIOD_CURRENT)\n total_used_value, total_used_unit, total_used_percent, total_used_value_mb = self.get_usage_values(\n self.get_xpath_value(response, XP_USAGE_TOTAL_DATA_USED))\n total_uploaded_value, total_uploaded_used_unit, total_uploaded_used_percent, total_uploaded_value_mb = \\\n self.get_usage_values(\n self.get_xpath_value(response, XP_USAGE_DATA_UPLOADED))\n total_downloaded_value, total_downloaded_used_unit, total_downloaded_used_percent, total_downloaded_value_mb = \\\n self.get_usage_values(\n self.get_xpath_value(response, XP_USAGE_DATA_DOWNLOADED))\n total_time_spent_online = self.get_xpath_value(\n response, XP_TIME_SPENT_ONLINE)\n total_updated_time = self.get_xpath_value(\n response, XP_USAGE_UPDATED)\n\n today_downloaded_value, today_downloaded_used_unit, today_downloaded_used_percent, today_downloaded_value_mb = \\\n self.get_usage_values(\n self.get_xpath_value(response, XP_USAGE_DATA_DOWNLOADED_TODAY_SO_FAR))\n today_uploaded_value, today_uploaded_used_unit, today_uploaded_used_percent, today_uploaded_value_mb = \\\n self.get_usage_values(\n self.get_xpath_value(response, XP_USAGE_DATA_UPLOADED_TODAY_SO_FAR))\n today_ip_address = self.get_xpath_value(\n response, XP_USAGE_IP_ADDRESS_TODAY)\n today_online_time = self.get_xpath_value(\n response, XP_USAGE_ONLINE_TIME_TODAY)\n\n AccountUsageDetails = namedtuple(\"AccountUsageDetails\",\n [\"bill_period\",\n \"total_time_spent_online\",\n \"total_used_value\",\n \"total_used_value_mb\",\n \"total_used_unit\",\n \"total_used_percent\",\n \"last_updated\",\n \"total_uploaded_value\",\n \"total_uploaded_value_mb\",\n \"total_uploaded_used_unit\",\n \"total_downloaded_value\",\n \"total_downloaded_value_mb\",\n \"total_downloaded_used_unit\",\n \"total_updated_time\",\n \"today_downloaded_value\",\n \"today_downloaded_value_mb\",\n \"today_downloaded_used_unit\",\n \"today_uploaded_value\",\n \"today_uploaded_value_mb\",\n \"today_uploaded_used_unit\",\n \"today_ip_address\",\n \"today_online_time\"\n ])\n account_usage_details = AccountUsageDetails(bill_period,\n total_time_spent_online,\n total_used_value,\n total_used_value_mb,\n total_used_unit,\n total_used_percent,\n datetime.now(),\n total_uploaded_value,\n total_uploaded_value_mb,\n total_uploaded_used_unit,\n total_downloaded_value,\n total_downloaded_value_mb,\n total_downloaded_used_unit,\n total_updated_time,\n today_downloaded_value,\n today_downloaded_value_mb,\n today_downloaded_used_unit,\n today_uploaded_value,\n today_uploaded_value_mb,\n today_uploaded_used_unit,\n today_ip_address,\n today_online_time)\n log.debug(account_usage_details)\n self.logged_in = True\n self.data = account_usage_details\n return account_usage_details\n\n return None",
"def retr_devices_by_app( app ) :\n\n\t\t\t_logger.info( '...retr_devices_by_app...' )\n\t\t\toutput = []\n\t\t\ttry :\n\t\t\t\tdb = mongo.db.auth_devices\n\t\t\t\tfor device in db.find( { 'app_tags' : app } ) :\n\t\t\t\t\toutput.append({'moniker' : device['moniker'] ,\n\t\t\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t\t\t 'enlisted' : device['enlisted'] ,\n\t\t\t\t\t\t\t\t 'last_kown_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t\t\t 'engaged' : device['engaged'] ,\n\t\t\t\t\t\t\t\t 'canononical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t\t\t 'scope' : device['scope'] ,\n\t\t\t\t\t\t\t\t 'segment' : device['segment']\n\t\t\t\t\t})\n\t\t\texcept Exception as e :\n\t\t\t\t _logger.error( '...retr_devices_by_app %s' % e.message )\n\t\t\treturn jsonify({'result' : output})",
"def test_get_application_information(self):\n pass",
"def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos",
"def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . \")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))",
"def usage_information(self):\n return self._usage_information",
"def gather_metric(self):\n result = self._shell.run(self.ADB_COMMAND)\n stdout = result.stdout.splitlines()\n adb_version = stdout[0].split()[-1]\n # Revision information will always be in next line\n adb_revision = stdout[1].split()[1]\n\n response = {\n self.ADB_VERSION: adb_version,\n self.ADB_REVISION: adb_revision\n }\n return response",
"def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data",
"def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list",
"def get_sufficient_stats(data):\n pass",
"def test_get_canary_results_by_application_using_get(self):\n pass",
"def user_sends_get_call_to_the_devices():\n web_app.list_devices()",
"def get_app_definition_data(\n self,\n start: int,\n limit: int,\n) -> list:\n return self._get(\n \"/spPortal/internetDb/ipIntelligence?start={}&limit={}\".format(\n start, limit\n )\n )",
"def main():\n from pprint import pprint\n mobilealerts = MobileAlerts()\n pprint(\"Scanning for Mobile Alerts gateways..\")\n mobilealerts.update()\n pprint(mobilealerts.entries)",
"def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200",
"def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the default form class used for user registration. | def get_form_class(self, request):
return RegistrationForm | [
"def get_form_class(self, request):\n\t\treturn RegistrationForm",
"def get_registration_form_class():\n custom_class = getattr(django_settings, 'REGISTRATION_FORM', None)\n if custom_class:\n return load_module(custom_class)\n else:\n return OpenidRegisterForm",
"def get_form_class(self):\n if self.form_class:\n form_class = self.form_class\n else:\n form_class = model_form(self.get_model(), db_session=db.session)\n return form_class",
"def get_form_class(self):\n return self.form_class",
"def get_form_class(self):\r\n return modelform_factory(self.model)",
"def get_token_form_class(self):\n from two_factor.forms import AuthenticationTokenForm\n\n return AuthenticationTokenForm",
"def get_form_class():\n return RazorPaymentForm",
"def get_form_class(self, form_key):\n return self.get_form_classes()[form_key]",
"def get_form_class(self):\n\t\treturn formset_factory(super(FormsetMixin, self).get_form_class(), **self.get_formset_kwargs())",
"def metadata_form_class(self):\n return self.metadata_form.get_form()",
"def form_class(self):\n\n if not self.definition:\n return None\n\n return parse_form(self.definition)",
"def _form_class_factory(model_class):\n class FormClass(MemberForm):\n class Meta:\n model = model_class\n return FormClass",
"def get_form_classes(self):\n return {\n **self.form_classes\n }",
"def get_newsletter_form():\n return load_class('COOP_CMS_NEWSLETTER_FORM', 'coop_cms.forms.NewsletterForm')",
"def get_newsletter_settings_form():\n return load_class('COOP_CMS_NEWSLETTER_SETTINGS_FORM', 'coop_cms.forms.NewsletterSettingsForm')",
"def form(self):\n if getattr(self, \"_form\", None) is None:\n data = {}\n if hasattr(self, \"default_form_data\"):\n data.update(self.default_form_data)\n data = session.checker.get(self.form_class.__name__, data)\n self._form = self.form_class(formdata=request.form, data=data)\n return self._form",
"def _get_dynamic_form_class_from_factory(self):\n form_class = model_forms.modelform_factory(\n AddressModel, exclude=['user_shipping', 'user_billing'])\n return form_class",
"def _get_bulk_change_form_class(self):\n return BulkChangeFormWizardHandlerPluginsForm",
"def get_form_class(self):\n login_try_count = self.request.session.get('login_try_count', 0)\n\n # If the form has been submitted...\n if self.request.method == \"POST\":\n self.request.session['login_try_count'] = login_try_count + 1\n\n if login_try_count >= 20:\n return CaptchaAuthenticationForm\n\n return super(LoginView, self).get_form_class()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates the sum tree data structure for the given replay capacity. | def __init__(self, capacity):
assert isinstance(capacity, int)
if capacity <= 0:
raise ValueError(
'Sum tree capacity should be positive. Got: {}'.format(capacity))
self.nodes = []
self.depth = int(np.ceil(np.log2(capacity)))
self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx
self.high_idx = capacity + self.low_idx
self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.
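    # The tree is a complete binary tree stored level by level in a flat
    # array: the internal nodes come first and the 2**depth leaves occupy the
    # tail, so priority index i corresponds to nodes[i + low_idx].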
self.capacity = capacity
self.highest_set = 0
self.max_recorded_priority = 1.0 | [
"def ConstructTree(self):\n step = 0\n totalNodes = 0\n while step <= self.__steps:\n self.__nodes[step] = {}\n nUps = 0\n while nUps <= totalNodes:\n combins = BinomialOptionModel.__nCr(totalNodes, nUps)\n self.__nodes[step][nUps] = BinomNode(self.__underlyingStart, nUps, totalNodes - nUps, step, combins)\n nUps += 1\n totalNodes += 1\n step += 1\n # Price the option at each node:\n self.__CalcOptionPrices()\n # Determine asset prices at each node:\n self.__CalcAssetPrices()\n # Compute all the hedge ratios at each node:\n self.__ComputeSCHRs()\n # Compute all stock + bond replicating portfolio hedge ratios at each node:\n self.__ComputeSBHRs()",
"def __init__(self, size):\n\n self._root = Node()\n size_left = int(size/2)\n # Initialization of the tree\n self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[\n self._root.right = self._createSubtree(self._root, size_left, size)\n self._max_priority = 1",
"def __init__(self, capacity):\n self.capacity = capacity\n # empty cache\n self.cache = {}\n self.head = None\n self.tail = None",
"def __init__(self, capacity, data_type=float, allow_overwrite=True):\n super(Memory, self).__init__()\n self._arr = np.empty(capacity, data_type)\n self._left_index = 0\n self._right_index = 0\n self._capacity = capacity\n self._data_type = data_type\n self._allow_overwrite = allow_overwrite",
"def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial",
"def __init__(self, capacity):\n self.capacity = capacity # Capacity of your stack\n self.items = [None] * capacity # initializing the stack\n self.num_items = -1 # number of elements in the stack",
"def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost",
"def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1",
"def __init__(self, capacity=10):\n\n self._board = [None] * capacity \n self._n = 0",
"def grow(self, X, depth):\n #your code here\n \n return Node(X, depth, left, right, kind, w, b)",
"def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0",
"def __init__(self):\n self._size = 0\n self._array = [None] * BinaryTree.DEFAULT_CAPACITY",
"def create_large_tree():\n value_of_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'a', 'b', 'c', 'd', 'e']\n tree = ''\n depth = 0\n count = 0\n\n while depth < 4:\n if depth == 0:\n tree = [value_of_nodes[0], [], []]\n depth += 1\n count += 1\n elif depth == 1:\n for i in [1,2]:\n tree[i] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 2:\n for i,j in itertools.product([1,2], repeat=depth):\n tree[i][j] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 3:\n for i, j, k in itertools.product([1,2], repeat=depth):\n tree[i][j][k] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n return tree",
"def __init__(self, capacity=10):\n\n self._board = [None] * capacity # list of 10 None elements\n self._n = 0 # number of actual entries",
"def expand(self, policy):\n if self.children != {}: return\n actionWeights = policy(self.state)\n for action in actionWeights:\n succ = self.state.succ(self.state.player, action)\n self.children[action] = TreeNode(succ, actionWeights[action], self)",
"def __init__(self,capacity = 10):\n self._board = [None] * capacity # reserve space for future scores\n self._n = 0 # number of actual entries",
"def _allocate_power(self):\n # number of powerplants\n size = len(self.powerplants)\n\n # initialize an allocation snapshot\n allocation = {\n 'p_list': [0] * size,\n 'curr_index': 0\n }\n\n def _reallocate(allocation, new_power, new_index):\n \"\"\"Update and return a new allocation snapshot.\n \"\"\"\n curr_index = allocation['curr_index']\n new_allocation = copy.deepcopy(allocation)\n new_allocation['p_list'][curr_index] = new_power\n new_allocation['curr_index'] = new_index\n return new_allocation\n\n def _get_total_cost(allocation, size):\n \"\"\"Return the total cost of the current allocation.\n \"\"\"\n total_cost = 0\n for i in range(size):\n total_cost += allocation['p_list'][i] * self.powerplants[i].real_cost\n return total_cost\n\n # once the target load reached, the allocation snapshot will be saved in this variable\n fully_allocated = None\n queue = [allocation]\n while queue:\n # get the current snapshot from the queue\n allocation = queue.pop(0)\n\n curr_index = allocation['curr_index']\n total_power = sum(allocation['p_list'])\n remaining_load = self.load - total_power\n\n if remaining_load == 0: # power allocated correctly\n total_cost = _get_total_cost(allocation, size)\n # replace the current fully_allocated object if the new one is more cost efficient\n if not fully_allocated or fully_allocated['total_cost'] > total_cost:\n fully_allocated = allocation\n fully_allocated['total_cost'] = total_cost\n continue\n elif not 0 <= curr_index < size: # ignore queue element if current index is out of reach\n continue\n\n # power limits of the current powerplant\n curr_pmin = self.powerplants[curr_index].pmin\n curr_pmax = self.powerplants[curr_index].pmax\n\n if remaining_load > 0: # under power\n if remaining_load >= curr_pmax:\n new_power = curr_pmax\n elif remaining_load >= curr_pmin:\n new_power = remaining_load\n else: # required power lower than the current powerplant's pmin\n new_power = 0\n # add a different scenario to the queue: allocate pmin and try to decrease\n # the power of the previous index\n new_allocation = _reallocate(allocation=allocation, new_power=curr_pmin, new_index=curr_index - 1)\n queue.append(new_allocation)\n new_allocation = _reallocate(allocation=allocation, new_power=new_power, new_index=curr_index + 1)\n queue.append(new_allocation)\n\n else: # over power\n excess_load = abs(remaining_load)\n curr_power = allocation['p_list'][curr_index]\n if excess_load >= curr_pmax:\n new_power = 0\n elif curr_power - excess_load >= curr_pmin:\n new_power = curr_power - excess_load\n else: # required power lower than the current powerplant's pmin\n new_power = 0\n if curr_pmin != 0:\n # add a different scenario to the queue: allocate pmin and try to decrease\n # the power of the previous index\n new_allocation = _reallocate(allocation=allocation, new_power=curr_pmin, new_index=curr_index - 1)\n queue.append(new_allocation)\n new_allocation = _reallocate(allocation=allocation, new_power=new_power, new_index=curr_index - 1)\n queue.append(new_allocation)\n\n results = []\n if not fully_allocated:\n error = 'Power could not be allocated correctly, please verify the payload.'\n app.logger.error(error)\n self.errors.append(error)\n else:\n for i, powerplant in enumerate(self.powerplants):\n results.append({\n 'name': powerplant.name,\n 'p': fully_allocated['p_list'][i]\n })\n return results",
"def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()",
"def __init__(self, capacity: int) -> None:\n self.capacity = capacity # 해시 테이블의 크기를 지정\n self.table = [None] * self.capacity # 해시 테이블(리스트)을 선언"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Performs stratified sampling using the sum tree. | def stratified_sample(self, batch_size, rng):
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
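    # Split the total priority mass into batch_size equal strata, draw one
    # query value per stratum, and walk each query down the tree; the leaf
    # indices are then mapped back to priority indices and clamped to the
    # highest index that has actually been set.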
indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size),
batch_size, self.depth)
return np.minimum(indices - self.low_idx, self.highest_set) | [
"def bootstrap_sampling(tree_stats, rng, num_trials=100, priors=\"conditional\"):\n bound_rng = partial(rng, size=num_trials)\n stat_result, levels, labels = tree_stats\n max_level, n_classes = stat_result.shape[:2]\n if (isinstance(priors, (list, numpy.ndarray)) and\n len(priors) == n_classes) or priors == \"total\":\n # adding asscalar to enforce as int type\n sample_sizes = numpy.asscalar(stat_result.sum().astype(numpy.int))\n elif priors == \"conditional\":\n sample_sizes = stat_result.sum(axis=(0, -1)).astype(numpy.int)\n else:\n raise ValueError('no such {:s} sample scheme'.format(priors))\n replica = sample_scheme(sample_sizes, labels, bound_rng, priors)\n resamples = _place_replica(stat_result, replica)\n assert(resamples.sum() == num_trials * stat_result.sum())\n assert(numpy.allclose(resamples.sum(axis=(0, -2, -1)) / resamples.sum(),\n stat_result.sum(axis=(1, -1)) / stat_result.sum(), atol=1e-4))\n return(resamples)",
"def _sample(self, node):\n if self.is_leaf(node):\n return\n\n if node == self.root:\n theta = self.root.cat[0]\n else:\n parent_val = node.ancestor.sample\n theta = node.cat[parent_val]\n\n post_theta = np.zeros(len(theta)) # posterior catagorial distribution\n for i in range(len(theta)):\n post_theta[i] = theta[i] * self.s_fun[(node.name, i)]\n\n post_theta = self.normalize_p_vec(post_theta)\n\n sample = self.get_sample_from_cat(post_theta)\n # write sample to this node\n node.sample = sample\n\n print(\"{},{},{}\".format(node.name, sample, post_theta[sample]))\n\n for c in node.descendants:\n self._sample(c)",
"def test_weighted_one_sample(self):\n #should match web site calculations\n envs = self.count_array\n bound_indices = bind_to_array(self.nodes, envs)\n sum_descendants(bound_indices)\n bl = self.branch_lengths\n tip_indices = [n._leaf_index for n in self.t.tips()]\n result = weighted_unifrac_matrix(bl, envs, tip_indices)\n for i in range(len(result)):\n one_sam_res = weighted_one_sample(i, bl, envs, tip_indices)\n self.assertEqual(result[i], one_sam_res)\n self.assertEqual(result[:,i], one_sam_res)\n\n #should work with branch length corrections\n td = bl.copy()[:,newaxis]\n tip_bindings = bind_to_parent_array(self.t, td)\n tips = [n._leaf_index for n in self.t.tips()]\n tip_distances(td, tip_bindings, tips)\n result = weighted_unifrac_matrix(bl, envs, tip_indices, bl_correct=True,\n tip_distances=td)\n for i in range(len(result)):\n one_sam_res = weighted_one_sample(i, bl, envs, tip_indices,\n bl_correct=True, tip_distances=td)\n self.assertEqual(result[i], one_sam_res)\n self.assertEqual(result[:,i], one_sam_res)",
"def _sample_proportional(self): \n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n \n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n \n return indices",
"def test_subsampling(self):",
"def uniform_sample(X, y, S, b, d):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the number of examples in each group divided by the number\n\t# that should be in each group if the data were non-discriminatory\n\t# NOTE: Algorithm 4 in the paper actually usees a denominator that appears to be wrong...\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) / float(len(X)*0.25) \n\t\t\t\t# / float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tsizes = [[len(X[(X[S] == s) & (X['label'] == c)]) for c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\tW['size'] = [i for j in sizes for i in j]\n\tW = W.assign(num = lambda x: x.size * x.weight)\n\n\t# Divide the data into the four groups based on class/group\n\tdp = X[(X[S] == b) & (X['label'] == d)]\n\tdn = X[(X[S] == b) & (X['label'] != d)]\n\tfp = X[(X[S] != b) & (X['label'] == d)]\n\tfn = X[(X[S] != b) & (X['label'] != d)]\n\n\t# Uniformly sample from each group\n\tdp = dp.sample(n = W.loc[(W['group'] == b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tdn = dn.sample(n = W.loc[(W['group'] == b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\tfp = fp.sample(n = W.loc[(W['group'] != b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tfn = fn.sample(n = W.loc[(W['group'] != b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\n\tX_prime = pd.concat([dp, dn, fp, fn])\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)",
"def test_sum_as_intermediate_node(self):\n # Some values for the sum layer\n in_features = 10\n in_channels = 3\n out_channels = 5\n num_repetitions = 7\n n = 2\n parent_indices = torch.randint(out_channels, size=(n, in_features))\n\n # Create sum layer\n sum_layer = layers.Sum(\n in_features=in_features, in_channels=in_channels, out_channels=out_channels, num_repetitions=num_repetitions\n )\n\n # Choose `in_features` number of random indexes from 0 to in_channels-1 which will have probability of 1.0 in\n # the sum layer weight tensor\n rand_indxs = torch.randint(in_channels, size=(in_features, num_repetitions))\n rep_idxs = torch.randint(num_repetitions, size=(n,))\n\n # Artificially set sum weights (probabilities) to 1.0\n weights = torch.zeros(in_features, in_channels, out_channels, num_repetitions)\n for r in range(num_repetitions):\n weights[range(in_features), rand_indxs[:, r], :, r] = 1.0\n sum_layer.weights = nn.Parameter(torch.log(weights))\n\n # Perform sampling\n ctx = SamplingContext(n=n, parent_indices=parent_indices, repetition_indices=rep_idxs)\n sum_layer.sample(context=ctx)\n\n # Assert that the sample indexes are those where the weights were set to 1.0\n for i in range(n):\n self.assertTrue((rand_indxs[:, rep_idxs[i]] == ctx.parent_indices[i, :]).all())",
"def test_generate_spn(self, num_decomps, num_subsets, num_mixtures, num_input_mixtures,\n input_dims, input_dist, balanced, node_type, log_weights):\n\n if input_dist == spn.DenseSPNGenerator.InputDist.RAW \\\n and num_input_mixtures != 1:\n # Redundant test case, so just return\n return\n\n # Input parameters\n num_inputs = input_dims[0]\n num_vars = input_dims[1]\n num_vals = 2\n\n printc(\"\\n- num_inputs: %s\" % num_inputs)\n printc(\"- num_vars: %s\" % num_vars)\n printc(\"- num_vals: %s\" % num_vals)\n printc(\"- num_decomps: %s\" % num_decomps)\n printc(\"- num_subsets: %s\" % num_subsets)\n printc(\"- num_mixtures: %s\" % num_mixtures)\n printc(\"- input_dist: %s\" % (\"MIXTURE\" if input_dist ==\n spn.DenseSPNGenerator.InputDist.MIXTURE else \"RAW\"))\n printc(\"- balanced: %s\" % balanced)\n printc(\"- num_input_mixtures: %s\" % num_input_mixtures)\n printc(\"- node_type: %s\" % (\"SINGLE\" if node_type ==\n spn.DenseSPNGenerator.NodeType.SINGLE else \"BLOCK\" if\n node_type == spn.DenseSPNGenerator.NodeType.BLOCK else\n \"LAYER\"))\n printc(\"- log_weights: %s\" % log_weights)\n\n # Inputs\n inputs = [spn.IndicatorLeaf(num_vars=num_vars, num_vals=num_vals, name=(\"IndicatorLeaf_%d\" % (i+1)))\n for i in range(num_inputs)]\n\n gen = spn.DenseSPNGenerator(num_decomps=num_decomps,\n num_subsets=num_subsets,\n num_mixtures=num_mixtures,\n input_dist=input_dist,\n balanced=balanced,\n num_input_mixtures=num_input_mixtures,\n node_type=node_type)\n\n # Generate Sub-SPNs\n sub_spns = [gen.generate(*inputs, root_name=(\"sub_root_%d\" % (i+1)))\n for i in range(3)]\n\n # Generate random weights for the first sub-SPN\n with tf.name_scope(\"Weights\"):\n spn.generate_weights(sub_spns[0], tf.initializers.random_uniform(0.0, 1.0),\n log=log_weights)\n\n # Initialize weights of the first sub-SPN\n sub_spn_init = spn.initialize_weights(sub_spns[0])\n\n # Testing validity of the first sub-SPN\n self.assertTrue(sub_spns[0].is_valid())\n\n # Generate value ops of the first sub-SPN\n sub_spn_v = sub_spns[0].get_value()\n sub_spn_v_log = sub_spns[0].get_log_value()\n\n # Generate path ops of the first sub-SPN\n sub_spn_mpe_path_gen = spn.MPEPath(log=False)\n sub_spn_mpe_path_gen_log = spn.MPEPath(log=True)\n sub_spn_mpe_path_gen.get_mpe_path(sub_spns[0])\n sub_spn_mpe_path_gen_log.get_mpe_path(sub_spns[0])\n sub_spn_path = [sub_spn_mpe_path_gen.counts[inp] for inp in inputs]\n sub_spn_path_log = [sub_spn_mpe_path_gen_log.counts[inp] for inp in inputs]\n\n # Collect all weight nodes of the first sub-SPN\n sub_spn_weight_nodes = []\n\n def fun(node):\n if node.is_param:\n sub_spn_weight_nodes.append(node)\n spn.traverse_graph(sub_spns[0], fun=fun)\n\n # Generate an upper-SPN over sub-SPNs\n products_lower = []\n for sub_spn in sub_spns:\n products_lower.append([v.node for v in sub_spn.values])\n\n num_top_mixtures = [2, 1, 3]\n sums_lower = []\n for prods, num_top_mix in zip(products_lower, num_top_mixtures):\n if node_type == spn.DenseSPNGenerator.NodeType.SINGLE:\n sums_lower.append([spn.Sum(*prods) for _ in range(num_top_mix)])\n elif node_type == spn.DenseSPNGenerator.NodeType.BLOCK:\n sums_lower.append([spn.ParallelSums(*prods, num_sums=num_top_mix)])\n else:\n sums_lower.append([spn.SumsLayer(*prods * num_top_mix,\n num_or_size_sums=num_top_mix)])\n\n # Generate upper-SPN\n root = gen.generate(*list(itertools.chain(*sums_lower)), root_name=\"root\")\n\n # Generate random weights for the SPN\n with tf.name_scope(\"Weights\"):\n spn.generate_weights(root, tf.initializers.random_uniform(0.0, 
1.0),\n log=log_weights)\n\n # Initialize weight of the SPN\n spn_init = spn.initialize_weights(root)\n\n # Testing validity of the SPN\n self.assertTrue(root.is_valid())\n\n # Generate value ops of the SPN\n spn_v = root.get_value()\n spn_v_log = root.get_log_value()\n\n # Generate path ops of the SPN\n spn_mpe_path_gen = spn.MPEPath(log=False)\n spn_mpe_path_gen_log = spn.MPEPath(log=True)\n spn_mpe_path_gen.get_mpe_path(root)\n spn_mpe_path_gen_log.get_mpe_path(root)\n spn_path = [spn_mpe_path_gen.counts[inp] for inp in inputs]\n spn_path_log = [spn_mpe_path_gen_log.counts[inp] for inp in inputs]\n\n # Collect all weight nodes in the SPN\n spn_weight_nodes = []\n\n def fun(node):\n if node.is_param:\n spn_weight_nodes.append(node)\n spn.traverse_graph(root, fun=fun)\n\n # Create a session\n with self.test_session() as sess:\n # Initializing weights\n sess.run(sub_spn_init)\n sess.run(spn_init)\n\n # Generate input feed\n feed = np.array(list(itertools.product(range(num_vals),\n repeat=(num_inputs*num_vars))))\n batch_size = feed.shape[0]\n feed_dict = {}\n for inp, f in zip(inputs, np.split(feed, num_inputs, axis=1)):\n feed_dict[inp] = f\n\n # Compute all values and paths of sub-SPN\n sub_spn_out = sess.run(sub_spn_v, feed_dict=feed_dict)\n sub_spn_out_log = sess.run(tf.exp(sub_spn_v_log), feed_dict=feed_dict)\n sub_spn_out_path = sess.run(sub_spn_path, feed_dict=feed_dict)\n sub_spn_out_path_log = sess.run(sub_spn_path_log, feed_dict=feed_dict)\n\n # Compute all values and paths of the complete SPN\n spn_out = sess.run(spn_v, feed_dict=feed_dict)\n spn_out_log = sess.run(tf.exp(spn_v_log), feed_dict=feed_dict)\n spn_out_path = sess.run(spn_path, feed_dict=feed_dict)\n spn_out_path_log = sess.run(spn_path_log, feed_dict=feed_dict)\n\n # Test if partition function of the sub-SPN and of the\n # complete SPN is 1.0\n self.assertAlmostEqual(sub_spn_out.sum(), 1.0, places=6)\n self.assertAlmostEqual(sub_spn_out_log.sum(), 1.0, places=6)\n self.assertAlmostEqual(spn_out.sum(), 1.0, places=6)\n self.assertAlmostEqual(spn_out_log.sum(), 1.0, places=6)\n\n # Test if the sum of counts for each value of each variable\n # (6 variables, with 2 values each) = batch-size / num-vals\n self.assertEqual(np.sum(np.hstack(sub_spn_out_path), axis=0).tolist(),\n [batch_size // num_vals]*num_inputs*num_vars*num_vals)\n self.assertEqual(np.sum(np.hstack(sub_spn_out_path_log), axis=0).tolist(),\n [batch_size // num_vals]*num_inputs*num_vars*num_vals)\n self.assertEqual(np.sum(np.hstack(spn_out_path), axis=0).tolist(),\n [batch_size // num_vals] * num_inputs * num_vars * num_vals)\n self.assertEqual(np.sum(np.hstack(spn_out_path_log), axis=0).tolist(),\n [batch_size // num_vals] * num_inputs * num_vars * num_vals)",
"def _sample_proportional(self) -> List[int]:\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n \n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n \n return indices",
"def sample(self):\n \"*** YOUR CODE HERE ***\"\n items = self.items() # Extract the items, a tuple of values and distributions\n values = [i[0] for i in items] # Values of the distribution\n dist = [i[1] for i in items] # Distribution\n self.normalize() # In case the total does not sum to 1\n random_sample = random.random() # A random sample, random number between 0 and 1\n iteration, iteration_dist = 0, dist[0] # Initialization of i, the total will be calculated iteratively\n while random_sample > iteration_dist: # If random sample exceeds total, then the corresponding value will be the weight\n iteration += 1 # Iterate i\n iteration_dist += dist[iteration] # Add the i'th element of distribution to the total\n return values[iteration]",
"def sampleTournament(cls, segment, temp, config):\n current = segment.scoreTree.root\n min = int(config.minimize)\n \n while True:\n children = sorted(current.children, key=lambda c: not c.left)\n \n if len(children) == 0:\n break\n \n try:\n if children[min].max_score == children[1-min].min_score:\n p = .5\n else:\n p = 1. / (1. + ((1. - config.pressure) ** (config.learningRate / temp * (2 ** children[0].height))))\n except:\n p = 1.0\n \n if p < 1.0:\n p = (p / (1. - p))\n p *= children[min].area\n div = p + children[1-min].area\n if div > 0.0:\n p /= div\n else:\n p = 0.5\n \n rnd = random.random_sample()\n if rnd < p:\n current = children[min]\n else:\n current = children[1-min]\n \n return current.point, current.point.partition_node",
"def test_add_sampling(self):\n sampl1 = Sampling(\"Sampling 1\", \"first sampling\")\n sam1 = Sampler(\"Sampler 1\", \"first sampler\")\n sam1.add_sampling(sampl1)",
"def straight_prune_subsample(neuron, number_of_nodes):\n if(neuron.n_node > 200):\n neuron, distance = straight_subsample_with_fixed_number(neuron, 200)\n sp_neuron, state = prune(neuron=neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n while(~state):\n distance += 1\n sp_neuron = straigh_subsample(neuron, distance)\n sp_neuron, state = prune(neuron=sp_neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n return sp_neuron",
"def reset(self):\n self.st = segment_tree.SegmentTreeSampler(self.n, np.ones(self.n) * self.reg, self.random_state)",
"def _sample_proportional(self) -> List[int]:\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx%len(self))\n\n return indices",
"def test_generated_sample_distribution(\n jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n \"{} sampling method taken from upstream, no need to\"\n \"test generated samples.\".format(jax_dist.__name__)\n )\n\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05",
"def straigh_subsample(neuorn, distance):\n\n # Selecting the main points: branching nodes and end nodes\n selected_index = get_main_points()\n\n # for each segment between two consecuative main points, a few nodes from the segment will be added to the selected node.\n # These new nodes will be selected base on the fact that neural distance of two consecuative nodes is around 'distance'.\n # Specifically, it starts from the far main point, and goes on the segment toward the near main point. Then the first node which is\n # going to add has the property that it is the farest node from begining on the segment such that its distance from begining is\n # less than 'distance'. The next nodes will be selected similarly.\n\n for i in selected_index:\n upList = np.array([i], dtype = int)\n index = neuorn.parent_index[i]\n dist = neuorn.distance_from_parent[i]\n while(~np.any(selected_index == index)):\n upList = np.append(upList,index)\n index = neuorn.parent_index[index]\n dist = np.append(dist, sum(neuorn.distance_from_parent[upList]))\n dist = np.append(0, dist)\n (I,) = np.where(np.diff(np.floor(dist/distance))>0)\n I = upList[I]\n selected_index = np.append(selected_index, I)\n selected_index = np.unique(selected_index)\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron",
"def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result",
"def _densification(self, candidate):\n\n\t\t# If the candidate is not in the sample, add it\n\t\tif candidate not in self._sample_graph.nodes():\n\t\t\tself._sample_graph.add_node(candidate)\n\n\t\tsample_G = nx.Graph()\n\t\tsample_G.add_edges_from(self._sample['edges'])\n\n\t\t# Initialize a new sub sample\n\t\tsub_sample = {'edges': set(), 'nodes': {'close': set(), 'open': set()}}\n\t\tsub_sample['nodes']['open'].add(candidate)\n\n\t\t# Initialize densification and expansion scores\n\t\tscore_den = self._scoreDen(sub_sample)\n\t\tscore_exp = self._scoreExp(sub_sample)\n\n\t\t# # Initialize unobs list\n\t\t# self._den_unobs = []\n\n\t\tprev_score = score_den\n\t\tscore_change = 1.\n\t\tscore_list = []\n\t\tTHRESHOLD = 1.\n\t\tisConverge = False\n\n\t\t# Perform densification until one of the conditions is met:\n\t\t# \t1. Densification score is less than the expansion score\n\t\t# \t2. Budget allocated has run out\n\t\t# \t3. There are no more open nodes\n\t\t# TODO: Densification switch criteria\n\t\twhile self._cost < self._budget and len(sub_sample['nodes']['open']) > 0:\n\n\t\t# while self._cost < self._budget and len(sub_sample['nodes']['open']) > 0 \\\n\t\t# \tand not isConverge:\n\n\t\t\t# Get the list of nodes to perform densification on\n\t\t\t#den_nodes = self._getDenNodes(sub_sample['nodes']['open'])\n\t\t\t# TODO: Nodes for densify should be filter out ?\n\t\t\tden_nodes = sub_sample['nodes']['open']\n\n\t\t\t# t = len(set(self._sample['nodes']['open']).union(set(self._sample['nodes']['close'])))\n\t\t\tdegree_observed = sample_G.degree(den_nodes)\n\n\n\t\t\tif len(den_nodes) != 1:\n\t\t\t\t#degree_observed_sorted = _mylib.sortDictByValues(degree_observed,reverse=True)\n\t\t\t\t#current = degree_observed_sorted[0][0]\n\t\t\t\t#n_deg = degree_observed_sorted[0][1]\n\t\t\t\tcurrent_p = self.get_highest_true_deg_node(list(den_nodes))\n\t\t\t\tcurrent = current_p[0]\n\t\t\t\tn_deg = current_p[1]\n\t\t\t\tprint(\" Den: node {} true deg {}\".format(current, n_deg))\n\t\t\telse:\n\t\t\t\tprint('no den nodes')\n\t\t\t\tcurrent = list(den_nodes)[0]\n\t\t\t\tprint('current',current)\n\t\t\t\tn_deg = degree_observed[current]\n\n\n\n\n\t\t\t# Query the neighbors of current\n\t\t\tnodes, edges, c = self._query.neighbors(current)\n\t\t\tsample_G.add_edges_from(edges)\n\n\t\t\t# TODO: Record\n\t\t\tself._node_picked_obs_deg.append(n_deg)\n\t\t\tself._node_picked_true_deg.append(len(nodes))\n\n\n\t\t\t# Update the densification and expansion scores\n\t\t\tscore_den = self._scoreDen(sub_sample, nodes, score_den)\n\t\t\tscore_exp = self._scoreExp(sub_sample, score_exp)\n\t\t\tscore_list.append(score_den)\n\n\t\t\t# Store the densification and expansion scores\n\t\t\tself._score_den_list.append(score_den)\n\t\t\tself._score_exp_list.append(score_exp)\n\t\t\tself._densi_count += 1\n\n\t\t\t# Update the sub sample\n\t\t\tsub_sample = self._updateSubSample(sub_sample, nodes, edges, current)\n\n\t\t\t# Add edges to sub_graph\n\t\t\tfor e in edges:\n\t\t\t\tself._sample_graph.add_edge(e[0], e[1])\n\n\t\t\t# Update the cost\n\t\t\tself._increment_cost(c)\n\n\t\t\t# TODO: just a dummy statement for MOD method\n\t\t\tif self._exp_type != 'mod' and score_exp > score_den:\n\t\t\t\tbreak\n\n\t\t\tr_score = self._score_R(sub_sample)\n\t\t\tself._r_score.append(r_score)\n\n\t\t\t# print('score_den:{}\\t score_exp:{} \\t obs_nodes:{}\\t cost:{} '.format(\\\n\t\t\t# score_den, score_exp, len(sub_sample['nodes']['open']) + len(sub_sample['nodes']['close']), self._cost))\n\n\t\t# Update the sample with the sub 
sample\n\t\tself._updateSample(sub_sample)\n\n\t\t# Return list of potential expansion nodes\n\t\treturn self._getExpNodes()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints a message only when app is in debug mode | def print_debug(message):
if current_app.debug:
print(message) | [
"def debug(msg):\n debug = False\n if debug:\n print msg",
"def checkDebug(message):\n if debug == True:\n print(message)",
"def DebugMessage(message=\"\"):\n if global_debug:\n print(\"\\033[93m DEBUG: \" + message + \"\\033[0m\")",
"def debug(msg):\n if debug_level >= 1:\n print msg",
"def _IsDebug():\n return properties.VALUES.core.verbosity.Get() == 'debug'",
"async def dev_debug(self, ctx):\n self.bot.debug = not self.bot.debug\n debug_mode = 'ON' if self.bot.debug else 'OFF'\n await ctx.send(f'Debug mode: {debug_mode}')",
"def printdebug(self, msg):\n if self.debug > 0:\n print(msg)",
"def debugLog(message):\n if debugFlag != None:\n print \"#debug: \" + str(message)",
"def dbg(text):\n if DEBUG is not None:\n print \"DEBUG: %s\" % text",
"def debug(self, message):\n if self.debug_on:\n print(f\"PD @ {message}\")",
"def debug_mode():\n\n # store state and switch console to debug\n _log_state.debug_logs()",
"def debug():\n\n return",
"def debug():",
"def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)",
"def is_debug(self):\r\n return False",
"def debug_print(text):\r\n if settings.debug:\r\n print (text)",
"def debug_print(text):\n if settings.debug:\n print(text)",
"def stampadebug(messaggio):\n if settings.DEBUG_PRINT:\n print(messaggio)",
"def isdebug():\r\n return 'DEBUG' in globals() and bool(DEBUG)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function will optionally print a header guard for `cl_khr_fp64` if a 64-bit type is used as the source or destination, and return a bool that indicates whether this guard will need to be closed after the calling function has finished printing functions that use the 64-bit source/destination type. | def conditional_guard(src, dst):
int64_count = 0
float64_count = 0
float16_count = 0
if src in int64_types or dst in int64_types:
int64_count = 1
if src in float64_types or dst in float64_types:
float64_count = 1
if src in float16_types or dst in float16_types:
float16_count = 1
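    # The value returned below is the number of guards opened here, i.e. how
    # many closing #endif lines the caller has to print afterwards.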
if float16_count > 0:
print("#ifdef cl_khr_fp16")
if float64_count > 0:
        #In embedded profile, if cl_khr_fp64 is supported, cles_khr_int64 has to be supported as well, so the cl_khr_fp64 guard is sufficient even when 64-bit integer types are involved.
print("#ifdef cl_khr_fp64")
return 1 + float16_count
elif int64_count > 0:
print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)")
return 1 + float16_count
return float16_count | [
"def is_format_header(h):\n\n return h.dtype == header_image_dtype or h.dtype == header_image_dtype.newbyteorder()",
"def condition(segl: MessageSegment, segr: MessageSegment):\n return isPrintable(segl.bytes) and isPrintable(segr.bytes)",
"def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)",
"def is_H(self):\n return True",
"def is64bit():\r\n return platform.machine().endswith('64')",
"def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': 
{'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': 
{'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 
'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 
'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError 
(msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return",
"def _has_4px_data(self):\n if self.fmt_version == self.FMT_NEW:\n if self._sess_header['frame_footer_size'] >= 512:\n return True\n return np.dtype(self.NEW_FRAME_FOOTER_CAMLINK_DTYPE).itemsize == self._sess_header['frame_footer_size']\n\n # maybe this is sufficient\n # return int(self._sess_footer2['4px_size']) > 0\n\n footer_size = int(self._sess_footer['footer_size'])\n offset_to_4px = int(self._sess_footer2['offset_to_4px'])\n\n return footer_size == offset_to_4px + 8 * self.nfrms",
"def is64bit(self):\n return platform.machine().endswith('64')",
"def longdouble_lte_float64():\n return np.longdouble(2**53) == np.longdouble(2**53) + 1",
"def is_os_64bit():\n import platform\n return platform.machine().endswith('64')",
"def heat_type_is_exact(ht_dtype: Type[datatype]) -> bool:\n return ht_dtype in _exact",
"def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...",
"def is_64bit(self):\n return self.machine == 'x86_64'",
"def isFloor(self, x, y):\n\t\treturn self.getValue(x, y) == self.floor_char",
"def is_flooded(self):\n return len(self.flooded_tiles) == self._num_tiles",
"def use_long_headers(header_row, long_to_short_dict):\n col_matches = 0\n for value in header_row:\n if FieldCleaner.clean_string(value) in long_to_short_dict:\n col_matches += 1\n # if most of column headers are in the long format,\n # we'll treat the file as having long headers\n return col_matches > .5 * len(header_row)",
"def IsWow64(self):\n return hasattr(self, 'Wow64Process') and self.Wow64Process.v() != 0",
"def check_32bit(pe): \n bits = True\n if not hex(pe.FILE_HEADER.Machine) == '0x14c':\n bits = False\n return bits",
"def _checkTexture2D(internalFormat, shape,\n format_=None, type_=gl.GL_FLOAT, border=0):\n height, width = shape\n gl.glTexImage2D(gl.GL_PROXY_TEXTURE_2D, 0, internalFormat,\n width, height, border,\n format_ or internalFormat,\n type_, c_void_p(0))\n width = gl.glGetTexLevelParameteriv(\n gl.GL_PROXY_TEXTURE_2D, 0, gl.GL_TEXTURE_WIDTH)\n return bool(width)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
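The header-verification document above pairs every expected keyword with a dtype, a database flag and a None policy, and the closing loop enforces those rules against the FITS header. A minimal standalone sketch of the same checking pattern (check_header, spec and the sample call are hypothetical names, not the real helper's):

def check_header(header, spec, htypes=('full',)):
    # spec maps keyword -> {'htype', 'dtype', 'DB', 'None_OK'}, mirroring the dict above
    for key, rules in spec.items():
        if rules['htype'] not in htypes:
            continue
        if key not in header:
            if rules['DB']:
                raise KeyError('database keyword {} missing'.format(key))
            continue
        value = header[key]
        if rules['DB'] and not rules['None_OK'] and value in (None, 'None'):
            raise ValueError('keyword {} may not be None'.format(key))
        if value != 'None' and not isinstance(value, rules['dtype']):
            print('warning: {} has dtype {}, expected {}'.format(
                key, type(value).__name__, rules['dtype'].__name__))

check_header({'PC-ZP': 27.3},
             {'PC-ZP': {'htype': 'full', 'dtype': float, 'DB': True, 'None_OK': True}})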
This helper function returns the correct clc core conversion function name for a given source and destination type, with optional size, mode and saturation arguments. | def clc_core_fn_name(dst, size='', mode='', sat=''):
return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode) | [
"def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()",
"def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname",
"def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"",
"def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)",
"def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)",
"def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif",
"def get_ctype_name(*args):\n return _ida_hexrays.get_ctype_name(*args)",
"def ggml_format_name(tensor: ffi.CData, fmt: ffi.CData, *args2) -> ffi.CData:\n ...",
"def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname",
"def get_class_decoder_function_name(name):\n name = get_class_functional_name(name)\n return 'decode_{0}'.format(name)",
"def convert_C_instruction(self, instruction):\n comp, dest, jump = self.parse(instruction)\n\n return f\"111{convert_comp(comp)}{convert_dest(dest)}\" \\\n f\"{convert_jump(jump)}\"",
"def ggml_op_name(op: int) -> ffi.CData:\n ...",
"def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"",
"def getconversiontype(self, *args, **kwargs):\n return _coordsys.coordsys_getconversiontype(self, *args, **kwargs)",
"def ctl_transform_to_colorspace_name(ctl_transform):\n\n if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,\n ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):\n name = ctl_transform.target\n else:\n name = ctl_transform.source\n\n return beautify_colorspace_name(name)",
"def name_from_dist(dist_func):\n return str(dist_func).split()[0].split('.')[-1][:-4]",
"def getColorTransferFunction(self):\t\t \n\t\treturn self.ctf",
"def get_costfctname(self):\n\t\ttxterrfct = \"{self.errfctname}\".format(self=self)\n\t\tif hasattr(self, 'regullam') and self.regullam is not None:\n\t\t\ttxterrfct += \"+{self.regullam:1.0e}x{self.regulfctname}\".format(self=self)\n\t\treturn txterrfct",
"def get_kernel_name(optype):\n if \"conv\" in optype and \"dwconv\" not in optype:\n optype = \"conv-bn-relu\"\n if \"dwconv\" in optype:\n optype = \"dwconv-bn-relu\"\n if optype == \"fc-relu\":\n optype = \"fc\"\n if optype == \"max-pool\":\n optype = \"maxpool\"\n if optype == \"avg-pool\":\n optype = \"avgpool\"\n if optype in [\"global-pool\", \"gap\"]:\n optype = \"global-avgpool\"\n if optype == \"channel_shuffle\":\n optype = \"channelshuffle\"\n if optype in [\"bn-relu\"]:\n optype = \"bnrelu\"\n if optype in [\"add-relu\"]:\n optype = \"addrelu\"\n\n if optype in [\"SE\", \"SE-relu\", \"se\", \"se-relu\"]:\n optype = \"se\"\n\n return optype"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
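A quick usage note for the clc_core_fn_name helper above; the argument values are illustrative, not taken from the source:

clc_core_fn_name('float', size='4', mode='_rtz', sat='_sat')
# -> '__clc_convert_float4_sat_rtz'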
Apply weight normalization to all of the layers. | def apply_weight_norm(self):
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(
m, torch.nn.ConvTranspose1d
):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm) | [
"def apply_weight_norm(self):\n\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):\n torch.nn.utils.weight_norm(m)\n self.apply(_apply_weight_norm)",
"def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()",
"def _init_weights_resnet(self):\n for m in self.modules():\n torchutils.weights_init_hetruncatednormal(m, dense_gaussian=self.dense_gaussian)",
"def apply_weight_norm(self):\n return self._apply_weight_norm",
"def normalize_weights(self):\n\n w = tf.reshape(self.w, [-1, self.w_shape[-1]])\n u = self.u\n\n with tf.name_scope(\"spectral_normalize\"):\n for _ in range(self.power_iterations):\n v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))\n u = tf.math.l2_normalize(tf.matmul(v, w))\n u = tf.stop_gradient(u)\n v = tf.stop_gradient(v)\n sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)\n self.u.assign(tf.cast(u, self.u.dtype))\n self.w.assign(\n tf.cast(tf.reshape(self.w / sigma, self.w_shape), self.w.dtype)\n )",
"def _normalize_dense_layer(self):\n old_weights = self.model.get_layer(name=\"dense_layer\").get_weights()[0]\n norm_weights = old_weights / np.linalg.norm(old_weights, axis=0)\n\n self.model.get_layer(name=\"dense_layer\").set_weights([norm_weights])",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def __normalize_weights(self):\n result = norm(self.weights, 1)\n self.weights = self.weights / result if not result == 0.0 else self.weights",
"def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1",
"def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_",
"def standartize_conv_weights(model: nn.Module) -> None:\n for m in model.modules():\n if not isinstance(m, nn.modules.conv._ConvNd):\n continue\n shape = m.weight.shape\n parametrize.register_parametrization(\n m, 'weight', nn.LayerNorm(shape[1:], elementwise_affine=False))",
"def apply_weight_norm(module, name='', dim=0, hook_child=True):\r\n return apply_reparameterization(module, reparameterization=WeightNorm, hook_child=hook_child,\r\n name=name, dim=dim)",
"def freeze_bn(self):\n for layer in self.modules():\n if isinstance(layer, nn.modules.batchnorm._BatchNorm):\n layer.eval()",
"def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)",
"def normalise(self):\n for filter in self.filters:\n filter.normalise()",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)",
"def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
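To see what the per-module hook in apply_weight_norm above does, here is a standalone one-layer illustration (not part of the class):

import torch

conv = torch.nn.Conv1d(1, 4, 3, padding=1)
torch.nn.utils.weight_norm(conv)  # the same call _apply_weight_norm makes for each matching layer
print(hasattr(conv, 'weight_g'), hasattr(conv, 'weight_v'))  # True True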
Register stats for denormalization as buffer. | def register_stats(self, stats):
assert stats.endswith(".h5") or stats.endswith(".npy")
if stats.endswith(".h5"):
mean = read_hdf5(stats, "mean").reshape(-1)
scale = read_hdf5(stats, "scale").reshape(-1)
else:
mean = np.load(stats)[0].reshape(-1)
scale = np.load(stats)[1].reshape(-1)
self.register_buffer("mean", torch.from_numpy(mean).float())
self.register_buffer("scale", torch.from_numpy(scale).float())
logging.info("Successfully registered stats as buffer.") | [
"def _weight2buffer(self):\n delattr(self.module, 'weight')\n self.module.register_buffer('weight', self.weight.data)\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n delattr(self.module, 'bias')\n self.module.register_buffer('bias', self.bias.data)",
"def register_uninitialized_buffer(\n self, name: str, dtype: Optional[torch.dtype] = None\n ):\n if dtype is None:\n dtype = torch.float64\n\n self.register_buffer(name, nn.parameter.UninitializedBuffer(dtype=dtype))",
"def add_stats_to_buffer(stats, replay_buffer):\n for s in stats:\n state = one_hot_encode(s[0])\n probs = torch.Tensor(s[1])\n value = torch.Tensor([s[2]])\n replay_buffer.add((state,\n probs,\n value))",
"def _cast_buffers(self,\n dtype: Optional[torch.dtype] = None,\n memo: Optional[Set] = None) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, XlaFullyShardedDataParallel):\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(dtype=dtype, memo=memo)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n if torch.is_floating_point(buf):\n orig_dtype = buf.dtype\n cast_dtype = dtype or self.buffer_dtype\n if orig_dtype != cast_dtype:\n buf = buf.to(cast_dtype)\n buf._orig_dtype = orig_dtype\n if buf.device != self.xla_device:\n buf = buf.to(self.xla_device)\n setattr(module, name, buf)",
"def _CreateStatsCollector(self):",
"def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()",
"def set_batch_stats(self, x):\n\n if self.set_stats_f is None:\n self.set_stats_f = theano.function(\n inputs=[self.input],\n updates=[(self.bm, self.m), (self.bv, self.v)]\n )\n\n self.set_stats_f(x.astype(dtype))",
"def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])",
"def _cast_buffers(\n self,\n device: Optional[torch.device] = None,\n dtype: Optional[Dict[str, torch.dtype]] = None,\n memo: Optional[Set] = None,\n recurse: bool = True,\n ) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, FullyShardedDataParallel) and recurse:\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(device=device, dtype=dtype, memo=memo, recurse=recurse)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n buf = buf.to(device=device or self.compute_device)\n if name not in self._buffer_name_to_orig_dtype:\n self._buffer_name_to_orig_dtype[name] = buf.dtype\n # If given, cast buffer to the given dtype. This is used to\n # suppport mixed precision for buffers\n # (given by self.mixed_precision.buffer_dtype) and also used\n # to restore the buffer dtype to the original precision for\n # state_dict() calls.\n # Note that non-floating point buffers are not casted.\n if torch.is_floating_point(buf):\n # We are restoring the original buffer type in\n # preparation for checkpoint.\n if dtype:\n buf = buf.to(dtype=dtype[name])\n # Note that we don't pass in self.mixed_precision.buffer_dtype\n # recursively into _cast_buffers, as we want to respect\n # mp config for child FSDP instances.\n elif self._mixed_precision_enabled_for_buffers():\n buf = buf.to(self.mixed_precision.buffer_dtype)\n\n setattr(module, name, buf)",
"def Buffer(self) -> _n_0_t_7[_n_0_t_6]:",
"def register_output_buffer(self, buffer_name: Union[str, List[str]]):\n\n if not isinstance(buffer_name, list):\n buffer_name = [buffer_name]\n\n for name in buffer_name:\n buffer_info = BufferInfo(name)\n self._output_buffers.append(buffer_info)",
"def register_stat(self, register_stat):\n self._register_stat = register_stat",
"def _buffer(self, enable=True):\n self._isBuffered = enable",
"def test_histogram_observer_consistent_buffer_shape(self):\n obs = HistogramObserver()\n min_shape_before = obs.min_val.shape\n max_shape_before = obs.max_val.shape\n for _ in range(2):\n obs(torch.randn(4, 4, 4, 4))\n self.assertEqual(min_shape_before, obs.min_val.shape)\n self.assertEqual(max_shape_before, obs.max_val.shape)",
"def min_output_buffer(self, *args, **kwargs):\n return _qtgui_swig.histogram_sink_f_sptr_min_output_buffer(self, *args, **kwargs)",
"def filter(self, buffer):\n self.allbuffers[self.i, :] = buffer\n self.i += 1\n return 0.0",
"def allocbuffer(self, fps):\n self.buf = np.zeros((fps, self.packlen), np.uint8)\n self.fps = fps",
"def _write_data_to_buffer(self, bunch):\n\n ps_coords = {'x': None, 'xp': None, 'y': None,\n 'yp': None, 'z': None, 'dp': None}\n for coord in ps_coords:\n ps_coords[coord] = getattr(bunch, coord)\n if pm.device == 'GPU':\n stream = next(gpu_utils.stream_pool)\n ps_coords[coord] = ps_coords[coord].get_async(stream=stream)\n if pm.device == 'GPU':\n if gpu_utils.use_streams:\n for stream in gpu_utils.streams:\n stream.synchronize()\n\n # TODO: calculate these cell stats on the GPU instead of the CPU!!!\n n_cl, x_cl, xp_cl, y_cl, yp_cl, z_cl, dp_cl = cp.calc_cell_stats(\n ps_coords['x'], ps_coords['xp'], ps_coords['y'],\n ps_coords['yp'], ps_coords['z'], ps_coords['dp'],\n self.beta_z, self.radial_cut, self.n_radial_slices,\n self.n_azimuthal_slices)\n\n self.buffer_cell['mean_x'][:,:,0] = x_cl[:,:]\n self.buffer_cell['mean_xp'][:,:,0] = xp_cl[:,:]\n self.buffer_cell['mean_y'][:,:,0] = y_cl[:,:]\n self.buffer_cell['mean_yp'][:,:,0] = yp_cl[:,:]\n self.buffer_cell['mean_z'][:,:,0] = z_cl[:,:]\n self.buffer_cell['mean_dp'][:,:,0] = dp_cl[:,:]\n self.buffer_cell['macroparticlenumber'][:,:,0] = n_cl[:,:]\n\n for stats in self.stats_to_store:\n self.buffer_cell[stats] = np.roll(\n self.buffer_cell[stats], shift=-1, axis=2)",
"def set_scribe_buffer(buffer_enabled):\r\n LogOptions._SCRIBE_BUFFER = buffer_enabled"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
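A hedged sketch of how the mean/scale buffers registered above are typically consumed for feature denormalization; the class name, the denormalize method and the (frames, bins) shape are assumptions, not taken from the source:

import torch

class StatsHolder(torch.nn.Module):
    def __init__(self, mean, scale):
        super().__init__()
        self.register_buffer("mean", torch.as_tensor(mean).float())
        self.register_buffer("scale", torch.as_tensor(scale).float())

    def denormalize(self, c):
        # c: (frames, bins) normalized features -> original scale
        return c * self.scale + self.mean

holder = StatsHolder(mean=[0.5, -1.0], scale=[2.0, 3.0])
print(holder.denormalize(torch.ones(4, 2)).shape)  # torch.Size([4, 2])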
Remove weight normalization from all of the layers. | def remove_weight_norm(self):
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm) | [
"def remove_weight_norm(self):\n def _remove_weight_norm(m):\n try:\n if isinstance(m, torch.nn.Conv1d) \\\n or isinstance(m, torch.nn.ConvTranspose2d):\n torch.nn.utils.remove_weight_norm(m)\n #logging.debug(f\"Weight norm is removed from {m}.\")\n logging.info(f\"Weight norm is removed from {m}.\")\n elif isinstance(m, torch.nn.GRU):\n list_name = []\n for name, param in m.named_parameters():\n list_name.append(name)\n logging.info(list_name)\n for name in list_name:\n if 'weight' in name and '_g' in name:\n torch.nn.utils.remove_weight_norm(m, name=name.replace('_g',''))\n #logging.debug(f\"Weight norm is removed from {m} {name}.\")\n logging.info(f\"Weight norm is removed from {m} {name}.\")\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)",
"def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_",
"def remove_weight_norm(module, name='', remove_all=False):\r\n return remove_reparameterization(module, reparameterization=WeightNorm,\r\n name=name, remove_all=remove_all)",
"def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)",
"def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)",
"def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None",
"def _reset_weight_grads(self):\n for l in self.layers:\n l['weights'].reset_grad()\n l['bias'].reset_grad()",
"def prune_weights(self):\n pass",
"def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)",
"def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()",
"def _init_weights_resnet(self):\n for m in self.modules():\n torchutils.weights_init_hetruncatednormal(m, dense_gaussian=self.dense_gaussian)",
"def RemoveBatchNormLayers(network, batch_norm_names):\n i = 0\n j = 0\n while i < len(network.layer) and j < len(batch_norm_names): \n if network.layer[i].name == batch_norm_names[j]:\n del network.layer[i]\n j += 1\n else:\n i += 1\n \n if j != len(batch_norm_names):\n print j, len(batch_norm_names)\n raise AssertionError('All batch norm layers were not removed')",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1",
"def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))",
"def _normalize_dense_layer(self):\n old_weights = self.model.get_layer(name=\"dense_layer\").get_weights()[0]\n norm_weights = old_weights / np.linalg.norm(old_weights, axis=0)\n\n self.model.get_layer(name=\"dense_layer\").set_weights([norm_weights])",
"def remove_weight_scale(module: Module, name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'scale', name)",
"def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})",
"def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
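The per-layer effect of remove_weight_norm above, and the reason for its try/except, shown on a single module (standalone snippet):

import torch

conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 4, 3))
torch.nn.utils.remove_weight_norm(conv)      # folds weight_g / weight_v back into weight
try:
    torch.nn.utils.remove_weight_norm(conv)  # a second removal raises ValueError,
except ValueError:                           # which is what the method above swallows
    pass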
Returns a humanized string representing the time difference between now() and the input timestamp. The output rounds up to days, hours, minutes, or seconds: 4 days 5 hours returns '4 days'; 0 days 4 hours 3 minutes returns '4 hours'; and so on. | def time_since(timestamp=None):
rstr = ""
if not timestamp or not isinstance(timestamp, datetime.datetime):
return rstr
now = timezone.now()
timediff = now - timestamp
days = timediff.days
weeks = days//7
months = days//30
minutes = timediff.seconds % 3600 // 60
seconds = timediff.seconds % 3600 % 60
hours = timediff.seconds // 3600
if days > 365:
return "> a year"
if months > 0:
if months == 1:
tstr = "month"
else:
tstr = "months"
rstr = rstr + "%s %s" % (months, tstr)
return rstr
if weeks > 0:
if weeks == 1:
tstr = "week"
else:
tstr = "weeks"
rstr = rstr + "%s %s" % (weeks, tstr)
return rstr
if days > 0:
if days == 1:
tstr = "day"
else:
tstr = "days"
rstr = rstr + "%s %s" % (days, tstr)
return rstr
elif hours > 0:
if hours == 1:
tstr = "hour"
else:
tstr = "hours"
rstr = rstr + "%s %s" % (hours, tstr)
return rstr
elif minutes > 0:
if minutes == 1:
tstr = "min"
else:
tstr = "mins"
rstr = rstr + "%s %s" % (minutes, tstr)
return rstr
elif seconds > 0:
if seconds == 1:
tstr = "sec"
else:
tstr = "secs"
rstr = rstr + "%s %s" % (seconds, tstr)
return rstr
else:
return "Now" | [
"def humanizeTimeDiff(timestamp = None):\n import datetime\n \n timeDiff = datetime.datetime.now() - timestamp\n days = timeDiff.days\n hours = timeDiff.seconds/3600\n minutes = timeDiff.seconds%3600/60\n seconds = timeDiff.seconds%3600%60\n \n str = \"\"\n tStr = \"\"\n if days > 0:\n if days == 1: tStr = \"day ago\"\n else: tStr = \"days ago\"\n str = str + \"%s %s\" %(days, tStr)\n return str\n elif hours > 0:\n if hours == 1: tStr = \"hour ago\"\n else: tStr = \"hours ago\" \n str = str + \"%s %s\" %(hours, tStr)\n return str\n elif minutes > 0:\n if minutes == 1:tStr = \"min ago\"\n else: tStr = \"mins ago\" \n str = str + \"%s %s\" %(minutes, tStr)\n return str\n elif seconds > 0:\n if seconds == 1:tStr = \"sec ago\"\n else: tStr = \"secs ago\"\n str = str + \"%s %s\" %(seconds, tStr)\n return str\n else:\n return None",
"def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"",
"def humanize_ts(timestamp=False):\n now = datetime.utcnow()\n diff = now - datetime.strptime(timestamp, \"%Y-%m-%d %H:%M:%S\")\n second_diff = diff.seconds\n day_diff = diff.days\n print(diff, now, datetime.strptime(timestamp, \"%Y-%m-%d %H:%M:%S\"))\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"",
"def humanizeTimeDiff(timestamp = None):\n from datetime import datetime\n\n now = datetime.now()\n reverse = now < timestamp\n if not reverse:\n timeDiff = now - timestamp\n else:\n timeDiff = timestamp - now\n years = timeDiff.days/365\n months = timeDiff.days%365/30\n days = timeDiff.days%365%30\n hours = timeDiff.seconds/3600\n minutes = timeDiff.seconds%3600/60\n seconds = timeDiff.seconds%3600%60\n\n if not reverse:\n str = u\"%s %s전\"\n else:\n str = u\"%s %s후\"\n if years > 0:\n str = str %(years, u\"년\")\n return str\n elif months > 0:\n str = str %(months, u\"달\")\n return str\n elif days > 0:\n str = str %(days, u\"일\")\n return str\n elif hours > 0:\n str = str %(hours, u\"시간\")\n return str\n elif minutes > 0:\n str = str %(minutes, u\"분\")\n return str\n elif seconds > 0:\n str = str %(seconds, u\"초\")\n return str\n else:\n return u\"방금전\"",
"def timedelta_to_string(timestamp):\n if timestamp.seconds // 3600 == 0:\n return str(timestamp.seconds // 60 % 60) + 'm'\n elif timestamp.seconds // 60 % 60 == 0:\n return str(timestamp.seconds // 3600) + 'h'\n\n return '{}h {}m'.format(str(timestamp.seconds // 3600),\n str(timestamp.seconds // 60 % 60))",
"def pretty_date(time):\n now = datetime.now()\n diff = now - time\n\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"1 day ago\"\n return str(day_diff) + \" days ago\"",
"def getHumanReadableTime(self, timestamp):\n if timestamp < 60: \n if timestamp == 1: return '%d second' % timestamp\n else: return '%d seconds' % timestamp\n elif timestamp >= 60 and timestamp < 3600:\n timestamp = round(timestamp/60)\n if timestamp == 1: return '%d minute' % timestamp\n else: return '%d minutes' % timestamp\n else:\n timestamp = round(timestamp/3600)\n if timestamp == 1: return '%d hour' % timestamp\n else: return '%d hours' % timestamp",
"def _get_timedelta_str(self, seconds):\n minutes = seconds // 60\n hours = 0\n while minutes > 60:\n hours += 1\n minutes -= 60\n if hours > 0:\n result = _(\"{} hours {} minutes\").format(hours, minutes)\n else:\n result = _(\"{} minutes\").format(minutes)\n return result",
"def human_duration(elapsed):\n return '%02dm %02ds' % divmod(elapsed, 60)",
"def get_pretty_date(time=False):\n now = datetime.datetime.now()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif type(time) is float:\n diff = now - datetime.datetime.fromtimestamp(int(time))\n elif isinstance(time,datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"",
"def pretty_duration(self): # method\n hour = self.duration/360\n minut = (self.duration-hour*360)/60\n sec = self.duration-hour*360-minut*60\n return \"%i hours %i minutes %i seconds\" % (hour, minut, sec)",
"def time_since(since):\n now = time.time()\n s = now - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)",
"def duration(t):\n if t < 10:\n return '%.2fs' % t\n elif t < 60:\n return '%.0fs' % t\n else:\n h = int(t // 3600)\n m = int((t - h * 3600) // 60)\n s = t - h * 3600 - m * 60\n if h < 1:\n return '%02d:%02d' % (m, s)\n else:\n return '%02dh:%02dm:%02ds' % (h, m, s)",
"def getTimestring():\n (dt, micro) = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f').split('.')\n dt = \"%s%03d\" % (dt, int(micro) / 1000)\n return dt",
"def duration_human_readable(self) -> str:\n result = \"\"\n if self.duration_seconds > 24 * 3600:\n seconds_of_one_day = 24 * 3600\n result += \"{} {} \".format(\n int(self.duration_seconds / seconds_of_one_day),\n \"Tage\",\n )\n result += \"{:02}:{:02}:{:02}h\".format(\n int(self.duration_seconds / 3600),\n int(self.duration_seconds / 60) % 60,\n int(self.duration_seconds) % 60,\n )\n return result",
"def get_timestamp() -> str:\n return pretty_time(datetime.datetime.now())",
"def time_remaining_str(self):\n return time_string(self.time_remaining())",
"def getTimeDifferenceValue(td):\n SECOND = 1\n MINUTE = 60 * SECOND\n HOUR = 60 * MINUTE\n DAY = 24 * HOUR\n WEEK = 7 * DAY\n MONTH = 30 * DAY\n \n timenow = datetime.now();\n difference = timenow - td;\n\n delta = difference.days * DAY + difference.seconds \n \n minutes = delta / MINUTE\n hours = delta / HOUR\n days = delta / DAY\n weeks = delta / WEEK\n months = delta / MONTH\n \n if delta < 0:\n return \"Please give time after current time\"\n if delta < 10 * SECOND:\n return \"just now\" \n if delta < 1 * MINUTE: \n return str(delta) + \" seconds ago\"\n if delta < 60 * MINUTE: \n return str(minutes) + \" minutes ago\"\n if delta < 24 * HOUR:\n return str(hours) + \" hours ago\"\n if delta < 1 * WEEK:\n return \"one week ago\"\n if delta < 4 * WEEK:\n return str(weeks) + \" weeks ago\"\n if delta < 1 * DAY: \n return \"one day ago\"\n if delta < 30 * DAY: \n return str(days) + \" days ago\"\n if delta < 1 * MONTH: \n return \"one month ago\"\n else:\n return str(months) + \" months ago\"",
"def format_time_diff(timediff):\n if timediff > 60 * 60:\n hour = int(timediff / (60 * 60))\n return str(hour) + \" H \" + format_time_diff(timediff - (hour * 60 * 60))\n elif timediff > 60:\n min = int(timediff / 60)\n return str(min) + ' M ' + format_time_diff(timediff - (min * 60))\n else:\n return str(round(timediff, 3)) + \" S\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
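A usage sketch for time_since above; it assumes Django is installed (the function relies on django.utils.timezone), the settings call is only the minimal setup needed outside a project, and the example inputs are illustrative:

import datetime
from django.conf import settings
settings.configure(USE_TZ=True)   # minimal setup so timezone.now() works outside a project
from django.utils import timezone

print(time_since(timezone.now() - datetime.timedelta(days=4, hours=5)))     # '4 days'
print(time_since(timezone.now() - datetime.timedelta(hours=4, minutes=3)))  # '4 hours'
print(time_since("not a datetime"))                                         # '' (fails the isinstance check)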
Return the elements of the message that match the given parameters. match is the type of elements you want to get (check the parse_type variable to see the possibilities); using ! at the start of match will reverse the value of positive. occurences selects which nth matching elements to capture; None will find everything. | def finder(self, match="w", occurences=None, start=None, stop=None, trigger=True, positive=True, reverse=False, keep_prefix=False):
res = []
length = len(self.parse_type)
if occurences != None:
occurences = str(occurences)
index_array = self.indexes(occurences, 1)
is_capturing = (start == None)
target = 0
if match == None:
match = "xwoifmrcs"
if len(match) > 0 and match[0] == "!":
positive = (positive == False)
for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol
if is_capturing == False:
if type(start) == type(0):
is_capturing = (idx == start)
else:
is_capturing = (self.parse_type[idx] in start)
if stop != None:
if trigger == True or is_capturing == True:
if type(stop) == type(0) and (idx == stop):
break
if type(stop) == " " and (self.parse_type[idx] in stop):
break
if is_capturing == True:
if (self.parse_type[idx] in match) == positive:
if target in index_array:
res.append(self.parse_msg[idx][(keep_prefix == False and self.parse_type[idx] in "ox"):])
target += 1
if len(res) == 0:
return None
return res | [
"def _any_depth_parse(match):\n markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]\n for idx in (4, 5):\n if markers[idx]:\n markers[idx] = mtypes.emphasize(markers[idx])\n return [m for m in markers if m]",
"def _parse_pb_prune(matches):\n if len(matches) != 2:\n return matches\n\n pb1, pb2 = matches\n # Break a tie between reflection request/response by just \"guessing\" it is\n # a response.\n if isinstance(pb1, REFLECTION_REQUEST) and isinstance(\n pb2, REFLECTION_RESPONSE\n ):\n return [pb2]\n if isinstance(pb1, REFLECTION_RESPONSE) and isinstance(\n pb2, REFLECTION_REQUEST\n ):\n return [pb1]\n\n return matches",
"def main():\n\n # | 匹配左右任意一个表达式\n ret1 = re.match(r\"[a-zA-Z0-9]{4,12}@(163|126)\\.com$\", \"12345@163.com\")\n print(\"ret1=\", ret1.group())\n print(\"ret1's group(1)=\", ret1.group(1))\n ret2 = re.match(r\"[a-zA-Z0-9]{4,12}@(163|126)\\.com$\", \"12345@126.com\")\n print(\"ret2=\", ret2.group())\n print(\"ret2's group(1)=\", ret2.group(1))\n\n # 一个分组 :用数字当分组名\n html_str1 = \"<h1>我来演示正则表达式分组</h1>\"\n ret3 = re.match(r\"<(\\w*)>.*</(\\1)>\", html_str1)\n print(\"ret3=\", ret3.group())\n\n # 二个分组 :用数字当分组名\n html_str2 = \"<body><h1>我来演示正则表达式分组</h1></body>\"\n ret4 = re.match(r\"<(\\w*)><(\\w*)>.*</(\\2)></(\\1)>\", html_str2)\n print(\"ret4=\", ret4.group())\n print(\"ret4's group(1)=\", ret4.group(1))\n print(\"ret4's group(2)=\", ret4.group(2))\n\n # 给分组起别名\n html_str3 = \"<header><h1>我来演示正则表达式分组</h1></header>\"\n ret5 = re.match(r\"<(?P<name1>\\w*)><(?P<name2>\\w*)>.*</(?P=name2)></(?P=name1)>\", html_str3)\n print(\"ret5=\", ret5.group())\n print(\"ret5's group(1)=\", ret5.group(1))\n print(\"ret5's group(2)=\", ret5.group(2))\n print(\"ret5's name1=\", ret5.group(\"name1\"))\n print(\"ret5's name2=\", ret5.group(\"name2\"))\n\n # 匹配失败,因为html_str4不满足分组name1前后内容必须一样\n html_str4 = \"<body><h1>我来演示正则表达式分组</h1></header>\"\n ret6 = re.match(r\"<(?P<name1>\\w*)><(?P<name2>\\w*)>.*</(?P=name2)></(?P=name1)>\", html_str4)\n print(\"type or ret6=\", type(ret6))\n # print(\"ret6=\", ret5.group())\n # print(\"ret6's group(1)=\", ret6.group(1))\n # print(\"ret6's group(2)=\", ret6.group(2))",
"def tok_match_record(matchlist, remainder_str, xtoken, matched_substr):\n\tpstr_infostr = matched_substr\n\txtok_infostr = re.sub(r'<([^<>\"]{1,3})\\w*(?: \\w+=\"([^<>\"]{1,3})\\w*\")?>',\n\t r'\\1',\n\t xtoken.tagout)\n\t# print(\"SAVE p-substr:'%s' =~ m/%s/ix\" % (pstr_infostr,xtok_infostr),file=sys.stderr)\n\t\n\t# -a- préparation du substitut balisé en xml\n\t# £ pseudo_out == 'rendu'\n\t# debg\n\tpseudo_out = xtoken.tagout+str_escape(matched_substr)+xtoken.endout\n\t\n\t# -b- enregistrement\n\tmatchlist.append(pseudo_out)\n\ti = len(matchlist)\n\t\n\t# -c- effacement dans le remainder\n\t# (substitution par un renvoi à la \\4 ex: #(#4#)#)\n\t# £todo!!! : interdire matches dans les renvois précédents (exemple n° volume == n° de renvoi) !\n\tremainder_str = re.sub(xtoken.re, \"#(#%i-%s#)#\" % (i, xtok_infostr), remainder_str)\n\t\n\treturn(matchlist, remainder_str)",
"def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif key == 'args':\n for (exp, act) in zip(value, record.get(key)):\n if not re.search(str(exp), str(act)):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches",
"def parse(self, msg):\n parsed_content = {}\n match = self.pattern.match(msg)\n if not match:\n return None\n for key in self.params:\n parsed_content[key] = match.group(key)\n return parsed_content",
"def alt_parser(patterns):\n from reparse.util import remove_lower_overlapping\n get_first = lambda items: [i[0] for i in items]\n get_second = lambda items: [i[1] for i in items]\n\n def parse(line):\n output = []\n for pattern in patterns:\n results = pattern.scan(line)\n if results and any(results):\n output.append((pattern.order, results))\n return get_first(reduce(remove_lower_overlapping, get_second(sorted(output)), []))\n\n return parse",
"def split_match(self, match):\n\n match, line, col, error, warning, message, near = super().split_match(match)\n\n if match:\n message = '[xvlog] ' + message\n\n return match, line, col, error, warning, message, near",
"def __regex2tuple(match):\n return tuple([ match.group(i) for i in range(1, match.lastindex + 1) ])",
"def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches",
"def message_matches(cls, msg, regex):\n m = regex.match(msg.text)\n if m:\n return m.groups()\n return None",
"def match_any_composition(self, match=None):\n pass",
"def match_any_data(self, match=None):\n pass",
"def test_list_extract_group(self):\n\n t = r.ExtractTransformer(pattern=[\"one\", \"(two)\"])\n self.assertEqual(t.re.pattern, \"(one)|(two)\")",
"def match(self):\n\n # We initate this variable which gonna contain the returned data\n result = []\n\n # We compile the regex string\n to_match = comp(self.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if self.rematch: # pylint: disable=no-member\n pre_result = to_match.findall(self.data)\n else:\n pre_result = to_match.search(self.data)\n\n if self.return_data and pre_result is not None: # pylint: disable=no-member\n if self.rematch: # pylint: disable=no-member\n for data in pre_result:\n if isinstance(data, tuple):\n result.extend(list(data))\n else:\n result.append(data)\n\n if self.group != 0: # pylint: disable=no-member\n return result[self.group] # pylint: disable=no-member\n else:\n result = pre_result.group(\n self.group # pylint: disable=no-member\n ).strip()\n\n return result\n elif (\n not self.return_data # pylint: disable=no-member\n and pre_result is not None\n ):\n return True\n return False",
"def test_searchOrMessageSet(self):\n return self._messageSetSearchTest('OR 2:* 2:*', [2, 3, 4, 5])",
"def filter_args_num(self, matches: str, args: int) -> List[str]:\n filtered: List[str] = []\n if args == 1:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg\"):\n filtered.append(matches[i][:-4])\n else:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg[%d]\" % args):\n # Make sure we don't cause an IndexError (end of list)\n # Check to make sure arg + 1 doesn't exist\n if (i == (len(matches) - 1) or\n not matches[i + 1].endswith(\"/arg[%d]\" %\n (args + 1))):\n filtered.append(matches[i][:-len(\"/arg[%d]\" % args)])\n\n return filtered",
"def test_multi_no_match_return_expr(self):\n eq_(None,line_matches_greps(self.line,[\"foo\",\"idontmatch\"]))",
"def match(self):\n\n # We initate this variable which gonna contain the returned data\n result = []\n\n # We compile the regex string\n to_match = comp(self.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if self.rematch: # pylint: disable=no-member\n pre_result = to_match.findall(self.data)\n else:\n pre_result = to_match.search(self.data)\n\n if self.return_data and pre_result: # pylint: disable=no-member\n if self.rematch: # pylint: disable=no-member\n for data in pre_result:\n if isinstance(data, tuple):\n result.extend(list(data))\n else:\n result.append(data)\n\n if self.group != 0: # pylint: disable=no-member\n return result[self.group] # pylint: disable=no-member\n\n else:\n result = pre_result.group(\n self.group # pylint: disable=no-member\n ).strip()\n\n return result\n\n elif not self.return_data and pre_result: # pylint: disable=no-member\n return True\n\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
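The match/positive filtering inside finder above is easiest to see on a toy pair of parallel lists; the lists and the simplified one-liner below are illustrative assumptions, not the real parser state:

parse_msg  = ['!cmd', 'word', '42', '"text"']
parse_type = 'xwis'                     # one class character per element

match, positive = 'wi', True            # keep words and integers
kept = [m for m, t in zip(parse_msg, parse_type) if (t in match) == positive]
print(kept)                             # ['word', '42']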
Return True if the parameters match the parse_type. match is the sequence of parse_type elements you want to search for; you can write www to check for 3 words in a row. ranges follows the same syntax as occurences except that it targets indexes. | def checker(self, match="xw", ranges="0,1", in_a_row=True, reverse=False):
res = []
length = len(self.parse_type)
if ranges != None:
ranges = str(ranges)
index_array = self.indexes(ranges)
substring = ""
for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol
if idx in index_array:
substring += self.parse_type[idx]
if in_a_row == True:
return (match in substring)
if in_a_row == False:
target = 0
for i in substring:
                if target < len(match) and match[target] == i:
                    target += 1
            return (target == len(match))
        if in_a_row is None:
for i in self.parse_type:
if i in match:
match = match.replace(i, '', 1)
return (match == "")
return None | [
"def can_process(self, statement):\r\n set1 = ['sweet', 'room']\r\n set2 = ['delux', 'room']\r\n set3 = ['condo', 'room']\r\n\r\n if all(x in statement.text.split() for x in set1):\r\n return True\r\n elif all(x in statement.text.split() for x in set2):\r\n return True\r\n elif all(x in statement.text.split() for x in set3):\r\n return True\r\n else:\r\n return False",
"def accepts(self):\n\n # load derived parsers conditions to check\n self.load_conditions()\n\n # substring contained condition\n if self.row_substring:\n substring_cond = self.row_substring in self.row[0]\n else:\n substring_cond = True\n\n # length of row condition\n if self.row_length:\n len_cond = len(self.row) == self.row_length\n else:\n len_cond = True\n\n # pattern matching condition\n if self.row_pattern:\n pattern_cond = self._re_match(self.row_pattern, self.row[0])\n else:\n pattern_cond = True\n\n return substring_cond and len_cond and pattern_cond",
"def check_clauses_in_parse_filter(semantic_counts):\n contains_between_clause = False\n contains_num_clause = False\n for key in semantic_counts:\n key_str = str(key)\n contains_between_clause = _detect_semantic_token(key_str, \"@between\")\n contains_num_clause = _detect_semantic_token(key_str, \"@Num\")\n if contains_between_clause or contains_num_clause:\n break\n \n if contains_between_clause or contains_num_clause:\n max_correct_count = 0\n correct_semantic_rep = None\n for key in semantic_counts:\n count = 0\n key_str = str(key)\n if contains_between_clause:\n count += _count_correct_between_parses(key_str)\n if contains_num_clause:\n count += _count_correct_num_parses(key_str)\n if count > max_correct_count:\n correct_semantic_rep = {key : semantic_counts[key]}\n max_correct_count = count\n \n return correct_semantic_rep\n \n return semantic_counts",
"def _multiindex_row_in(cls, row, parse_list, start=None, stop=None):\n\n row_sub = row[start:stop]\n for tokens in parse_list:\n\n # A single row will never match an empty token list:\n if not tokens:\n continue\n\n # Check whether all of the entries in `row_sub` match some list of\n # tokens. If this loop terminates prematurely because of a mismatch\n # between `row_sub` and some list of tokens in `parse_list`, it will\n # not return True; this forces checking of the subsequent token\n # lists:\n for i, token in enumerate(tokens):\n\n # '*' matches everything:\n if token == '*':\n continue\n\n # Integers and strings must match exactly:\n elif isinstance(token, (int, long, basestring)):\n if row_sub[i] != token:\n break\n\n # Tokens must be in a set of values:\n elif type(token) == list:\n if row_sub[i] not in token:\n break\n\n # Token must be within range of an interval:\n elif type(token) == slice:\n i_start = token.start\n i_stop = token.stop\n\n # Handle intervals with ambiguous start or stop values:\n if (i_start is not None and row_sub[i] < i_start) or \\\n (i_stop is not None and row_sub[i] >= i_stop):\n break\n else:\n continue\n else:\n return True\n\n # If the function still hasn't returned, no match was found:\n return False",
"def _index_row_in(cls, row, parse_list):\n\n # Since `row` is a scalar, it need only match the sole entry of one of\n # the lists in `parse_list`:\n for tokens in parse_list:\n if not tokens:\n continue\n if len(tokens) > 1:\n raise ValueError('index row only is scalar')\n if tokens[0] == '*':\n return True\n elif isinstance(tokens[0], (int, long, basestring)):\n if row == tokens[0]:\n return True\n elif type(tokens[0]) == list:\n if row in tokens[0]:\n return True\n elif type(tokens[0]) == slice:\n i_start = tokens[0].start\n i_stop = tokens[0].stop\n if (i_start is None or row >= i_start) and \\\n (i_stop is None or row < i_stop):\n return True\n else:\n continue\n return False",
"def match(self) -> bool:",
"def auto_complete_query_checker(query_params: dict) -> bool:\n query_keys = list(query_params.keys())\n\n # type and prefix must appear in the list of query keys. If even one is not present, the query was not\n # formatted properly\n if 'type' not in query_keys or 'prefix' not in query_keys:\n return False\n\n # if the value for type is not in the given list, then it was not formatted properly\n if query_params['type'] not in ['name', 'brand', 'category']:\n return False\n\n # if it passes the last two checks, then the query was formatted correctly\n return True",
"def matches(inline,groupby,groupvals):\n for i,m in enumerate(groupby):\n if inline[m] == groupvals[i]:\n continue\n else:\n return False\n return True",
"def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)",
"def _racat_contains_ ( cat , item ) :\n return ( isinstance ( item , string_types ) and cat.isValidLabel ( item ) ) or \\\n ( isinstance ( item , integer_types ) and cat.isValidIndex ( item ) )",
"def match(self, data):\n # self.logger.debug('Running yara, nlp against data')\n # malicious = self._rules.match(data=data)\n # md5 = hashlib.md5(data).hexdigest()\n # if malicious:\n # for match in malicious:\n # self.logger.info('Match found; Rule: \\'%s\\';'\n # 'Namespace: \\'%s\\'; MD5: %s' %\n # (match.rule, match.namespace, md5))\n\n # return True\n \n cnt_name = 0\n cnt_dob = 0\n cnt_acc = 0\n cnt_email = 0\n cnt_line = 0\n\n for line in data: \n cnt_name += self.humanName(line)\n cnt_dob += self.dob(line)\n cnt_acc += self.account_phone(line)\n cnt_email += self.email(line)\n cnt_line += 1\n\n sum = cnt_name + cnt_dob + cnt_acc + cnt_email\n if sum > 100 or sum > cnt_line:\n return True\n else:\n return False\n return False",
"def _matches(self, i: int) -> bool:\n line = self.term.strip_seqs(self.lines[i].line)\n return self.search and self.search in line",
"def matches(\n self\n ) -> Tuple[str, ...]:\n return self._matches",
"def _block_matches_all(block_data):\n # do the checks which don't require loading any additional data\n if (\n self._block_matches(block_data, qualifiers) and\n self._block_matches(block_data.fields, settings)\n ):\n if content:\n definition_block = self.get_definition(course_locator, block_data.definition)\n return self._block_matches(definition_block['fields'], content)\n else:\n return True",
"def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))",
"def _keyword_match(self, term, keyword, fields):\n return any(keyword in self.go[term][f] for f in fields)",
"def paraMatch(para1: opParameter, para2: opParameter) -> bool:\n length = len(para1)\n if length != len(para2):\n return False\n\n # If two parameter's type is equal then\n # these two parameter is equal\n for i in range(length):\n if para1[i][1] != para2[i][1]:\n return False\n\n return True",
"def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()",
"def valid_retag_params(self) -> bool:\n if not (self.action[0] == Actions.RETAG.value):\n return False\n pairs = self.action[1].split(\",\")\n for pair in pairs:\n if not self.correct_retag_pair(pair):\n return False\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve which events to capture from the config | def set_capture_events_from_config(self):
event_config = [
{
"config_key": "events_watchlist",
"events": [
"watchlist.hit.process",
"watchlist.hit.binary",
"watchlist.storage.hit.process",
"watchlist.storage.hit.binary"
],
"options": self.forwarder_options.get("wlhitnotifenabled", "0")
},
{
"config_key": "events_feed",
"events": [
"feed.ingress.hit.process",
"feed.ingress.hit.binary",
"feed.ingress.hit.host",
"feed.storage.hit.process",
"feed.storage.hit.binary",
"feed.query.hit.process",
"feed.query.hit.binary"
],
"options": self.forwarder_options.get("feedhitnotif", "0")
},
{
"config_key": "events_alert",
"events": [
"alert.watchlist.hit.ingress.process",
"alert.watchlist.hit.ingress.binary",
"alert.watchlist.hit.ingress.host",
"alert.watchlist.hit.query.process",
"alert.watchlist.hit.query.binary"
],
"options": self.forwarder_options.get("alertnotifenabled", "0")
},
{
"config_key": "events_raw_sensor",
"events": [
"ingress.event.process",
"ingress.event.procstart",
"ingress.event.netconn",
"ingress.event.procend",
"ingress.event.childproc",
"ingress.event.moduleload",
"ingress.event.module",
"ingress.event.filemod",
"ingress.event.regmod"
"ingress.event.tamper",
"ingress.event.crossprocopen",
"ingress.event.remotethread",
"ingress.event.processblock",
"ingress.event.emetmitigation",
],
"options": self.forwarder_options.get("rawsensnotifenabled", "0")
},
{
"config_key": "events_binary_observed",
"events": ["binaryinfo.host.observed",
"binaryinfo.observed,"
"binaryinfo.group.observed"],
"options": self.forwarder_options.get("binobsnotifenabled", "0")
},
{
"config_key": "events_binary_upload",
"events": ["binarystore.file.added"],
"options": self.forwarder_options.get("binuplnotifenabled", "0")
}
]
self.capture_events = []
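        # A family is captured in full when its configured value is "all", skipped when it
        # is "0", and otherwise reduced to the intersection of the comma-separated names
        # from the config with the event names known for that family.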
for event_type in event_config:
events = self.forwarder_options.get(event_type["config_key"], "0").lower()
if events == "all":
self.capture_events.extend(event_type["events"])
elif events != "0":
events_from_config = events.split(",")
events_to_capture = list(set(events_from_config) & set(event_type["events"]))
self.capture_events.extend(events_to_capture)
self.logger.info("Configured to capture events: %s" % self.capture_events) | [
"def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()",
"def events(self):\n return self._events",
"def get_subscribed_events():",
"def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()",
"def events(self):\n return list(self._events)",
"def GetEventSources(self):\n return self._GetAttributeContainers('event_source')",
"def load_config(event):\n write_error('Starting load_config')\n\n if 'task_config' in event:\n return event['task_config']\n return {}",
"def read_events(self):\n events = self._events\n self._events = []\n if len(events) > 0:\n return events\n else:\n return self._read_events()",
"def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events",
"def handlers(self, event):\n if event in self._handler_dict:\n return self._handler_dict[event]\n return []",
"def test_get_event_listeners(self):\n expected_events = [\n \"kytos/core.shutdown\",\n \"kytos/core.shutdown.kytos/mef_eline\",\n \"kytos/topology.link_up\",\n \"kytos/topology.link_down\",\n ]\n actual_events = self.napp.listeners()\n\n for _event in expected_events:\n self.assertIn(_event, actual_events, _event)",
"def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events",
"def event_list(self):\n return self._event_list",
"def get_handlers_for_event(self, event):\n pass # pragma: no cover",
"def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())",
"def pssim_events (self):\n return self._pssim_events",
"def event_log(self):\n pass",
"def get_event_handlers(self, obj):\n\n ret = []\n if not obj.events: return ret\n events = [(name,handler) for name, handler in obj.events if handler.strip()]\n if not events: return ret\n\n try:\n default_event = self.config['events']['default']['type']\n except KeyError:\n default_event = 'wxCommandEvent'\n\n for event, handler in sorted( events ):\n if not handler: continue\n\n if self.codegen.preview and handler.startswith(\"lambda \"):\n if self.codegen.language!='python': continue\n handler = \"lambda event: print('event handler: lambda function')\"\n\n major = 'wx%d' % self.codegen.for_version[0]\n detailed = 'wx%d%d' % self.codegen.for_version\n try:\n supported_by = self.config['events'][event]['supported_by']\n if not (major in supported_by or detailed in supported_by):\n continue\n except (AttributeError, KeyError):\n pass\n\n # check for specific event type\n type_generic = 'type_%s' % major\n try:\n evt_type = self.config['events'][event][type_generic]\n ret.append((obj, event, handler, evt_type))\n continue\n except KeyError:\n pass\n\n # check for generic event type\n try:\n evt_type = self.config['events'][event]['type']\n except KeyError:\n evt_type = default_event\n ret.append((obj, event, handler, evt_type))\n return ret",
"def collect_events():\n cmd = OPT.kube_cli + \" get events {}\".format(get_namespace_argument())\n collect_helper(cmd=cmd, file_name=\"events\", resource_name=\"k8s events\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compares an image to its reference | def compare(self, reference, image):
if not os.path.isfile(reference):
raise PictureComparatorError("Reference file %s does not exist" % reference)
if not os.path.isfile(image):
raise PictureComparatorError("Image file %s does not exist" % image)
reference_img = cv2.imread(reference, 0)
image_img = cv2.imread(image, 0)
reference_width, reference_height = reference_img.shape[::-1]
image_width, image_height = image_img.shape[::-1]
if reference_width < image_width or reference_height < image_height:
raise PictureComparatorError("Reference picture must be greater than image to find")
method = cv2.TM_CCOEFF_NORMED
# Apply template Matching
res = cv2.matchTemplate(reference_img, image_img, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
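        # accept the best match position only if the normalized correlation score is high enough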
if max_val > 0.95:
return Rectangle(max_loc[0], max_loc[1], image_width, image_height)
else:
return None | [
"def compare_image(src_img, obj_img):\n im_src = aircv.imread(src_img) \n im_obj = aircv.imread(obj_img)\n\n pos = aircv.find_template(im_src, im_obj)\n logging.debug('Compare_Image: \\nSource Image: %s \\nObject Image:%s \\nResults: %s\\n' %(src_img, obj_img, pos)) \n \n if pos:\n return pos['result']\n else:\n return None",
"def compare_images(img1, img2):\n with Image.open(img1) as img1, Image.open(img2) as img2:\n # Calculate a difference image that is the difference between the two images.\n diff = ImageChops.difference(img1, img2)\n\n return sum(_unpack_image(diff.getdata())[1])",
"def matching(img1, img2):\n i1 = Image.open(img1)\n i2 = Image.open(img2)\n pairs = zip(i1.getdata(), i2.getdata())\n if len(i1.getbands()) == 1:\n dif = sum(abs(p1 - p2) for p1, p2 in pairs)\n else:\n dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))\n ncomponents = i1.size[0] * i1.size[1] * 3\n diff = (dif / 255.0 * 100) / ncomponents\n return 80 - diff",
"def compare_two_images():\n source_image = Image.open(sys.argv[2])\n target_image = Image.open(sys.argv[3])\n print(\"original image similarity:\")\n + str(image_similarity(source_image, target_image))\n source_feature_vector = calculate_feature_vector(source_image)\n target_feature_vector = calculate_feature_vector(target_image)\n s_matrix = similarity_matrix(source_feature_vector, target_feature_vector)\n row_indices, column_indices = dynamic_time_warp(s_matrix)\n warped_source = warp_image(source_image, row_indices)\n warped_target = warp_image(target_image, column_indices)\n comparison = comparison_image(warped_source, warped_target)\n print(\"warped image similarity: \")\n + str(image_similarity(warped_source, warped_target))\n print(\"warped image length: \" + str(len(row_indices)))\n print(\"comparison image saved to comparison.png\")\n comparison.save(\"comparison.png\")",
"def compare_images(self, reference_image, test_image, **kwargs):\n #print(\"Execute comparison\")\n #print('Resolution for image comparison is: {}'.format(self.DPI))\n\n reference_collection = []\n compare_collection = []\n detected_differences = []\n\n placeholder_file = kwargs.pop('placeholder_file', None)\n mask = kwargs.pop('mask', None)\n check_text_content = kwargs.pop('check_text_content', False)\n move_tolerance = kwargs.pop('move_tolerance', None)\n contains_barcodes = kwargs.pop('contains_barcodes', False)\n get_pdf_content = kwargs.pop('get_pdf_content', False)\n force_ocr = kwargs.pop('force_ocr', False)\n self.DPI = int(kwargs.pop('DPI', self.DPI))\n ignore_watermarks = kwargs.pop('ignore_watermarks', True)\n\n compare_options = {'get_pdf_content':get_pdf_content, 'ignore_watermarks':ignore_watermarks,'check_text_content':check_text_content,'contains_barcodes':contains_barcodes, 'force_ocr':force_ocr, 'move_tolerance':move_tolerance}\n\n if self.reference_run and (os.path.isfile(test_image) == True):\n shutil.copyfile(test_image, reference_image)\n print('A new reference file was saved: {}'.format(reference_image))\n return\n \n if (os.path.isfile(reference_image) is False):\n raise AssertionError('The reference file does not exist: {}'.format(reference_image))\n\n if (os.path.isfile(test_image) is False):\n raise AssertionError('The candidate file does not exist: {}'.format(test_image))\n\n with futures.ThreadPoolExecutor(max_workers=2) as parallel_executor:\n reference_future = parallel_executor.submit(CompareImage, reference_image, placeholder_file=placeholder_file, contains_barcodes=contains_barcodes, get_pdf_content=get_pdf_content, DPI=self.DPI, force_ocr=force_ocr, mask=mask)\n candidate_future = parallel_executor.submit(CompareImage, test_image, contains_barcodes=contains_barcodes, get_pdf_content=get_pdf_content, DPI=self.DPI)\n reference_compare_image = reference_future.result()\n candidate_compare_image = candidate_future.result()\n \n tic = time.perf_counter()\n if reference_compare_image.placeholders != []:\n candidate_compare_image.placeholders = reference_compare_image.placeholders\n with futures.ThreadPoolExecutor(max_workers=2) as parallel_executor:\n reference_collection_future = parallel_executor.submit(reference_compare_image.get_image_with_placeholders)\n compare_collection_future = parallel_executor.submit(candidate_compare_image.get_image_with_placeholders)\n reference_collection = reference_collection_future.result()\n compare_collection = compare_collection_future.result()\n else:\n reference_collection = reference_compare_image.opencv_images\n compare_collection = candidate_compare_image.opencv_images\n\n if len(reference_collection)!=len(compare_collection):\n print(\"Pages in reference file:{}. 
Pages in candidate file:{}\".format(len(reference_collection), len(compare_collection)))\n for i in range(len(reference_collection)):\n cv2.putText(reference_collection[i],self.REFERENCE_LABEL, self.BOTTOM_LEFT_CORNER_OF_TEXT, self.FONT, self.FONT_SCALE, self.FONT_COLOR, self.LINE_TYPE)\n self.add_screenshot_to_log(reference_collection[i], \"_reference_page_\" + str(i+1))\n for i in range(len(compare_collection)):\n cv2.putText(compare_collection[i],self.CANDIDATE_LABEL, self.BOTTOM_LEFT_CORNER_OF_TEXT, self.FONT, self.FONT_SCALE, self.FONT_COLOR, self.LINE_TYPE)\n self.add_screenshot_to_log(compare_collection[i], \"_candidate_page_\" + str(i+1))\n raise AssertionError('Reference File and Candidate File have different number of pages')\n \n with futures.ThreadPoolExecutor(max_workers=8) as parallel_executor:\n for i, (reference, candidate) in enumerate(zip(reference_collection, compare_collection)):\n if get_pdf_content:\n reference_pdf_content = reference_compare_image.pdf_content[i]\n candidate_pdf_content = candidate_compare_image.pdf_content[i]\n else:\n reference_pdf_content = None\n candidate_pdf_content = None\n parallel_executor.submit(self.check_for_differences, reference, candidate, i, detected_differences, compare_options, reference_pdf_content, candidate_pdf_content)\n #self.check_for_differences(reference, candidate, i, detected_differences, compare_options, reference_pdf_content, candidate_pdf_content)\n\n if reference_compare_image.barcodes!=[]:\n if reference_compare_image.barcodes!=candidate_compare_image.barcodes:\n detected_differences.append(True)\n print(\"The barcode content in images is different\")\n print(\"Reference image:\\n\", reference_compare_image.barcodes)\n print(\"Candidate image:\\n\", candidate_compare_image.barcodes)\n\n for difference in detected_differences:\n\n if (difference):\n print(\"The compared images are different\")\n raise AssertionError('The compared images are different.')\n\n print(\"The compared images are equal\")\n\n toc = time.perf_counter()\n print(f\"Visual Image comparison performed in {toc - tic:0.4f} seconds\")",
"def images_the_same(image1, image2):\n\n \"\"\"\n im1 = cv.imread(image1)\n im2 = cv.imread(image2)\n\n if im1.shape != im2.shape:\n return False\n\n difference = cv.subtract(im1, im2)\n b, g, r = cv.split(difference)\n\n if cv.countNonZero(b) == 0 and cv.countNonZero(g) == 0 and cv.countNonZero(r) == 0:\n return True\n return False\n \"\"\"\n im1 = cv.imread(image1)\n im2 = cv.imread(image2)\n\n if im1.shape != im2.shape:\n return False\n\n difference = cv.absdiff(im1, im2)\n d = (difference == 0).all()\n return d",
"def compare_images(self, reference_image, test_image, **kwargs):\n #print(\"Execute comparison\")\n #print('Resolution for image comparison is: {}'.format(self.DPI))\n\n reference_collection = []\n compare_collection = []\n detected_differences = []\n\n placeholder_file = kwargs.pop('placeholder_file', None)\n mask = kwargs.pop('mask', None)\n check_text_content = kwargs.pop('check_text_content', False)\n move_tolerance = kwargs.pop('move_tolerance', None)\n contains_barcodes = kwargs.pop('contains_barcodes', False)\n get_pdf_content = kwargs.pop('get_pdf_content', False)\n force_ocr = kwargs.pop('force_ocr', False)\n self.DPI = int(kwargs.pop('DPI', self.DPI))\n watermark_file = kwargs.pop('watermark_file', self.watermark_file)\n ignore_watermarks = os.getenv('IGNORE_WATERMARKS', False)\n pdf_rendering_engine = kwargs.pop('pdf_rendering_engine', self.pdf_rendering_engine)\n\n compare_options = {'get_pdf_content':get_pdf_content, 'ignore_watermarks':ignore_watermarks,'check_text_content':check_text_content,'contains_barcodes':contains_barcodes, 'force_ocr':force_ocr, 'move_tolerance':move_tolerance, 'watermark_file':watermark_file}\n\n if self.reference_run and (os.path.isfile(test_image) == True):\n shutil.copyfile(test_image, reference_image)\n print('A new reference file was saved: {}'.format(reference_image))\n return\n \n if (os.path.isfile(reference_image) is False):\n raise AssertionError('The reference file does not exist: {}'.format(reference_image))\n\n if (os.path.isfile(test_image) is False):\n raise AssertionError('The candidate file does not exist: {}'.format(test_image))\n\n with futures.ThreadPoolExecutor(max_workers=2) as parallel_executor:\n reference_future = parallel_executor.submit(CompareImage, reference_image, placeholder_file=placeholder_file, contains_barcodes=contains_barcodes, get_pdf_content=get_pdf_content, DPI=self.DPI, force_ocr=force_ocr, mask=mask, pdf_rendering_engine=pdf_rendering_engine)\n candidate_future = parallel_executor.submit(CompareImage, test_image, contains_barcodes=contains_barcodes, get_pdf_content=get_pdf_content, DPI=self.DPI, pdf_rendering_engine=pdf_rendering_engine)\n reference_compare_image = reference_future.result()\n candidate_compare_image = candidate_future.result()\n \n tic = time.perf_counter()\n if reference_compare_image.placeholders != []:\n candidate_compare_image.placeholders = reference_compare_image.placeholders\n with futures.ThreadPoolExecutor(max_workers=2) as parallel_executor:\n reference_collection_future = parallel_executor.submit(reference_compare_image.get_image_with_placeholders)\n compare_collection_future = parallel_executor.submit(candidate_compare_image.get_image_with_placeholders)\n reference_collection = reference_collection_future.result()\n compare_collection = compare_collection_future.result()\n else:\n reference_collection = reference_compare_image.opencv_images\n compare_collection = candidate_compare_image.opencv_images\n\n if len(reference_collection)!=len(compare_collection):\n print(\"Pages in reference file:{}. 
Pages in candidate file:{}\".format(len(reference_collection), len(compare_collection)))\n for i in range(len(reference_collection)):\n cv2.putText(reference_collection[i],self.REFERENCE_LABEL, self.BOTTOM_LEFT_CORNER_OF_TEXT, self.FONT, self.FONT_SCALE, self.FONT_COLOR, self.LINE_TYPE)\n self.add_screenshot_to_log(reference_collection[i], \"_reference_page_\" + str(i+1))\n for i in range(len(compare_collection)):\n cv2.putText(compare_collection[i],self.CANDIDATE_LABEL, self.BOTTOM_LEFT_CORNER_OF_TEXT, self.FONT, self.FONT_SCALE, self.FONT_COLOR, self.LINE_TYPE)\n self.add_screenshot_to_log(compare_collection[i], \"_candidate_page_\" + str(i+1))\n raise AssertionError('Reference File and Candidate File have different number of pages')\n \n check_difference_results = []\n with futures.ThreadPoolExecutor(max_workers=8) as parallel_executor:\n for i, (reference, candidate) in enumerate(zip(reference_collection, compare_collection)):\n if get_pdf_content:\n try:\n reference_pdf_content = reference_compare_image.mupdfdoc[i]\n candidate_pdf_content = candidate_compare_image.mupdfdoc[i]\n except:\n reference_pdf_content = reference_compare_image.mupdfdoc[0]\n candidate_pdf_content = reference_compare_image.mupdfdoc[0]\n else:\n reference_pdf_content = None\n candidate_pdf_content = None\n check_difference_results.append(parallel_executor.submit(self.check_for_differences, reference, candidate, i, detected_differences, compare_options, reference_pdf_content, candidate_pdf_content))\n for result in check_difference_results:\n if result.exception() is not None:\n raise result.exception()\n if reference_compare_image.barcodes!=[]:\n if reference_compare_image.barcodes!=candidate_compare_image.barcodes:\n detected_differences.append(True)\n print(\"The barcode content in images is different\")\n print(\"Reference image:\\n\", reference_compare_image.barcodes)\n print(\"Candidate image:\\n\", candidate_compare_image.barcodes)\n\n for difference in detected_differences:\n\n if (difference):\n print(\"The compared images are different\")\n raise AssertionError('The compared images are different.')\n\n print(\"The compared images are equal\")\n\n toc = time.perf_counter()\n print(f\"Visual Image comparison performed in {toc - tic:0.4f} seconds\")",
"def comparing_two_pictures(image_1, image_2):\n np_im1 = np.array(image_1)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n np_im2 = np.array(image_2)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n\n res_im = np_im1 - np_im2\n number_of_the_same = res_im.size - np.count_nonzero(res_im)\n return number_of_the_same / res_im.size",
"def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)",
"def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename",
"def compare_images(self, img1, img2):\n if self.debug:\n cv2.imshow('img1', img1)\n cv2.imshow('img2', img2)\n cv2.waitKey(5)\n time.sleep(2)\n\n # find the mean squared difference between the images\n # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((img1.astype('float') - img2.astype('float')) ** 2)\n err /= float(img1.shape[0] * img2.shape[1])\n\n # lower is more similar (better)\n return err",
"def compare(self, identifier, reference_image, target_url=None, ignoremask=None):\n\n response = EukalypseCompareResponse()\n response.identifier = identifier\n response.target_url = target_url\n response.reference_img = reference_image\n\n target_image = self.screenshot(identifier, target_url)\n response.target_img = target_image\n if not target_image:\n response.clean = False\n return response\n\n im1 = Image.open(target_image)\n im1 = im1.convert('RGB')\n target_size = im1.size\n\n ref_image = Image.open(reference_image)\n ref_size = ref_image.size\n\n if target_size[0] > ref_size[0]:\n im1 = im1.crop((0, 0, ref_size[0], target_size[1]))\n\n if target_size[1] > ref_size[1]:\n im1 = im1.crop((0, 0, target_size[0], ref_size[1]))\n\n im2 = Image.new(ref_image.mode, target_size, (0, 0, 0))\n im2 = im2.convert('RGB')\n try:\n im2.paste(ref_image, (0, 0, ref_size[0], ref_size[1]))\n except: # if the paste crashes, try without it. still better than nothing\n logger.warn(\"something did not scale well\")\n im2 = ref_image\n diff = ImageChops.difference(im2, im1)\n\n #if an ignoremask exist, multiply it with the difference. this makes\n #everything black in the diff which is black in the ignoremask\n if ignoremask:\n imignore_raw = Image.open(ignoremask)\n imignore_raw = imignore_raw.convert('RGB')\n ignore_size = imignore_raw.size\n diff_size = diff.size\n imignore = Image.new(imignore_raw.mode, diff_size, (0, 0, 0))\n try:\n imignore.paste(imignore_raw, (0, 0, ignore_size[0], ignore_size[1]))\n except: # if the paste crashes, try without it. still better than nothing\n imignore = imignore_raw\n diff = ImageChops.multiply(imignore, diff)\n colors = diff.getcolors(diff.size[0] * diff.size[1])\n\n #get differences. only notblack pixels show a difference\n black = 0\n notblack = 0\n for color in colors:\n if color[1] == (0, 0, 0, 0) or color[1] == (0, 0, 0):\n black += color[0]\n else:\n notblack += color[0]\n if notblack == 0:\n response.clean = True\n else:\n response.clean = False\n response.dirtiness = 100. * notblack / (notblack + black)\n\n diff_filename = os.path.join(\n self.output,\n \"%s-difference.jpg\" % identifier\n )\n diff.save(diff_filename, \"JPEG\", quality=100)\n response.difference_img = diff_filename\n\n im = Image.open(diff_filename)\n im = im.convert('RGB')\n r, g, b = im.split()\n r = r.point(lambda i: i * self.improve_factor)\n g = g.point(lambda i: i * self.improve_factor)\n b = b.point(lambda i: i * self.improve_factor)\n out = Image.merge('RGB', (r, g, b))\n diff_imp_filename = os.path.join(\n self.output,\n \"%s-difference-improved.jpg\" % identifier\n )\n out.save(diff_imp_filename, \"JPEG\", quality=100)\n\n response.difference_img_improved = diff_imp_filename\n return response",
"def compare_images(self, image1, image2):\n\t\tis_equal = False\n\t\tdistance = self.manhattan_distance(image1, image2)\n\t\t\n\t\t# UMBRAL ====> 2%\n\t\tif(distance <= 0.02):\n\t\t\tis_equal = True\n\t\t\t\n\t\treturn (is_equal, distance)",
"def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100",
"def step_1b_compare_to_ground_truth(img_1, is_w1):\n global file_id, relative_image_ground_truth_folder_path\n # load ground truth from disk\n ground_truth = cv2.imread(relative_image_ground_truth_folder_path + file_id + \"_binary.png\", -1)\n # convert ground truth to be single channel, to match img_1\n ground_truth = cv2.cvtColor(ground_truth, cv2.COLOR_BGR2GRAY)\n\n if ground_truth is not None and ground_truth.shape == img_1.shape:\n percentage_similarity = calculate_percentage_similarity(img_1, ground_truth)\n print(\"\\t{} {} - matches ground similarity {:.2f}%\".format(file_id, \"w1\" if is_w1 else \"w2\",\n percentage_similarity))\n else:\n print(\"\\tGround truth image for {} could not be loaded for comparison,\" +\n \"or does not match shape of img_1\".format(file_id))\n\n return",
"def IsImageEqual(testname, image_name):\r\n image1 = 'results/baseline/%s/%s/%s' % (options.conf, testname, image_name)\r\n image2 = r'results/current/%s/%s/Run 1/%s' % (options.conf, testname, image_name)\r\n return Md5ForFile(image1) == Md5ForFile(image2)",
"def comparison_image(source_image, target_image):\n width, height = source_image.size\n result = Image.new(source_image.mode, (width, height * 2))\n result.paste(source_image, (0, 0))\n result.paste(target_image, (0, height))\n return result",
"def IsImageEqual(testname, image_name):\n image1 = 'results/baseline/%s/%s/%s' % (options.conf, testname, image_name)\n image2 = r'results/current/%s/%s/Run 1/%s' % (options.conf, testname, image_name)\n return Md5ForFile(image1) == Md5ForFile(image2)",
"def most_similar_image():\n ref = images[0] # reference image\n result = np.linalg.norm(images[1:].astype(np.float) - ref.astype(np.float), axis=1)\n index = np.argmin(result)+1\n return index # 60"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
From a matrix of difference pixels (for each pixel, we have 0 if the pixel is the same, or nonzero if it is different), creates a list of the pixels which are different and a PNG image of the same size as the 'step' image, where each different pixel is coloured RED | def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones):
# complete diff "image" to the size of step image
diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1)
# ignore excluded pixels
diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height)
# draw mask of differences
mask = numpy.ones((image_height, image_width, 1), dtype=uint8)
diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8)
cnd = diff[:,:] > 0 # says which pixels are non-zeros
diff_image[cnd] = mask[cnd]
diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # print red pixels
        diff_pixels = numpy.transpose(diff.nonzero())
return diff_pixels, diff_image | [
"def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]",
"def shift_intensiveness(color, diff):\n new_color = []\n for v in color: # iterate r,g,b\n mod_v = v + diff\n if mod_v < 0:\n mod_v = 0\n elif mod_v > 255:\n mod_v = 255\n new_color.append(mod_v)\n\n return new_color",
"def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false",
"def remove_colors(images):\n images = images[:, :, :, :, 0]\n return images",
"def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )",
"def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)",
"def image_gradient(self, images):\n images = images.permute(0, 4, 1, 2, 3)\n _l = images\n r = F.pad(images, [0, 1, 0, 0])[:, :, :, :, 1:]\n t = images\n b = F.pad(images, [0, 0, 0, 1])[:, :, :, 1:, :]\n dx, dy = torch.abs(r - _l), torch.abs(b - t)\n dx[:, :, :, -1] = 0\n dy[:, :, -1, :] = 0\n return dx.permute(0, 2, 3, 4, 1), dy.permute(0, 2, 3, 4, 1)",
"def generate_2d_pull_back(self, f, xx, yy):\n\n nodes = np.arange(len(f))\n pull_back = []\n\n for i in range(len(xx)):\n x_and = np.logical_and(xx[i][0] <= f[:, 0], yy[i][0] >= f[:, 0])\n y_and = np.logical_and(xx[i][1] <= f[:, 1], yy[i][1] >= f[:, 1])\n preimage = nodes[np.logical_and(x_and, y_and)]\n\n # Skip empty preimages\n if not len(preimage):\n continue\n\n # Set colour to average lens value\n color = np.mean(f[preimage], axis=0)\n\n pull_back.append((color, preimage))\n return pull_back",
"def negative(img):\r\n \r\n for pixel in img:\r\n x, y, col = pixel\r\n r, g, b = col\r\n \r\n rv = 255 - r\r\n gv = 255 - g\r\n bv = 255 - b\r\n \r\n col = create_color (rv, gv, bv)\r\n set_color (img, x, y, col)",
"def diff_images(fname1, fname2, output=\"diff.png\"):\n import numpy as np\n import matplotlib.pylab as plt\n conversion_cmd = \"gs -q -sDEVICE=pngalpha -o {outname} -sDEVICE=pngalpha -dUseCropBox -r{density} {inname}\"\n # conversion_cmd = \"convert -density {density} -trim {inname} -fuzz 1% {outname}\"\n new_fnames = []\n for fname in [fname1, fname2]:\n if fname.rsplit(\".\",1)[-1] == \"pdf\":\n fname_in = fname\n fname_out = fname.replace(\".pdf\",\".png\")\n os.system(conversion_cmd.format(density=75, inname=fname_in, outname=fname_out))\n new_fnames.append(fname_out)\n if len(new_fnames) == 2: fname1, fname2 = new_fnames\n # img1 = plt.imread(fname1)[::2,::2] # downsample by factor of 2\n # img2 = plt.imread(fname2)[::2,::2]\n img1 = plt.imread(fname1)\n img2 = plt.imread(fname2)\n\n # Calculate the absolute difference on each channel separately\n error_r = np.fabs(np.subtract(img2[:,:,0], img1[:,:,0]))\n error_g = np.fabs(np.subtract(img2[:,:,1], img1[:,:,1]))\n error_b = np.fabs(np.subtract(img2[:,:,2], img1[:,:,2]))\n\n lum_img = np.sqrt(error_r*error_r + error_g+error_g + error_b*error_b)/np.sqrt(3)\n\n # # # Calculate the maximum error for each pixel\n # lum_img = np.maximum(np.maximum(error_r, error_g), error_b)\n\n # plt.set_cmap('Spectral')\n plt.set_cmap('gray')\n plt.imsave(output,-lum_img)",
"def get_cell_colors(diagram, img_width, img_height):\n\n cell_colors = []\n for i in range(len(diagram)):\n row = diagram[i]\n # if the row is a row of numbers (not for filling), turn it grey\n if i < (len(diagram) - img_height):\n cell_colors.append(len(row) * [grey])\n else:\n row_colors = []\n row_colors += (len(row) - img_width) * [grey]\n row_colors += img_width * [white]\n cell_colors.append(row_colors)\n return cell_colors",
"def compare(self, pixels):\n avg_color = self.average_square(pixels)\n\n r = avg_color[0]\n g = avg_color[1]\n b = avg_color[2]\n\n # return [r, g, b]\n\n min_dist = 100000000\n min_color = 0\n\n \"\"\" perform euclidian distance analysis between the determined avg color\n of the pixel set and the list of lego colors. Find closest color.\"\"\"\n for i in range(len(self.legolist)):\n color = self.legolist[i]\n color_r = color[0]\n color_g = color[1]\n color_b = color[2]\n\n dist = math.sqrt(((r-color_r)*1)**2 + ((g - color_g)*1)**2 +\n ((b - color_b)*1)**2)\n\n if dist < min_dist:\n min_dist = dist\n min_color = i\n\n lego_color = self.legolist[min_color]\n lego = self.filenames[min_color]\n self.lego_nums.append(lego)\n\n r = 255 - int(lego_color[0])\n g = 255 - int(lego_color[1])\n b = 255 - int(lego_color[2])\n\n # print()\n # print(r, g, b)\n return [r, g, b]",
"def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg",
"def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))",
"def remove_pixel_failures(expected_results):\n expected_results = expected_results.copy()\n if IMAGE in expected_results:\n expected_results.remove(IMAGE)\n expected_results.add(PASS)\n if IMAGE_PLUS_TEXT in expected_results:\n expected_results.remove(IMAGE_PLUS_TEXT)\n expected_results.add(TEXT)\n return expected_results",
"def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images",
"def test_get_color_map_list():\n\n image = Image.open(os.path.join(TEST_FOLDER, \"testmap.png\"))\n actual_list = utilities.get_color_map_list(image)\n\n # a representation of the images' colors with RGB tuples\n expected_list = [\n [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)], # row of black\n [(0, 255, 0), (0, 255, 0), (0, 255, 0), (0, 255, 0)], # row of green\n [(255, 0, 0), (255, 0, 0), (255, 0, 0), (255, 0, 0)], # row of red\n [(255, 255, 255), (255, 255, 255),\n (255, 255, 255), (255, 255, 255)] # row of white\n ]\n\n assert actual_list == expected_list",
"def check_differ(diff):\n for row in np.transpose(diff, (1, 0, 2)):\n for px_idx in range(len(row) - MAX_PX_ROW_DIFF):\n if ((row[px_idx: px_idx + MAX_PX_ROW_DIFF] == ((255, 0, 0),) * MAX_PX_ROW_DIFF).all()\n or (row[px_idx: px_idx + MAX_PX_ROW_DIFF] == ((0, 0, 255),) * MAX_PX_ROW_DIFF).all()):\n return True\n return False",
"def rgb_constraint(self, image_set, gap_set, criteria='r', \r\n let=(255, 255, 255, 0)):\r\n new_image_set = []\r\n order_set = []\r\n k = 0\r\n for image in image_set:\r\n image_rgba = image.convert(\"RGBA\")\r\n image_rgb = image_rgba.convert(\"RGB\")\r\n data = image_rgb.getdata()\r\n newData = []\r\n order = []\r\n gap = gap_set[k]\r\n for item in data:\r\n if criteria == 'r':\r\n if item[0] == 255:\r\n if item[1] % gap[0] == 0 and item[2] % gap[1] == 0:\r\n part1 = item[1] // gap[0]\r\n part2 = item[2] // gap[1]\r\n num = (255 // gap[1] + 1) * part1 + part2\r\n if num in order:\r\n newData.append(let)\r\n else:\r\n order.append(num)\r\n newData.append(item)\r\n else:\r\n newData.append(let) \r\n else:\r\n newData.append(let)\r\n elif criteria == 'g':\r\n if item[1] == 255:\r\n if item[0] % gap[0] == 0 and item[2] % gap[1] == 0:\r\n part1 = item[0] // gap[0]\r\n part2 = item[2] // gap[1]\r\n num = (255 // gap[1] + 1) * part1 + part2\r\n if num in order:\r\n newData.append(let)\r\n else: \r\n order.append(num)\r\n newData.append(item) \r\n else:\r\n newData.append(let) \r\n else:\r\n newData.append(let)\r\n elif criteria == 'b':\r\n if item[2] == 255:\r\n if item[0] % gap[0] == 0 and item[1] % gap[1] == 0:\r\n part1 = item[0] // gap[0]\r\n part2 = item[1] // gap[1]\r\n num = (255 // gap[1] + 1) * part1 + part2\r\n if num in order:\r\n newData.append(let)\r\n else: \r\n order.append(num)\r\n newData.append(item) \r\n else:\r\n newData.append(let) \r\n else:\r\n newData.append(let)\r\n k += 1\r\n\r\n image_rgb.putdata(newData)\r\n new_image_set.append(image_rgb)\r\n order_set.append(order)\r\n return new_image_set, order_set"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if two things have the same type. | def same_type(one, two):
return isinstance(one, type(two)) | [
"def is_same(type1, type2):\n nake_type1 = remove_declarated(type1)\n nake_type2 = remove_declarated(type2)\n return nake_type1 == nake_type2",
"def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1, type(variable2))",
"def _is_equal_same_type(self, other):\n self_id = self.id\n other_id = other.id\n if self_id and other_id:\n return (self_id == other_id)\n \n # banner_id\n if self.banner_id != other.banner_id:\n return False\n \n # cover_sticker_id\n if self.cover_sticker_id != other.cover_sticker_id:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n # sku_id\n if self.sku_id != other.sku_id:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n return True",
"def poem_is_of_same_type(cls, other_poem):\n return True",
"def do_types_match(type_a: Type[Any], type_b: Type[Any]) -> bool:\n # TODO [ENG-158]: Check more complicated cases where type_a can be a sub-type\n # of type_b\n return type_a == type_b",
"def _values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n return type(val1) is type(val2)",
"def _assert_input_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, t in type1.fields.iteritems():\n self.assertEqual(t.type_str(), type2.fields[name].type_str())",
"def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True",
"def __eq__(self, other):\n try:\n return other.type == self.type\n except AttributeError:\n return False",
"def is_same_type_as_other(cls, other):\r\n return isinstance(other, cls)",
"def set_is_same_type(*args):\n if all(isinstance(item, type(args[0])) for item in args):\n return True\n else:\n return False",
"def _values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n if not type(val1) is type(val2):\n return False\n\n # Numpy arrays must agree in data type and shape\n if type(val1) is np.array:\n if not val1.dtype is val2.dtype:\n return False\n\n if not np.shape(val1) == np.shape(val2):\n return False\n\n # For tuples we now from earlier checks that the data is homogeneous.\n # Thus, only the type of the first item and the length must agree.\n if type(val1) is tuple:\n return (type(val1[0]) is type(val2[0])) and (len(val1) == len(val2))\n\n return True",
"def _assert_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)\n self.assertEqual(type1.class_descriptor, type2.class_descriptor)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, field1 in type1.fields.iteritems():\n field2 = type2.fields[name]\n self._assert_fields_equal(field1, field2)",
"def _assert_union_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)",
"def test_types_are_equal(self):\n self.assertEqual(True, comparator.types_are_equal(None, None))\n self.assertEqual(True, comparator.types_are_equal(True, True))\n self.assertEqual(True, comparator.types_are_equal(True, False))\n self.assertEqual(True, comparator.types_are_equal(int(), int()))\n self.assertEqual(False, comparator.types_are_equal(int(), str()))\n self.assertEqual(True, comparator.types_are_equal(str(), str()))\n self.assertEqual(True, comparator.types_are_equal(list(), list()))\n self.assertEqual(True, comparator.types_are_equal(dict(), dict()))",
"def check_for_identical_dtypes(df_1: pd.DataFrame, df_2: pd.DataFrame) -> bool:\n return list(df_1.dtypes.values) == list(df_2.dtypes.values)",
"def same_astype(a: str | type, b: str | type):\n return pd.api.types.is_dtype_equal(a, b) or (\n is_object_string_dtype(a) and is_object_string_dtype(b)\n )",
"def are_equal(self,x,y):\n if ( ( (type(x) in [int,float] ) and (type(y) in [int,float]) and x==y) or ( (type(x) not in [int,float] ) or (type(y) not in [int,float]) ) ):\n return(True)\n else:\n return(False)",
"def test_equal_on_type_mismatch(self):\n a = payloads.GetRequestPayload()\n b = 'invalid'\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AirInstance constructor. name: The name of the instance. input: An object with the YAML description of the IR instance. transmit_handler: A function to be called to transmit pkts. TODO: Add support to allow the specification of the MetaIR instance. | def __init__(self, name, input, transmit_handler):
local_dir = os.path.dirname(os.path.abspath(__file__))
MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml'))
self.transmit_handler = transmit_handler
self.name = name
self.tm_started = False
self.disabled = True
# Add the content to the MetaIR instance
self.add_content(input)
self.port_count = self.meta_ir_object_map["layout"]["port_count"]
        # Create the AIR objects: parsers, actions, tables, pipelines and TMs
self.air_value_set = {}
self.air_value_map = {}
self.air_parser = {}
self.air_action = {}
self.air_table = {}
self.air_pipeline = {}
self.air_traffic_manager = {}
self.processors = {}
self.transmit_processor = TransmitProcessor(transmit_handler)
for name, val in self.value_set.items():
self.air_value_set[name] = [] # Just use a list
for name, val in self.value_map.items():
self.air_value_map[name] = {} # Just use a dict
for name, val in self.parser.items():
self.air_parser[name] = Parser(name, val, self.parse_state,
self.header, self.value_set)
self.processors[name] = self.air_parser[name]
for name, val in self.action.items():
self.air_action[name] = Action(name, val)
for name, val in self.table.items():
self.air_table[name] = Table(name, val, self.air_action)
for name, val in self.control_flow.items():
self.air_pipeline[name] = Pipeline(name, val, self.air_table,
self.air_action)
self.processors[name] = self.air_pipeline[name]
for name, val in self.traffic_manager.items():
self.air_traffic_manager[name] = SimpleQueueManager(name, val,
self.port_count)
self.processors[name] = self.air_traffic_manager[name]
# Plumb the layout
layout = self.meta_ir_object_map["layout"]
meta_ir_assert(layout["format"] == "list", "Unsupported layout: not a list")
layout_name_list = layout["implementation"]
meta_ir_assert(isinstance(layout_name_list, list),
"Layout implementation is not a list")
proc_count = len(layout_name_list)
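        # Chain the processors in the order given by the layout; the last one feeds the transmit processor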
for idx, processor_name in enumerate(layout_name_list):
cur_proc = self.processors[processor_name]
if idx == 0:
logging.debug("Layout: First processor %s" % cur_proc.name)
self.first_processor = cur_proc
if idx < proc_count - 1:
next_proc = self.processors[layout_name_list[idx + 1]]
cur_proc.next_processor = next_proc
else: # Last one connects to transmit processor
cur_proc.next_processor = self.transmit_processor
logging.debug("Layout %s to %s" % (cur_proc.name,
cur_proc.next_processor.name))
# Grab table initialization object if present
self.table_initialization = {}
ext_objs = self.external_object_map
if "table_initialization" in ext_objs.keys():
self.table_initialization = ext_objs["table_initialization"] | [
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n associate_public_ip_address: Optional[pulumi.Input[bool]] = None,\n ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LaunchConfigurationEbsBlockDeviceArgs']]]]] = None,\n ebs_optimized: Optional[pulumi.Input[bool]] = None,\n enable_monitoring: Optional[pulumi.Input[bool]] = None,\n ephemeral_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LaunchConfigurationEphemeralBlockDeviceArgs']]]]] = None,\n iam_instance_profile: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n key_name: Optional[pulumi.Input[str]] = None,\n metadata_options: Optional[pulumi.Input[pulumi.InputType['LaunchConfigurationMetadataOptionsArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n placement_tenancy: Optional[pulumi.Input[str]] = None,\n root_block_device: Optional[pulumi.Input[pulumi.InputType['LaunchConfigurationRootBlockDeviceArgs']]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n spot_price: Optional[pulumi.Input[str]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n user_data_base64: Optional[pulumi.Input[str]] = None,\n vpc_classic_link_id: Optional[pulumi.Input[str]] = None,\n vpc_classic_link_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...",
"def __init__(self, name, layer):\n self.name = name\n self.layer = layer\n self.kind = \"Abstract\"\n self.slot = None",
"def add_instance(self, instancename, instance, **connection):\n\n ## Propagate parent toolkit to instance\n instance.toolkit = self.toolkit\n\n if instancename in self.elements:\n del self[instancename]\n\n self.elements[instancename] = instance\n\n ## Add local nodes and branches from new instance\n for node in instance.non_terminal_nodes(instancename):\n self.append_node(node)\n\n ## Create term_node_map entry for the new instance\n term_node_map = self.term_node_map[instancename] = {}\n\n ## Connect terminal to node\n for terminal, node in connection.items():\n ## Create a node object if it is not already\n if not isinstance(node, Node):\n node = Node(str(node))\n \n ## Add node\n self.append_node(node)\n\n ## Update terminal-node map\n term_node_map[terminal] = node \n\n ## Add branches\n newbranches = self._instance_branches(instance, instancename)\n self.append_branches(*newbranches)\n\n ## Update circuit node - instance map\n self.update_node_map()\n\n ## update iparv\n instance.update_iparv(self.iparv, ignore_errors=True)",
"def __init__(self):\n print \"ABC - Deployer.__init__()\"",
"def __init__(self, args, motor_entry):\n pass",
"def __init__(self, name=None, acronym=None, agency=None, payloadMass=None,\n payloadVolume=None, dryMass=None, propellantMass=None,\n specificImpulse=None, massToLEO=None, reliability=None, cost=None,\n meanTimeBetweenLaunches=None, _id=None):\n self.name = name\n self.acronym = acronym if acronym else name\n self.agency = agency\n self.payloadMass = payloadMass\n self.payloadVolume = payloadVolume\n self.dryMass = dryMass\n self.propellantMass = propellantMass\n self.specificImpulse = specificImpulse\n self.massToLEO = massToLEO\n self.reliability = reliability\n self.cost = cost\n if isinstance(meanTimeBetweenLaunches, Number):\n self.meanTimeBetweenLaunches = isodate.duration_isoformat(datetime.timedelta(days=meanTimeBetweenLaunches))\n else:\n self.meanTimeBetweenLaunches = meanTimeBetweenLaunches\n super(LaunchVehicle,self).__init__(_id, \"LaunchVehicle\")",
"def __init__(self, name=\"\"):\n # Name of the SMT solver that this object represents.\n self.name = name",
"def __init__(self, type_equipment, firm, model):\r\n self.type_equipment = type_equipment\r\n self.firm = firm\r\n self.model = model",
"def __init__(self, name, can_perform, logic, required):\n\n self.name = name\n self.can_perform = can_perform\n self.logic = logic\n self.required = required",
"def __init__(self):\n super(Inverter, self).__init__()\n\n self.add_param('inverter_efficiency', 1.0, desc='power out / power in')\n self.add_param('output_voltage',\n 120.0,\n desc='amplitude of AC output voltage',\n units='V')\n self.add_param('output_current',\n 2.0,\n desc='amplitude of AC output current',\n units='A')\n self.add_param('output_frequency',\n 60.0,\n desc='frequency of AC output',\n units='Hz')\n self.add_param('input_voltage',\n 100.,\n desc='amplitude of DC input voltage',\n units='V')\n\n self.add_output('input_current',\n 0.48,\n desc='amplitude of DC input current',\n units='A')\n self.add_output('input_power', 10.0, units='W')",
"def __init__(self,\n name,\n account_type,\n version='v1'):\n super().__init__()\n self._name = name\n self._account_type = account_type\n self._version = version\n self._append_required_payload_params()\n self._append_optional_payload_params()",
"def __init__(self, app: Flask = None):\n\n if XBee._instance is None and app is not None:\n self.extract_parameters(app)\n self.logger.info(\"Creando la antena\")\n # self.stack = stack\n self.stack = StackSwitcher.get_instance(app)\n \"\"\"De la lista de posibles puertos a la que pueda estár conectada la antena\n nos conectamos a la primera y lo notificamos\"\"\"\n self.logger.info(\"Puertos encontrados: \" + str(self.port))\n self.logger.info(\"Frecuencia de trabajo: \" + self.baudrate)\n if len(self.port) > 0:\n for port in self.port:\n super().__init__(port, self.baudrate)\n else:\n self.logger.warning(\"No se ha informado de ningún puerto disponible para el XBee\")\n else:\n raise XBeeInstanceException(\"Esta clase ya está creada, obtenga una instancia para su uso\")\n\n # Si no se ha creado nunca ningun objeto del tipo XBee y se han informado los parámetros necesarios\n # XBee.__instance = self",
"def __init__(self, name, price, publisher):\n\n\t\t# passes self which means itself. only defines name, price, and publisher\n\t\tself.name = name\n\t\tself.price = price\n\t\tself.publisher = publisher\n\t\t# any of these variables are available with the instances below.\n\t\t# called attributes",
"def __init__(self, vehicle, steer_value, name=\"Steering\"):\n super(SteerVehicle, self).__init__(name)\n self.logger.debug(\"%s.__init__()\" % (self.__class__.__name__))\n self._control = carla.VehicleControl()\n self._vehicle = vehicle\n self._steer_value = steer_value",
"def __init__(__self__, *,\n exemption_category: pulumi.Input[str],\n management_group_id: pulumi.Input[str],\n policy_assignment_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n expires_on: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_definition_reference_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"exemption_category\", exemption_category)\n pulumi.set(__self__, \"management_group_id\", management_group_id)\n pulumi.set(__self__, \"policy_assignment_id\", policy_assignment_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if display_name is not None:\n pulumi.set(__self__, \"display_name\", display_name)\n if expires_on is not None:\n pulumi.set(__self__, \"expires_on\", expires_on)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if policy_definition_reference_ids is not None:\n pulumi.set(__self__, \"policy_definition_reference_ids\", policy_definition_reference_ids)",
"def __init__(__self__, *,\n owner: pulumi.Input[str],\n accept_language: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n product_type: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['CloudFormationProductProvisioningArtifactPropertiesArgs']]]] = None,\n replace_provisioning_artifacts: Optional[pulumi.Input[bool]] = None,\n source_connection: Optional[pulumi.Input['CloudFormationProductSourceConnectionArgs']] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['CloudFormationProductTagArgs']]]] = None):\n pulumi.set(__self__, \"owner\", owner)\n if accept_language is not None:\n pulumi.set(__self__, \"accept_language\", accept_language)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if distributor is not None:\n pulumi.set(__self__, \"distributor\", distributor)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if product_type is not None:\n pulumi.set(__self__, \"product_type\", product_type)\n if provisioning_artifact_parameters is not None:\n pulumi.set(__self__, \"provisioning_artifact_parameters\", provisioning_artifact_parameters)\n if replace_provisioning_artifacts is not None:\n pulumi.set(__self__, \"replace_provisioning_artifacts\", replace_provisioning_artifacts)\n if source_connection is not None:\n pulumi.set(__self__, \"source_connection\", source_connection)\n if support_description is not None:\n pulumi.set(__self__, \"support_description\", support_description)\n if support_email is not None:\n pulumi.set(__self__, \"support_email\", support_email)\n if support_url is not None:\n pulumi.set(__self__, \"support_url\", support_url)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[int]] = None,\n actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ObfuscationRuleActionArgs']]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n filter: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(self, name=None, needsRunInfo=False, returnInputParameter=False):\n super().__init__()\n self.name = name # name of entity, e.g. Sampler\n self.needsRunInfo = needsRunInfo # whether entity needs run info\n self.returnInputParameter = returnInputParameter # use xml or inputParams\n self._registeredTypes = {} # registered types for this entity\n self._pluginFactory = PluginManager # plugin factory, if any; provided by Simulation",
"def __init__(self):\n self.weapon = Weapon()\n self.armor = Armor()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process any table initialization spec from the IR description. The IR specification may provide a set of table initialization operations in a "table_initialization" object. This takes the form of a sequence of table entry specifications. | def process_table_init(self):
logging.debug("Processing table initialization, %d entries",
len(self.table_initialization))
for init_entry in self.table_initialization:
for table_name, entry_desc in init_entry.items():
self.air_table[table_name].add_entry(
table_entry.description_to_entry(entry_desc)) | [
"def initial_table(table_name, metadata, line):\r\n table = Table(table_name, metadata,\r\n Column('tuning_id', Integer, primary_key=True),\r\n Column('_round', Integer, primary_key=True),\r\n Column('_cost', VARCHAR(255), nullable=False)\r\n )\r\n init_key = '(tuning_id, _round, _cost'\r\n init_val = '(:tuning_id, :_round, :_cost'\r\n pairs = {'tuning_id': -1, '_round': -1, '_cost': 'cost'}\r\n params = line.split('|')[-2:]\r\n if len(params) != 2:\r\n return None, '', '', ''\r\n for param in params[1].split(','):\r\n val = param.split('=')[0]\r\n curr_key = '_' + re.sub(r'[^\\w]', '_', val.lower())\r\n init_key += ', ' + curr_key\r\n init_val += ', :' + curr_key\r\n pairs[curr_key] = val\r\n table.append_column(Column(curr_key, VARCHAR(255), nullable=False))\r\n for evals in params[0].split(','):\r\n val = evals.split('=')[0]\r\n curr_key = '_evaluation_' + re.sub(r'[^\\w]', '_', val.lower())\r\n init_key += ', ' + curr_key\r\n init_val += ', :' + curr_key\r\n pairs[curr_key] = 'evaluation-' + val\r\n table.append_column(Column(curr_key, VARCHAR(255), nullable=False))\r\n if len(params[0].split(',')) > 1:\r\n init_key += ', _total_evaluation'\r\n init_val += ', :_total_evaluation'\r\n pairs['_total_evaluation'] = 'Total-evaluation'\r\n table.append_column(Column('_total_evaluation', VARCHAR(255), nullable=False))\r\n init_key += ')'\r\n init_val += ')'\r\n return table, init_key, init_val, pairs",
"def test_init_tbl() -> bool:\n\tprint_test_header(\"Initialize table tests\")\n\t## Test information\n\tkeys_results = (\n\t\t(\n\t\t\t['info'], \n\t\t\t{'info': [], 'records': 0, 'keys': ['info']}\n\t\t),\n\t\t(\n\t\t\t['test', 'info'],\n\t\t\t{'test': [], 'info': [], 'records': 0, 'keys': ['test', 'info']}\n\t\t),\n\t\t(\n\t\t\t['a', 'b', 'c', 'd'],\n\t\t\t{'a': [], 'b': [], 'c': [], 'd': [], 'records': 0, 'keys': ['a', 'b', 'c', 'd']}\n\t\t),\n\t\t(\n\t\t\t[],\n\t\t\t{'records': 0, 'keys': []}\n\t\t)\n\t)\n\t## Run tests\n\tfor tbl_key, tbl_attrs in keys_results:\n\t\ttest = TableInfo(tbl_key)\n\n\t\tfor key, value in tbl_attrs.items():\n\t\t\ttry:\n\t\t\t\tassert getattr(test, key) == value\n\t\t\texcept:\n\t\t\t\tprint( getattr(test, key), value)\n\t\t\t\traise Exception()",
"def table_entries1(hdl):\n\n hdl.do_table_set_default(\"set_output_port set_egress_spec_1\")\n hdl.do_table_set_default(\"debug_ipv4_hdr_stack1 set_debug_fld1\")\n hdl.do_table_set_default(\"clear_stack_valid_vector1 clear_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_0_of_stack_valid_vector1 set_bit_0_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_1_of_stack_valid_vector1 set_bit_1_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_2_of_stack_valid_vector1 set_bit_2_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_3_of_stack_valid_vector1 set_bit_3_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"debug_ipv4_hdr_stack2 set_debug_fld2\")\n hdl.do_table_set_default(\"clear_stack_valid_vector2 clear_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_0_of_stack_valid_vector2 set_bit_0_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_1_of_stack_valid_vector2 set_bit_1_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_2_of_stack_valid_vector2 set_bit_2_of_stack_valid_vector_action\")\n hdl.do_table_set_default(\"set_bit_3_of_stack_valid_vector2 set_bit_3_of_stack_valid_vector_action\")",
"def initialize_table(table, exp_type, db):\n# rows = run_query(\"\"\"select distinct instance_id, best_known_sol from \"\"\"\n# \"\"\"{}_instances;\"\"\".format(exp_type))\n rows = run_query(\"\"\"select distinct instance_id from \"\"\"\n \"\"\"{}_instances;\"\"\".format(exp_type))\n for row in rows:\n instance = row[0]\n table[instance] = {}\n table[instance]['name'] = instance\n table[instance]['num_runs_mv'] = 0\n table[instance]['num_runs_ms'] = 0\n table[instance]['num_iters_mv'] = infinity\n table[instance]['num_iters_ms'] = infinity\n table[instance]['best_sols_mv'] = []\n table[instance]['best_sols_ms'] = []\n table[instance]['rtimes_mv'] = []\n table[instance]['rtimes_ms'] = []\n table[instance]['wtimes_mv'] = []\n table[instance]['wtimes_ms'] = []\n table[instance]['hypervols_mv'] = []\n table[instance]['hypervols_ms'] = []",
"def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables",
"def test_init(self):\n ir = IntracellularRecordingsTable()\n sw = SimultaneousRecordingsTable(intracellular_recordings_table=ir)\n sws = SequentialRecordingsTable(simultaneous_recordings_table=sw)\n repetitions = RepetitionsTable(sequential_recordings_table=sws)\n ret = ExperimentalConditionsTable(repetitions_table=repetitions)\n self.assertIs(ret.repetitions.table, repetitions)\n self.assertEqual(ret.name, 'experimental_conditions')",
"def _InitTables(self):\n \n self.config_table = sa.Table('config', self.metadata,\n sa.Column('name', sa.String(50), primary_key=True),\n sa.Column('url', sa.String, nullable=False),\n sa.Column('type', sa.String(20), nullable=False));\n #fetchall releases any locks\n results = self.config_table.select().execute().fetchall()\n for x in results:\n #Only process this entry if it is basic\n if x['type'] == \"banlist\":\n #Adds the table as a member of this object. The name of the variable\n #is the name of the table, with _table appended to it. \n table = sa.Table(x['name'], self.metadata,\n sa.Column('id', sa.Integer, autoincrement=True, primary_key=True),\n sa.Column('lname', sa.String(50), index=True),\n sa.Column('name', sa.String(50)),\n sa.Column('reason', sa.String, nullable=False),\n sa.Column('source', sa.String(50)),\n sa.Column('deleted', sa.Integer));\n table.create(checkfirst=True)\n setattr(self, \"%s_table\" % (x['name']), table)",
"def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))",
"def initializeTable(opt):\n\n h5file = open_file(opt.filename, mode=\"w\", title=opt.title)\n group = h5file.create_group(\"/\", opt.groupName, opt.groupDesc)\n\n rtable = h5file.create_table(group, \"repeaters\", Repeaters(opt),\n \"Repeater Catalog\")\n rtable.attrs.scnl = [opt.station, opt.channel, opt.network, opt.location]\n rtable.attrs.samprate = opt.samprate\n rtable.attrs.windowLength = opt.winlen\n rtable.attrs.ptrig = opt.ptrig\n rtable.attrs.atrig = opt.atrig\n rtable.attrs.fmin = opt.fmin\n rtable.attrs.fmax = opt.fmax\n rtable.attrs.previd = 0\n rtable.attrs.ptime = 0\n rtable.flush()\n \n otable = h5file.create_table(group, \"orphans\", Orphans(opt),\n \"Orphan Catalog\")\n otable.flush()\n \n ttable = h5file.create_table(group, \"triggers\", Triggers(opt), \"Trigger Catalog\")\n ttable.flush()\n \n jtable = h5file.create_table(group, \"junk\", Junk(opt), \"Junk Catalog\")\n jtable.flush()\n \n dtable = h5file.create_table(group, \"deleted\", Deleted(opt),\n \"Manually Deleted Events\")\n dtable.flush()\n\n ctable = h5file.create_table(group, \"correlation\", Correlation(opt),\n \"Correlation Matrix\")\n ctable.flush()\n \n ftable = h5file.create_table(group, \"families\", Families(opt), \"Families Table\")\n ftable.attrs.nClust = 0\n ftable.flush()\n\n h5file.close()",
"def init_line_list():\n # Get str lengths from defs\n len_line = defs.str_len()['ion']\n len_src = defs.str_len()['Source']\n # Load sources to check\n sources = arcl_io.load_source_table()\n src_files = sources['File'].data\n if len(src_files[0]) > len_src:\n raise ValueError(\"Source filename now exceeds table. Should fix source name\")\n dummy_src = str('#')*len_src\n # Arc Line name\n dummy_line = str('#')*len_line\n #\n\n # Dict for Table\n idict = OrderedDict()\n idict['ion'] = dummy_line\n idict['wave'] = 0.\n idict['NIST'] = 0\n idict['Instr'] = 0 # Flag for instrument\n idict['amplitude'] = 0\n idict['Source'] = dummy_src\n\n # Table\n tkeys = idict.keys()\n lst = [[idict[tkey]] for tkey in tkeys]\n init_tbl = Table(lst, names=tkeys)\n\n # Return\n return init_tbl",
"def init_tables():\n # drop_table_m_candidates()\n # drop_table_m_qiita_users()\n create_table_m_candidates()\n create_table_m_qiita_users()",
"def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n tree.insert('', 'end', text=counter, values=line)\n counter += 1",
"def finalize_tables(self):\n self.attrbuilder.finalize(self.ext_type)\n self.vtabbuilder.finalize(self.ext_type)",
"def buildConverters(tableSpec, tableNamespace):\n converters = []\n convertersByName = {}\n for tp, name, repeat, aux, descr in tableSpec:\n tableName = name\n if name.startswith(\"ValueFormat\"):\n assert tp == \"uint16\"\n converterClass = ValueFormat\n elif name.endswith(\"Count\") or name in (\"StructLength\", \"MorphType\"):\n converterClass = {\n \"uint8\": ComputedUInt8,\n \"uint16\": ComputedUShort,\n \"uint32\": ComputedULong,\n }[tp]\n elif name == \"SubTable\":\n converterClass = SubTable\n elif name == \"ExtSubTable\":\n converterClass = ExtSubTable\n elif name == \"SubStruct\":\n converterClass = SubStruct\n elif name == \"FeatureParams\":\n converterClass = FeatureParams\n elif name in (\"CIDGlyphMapping\", \"GlyphCIDMapping\"):\n converterClass = StructWithLength\n else:\n if not tp in converterMapping and \"(\" not in tp:\n tableName = tp\n converterClass = Struct\n else:\n converterClass = eval(tp, tableNamespace, converterMapping)\n\n conv = converterClass(name, repeat, aux, description=descr)\n\n if conv.tableClass:\n # A \"template\" such as OffsetTo(AType) knowss the table class already\n tableClass = conv.tableClass\n elif tp in (\"MortChain\", \"MortSubtable\", \"MorxChain\"):\n tableClass = tableNamespace.get(tp)\n else:\n tableClass = tableNamespace.get(tableName)\n\n if not conv.tableClass:\n conv.tableClass = tableClass\n\n if name in [\"SubTable\", \"ExtSubTable\", \"SubStruct\"]:\n conv.lookupTypes = tableNamespace[\"lookupTypes\"]\n # also create reverse mapping\n for t in conv.lookupTypes.values():\n for cls in t.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n if name == \"FeatureParams\":\n conv.featureParamTypes = tableNamespace[\"featureParamTypes\"]\n conv.defaultFeatureParams = tableNamespace[\"FeatureParams\"]\n for cls in conv.featureParamTypes.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n converters.append(conv)\n assert name not in convertersByName, name\n convertersByName[name] = conv\n return converters, convertersByName",
"def create_init_table(self) -> None:\n is_exist_table = self.cursor.execute('show tables')\n\n if is_exist_table:\n return None\n\n for table in self._tables:\n try:\n cols = self._db_rows.get(table)\n cols_str = ','.join(cols)\n sql = \"CREATE TABLE {0}({1});\".format(table, cols_str)\n self.cursor.execute(sql)\n except Exception as e:\n logger.exception(e)\n self.close()",
"def init():\n declarative_bases = [TestTable]\n tables = []\n mappers = []\n return (declarative_bases, tables, mappers)",
"def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)",
"def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data",
"def build_table(self):\n if len(self._abslines) == 0:\n return\n comp_tbl = Table()\n comp_tbl['name'] = [iline.name for iline in self._abslines]\n comp_tbl['wrest'] = u.Quantity([iline.wrest for iline in self._abslines])\n comp_tbl['z'] = [iline.z for iline in self._abslines]\n for attrib in ['flag_N', 'logN', 'sig_logN']:\n comp_tbl[attrib] = [iline.attrib[attrib] for iline in self._abslines]\n # Return\n return comp_tbl"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
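A minimal sketch of the shape that process_table_init() above iterates over: a sequence of maps from table name to an entry description that table_entry.description_to_entry() can consume. The table name "forward" and the keys inside the entry description ("match_values", "action", "action_params") are illustrative assumptions, not the actual IR schema.

# Hypothetical table_initialization value; inner field names are assumptions,
# only the outer list-of-{table_name: entry_desc} shape is taken from the code above
table_initialization = [
    {"forward": {"match_values": {"ethernet.dst_addr": "00:11:22:33:44:55"},
                 "action": "set_egress_port",
                 "action_params": {"port": 1}}},
    {"forward": {"match_values": {"ethernet.dst_addr": "ff:ff:ff:ff:ff:ff"},
                 "action": "broadcast",
                 "action_params": {}}},
]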
Enable the switch instance. Start the traffic manager threads and allow packets to enter the processor chain. | def enable(self):
if not self.tm_started:
for name, tm in self.air_traffic_manager.items():
logging.debug("Starting tm %s" % name)
tm.start()
            self.tm_started = True
logging.debug("Enabling switch %s" % self.name)
self.disabled = False | [
"def enable_packet_switching(self):\n self.send_and_recv('CALL:LTE:SIGN:PSWitched:ACTion CONNect')\n self.wait_for_pswitched_state()",
"def enable(self):\n\n super(SequenceShot, self).enable()\n\n # create the switch handlers\n for switch in self.config['switches']:\n self.machine.switch_controller.add_switch_handler(\n switch, self._switch_handler, return_info=True)\n self.progress_index = 0",
"def enable(self):\n self.switch.enable()\n self._enabled = True",
"def start_sending_to_switch(self):\n self.switch_active = True\n for message in self.internal_switch_buffer:\n self.switch.buffer.append(message)\n self.internal_switch_buffer = []",
"def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)",
"def start_network(self):\n from mininet.topo import Topo\n from mininet.net import Mininet\n from mininet.node import OVSController\n class SingleSwitchTopo(Topo):\n \"Single Switch Topology\"\n def __init__(self, count=1, **params):\n Topo.__init__(self, **params)\n hosts = [ self.addHost('h%d' % i) for i in range(1, count + 1) ]\n s1 = self.addSwitch('s1')\n for h in hosts:\n self.addLink(h, s1)\n self.net = Mininet(topo = SingleSwitchTopo(4), controller = OVSController)\n self.net.start()\n self.impersonate(False)",
"def start(self):\n thread = Thread(target=self.ckl.sendGameEvent, args=(0, self.kill_switch))\n self.threads.append(thread)\n thread.start()",
"def enable():\n atop_enable()\n thread_enable()",
"def start(self, controllers = None):\n info(\"Starting P4 switch {}.\\n\".format(self.name))\n args = [self.sw_path]\n for port, intf in list(self.intfs.items()):\n if not intf.IP():\n args.extend(['-i', str(port) + \"@\" + intf.name])\n if self.pcap_dump:\n if self.pcap_dir:\n args.append(\"--pcap=\"+self.pcap_dir)\n else:\n args.append(\"--pcap\")\n\n if self.thrift_port:\n args.extend(['--thrift-port', str(self.thrift_port)])\n if self.nanomsg:\n args.extend(['--nanolog', self.nanomsg])\n args.extend(['--device-id', str(self.device_id)])\n\n args.append(self.json_path)\n if self.enable_debugger:\n args.append(\"--debugger\")\n if self.log_console:\n args.append(\"--log-console\")\n args.append('>' + self.log_file)\n info(' '.join(args) + \"\\n\")\n\n self.simple_switch_pid = None\n with tempfile.NamedTemporaryFile() as f:\n self.cmd(' '.join(args) + ' 2>&1 & echo $! >> ' + f.name)\n self.simple_switch_pid = int(f.read())\n debug(\"P4 switch {} PID is {}.\\n\".format(self.name, self.simple_switch_pid))\n sleep(1)\n if not self.check_switch_started():\n error(\"P4 switch {} did not start correctly.\"\n \" Check the switch log file.\\n\".format(self.name))\n exit(1)\n info(\"P4 switch {} has been started.\\n\".format(self.name))\n\n # only do this for l3..\n #self.cmd('sysctl', '-w', 'net.ipv4.ip_forward=1')",
"def start_threads(self):\n self.__toddler_control.start()\n self.__toddler_vision.start()\n self.__self_test.start()",
"def setup(self):\n self._logger.debug('Setup using ' + str(self._vswitch_class))\n\n try:\n self._vswitch.start()\n\n self._vswitch.add_switch(self._bridge)\n\n # create physical ports\n (_, phy1_number) = self._vswitch.add_phy_port(self._bridge)\n (_, phy2_number) = self._vswitch.add_phy_port(self._bridge)\n\n # create VM ports\n # initialize vport array to requested number of VMs\n guest_nics = settings.getValue('GUEST_NICS_NR')\n vm_ports = [[] for _ in range(self._pxp_vm_count)]\n # create as many VM ports as requested by configuration, but configure\n # only even number of NICs or just one\n for vmindex in range(self._pxp_vm_count):\n # just for case, enforce even number of NICs or 1\n nics_nr = int(guest_nics[vmindex] / 2) * 2 if guest_nics[vmindex] > 1 else 1\n self._logger.debug('Create %s vports for %s. VM with index %s',\n nics_nr, vmindex + 1, vmindex)\n for _ in range(nics_nr):\n (_, vport) = self._vswitch.add_vport(self._bridge)\n vm_ports[vmindex].append(vport)\n\n self._vswitch.del_flow(self._bridge)\n\n # configure flows according to the TC definition\n if self._pxp_topology == 'serial':\n flow = _FLOW_TEMPLATE.copy()\n if self._traffic['flow_type'] == 'IP':\n flow.update({'dl_type':'0x0800',\n 'nw_src':self._traffic['l3']['srcip'],\n 'nw_dst':self._traffic['l3']['dstip']})\n\n # insert flows for phy ports first\n # from 1st PHY to 1st vport of 1st VM\n self._add_flow(flow,\n phy1_number,\n vm_ports[0][0],\n self._bidir)\n # from last vport of last VM to 2nd phy\n self._add_flow(flow,\n vm_ports[self._pxp_vm_count-1][-1],\n phy2_number,\n self._bidir)\n\n # add serial connections among VMs and VM NICs pairs if needed\n # in case of multiple NICs pairs per VM, the pairs are chained\n # first, before flow to the next VM is created\n for vmindex in range(self._pxp_vm_count):\n # connect VMs NICs pairs in case of 4 and more NICs per VM\n connections = [(vm_ports[vmindex][2*(x+1)-1],\n vm_ports[vmindex][2*(x+1)])\n for x in range(int(len(vm_ports[vmindex])/2)-1)]\n for connection in connections:\n self._add_flow(flow,\n connection[0],\n connection[1],\n self._bidir)\n # connect last NICs to the next VM if there is any\n if self._pxp_vm_count > vmindex + 1:\n self._add_flow(flow,\n vm_ports[vmindex][-1],\n vm_ports[vmindex+1][0],\n self._bidir)\n else:\n proto = _PROTO_TCP if self._traffic['l3']['proto'].lower() == 'tcp' else _PROTO_UDP\n dst_mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value\n dst_ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value\n # initialize stream index; every NIC pair of every VM uses unique stream\n stream = 0\n for vmindex in range(self._pxp_vm_count):\n # iterate through all VMs NIC pairs...\n if len(vm_ports[vmindex]) > 1:\n port_pairs = [(vm_ports[vmindex][2*x],\n vm_ports[vmindex][2*x+1]) for x in range(int(len(vm_ports[vmindex])/2))]\n else:\n # ...or connect VM with just one NIC to both phy ports\n port_pairs = [(vm_ports[vmindex][0], vm_ports[vmindex][0])]\n\n for port_pair in port_pairs:\n flow_p = _FLOW_TEMPLATE.copy()\n flow_v = _FLOW_TEMPLATE.copy()\n\n # update flow based on trafficgen settings\n if self._traffic['stream_type'] == 'L2':\n tmp_mac = netaddr.EUI(dst_mac_value + stream)\n tmp_mac.dialect = netaddr.mac_unix_expanded\n flow_p.update({'dl_dst':tmp_mac})\n elif self._traffic['stream_type'] == 'L3':\n tmp_ip = netaddr.IPAddress(dst_ip_value + stream)\n flow_p.update({'dl_type':'0x0800', 'nw_dst':tmp_ip})\n elif self._traffic['stream_type'] == 'L4':\n flow_p.update({'dl_type':'0x0800', 'nw_proto':proto, 
'tp_dst':stream})\n else:\n raise RuntimeError('Unknown stream_type {}'.format(self._traffic['stream_type']))\n\n # insert flow to dispatch traffic from physical ports\n # to VMs based on stream type; all traffic from VMs is\n # sent to physical ports to avoid issues with MAC swapping\n # and upper layer mods performed inside guests\n self._add_flow(flow_p, phy1_number, port_pair[0])\n self._add_flow(flow_v, port_pair[1], phy2_number)\n if self._bidir:\n self._add_flow(flow_p, phy2_number, port_pair[1])\n self._add_flow(flow_v, port_pair[0], phy1_number)\n\n # every NIC pair needs its own unique traffic stream\n stream += 1\n\n except:\n self._vswitch.stop()\n raise",
"def start_traffic(self):\n pass",
"def activate_relay_intercom(self):\n self.ssdpResponder.start()",
"def enable(self):\n self.fisica.open()\n self.rx.threadStart()\n self.tx.threadStart()",
"def start(self):\n self.log.debug(\"Mode started\")\n self.active = True\n self.task = Task.Create(self.tick, sleep=0)\n #self.machine.events.post('machineflow_' + self.name + '_start')",
"def enabled(self, host, backend, weight):\n cmd = \"get weight $pxname/$svname; enable server $pxname/$svname\"\n if weight:\n cmd += \"; set weight $pxname/$svname %s\" % weight\n self.execute_for_backends(cmd, backend, host, 'UP')",
"def turn_on(self):\n from xbox.sg.console import Console\n _LOGGER.debug(f'About to turn on { self._name } on ip { self._ip_address } with live id { self._live_id }!')\n Console.power_on(self._live_id, addr=self._ip_address, tries=10)",
"def start_sniffing(self):\n self.stop_sniffing_flag = False\n self.pcap_thread = threading.Thread(target=self.__spawn_sniffer)\n self.pcap_thread.start()\n self.logger.debug(\"Sniffer starting to port %d\" % self.port)",
"def enable_force_switch(self, enable_force_switch):\n self._enable_force_switch = enable_force_switch"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Disable the switch instance. Packets on ingress are discarded while the switch is disabled. Traffic manager threads are not stopped. | def disable(self):
logging.debug("Disabling switch %s" % self.name)
self.disabled = True | [
"def disable_packet_switching(self):\n self.send_and_recv('CALL:LTE:SIGN:PSWitched:ACTion DISConnect')\n self.wait_for_pswitched_state()",
"def disable(self):\n\n super(SequenceShot, self).disable()\n\n for switch in self.config['switches']:\n self.machine.switch_controller.remove_switch_handler(\n switch, self.switch_handler)\n self.progress_index = 0",
"def disable():\n atop_disable()\n thread_disable()",
"def disable(self):\n self._disable_monitor()\n self._pinger.stop()",
"def disable_switch_port(self, mgr, interface):\n confstr = snipp.CMD_NO_SWITCHPORT % (interface)\n confstr = self.create_xml_snippet(confstr)\n LOG.debug(\"NexusDriver: %s\" % confstr)\n mgr.edit_config(target='running', config=confstr)",
"def network_disable(iface):\n\n core.disable(iface)\n\n return jsonify(message='disabled {}'.format(iface), code=200)",
"def disable(self, **kwargs):\n if self.flipper_switches:\n self.log.debug(\"Disabling\")\n for switch in self.flipper_switches:\n self.platform.clear_hw_rule(switch)",
"def deactivate_ping():\n pass",
"def disableSledIO(self):\n resp = self.sledIOCmdProxy('set mode', 'off')",
"def _disable(self):\n self.enabled = False",
"def off_switch(self):\n self._switch_callback = None",
"def disabled(self, host, backend, shutdown_sessions):\n cmd = \"get weight $pxname/$svname; disable server $pxname/$svname\"\n if shutdown_sessions:\n cmd += \"; shutdown sessions server $pxname/$svname\"\n self.execute_for_backends(cmd, backend, host, 'MAINT')",
"def _disable_wifi(self):\n self.set_object_property(\n proxy=self.proxy,\n prop_name=self.wifi_prop,\n value=False\n )",
"def stop_traffic(self):\n self._logger.debug(\"stop_traffic()\")",
"def set_disabled_switch(self, disabled):\n self.disabled = disabled",
"def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)",
"def disable(self):\r\n self.enabled = False",
"def disable(self):\n self.enabled = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transmit handler template for documentation. out_port: the port number to which the packet is to be sent. packet: a bytearray object holding the packet to transmit. | def dummy_transmit_handler(out_port, packet):
pass | [
"def handle_packet_out(self, buffer_id, in_port, actions, data):\n pass",
"def _post(self, which_port, msg):\n return _spacegrant_swig.binary_sink_sptr__post(self, which_port, msg)",
"def write(self, *args):\n return _yarp.PortWriterBufferBase_write(self, *args)",
"def _post(self, which_port, msg):\n return _limesdr_swig.sink_sptr__post(self, which_port, msg)",
"def write(self, *args):\n return _yarp.PortWriter_write(self, *args)",
"def write(self, *args):\n return _yarp.Port_write(self, *args)",
"def output_in_port():\n return output_port(OFP_IN_PORT)",
"def pkt_out_port(self, packet_builder, port, *args):\n vid = None\n if self.port_is_tagged(port):\n vid = self.vid\n pkt = packet_builder(vid, *args)\n return valve_of.packetout(port.number, bytes(pkt.data))",
"def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))",
"def add_out_port(self, m: int, content: str, **opts) -> None:",
"def _send_packet_out(self, packet: Packet, port) -> None:\n try:\n p = self.shell.PacketOut(bytes(packet), egress_port=str(port))\n p.send()\n logging.debug(\"Sending packet out: egress_port {}\".format(port))\n except UserError as e:\n logging.debug(e)\n return",
"def output_port(port_num, max_len=0):\n return parser.OFPActionOutput(port_num, max_len=max_len)",
"def write_port(self,port,data):\n\n\t\tself._validate_port(port)\n\t\tself._validate_data(data)\n\n\t\tcmd = pack('cB', port.upper(), int(data))\n\t\tself.driver.write(cmd)",
"def send_packet_out(dp, pkt, out_port, in_port=ofp.OFPP_CONTROLLER):\n actions = [parser.OFPActionOutput(out_port)]\n msg = parser.OFPPacketOut(datapath=dp,\n buffer_id=ofp.OFP_NO_BUFFER,\n in_port=in_port,\n actions=actions,\n data=pkt)\n return msg",
"def record(self, port_name, t_start=None):",
"def _post(self, which_port, msg):\n return _limesdr_swig.source_sptr__post(self, which_port, msg)",
"def OutputPort(*args, **kw):\n return Port.make_shared(OutputPortInterface(*args, **kw))",
"def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr__post(self, which_port, msg)",
"def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_pdu_packer_sptr__post(self, which_port, msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take a field from the CSV, expand/split it on a delimiter, and return a list of individual values. If the return_list flag is set to true, then this method will return the data as a list of new fields instead of a cleaned-up string normalized with a semicolon delimiter. | def expand_and_normalize_field(field, return_list=False):
if isinstance(field, basestring):
field = field.rstrip(';:,')
data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)]
if return_list:
return data
else:
return ";".join(data)
else:
if return_list:
return [field]
else:
return field | [
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def splitCSVLine(self, line):\n import string\n list = []\n position = 0\n fieldStart = 0\n while 1:\n if position >= len(line):\n # This only happens when we have a trailing comma\n list.append('')\n return list\n if line[position] == '\"':\n field = \"\"\n position = position + 1\n while 1:\n end = string.find(line, '\"', position)\n if end == -1:\n # This indicates a badly-formed CSV file, but\n # we'll accept it anyway.\n field = line[position:]\n position = len(line)\n break\n if end + 1 < len(line) and line[end + 1] == '\"':\n field = \"%s%s\" % (field, line[position:end + 1])\n position = end + 2\n else:\n field = \"%s%s\" % (field, line[position:end])\n position = end + 2\n break\n else:\n end = string.find(line, \",\", position)\n if end == -1:\n list.append(line[position:end])\n return list\n field = line[position:end]\n position = end + 1\n list.append(field)\n return list",
"def from_csv_line(line):\r\n return line.strip().split(',')",
"def changing_str_to_list_in_csv(str_in_csv):\n\n temp_list = []\n reader = csv.reader(str_in_csv.split(\"\\n\"), delimiter=\",\")\n for row in reader:\n temp_list.append(row)\n return temp_list",
"def get_from_csv(self, field):\n if len(self.lists[field]) > 0:\n return self.lists[field].pop(0)\n else:\n print(\"End of file for %s\" % field)\n exit(1)",
"def _get_fields(line, sep, quotechar):\n\n # remove trailing newline charactere\n line = list(line.rstrip('\\r\\n'))\n \n # remove the sep in the fields to avoid using wrong sep\n # while spitting\n if quotechar is not None:\n in_field = False\n for i, car in enumerate(line):\n if car == quotechar:\n in_field = not in_field\n elif car == sep:\n if in_field:\n line[i] = ''\n\n return \"\".join(line).split(sep)",
"def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''",
"def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list",
"def _do_explode_field(self, row: dict) -> List[dict]:\n null_value = self.get_arg_value(\"field_null_value\")\n\n schema = self.schema_to_explode\n field = schema[\"name_qual\"]\n\n if len(listify(row.get(field, []))) <= 1:\n self._do_flatten_fields(row=row, schema=schema)\n return [row]\n\n items = listify(row.pop(field, []))\n new_rows_map = {}\n\n sub_schemas = self.get_sub_schemas(schema=schema)\n\n for idx, item in enumerate(items):\n new_rows_map[idx] = dict(row)\n\n if schema[\"is_complex\"]:\n for sub_schema in sub_schemas:\n value = item.pop(sub_schema[\"name\"], null_value)\n new_rows_map[idx][sub_schema[\"name_qual\"]] = value\n else:\n new_rows_map[idx][schema[\"name_qual\"]] = item\n\n return [new_rows_map[idx] for idx in new_rows_map]",
"def do_explode_field(self, rows: Union[List[dict], dict]) -> List[dict]:\n rows = listify(rows)\n explode = self.get_arg_value(\"field_explode\")\n\n if not explode or self.is_excluded(schema=self.schema_to_explode):\n return rows\n\n new_rows = []\n for row in rows:\n new_rows += self._do_explode_field(row=row)\n return new_rows",
"def _parse_list_val(line):\n return _clean_val(line)",
"def row_splitter(row: str) -> List[str]:\n return row[:-1].split(',')",
"def separate_delim(self, line):\n # split the line\n items = line.split(self._delimiter)\n\n # return the list\n return items",
"def single_column_csv_to_list(string):\n reader = csv.reader(string.splitlines())\n list_output = []\n for row in reader:\n list_output.extend(row)\n list_output.pop(0)\n return list_output",
"def test_prepare_value_with_custom_separator(self):\n field = ListEditField(sep=';')\n\n self.assertEqual(\n field.prepare_value(' foo; bar ; baz '),\n ['foo', 'bar', 'baz'])",
"def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output",
"def separate_csv_line(s):\n reader = csv.reader(s.splitlines(), delimiter=\",\")\n fields = list(reader)[0]\n return fields",
"def __parse_data_line(dataline):\n list_line = []\n delimeter = '|'\n value = \"\"\n for i in dataline:\n if i != delimeter:\n value += i\n else:\n try:\n if LoadTPCData.is_int(value):\n list_line.append(int(value))\n elif LoadTPCData.is_float(value):\n list_line.append(float(value))\n else:\n list_line.append(str(value))\n except Exception:\n list_line.append(str(value))\n finally:\n value = \"\"\n return list_line",
"def _splitFieldValue(self, line):\n found = self.FIELDVALUE.findall(line)\n if found:\n fieldName, value = found[0]\n if fieldName in self.C.ADAPTER_COMMAFIELDS:\n value = self.COMMASPLIT.findall(value)[:-1] # Split and remove last empty part\n return fieldName, value\n return None, None # No field name match on this line."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
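A minimal standalone sketch of the split-and-rejoin pattern implemented by expand_and_normalize_field() above. _normalize_expanded_field() is not shown here, so a plain strip() stands in for it; that stand-in, and the sample value, are assumptions for illustration only.

import re

def _normalize_expanded_field(value):
    return value.strip()  # assumed stand-in for the real normalizer

field = "Office; Retail , Warehouse;"
cleaned = field.rstrip(";:,")  # drop trailing delimiters, as the real function does
parts = [_normalize_expanded_field(p) for p in re.split(",|;|:", cleaned)]
print(";".join(parts))  # -> Office;Retail;Warehouse (return_list=False path)
print(parts)            # -> ['Office', 'Retail', 'Warehouse'] (return_list=True path)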
Take a row and a field which may have delimited values and convert them into a list of new rows with the same data except for the replaced delimited value. | def expand_rows(row, delimited_fields, expand_row):
# _log.debug('expand_row is {}'.format(expand_row))
# go through the delimited fields and clean up the rows
copy_row = copy.deepcopy(row)
for d in delimited_fields:
if d in copy_row:
copy_row[d] = expand_and_normalize_field(copy_row[d], False)
if expand_row:
new_values = []
for d in delimited_fields:
fields = []
if d in copy_row:
for value in expand_and_normalize_field(copy_row[d], True):
fields.append({d: value})
new_values.append(fields)
# return all combinations of the lists
combinations = list(itertools.product(*new_values))
new_rows = []
for c in combinations:
new_row = copy.deepcopy(copy_row)
# c is a tuple because of the .product command
for item in c:
for k, v in item.items():
new_row[k] = v
new_rows.append(new_row)
return new_rows
else:
return [copy_row] | [
"def _transform_row(self, row):\n tmp_row = []\n\n for i, column in enumerate(row.value):\n if column.scalar_value.null:\n tmp_row.append(None)\n elif column.has_array_value:\n field_name, rep, mutate_to, cast_from = self._column_data_types[i]\n\n list_value = []\n for j, typed_value in enumerate(column.array_value):\n value = getattr(typed_value, field_name)\n if cast_from is not None:\n value = cast_from(value)\n list_value.append(value)\n\n tmp_row.append(list_value)\n else:\n field_name, rep, mutate_to, cast_from = self._column_data_types[i]\n\n # get the value from the field_name\n value = getattr(column.scalar_value, field_name)\n\n # cast the value\n if cast_from is not None:\n value = cast_from(value)\n\n tmp_row.append(value)\n return tmp_row",
"def _do_explode_field(self, row: dict) -> List[dict]:\n null_value = self.get_arg_value(\"field_null_value\")\n\n schema = self.schema_to_explode\n field = schema[\"name_qual\"]\n\n if len(listify(row.get(field, []))) <= 1:\n self._do_flatten_fields(row=row, schema=schema)\n return [row]\n\n items = listify(row.pop(field, []))\n new_rows_map = {}\n\n sub_schemas = self.get_sub_schemas(schema=schema)\n\n for idx, item in enumerate(items):\n new_rows_map[idx] = dict(row)\n\n if schema[\"is_complex\"]:\n for sub_schema in sub_schemas:\n value = item.pop(sub_schema[\"name\"], null_value)\n new_rows_map[idx][sub_schema[\"name_qual\"]] = value\n else:\n new_rows_map[idx][schema[\"name_qual\"]] = item\n\n return [new_rows_map[idx] for idx in new_rows_map]",
"def do_explode_field(self, rows: Union[List[dict], dict]) -> List[dict]:\n rows = listify(rows)\n explode = self.get_arg_value(\"field_explode\")\n\n if not explode or self.is_excluded(schema=self.schema_to_explode):\n return rows\n\n new_rows = []\n for row in rows:\n new_rows += self._do_explode_field(row=row)\n return new_rows",
"def run_logic(row):\n if type(row) == 'list':\n col = int(params['column'])\n row[col:col + 1] = re.findall(params['regex'], str(row[col])) or []\n else:\n row = re.findall(params['regex'], row) or []\n return row",
"def clean_row(row):\n\n if len(row) < NUM_FIELDS:\n row.extend('')\n\n new_row = [row[0]] # just id\n\n\n # format date\n unix_epoch = datetime(1970, 1, 1)\n sql_date = row[1]\n parsed_sql_date = datetime(int(sql_date[:4]), int(sql_date[4:6]), int(sql_date[6:8]))\n days_since_epoch = (parsed_sql_date - unix_epoch).days\n\n new_row.extend([days_since_epoch])\n\n # add other fields\n new_row.extend([row[26], row[29], row[30], row[6], row[16], row[25], row[31], row[32], row[33], row[34], row[36], row[43]])\n\n # format url\n domain = tldextract.extract(row[57]).domain\n new_row.extend([domain])\n\n return new_row",
"def _convert_row_text_as_list(row_text):\n split_row = row_text.split(\"|\")\n\n if len(split_row) > 2 and split_row[-1].strip() == \"\":\n lst = split_row[1:-1]\n else:\n lst = split_row[1:]\n\n match = SEPARATOR_PATTERN.match(row_text)\n if match:\n lst = [match.group(1)]\n\n return [i.strip() for i in lst]",
"def expand_and_normalize_field(field, return_list=False):\n\n if isinstance(field, basestring):\n field = field.rstrip(';:,')\n data = [_normalize_expanded_field(r) for r in re.split(\",|;|:\", field)]\n if return_list:\n return data\n else:\n return \";\".join(data)\n else:\n if return_list:\n return [field]\n else:\n return field",
"def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''",
"def row_splitter(row: str) -> List[str]:\n return row[:-1].split(',')",
"def ConvertRow(self, row):\n i = 0\n data = []\n for entry in row['f']:\n data.append(self.Convert(entry['v'], self.schema[i]))\n i += 1\n return tuple(data)",
"def parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def expand_row(\n row: Sequence[Union[str, Sequence[Union[str, Sequence[str]]]]]\n) -> List[List[str]]:\n elems_as_lists = []\n for elem in row:\n if isinstance(elem, list):\n elems_as_lists.append(elem)\n else:\n elems_as_lists.append([elem])\n aligned = [list(i) for i in zip_longest(*elems_as_lists, fillvalue=\"\")]\n return aligned",
"def parse_row(input_row, parsers):\r\n \r\n return [try_or_none(parser)(value) if parser is not None else value\r\n for value, parser in zip(input_row, parsers)]",
"def fake_clean_row(row):\n\treturn row",
"def parse_row(input_row, parsers):\n\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def split_row(self, row):\n entry_sep = re.compile('\\s*\\|\\s*')\n return entry_sep.split(row)[1:-1]",
"def swap_csv_coordinates(swapfile, rows):\n swap_rows = []\n with open(swapfile) as swap_csv:\n swap_reader = csv.reader(swap_csv, delimiter=',')\n for swap_row in swap_reader:\n swap_new = list(swap_row)\n r_0 = str(swap_row[0]).zfill(6)\n swap_new[0] = r_0\n swap_rows.append(swap_new)\n swapped_rows = []\n for row in rows:\n tmp_row = match_el_array(row[0], swap_rows)\n if tmp_row:\n swapped_row = [\n row[0],\n tmp_row[1],\n tmp_row[2],\n row[3],\n row[4],\n row[5],\n row[6],\n row[7],\n row[8],\n ]\n swapped_rows.append(swapped_row)\n else:\n swapped_rows.append(row)\n return swapped_rows",
"def _convert_field_type(row):\n return row"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply mapping of row data to model. | def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs):
initial_data = kwargs.get('initial_data', None)
model = model_class()
# _log.debug("map_row's mappings {}".format(mapping))
# If there are any initial states we need to set prior to mapping.
if initial_data:
model = apply_initial_data(model, initial_data)
# concat is not used as of 2016-09-14
# concat = _set_default_concat_config(concat)
for raw_field, value in row.items():
is_extra_data = True if raw_field in extra_data_fields else False
# Save the value if is is not None, keep empty fields.
if value is not None:
model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner)
return model | [
"def apply_model(row):\n model = load_model()\n return model(row)",
"def transform(self, X):\n\n self.check_is_fitted([\"mappings\"])\n\n X = super().transform(X)\n\n for c in self.columns:\n\n X[c] = X[c].map(self.mappings[c])\n\n return X",
"def _fast_map_row(row):\n return {'row': row}",
"def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)",
"def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)",
"def _map(event_name, data):\n pk = _pk(data)\n for (column, value) in data.items():\n yield (event_name, pk, column, value)",
"def create_row_processor(self, selectcontext, mapper, row):\n \n raise NotImplementedError()",
"def create_row_processor(self, selectcontext, mapper, row):\n\n raise NotImplementedError()",
"def mapData(self, function, data):\n self.results = list(map(function, list(data)))\n return self",
"def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)",
"def apply_iter(cls, rows, mapping, resolver, scope=None):\n mapper = cls.from_mapping(mapping, resolver, scope=scope)\n for i,row in enumerate(rows):\n err = None\n _, data = mapper.apply(row)\n try:\n mapper.validator.validate(data)\n except ValidationError as ve:\n err = ve\n yield data, err, i, row",
"def _apply_to_field(self, dictionary):\n setattr(self._modelInstance, self._datafield, dictionary)",
"def mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping):\n\n if \"FullPeptideName\" in header_dict:\n\n peptide_name = this_row[header_dict[\"FullPeptideName\"]]\n\n transitions = []\n pr_transitions = []\n if \"aggr_Fragment_Annotation\" in header_dict:\n transitions = this_row[ header_dict[\"aggr_Fragment_Annotation\"] ].split(\";\")\n if \"aggr_prec_Fragment_Annotation\" in header_dict:\n pr_transitions = this_row[ header_dict[\"aggr_prec_Fragment_Annotation\"] ].split(\";\")\n\n # Skip row if there are no transitions\n if len(transitions) == 0:\n return\n\n if len(transitions[-1]) == 0:\n transitions = transitions[:-1]\n if len(pr_transitions) > 0 and len(pr_transitions[-1]) == 0:\n pr_transitions = pr_transitions[:-1]\n\n # Get charge state (may be absent)\n charge_state = \"0\"\n if \"Charge\" in header_dict:\n charge_state = this_row[header_dict[\"Charge\"]]\n\n if charge_state == \"NA\" or charge_state == \"\":\n charge_state = \"0\"\n\n key = peptide_name + \"/\" + charge_state\n prkey = peptide_name + \"/\" + charge_state + \"_pr\"\n precursors_mapping [ key ] = transitions\n precursors_mapping [ prkey ] = pr_transitions\n mapped_precursors = sequences_mapping.get( peptide_name, [] )\n mapped_precursors.extend([key, prkey])\n sequences_mapping[peptide_name] = mapped_precursors # = [ key, prkey ]\n\n if \"ProteinName\" in header_dict:\n protein_name = this_row[header_dict[\"ProteinName\"]]\n\n tmp = protein_mapping.get(protein_name, [])\n if peptide_name not in tmp:\n tmp.append(peptide_name)\n protein_mapping[protein_name] = tmp",
"def process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup=False, no_pubmed_lookup=False,\n no_doaj_lookup=False, no_title_lookup=False, round_monetary=False,\n offsetting_mode=None, orig_file_path=None, crossref_max_retries=3):\n if len(row) != num_required_columns:\n msg = \"Line %s: \" + MESSAGES[\"num_columns\"]\n logging.error(msg, row_num, len(row), num_required_columns)\n return row\n\n empty_row = True\n for elem in row:\n if has_value(elem):\n empty_row = False\n break\n else:\n msg = \"Line %s: \" + MESSAGES[\"empty_row\"]\n logging.warning(msg, row_num)\n\n current_row = {}\n record_type = None\n\n # Copy content of identified columns and apply special processing rules\n for csv_column in column_map.values():\n index, column_type = csv_column.index, csv_column.column_type\n if empty_row:\n current_row[column_type] = \"\"\n continue\n if column_type == \"euro\" and index is not None:\n current_row[\"euro\"] = _process_euro_value(row[index], round_monetary, row_num, index, offsetting_mode)\n elif column_type == \"period\" and index is not None:\n current_row[\"period\"] = _process_period_value(row[index], row_num)\n elif column_type == \"is_hybrid\" and index is not None:\n current_row[\"is_hybrid\"] = _process_hybrid_status(row[index], row_num)\n elif column_type == \"institution\" and index is not None:\n current_row[\"institution\"] = _process_institution_value(row[index], row_num, orig_file_path, offsetting_mode)\n else:\n if index is not None and len(row[index]) > 0:\n current_row[column_type] = row[index]\n else:\n current_row[column_type] = \"NA\"\n\n doi = current_row[\"doi\"]\n if not has_value(doi) and not empty_row:\n msg = (\"Line %s: No DOI found\")\n logging.info(msg, row_num)\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # lookup the book title in Crossref\n lookup_title = current_row[\"book_title\"]\n if has_value(lookup_title):\n msg = (\"Line %s: Trying to look up the book title ('%s') in Crossref...\")\n logging.info(msg, row_num, lookup_title)\n book_doi = title_lookup(lookup_title, [\"book\", \"monograph\", \"reference-book\"])\n if book_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = book_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if has_value(doi):\n # Normalise DOI\n norm_doi = get_normalised_DOI(doi)\n if norm_doi is not None and norm_doi != doi:\n current_row[\"doi\"] = norm_doi\n msg = MESSAGES[\"doi_norm\"].format(doi, norm_doi)\n logging.info(msg)\n doi = norm_doi\n # include crossref 
metadata\n if not no_crossref_lookup:\n crossref_result = get_metadata_from_crossref(doi)\n retries = 0\n while not crossref_result[\"success\"] and crossref_result[\"error_msg\"].startswith(\"HTTPError: 504\"):\n if retries >= crossref_max_retries:\n break\n # retry on gateway timeouts, crossref API is quite busy sometimes\n msg = \"%s, retrying...\"\n logging.warning(msg, crossref_result[\"error_msg\"])\n retries += 1\n crossref_result = get_metadata_from_crossref(doi)\n if not crossref_result[\"success\"]:\n exc = crossref_result[\"exception\"]\n # check if a preprint lookup is possible\n if not no_title_lookup and type(exc) == UnsupportedDoiTypeError and exc.doi_type == \"posted-content\":\n msg = (\"Line %s: Found a DOI with type 'posted_content' (%s). This might \" +\n \"be a case of a preprint DOI, trying to find the final version of the article...\")\n logging.info(msg, row_num, doi)\n if not exc.crossref_title:\n msg = \"Line %s: Preprint lookup failed, no title could be extracted.\"\n logging.warning(msg, row_num)\n else:\n article_doi = title_lookup(exc.crossref_title, [\"journal-article\"])\n if article_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line...\")\n index = column_map[\"doi\"].index\n row[index] = article_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if crossref_result[\"success\"]:\n data = crossref_result[\"data\"]\n record_type = data.pop(\"doi_type\")\n logging.info(\"Crossref: DOI resolved: \" + doi + \" [\" + record_type + \"]\")\n current_row[\"indexed_in_crossref\"] = \"TRUE\"\n for key, value in data.items():\n new_value = _process_crossref_results(current_row, row_num, key, value)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Crossref: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, crossref_result[\"error_msg\"])\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref and try to find a correct DOI\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # include pubmed metadata\n if not no_pubmed_lookup and record_type == \"journal-article\":\n pubmed_result = get_metadata_from_pubmed(doi)\n if pubmed_result[\"success\"]:\n logging.info(\"Pubmed: DOI resolved: \" + doi)\n data = pubmed_result[\"data\"]\n for key, value in data.items():\n if value is not None:\n new_value = value\n else:\n new_value = \"NA\"\n msg = \"WARNING: Element %s not found in in response for doi %s.\"\n logging.debug(msg, key, doi)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Pubmed: Error while trying to resolve DOI 
%s: %s\"\n logging.error(msg, row_num, doi, pubmed_result[\"error_msg\"])\n\n # lookup in DOAJ. try the EISSN first, then ISSN and finally print ISSN\n if not no_doaj_lookup and not empty_row:\n issns = []\n new_value = \"NA\"\n if current_row[\"issn_electronic\"] != \"NA\":\n issns.append(current_row[\"issn_electronic\"])\n if current_row[\"issn\"] != \"NA\":\n issns.append(current_row[\"issn\"])\n if current_row[\"issn_print\"] != \"NA\":\n issns.append(current_row[\"issn_print\"])\n for issn in issns:\n lookup_result = doaj_analysis.lookup(issn)\n if lookup_result:\n msg = \"DOAJ: Journal ISSN (%s) found in DOAJ offline copy ('%s').\"\n logging.info(msg, issn, lookup_result)\n new_value = \"TRUE\"\n break\n else:\n msg = \"DOAJ: Journal ISSN (%s) not found in DOAJ offline copy.\"\n new_value = \"FALSE\"\n logging.info(msg, issn)\n old_value = current_row[\"doaj\"]\n current_row[\"doaj\"] = column_map[\"doaj\"].check_overwrite(old_value, new_value)\n if record_type != \"journal-article\" and not empty_row:\n collected_isbns = []\n for isbn_field in [\"isbn\", \"isbn_print\", \"isbn_electronic\"]:\n # test and split all ISBNs\n current_row[isbn_field] = _process_isbn(row_num, current_row[isbn_field], doab_analysis.isbn_handling)\n if has_value(current_row[isbn_field]):\n collected_isbns.append(current_row[isbn_field])\n additional_isbns = [row[i] for i in additional_isbn_columns]\n for isbn in additional_isbns:\n result = _process_isbn(row_num, isbn, doab_analysis.isbn_handling)\n if has_value(result):\n collected_isbns.append(result)\n if len(collected_isbns) == 0:\n logging.info(\"No ISBN found, skipping DOAB lookup.\")\n current_row[\"doab\"] = \"NA\"\n else:\n record_type = \"book\"\n logging.info(\"Trying a DOAB lookup with the following values: \" + str(collected_isbns))\n for isbn in collected_isbns:\n doab_result = doab_analysis.lookup(isbn)\n if doab_result is not None:\n current_row[\"doab\"] = \"TRUE\"\n msg = 'DOAB: ISBN %s found in normalized DOAB (%s, \"%s\")'\n logging.info(msg, isbn, doab_result[\"publisher\"], doab_result[\"book_title\"])\n if current_row[\"indexed_in_crossref\"] == \"TRUE\":\n msg = \"Book already found in Crossref via DOI, those results take precedence\"\n logging.info(msg)\n else:\n for key in doab_result:\n current_row[key] = doab_result[key]\n if not has_value(current_row[\"isbn\"]):\n current_row[\"isbn\"] = isbn\n break\n else:\n current_row[\"doab\"] = \"FALSE\"\n msg = \"DOAB: None of the ISBNs found in DOAB\"\n logging.info(msg)\n if offsetting_mode:\n current_row[\"agreement\"] = offsetting_mode\n record_type = \"journal-article_transagree\"\n\n if record_type is None:\n msg = \"Line %s: Could not identify record type, using default schema 'journal-article'\"\n logging.warning(msg, row_num)\n record_type = \"journal-article\"\n\n result = []\n for field in COLUMN_SCHEMAS[record_type]:\n result.append(current_row[field])\n\n return (record_type, result)",
"def map(self):\n self.df_primary_col = self.df.columns\n #print(len(self.df.columns))\n array1 = self.meta.index# sample id of metadata\n array2 = self.df.index # sample id of feature-table\n mapped_dict ={'metadata':[],'feature_table':[]}\n for i in range(len(array1)):\n for j in range(len(array2)):\n if array2[j] == array1[i]:\n mapped_dict['metadata'].append(i)\n mapped_dict['feature_table'].append(j)\n break\n\n temp_table = self.df.iloc[mapped_dict['feature_table'],:]\n temp_table.index = list(range(temp_table.shape[0]))\n temp_meta = self.meta.iloc[mapped_dict['metadata'],:]\n temp_meta.index = list(range(temp_meta.shape[0]))\n assert temp_meta.shape[0] == temp_table.shape[0]\n self.df = pd.concat([temp_table,temp_meta],axis=1)\n new_index = []\n for ele in mapped_dict['metadata']:\n new_index.append(array1[ele])\n self.df.index=new_index",
"def populate_model(\n model_object_name: str,\n required_mapping: dict,\n optional_mapping: dict,\n row: pd.Series,\n properties,\n identifiers: dict = None,\n sub_holding_keys=None,\n) -> typing.Callable:\n\n # Check that the provided model name actually exists\n model_object = getattr(lusid.models, model_object_name, None)\n\n if model_object is None:\n raise TypeError(\"The provided model_object is not a lusid.model object\")\n\n # Expand the mapping out from being a dot separated flat dictionary e.g. transaction_price.price to being nested\n update_dict(required_mapping, optional_mapping)\n\n mapping_expanded = expand_dictionary(required_mapping)\n\n # Set the attributes on the model\n return set_attributes_recursive(\n model_object=model_object,\n mapping=mapping_expanded,\n row=row,\n properties=properties,\n identifiers=identifiers,\n sub_holding_keys=sub_holding_keys,\n )",
"def mapper(record):\n\n mat = record[0]\n row = record[1]\n col = record[2]\n val = record[3]\n\n for k in xrange(5):\n if mat == 'a':\n #print \"Matrix a: \" + str((row, k)) + \", \" + str((mat, col, val))\n mr.emit_intermediate((row, k), (mat, col, val))\n else:\n #print \"Matrix b: \" + str((k, col)) + \", \" + str((mat, row, val))\n mr.emit_intermediate((k, col), (mat, row, val))",
"def transform(data): \n\n t_data = {}\n for k in DATA_MAP: \n t_data[k] = []\n\n for row in data: \n for k,t_func in DATA_MAP.iteritems(): \n if t_func is not None: \n t_val = t_func(row[k])\n t_data[k].append(t_val)\n else: \n t_data[k].append(t_val) \n\n return t_data",
"def mapping(query, key_func=lambda r: r[0], value_func=lambda r: r[1]):\n def inner(model, *args, **kwargs):\n return {\n key_func(r): value_func(r)\n for r in model.engine.execute(query, *args, **kwargs)\n }\n\n return inner"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates stats inside mod_stats_map with data gathered from the file. | def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map):
with open(file_name) as f:
lines = f.readlines()
upstream_ref = None
upstream_start_line = None
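  # Scan for upstream mod regions delimited by start/end tags.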
for line_number, line in enumerate(lines):
if REGION_START_TAG in line:
tag, ref_name = _extract_tag_and_ref_name_from_line(line, False)
if REGION_UPSTREAM_TAG in tag:
upstream_ref = ref_name
upstream_start_line = line_number
elif REGION_END_TAG in line and upstream_ref:
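      # End of an upstream region: count one mod and the lines between the tags.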
mod_stats = mod_stats_map[upstream_ref]
mod_stats.mod_count += 1
mod_stats.line_count += line_number - upstream_start_line - 1
upstream_ref = None
upstream_start_line = None | [
"def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)",
"def read_stats(self, files):\n with open(self.stats_file, newline='', encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n self.stats = {row['file']: row\n for row in reader\n if row['file'] in files}",
"async def update_player_stats(player_dict):\r\n db = client['players_and_teams']\r\n player_collection = db['players']\r\n for player_id in player_dict:\r\n player_document = await player_collection.find_one({'_id': player_id})\r\n if player_document == None:\r\n print(f\"Lookup for player {player_id} failed!!\")\r\n pprint.pprint(player_dict[player_id])\r\n continue\r\n stat = player_document['cached']\r\n\r\n for score in player_dict[player_id]:\r\n stat['base_acc'] += score['accuracy']\r\n stat['base_score'] += score['score']\r\n stat['base_contrib'] += score['contrib']\r\n stat['maps_played'] += 1\r\n #i highly doubt we will ever encounter a tie but \r\n #it's treated as neither a loss nor a win\r\n if score['score_difference'] > 0:\r\n stat['maps_won'] += 1\r\n elif score['score_difference'] < 0:\r\n stat['maps_lost'] += 1\r\n stat['hits']['300_count'] += score['hits']['300_count']\r\n stat['hits']['100_count'] += score['hits']['100_count']\r\n stat['hits']['50_count'] += score['hits']['50_count']\r\n stat['hits']['miss_count'] += score['hits']['miss_count']\r\n\r\n #per-mod stat changes\r\n if score['map_type'] == \"TB\":\r\n mod_stat = stat['by_mod'][\"FM\"]\r\n else:\r\n mod_stat = stat['by_mod'][score['map_type']]\r\n mod_stat['base_acc'] += score['accuracy']\r\n mod_stat['base_score'] += score['score']\r\n mod_stat['base_contrib'] += score['contrib']\r\n \r\n mod_stat['maps_played'] += 1\r\n if score['score_difference'] > 0:\r\n mod_stat['maps_won'] += 1\r\n elif score['score_difference'] < 0:\r\n mod_stat['maps_lost'] += 1\r\n\r\n #and add to the player's list of scores\r\n player_document['scores'].append(score['_id'])\r\n \r\n #recalculate baselines back to an average\r\n stat['average_acc'] = stat['base_acc'] / stat['maps_played'] \r\n stat['average_score'] = stat['base_score'] / stat['maps_played'] \r\n stat['average_contrib'] = stat['base_contrib'] / stat['maps_played']\r\n\r\n #then for mods as well\r\n for mod in stat['by_mod']:\r\n mod_stat = stat['by_mod'][mod]\r\n if mod_stat['maps_played'] != 0:\r\n mod_stat['average_acc'] = mod_stat['base_acc'] / mod_stat['maps_played'] \r\n mod_stat['average_score'] = mod_stat['base_score'] / mod_stat['maps_played'] \r\n mod_stat['average_contrib'] = mod_stat['base_contrib'] / mod_stat['maps_played'] \r\n\r\n #pprint.pprint(player_document)\r\n \r\n #and update the player document\r\n await player_collection.replace_one({'_id': player_id}, player_document)",
"def update_stat(self, player, name, value):\r\n\t\tself.stats[name][player.username] += value",
"def readPlayerFileAndFillStats(players_data_filename, game_stats):\n\tplayer_stats = {}\n\tteam_stats = {}\n\tgame_stats_clean = {}\n\tgame_stats = fillGameStats(players_data_filename, game_stats)\n\n\twith open(players_data_filename) as csvfile:\n\t reader = csv.DictReader(csvfile)\n\t for row in reader:\n\t \tgame_id = row['game_id']\n\n\t \tif isGameStatsValid(game_stats[game_id]):\n\t\t \tplayer_id = row['player_id']\n\t\t \tteam_id = row['team_id']\n\t\t \tkills = row['kill']\n\t\t \tdeaths = row['death']\n\t\t \tassists = row['assists']\n\t\t \tgold = row['gold_earned']\n\n\t\t \tif not game_stats.get(game_id):\n\t\t \t\tprint('no game id')\n\n\t\t \tkills = int(kills)\n\t\t \tdeaths = int(deaths)\n\t\t \tassists = int(assists)\n\t\t \tgold = int(gold)\n\n\t\t \tif not game_stats_clean.get(game_id):\n\t \t\t\tgame_stats_clean[game_id] = game_stats[game_id]\n\n\t\t \twin = 0\n\t\t \tif game_stats[game_id]['winner_team_id'] == team_id:\n\t\t \t\twin = 1\n\t\t \t\n\n\t\t \tif not team_stats.get(team_id):\n\t \t\t\tteam_stats[team_id] = {'games_played': 1, 'wins': 0, 'loses': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'gold': 0, 'player_ids': Set([]), 'game_ids': Set([]), 'player_stats': []}\n\t \t\t\n\t \t\tteam_stats[team_id]['wins'] += win/5\n\t \t\tteam_stats[team_id]['loses'] += (1 - win)/5\n\t\t \tteam_stats[team_id]['kills'] += kills\n\t\t \tteam_stats[team_id]['deaths'] += deaths\n\t\t \tteam_stats[team_id]['assists'] += assists\n\t\t \tteam_stats[team_id]['gold'] += gold\n\t\t \tteam_stats[team_id]['player_ids'].add(player_id)\n\t\t \tteam_stats[team_id]['game_ids'].add(game_id)\n\t\t \tteam_stats[team_id]['games_played'] = len(team_stats[team_id]['game_ids'])\n\n\n\t\t \tif not player_stats.get(player_id):\n\t\t \t\tplayer_stats[player_id] = {'games_played': 1, 'wins': win, 'loses': 1 - win, 'kills': kills, 'deaths': deaths, 'assists': assists, 'gold': gold, 'team_ids': Set([team_id])}\n\t\t \telse:\n\t\t \t\tplayer_stats[player_id]['games_played'] += 1\n\t\t \t\tplayer_stats[player_id]['wins'] += win\n\t\t \t\tplayer_stats[player_id]['loses'] += 1 - win\n\t\t \t\tplayer_stats[player_id]['kills'] += kills\n\t\t \t\tplayer_stats[player_id]['deaths'] += deaths\n\t\t \t\tplayer_stats[player_id]['assists'] += assists\n\t\t \t\tplayer_stats[player_id]['gold'] += gold\n\t\t \t\tplayer_stats[player_id]['team_ids'].add(team_id)\n\n\t\t \t#team_stats[team_id]['player_stats'].append({player_id: player_stats[player_id]})\n\n\treturn game_stats_clean, team_stats, player_stats",
"def UpdateFile(self, modID = None):\n if modID is None:\n modID = self.modActive\n\n source = self.modules[modID][1]\n filename = self.modules[modID][2]\n\n try: \n file = open(filename, \"wt\")\n file.write(source)\n finally:\n file.close()",
"def UpdateFile(self, modID = None):\n if modID is None:\n modID = self.modActive\n\n source = self.modules[modID][1]\n filename = self.modules[modID][2]\n\n try:\n file = open(filename, \"wt\")\n file.write(source)\n finally:\n file.close()",
"def update_stats_table(stats_dict, dataset):\n\n # Get the id of the record, if it exists\n session = get_session()\n query = session.query(Stats.id)\\\n .filter(Stats.lightcurve_filename == os.path.basename(dataset)).all()\n if query == []:\n id_num = ''\n else:\n id_num = query[0][0]\n session.close()\n\n # If id doesn't exist then instert. If id exists, then update\n insert_or_update(Stats, stats_dict, id_num)",
"def load_stats_from_logs(self, stats):\n\n starting_ = self.score_starting_\n self.logger.info(lS.LOADING_MATCHES_FROM_.format(starting_))\n\n for file in os.listdir(starting_):\n self.logger.info(lS.LOADING_STATS_FOR_.format(starting_ + file))\n temp_ids = []\n\n for file_ip in os.listdir(starting_ + file):\n if sC.TEXT not in file_ip:\n continue\n\n with open(starting_ + file + sC.SEPARATOR + file_ip) as f:\n for line in f:\n stat = line.strip().split()\n team, nick, name = self.get_details_by_id(stat[0])\n\n if stat[0] not in temp_ids:\n temp_ids.append(stat[0])\n stats[team][stat[0]][sC.MATCHES] += 1\n stats[team][stat[0]][stat[1]] += 1",
"def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break",
"def loadMetaChunkToServerMap (fileName):\n if not os.path.exists(fileName):\n print \"File \", fileName, \" does not exists\"\n sys.exit(1)\n\n infile = open (fileName, \"r\")\n count = 0\n while infile:\n count = count + 1\n line = infile.readline()\n if not line:\n break\n print \"DEBUGME : processing line %s, %d\" % (line, count)\n lineParts = line.split(' ')\n gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])\n # Add a ChunkHostInfo\n numServers = int(lineParts[2])\n for i in range(numServers):\n i = i * 3\n gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))",
"def updateFile(self):",
"def update(self, file = \"both\", add = {\"hello\": \"world\"}):\n\n\t\tif(file == \"allModules\"):\n\t\t\tif(add != {\"hello\": \"world\"}):\n\t\t\t\tself.allModules.update(add)\n\n\t\t\twith open((self.path + self.jsonFiles[\"allModules\"])) as self.rawAllModulesWrite:\n\t\t\t\tjson.dump(self.allModules, self.rawAllModulesWrite)\n\n\t\telif(file == \"client\"):\n\t\t\tif(add != {\"hello\": \"world\"}):\n\t\t\t\tself.clientInfo.update(add)\n\n\t\t\twith open((self.path + self.jsonFiles[\"clientInfo\"]), 'w') as self.rawClientWrite:\n\t\t\t\tjson.dump(self.clientInfo, self.rawClientWrite)\n\n\t\telif(file == \"both\"):\n\t\t\twith open((self.path + self.jsonFiles[\"clientInfo\"]), 'w') as self.rawClientWrite:\n\t\t\t\tjson.dump(self.clientInfo, self.rawClientWrite)\n\t\t\twith open((self.path + self.jsonFiles[\"allModules\"])) as self.rawAllModulesWrite:\n\t\t\t\tjson.dump(self.allModules, self.rawAllModulesWrite)",
"async def update_team_stats(team_dict):\r\n db = client['players_and_teams']\r\n team_collection = db['teams']\r\n for team_name in team_dict:\r\n team_document = await team_collection.find_one({'_id': team_name})\r\n if team_document == None:\r\n print(f\"Lookup for team {team_name} failed!!\")\r\n pprint.pprint(team_dict[team_name])\r\n continue\r\n processed_maps = []\r\n stat = team_document['cached']\r\n #theoretically no need to call every single score that's already stored in the player's document\r\n\r\n for score in team_dict[team_name]:\r\n #main\r\n stat['base_acc'] += score['accuracy']\r\n stat['base_score'] += score['score']\r\n stat['total_scores'] += 1 #usually two per map\r\n if score['match_id']+str(score['match_index']) not in processed_maps:\r\n #only one per map\r\n stat['maps_played'] += 1\r\n if score['score_difference'] > 0:\r\n stat['maps_won'] += 1\r\n elif score['score_difference'] < 0:\r\n stat['maps_lost'] += 1\r\n stat['hits']['300_count'] += score['hits']['300_count']\r\n stat['hits']['100_count'] += score['hits']['100_count']\r\n stat['hits']['50_count'] += score['hits']['50_count']\r\n stat['hits']['miss_count'] += score['hits']['miss_count']\r\n\r\n #per-mod stat changes\r\n if score['map_type'] == \"TB\":\r\n mod_stat = stat['by_mod'][\"FM\"]\r\n else:\r\n mod_stat = stat['by_mod'][score['map_type']]\r\n mod_stat['base_acc'] += score['accuracy']\r\n mod_stat['base_score'] += score['score']\r\n mod_stat['total_scores'] += 1\r\n \r\n if score['match_id']+str(score['match_index']) not in processed_maps:\r\n mod_stat['maps_played'] += 1\r\n if score['score_difference'] > 0:\r\n mod_stat['maps_won'] += 1\r\n elif score['score_difference'] < 0:\r\n mod_stat['maps_lost'] += 1\r\n #formatted match_id-match_index, always unique per individual map played\r\n processed_maps.append(score['match_id']+str(score['match_index']))\r\n\r\n #add score id \r\n team_document['scores'].append(score['_id'])\r\n \r\n #recalculate baselines back to an average\r\n #the team size (in an individual map)\r\n stat['average_acc'] = stat['base_acc'] / stat['total_scores']\r\n stat['average_score'] = stat['base_score'] / stat['total_scores']\r\n\r\n #then for mods as well\r\n for mod in stat['by_mod']:\r\n mod_stat = stat['by_mod'][mod]\r\n if mod_stat['maps_played'] != 0:\r\n mod_stat['average_acc'] = mod_stat['base_acc'] / mod_stat['total_scores'] \r\n mod_stat['average_score'] = mod_stat['base_score'] / mod_stat['total_scores'] \r\n\r\n #pprint.pprint(team_document)\r\n \r\n #and update the document\r\n await team_collection.replace_one({'_id': team_name}, team_document)",
"def update(self) -> None:\n\n # self.logger.info(\"updating estats\")\n\n # First up, try bailing early.\n if not self.update_lock.acquire(blocking=False):\n self.logger.warning(\"EStats update: skipping due to lock contention\")\n return\n\n # If here, we have the lock. Make sure it gets released!\n try:\n # Remember when we started.\n last_attempt = time.time()\n\n self.update_log_levels(last_attempt)\n self.update_envoy_stats(last_attempt)\n except Exception as e:\n self.logger.exception(\"could not update Envoy stats: %s\" % e)\n finally:\n self.update_lock.release()",
"def update_envoy_stats(self, last_attempt: float) -> None:\n\n text = self.fetch_envoy_stats()\n\n if not text:\n # EnvoyStats is immutable, so...\n new_stats = EnvoyStats(\n max_live_age=self.stats.max_live_age,\n max_ready_age=self.stats.max_ready_age,\n created=self.stats.created,\n last_update=self.stats.last_update,\n last_attempt=last_attempt, # THIS IS A CHANGE\n update_errors=self.stats.update_errors + 1, # THIS IS A CHANGE\n requests=self.stats.requests,\n clusters=self.stats.clusters,\n envoy=self.stats.envoy\n )\n\n with self.access_lock:\n self.stats = new_stats\n return\n\n # Parse stats into a hierarchy.\n envoy_stats: Dict[str, Any] = {} # Ew.\n\n for line in text.split(\"\\n\"):\n if not line:\n continue\n\n # self.logger.info('line: %s' % line)\n key, value = line.split(\":\")\n keypath = key.split('.')\n\n node = envoy_stats\n\n for key in keypath[:-1]:\n if key not in node:\n node[key] = {}\n\n node = node[key]\n\n value = value.strip()\n\n # Skip histograms for the moment.\n # if value.startswith(\"P0(\"):\n # continue\n # # for field in value.split(' '):\n # # if field.startswith('P95('):\n # # value = field.split(',')\n\n try:\n node[keypath[-1]] = int(value)\n except:\n continue\n\n # Now dig into clusters a bit more.\n\n requests_info = {}\n active_clusters = {}\n\n if (\"http\" in envoy_stats) and (\"ingress_http\" in envoy_stats[\"http\"]):\n ingress_stats = envoy_stats[\"http\"][\"ingress_http\"]\n\n requests_total = ingress_stats.get(\"downstream_rq_total\", 0)\n\n requests_4xx = ingress_stats.get('downstream_rq_4xx', 0)\n requests_5xx = ingress_stats.get('downstream_rq_5xx', 0)\n requests_bad = requests_4xx + requests_5xx\n\n requests_ok = requests_total - requests_bad\n\n requests_info = {\n \"total\": requests_total,\n \"4xx\": requests_4xx,\n \"5xx\": requests_5xx,\n \"bad\": requests_bad,\n \"ok\": requests_ok,\n }\n\n if \"cluster\" in envoy_stats:\n for cluster_name in envoy_stats['cluster']:\n cluster = envoy_stats['cluster'][cluster_name]\n\n # # Toss any _%d -- that's madness with our Istio code at the moment.\n # cluster_name = re.sub('_\\d+$', '', cluster_name)\n\n # mapping_name = active_cluster_map[cluster_name]\n # active_mappings[mapping_name] = {}\n\n # self.logger.info(\"cluster %s stats: %s\" % (cluster_name, cluster))\n\n healthy_percent: Optional[int]\n \n healthy_members = cluster['membership_healthy']\n total_members = cluster['membership_total']\n healthy_percent = percentage(healthy_members, total_members)\n\n update_attempts = cluster['update_attempt']\n update_successes = cluster['update_success']\n update_percent = percentage(update_successes, update_attempts)\n\n # Weird.\n # upstream_ok = cluster.get('upstream_rq_2xx', 0)\n # upstream_total = cluster.get('upstream_rq_pending_total', 0)\n upstream_total = cluster.get('upstream_rq_completed', 0)\n\n upstream_4xx = cluster.get('upstream_rq_4xx', 0)\n upstream_5xx = cluster.get('upstream_rq_5xx', 0)\n upstream_bad = upstream_5xx # used to include 4XX here, but that seems wrong.\n\n upstream_ok = upstream_total - upstream_bad\n\n # self.logger.info(\"%s total %s bad %s ok %s\" % (cluster_name, upstream_total, upstream_bad, upstream_ok))\n\n if upstream_total > 0:\n healthy_percent = percentage(upstream_ok, upstream_total)\n # self.logger.debug(\"cluster %s is %d%% healthy\" % (cluster_name, healthy_percent))\n else:\n healthy_percent = None\n # self.logger.debug(\"cluster %s has had no requests\" % cluster_name)\n\n active_clusters[cluster_name] = {\n 'healthy_members': 
healthy_members,\n 'total_members': total_members,\n 'healthy_percent': healthy_percent,\n\n 'update_attempts': update_attempts,\n 'update_successes': update_successes,\n 'update_percent': update_percent,\n\n 'upstream_ok': upstream_ok,\n 'upstream_4xx': upstream_4xx,\n 'upstream_5xx': upstream_5xx,\n 'upstream_bad': upstream_bad\n }\n\n # OK, we're now officially finished with all the hard stuff.\n last_update = time.time()\n\n # Finally, set up the new EnvoyStats.\n new_stats = EnvoyStats(\n max_live_age=self.stats.max_live_age,\n max_ready_age=self.stats.max_ready_age,\n created=self.stats.created,\n last_update=last_update, # THIS IS A CHANGE\n last_attempt=last_attempt, # THIS IS A CHANGE\n update_errors=self.stats.update_errors,\n requests=requests_info, # THIS IS A CHANGE\n clusters=active_clusters, # THIS IS A CHANGE\n envoy=envoy_stats # THIS IS A CHANGE\n )\n\n # Make sure we hold the access_lock while messing with self.stats!\n with self.access_lock:\n self.stats = new_stats\n\n # self.logger.info(\"stats updated\")",
"def updateMap(inpath,infile,outpath,motifChrom,seqlen,window=100):\n motifdir = os.path.join(outpath,\"bedMotifs\")\n mapdir = os.path.join(outpath,\"mapMotifs\")\n if not os.path.isdir(motifdir):\n print \"Error: path-to-motif-bed-files invalid, please specify a valid outpath to store all calculated scores.\"\n sys.exit()\n if not os.path.isdir(mapdir):\n os.mkdir(mapdir) \n mapinfile = os.path.join(inpath, infile)##better in per chromosome bedGraph files\n (mappath,mapfilename) = os.path.split(mapinfile)\n expName = mapfilename.split(motifChrom)[0]\n\n print 'updating', expName, motifChrom\n with bz2.BZ2File(mapinfile,'r') as bedGraphFile:\n bbFile = os.path.join(inpath,expName+motifChrom+'.bb')\n if not os.path.isfile(bbFile):\n countBed.compressBed4(bedGraphFile, expName, bbFile)\n coordDict, valuesDict = countBed.getBinBedCoord(bbFile, expName)\n arrayDict=defaultdict(list)\n for bedfile in glob.glob(os.path.join(motifdir,\"*\"+motifChrom+\".bed.gz\")):\n (filepath,filename) = os.path.split(bedfile)\n tfname = filename.split(motifChrom)[0]\n gcoordsfile = gzip.open(bedfile,'r')\n gcoords = csv.reader(gcoordsfile, delimiter='\\t')\n mapfile = gzip.open(os.path.join(mapdir,tfname+motifChrom+'map'+str(seqlen)+'.txt.gz'),'w')\n writer = csv.writer(mapfile, delimiter='\\t')\n for test in gcoords:\n motifStart, motifEnd = int(test[1]), int(test[2])\n if not motifChrom in arrayDict:\n arrayDict[motifChrom] = countBed.buildBedHist(motifChrom, coordDict, valuesDict, expName)\n xs, xvals, sums = arrayDict[motifChrom]\n avg = countBed.queryHist(xs, xvals, sums, motifStart-window, motifEnd+window)[0]\n row = [avg]\n writer.writerows([row])\n mapfile.close()\n return 0",
"def add_stats(self, stats):\r\n\t\tself[\"stats\"] = stats",
"def write_stats(self, new_stats):\n with open(self.stats_path, 'w') as stats_file:\n stats_file.write(self.pack(new_stats))\n\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the tracking file for the given file. Returns the last path mentioned in the file via a tracking tag, or the equivalent third-party path derived from the file's path. If there is no file at the default path and none of the files mentioned within the file exist, returns None. Normally the third-party path must exist; passing |check_exist|=False will bypass this check when it is not desired. An additional check is enabled by passing |check_uses_tags|=True: in this case the given file MUST use either a file track tag or another modification tag before a tracking_path is returned. stats tracks the analyzer's status and may be None. | def compute_tracking_path(stats, our_path, our_lines, do_lint_check=False,
check_exist=True, check_uses_tags=False):
tracking_path = staging.get_default_tracking_path(our_path)
base_matcher = re.compile(re.escape(FILE_TRACK_TAG) + r' "([^\"]+)"')
tag_matcher = re.compile(re.escape(REGION_START_TAG))
uses_any_tags = False
next_lineno = 1
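  # Scan the top of the file for an explicit file-track tag or any mod region tag.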
for line in our_lines:
if stats:
stats['lineno'] = next_lineno
match = base_matcher.search(line)
if match:
tracking_path = match.group(1)
if not os.path.exists(tracking_path) and stats:
show_error(stats, 'Mod tracking path does not exist:\n' + line)
if next_lineno > MAX_ARC_TRACK_SEARCH_LINES:
show_error(stats, 'Tracking not allowed on line > %d' %
MAX_ARC_TRACK_SEARCH_LINES)
uses_any_tags = True
break
elif not uses_any_tags and tag_matcher.search(line):
uses_any_tags = True
next_lineno += 1
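    # Past the search window: stop early unless linting or still checking for required tags.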
if (not do_lint_check and (uses_any_tags or not check_uses_tags) and
next_lineno > MAX_ARC_TRACK_SEARCH_LINES):
break
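  # Apply the existence and tag-usage checks requested by the caller.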
if not tracking_path:
return None
if check_uses_tags and not uses_any_tags:
return None
if check_exist and not os.path.exists(tracking_path):
return None
return tracking_path | [
"def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None",
"def _find_token_file(file: Path | str) -> Path:\n if isinstance(file, Path):\n if file.is_absolute():\n filepath = file.with_suffix(\".txt\")\n else:\n token_dir = _find_token_dir()\n filepath = (token_dir / file.name).with_suffix(\".txt\")\n else:\n file = Path(file)\n filepath = _find_token_file(file=file)\n\n if filepath.exists():\n return filepath\n else:\n print(f\"Looked for {filepath} but could not find it.\")\n print()\n raise FileNotFoundError",
"def _resolve_file_or_none(context_dir, conf, conf_file, has_args=False):\n if not conf:\n return None\n base1 = os.path.expanduser(context_dir)\n base2 = os.path.expanduser(conf)\n path = os.path.join(base1, base2)\n path = os.path.abspath(path) # This resolves \"/../\"\n if not os.path.exists(path):\n raise Exception(\"File does not exist: '%s'. This was \"\n \"referenced in the file '%s'.\" % (path, conf_file))\n return path",
"def check_tracker_file():\n tracker = tracker_helper.grab_tracker_file()\n tracker_helper.check_tracker_lastbib( tracker )\n tracker_helper.check_tracker_batches( tracker, start_bib=int('1000000'), end_bib=int(tracker['last_bib']) )\n log.debug( 'check_tracker_file() complete' )\n return tracker",
"def find_cue_path(self, path, verbose=False):\n meta = {}\n if('.flaccuesplit.' not in path and not os.path.exists(path)):\n try:\n path, meta = self._track_cache[path]\n except (AttributeError, NameError, TypeError, KeyError):\n # Not caching or not yet cached.\n raw_path = path\n dir_path = self.clean_path(os.path.dirname(path))\n files = os.listdir(dir_path)\n for cue_file in files:\n if(os.path.splitext(cue_file)[1] == '.cue'):\n try:\n # Don't use verbose here. Overly spammy.\n to_add, metadata, to_remove = self.get_cue_files(os.path.join(dir_path, cue_file))\n base_path = os.path.basename(path)\n if(base_path in to_add):\n path = to_add[base_path]\n meta = metadata[base_path]\n break\n except Exception:\n print(f'Error parsing {cue_file}:', file=sys.stderr, flush=True)\n import traceback\n traceback.print_exc()\n try:\n self._track_cache[raw_path] = (path, meta)\n except (AttributeError, NameError, TypeError):\n # Not caching.\n pass\n if(verbose):\n print(f'{raw_path} -> {path}', flush=True)\n return path, meta",
"def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)",
"def _getFileLocalOrPath(filename, pathenv):\n if os.path.exists(filename):\n log.info( \"Using local file %s\", filename)\n return filename\n\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n resolvedfilename = FindFile(filename, pathlist, os.R_OK)\n if resolvedfilename:\n return resolvedfilename\n\n log.fatal(\"No file %s found locally nor in %s\" % (filename, os.getenv('CORAL_DBLOOKUP_PATH')) )\n return None",
"def get_file_info(self, relativePath):\n relativePath = self.to_repo_relative_path(path=relativePath, split=False)\n fileName = os.path.basename(relativePath)\n isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)\n if not isRepoFile:\n return None, \"file is not a registered repository file.\"\n if not infoOnDisk:\n return None, \"file is a registered repository file but info file missing\"\n fileInfoPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%fileName)\n try:\n with open(fileInfoPath, 'rb') as fd:\n info = pickle.load(fd)\n except Exception as err:\n return None, \"Unable to read file info from disk (%s)\"%str(err)\n return info, ''",
"def findfile(file2find):\n cwd = os.getcwd()\n paths = [cwd] + sys.path\n for dirname in paths:\n possible = os.path.join(dirname, file2find)\n if os.path.isfile(possible):\n return possible\n return None",
"def checkFilePath(self, filename, searchpath=[]):\n\t\tif filename is None:\n\t\t\treturn None\n\t\telif os.path.isfile(filename):\n\t\t\treturn filename\n\t\telse:\n\t\t\t# Append current dir to searchpath and try each in turn\n\t\t\tsearchpath.append(os.path.dirname(__file__))\n\t\t\t# print(searchpath)\n\t\t\tfor folder in searchpath:\n\t\t\t\tfilepath = os.path.join(folder, filename)\n\t\t\t\tif os.path.isfile(filepath):\n\t\t\t\t\treturn filepath\n\n\t\t# File not found\n\t\treturn None",
"def locate(tgt_fpath, survey):\n flen = os.stat(tgt_fpath).st_size\n fpaths = survey.get(flen, ())\n if not fpaths:\n return None\n\n for fbase_path in fpaths:\n # print(' '*5, tgt_fpath, fbase_path)\n if not filecmp.cmp(tgt_fpath, fbase_path, shallow=True):\n continue # early reject, try other candidates\n if filecmp.cmp(tgt_fpath, fbase_path, shallow=False):\n # identically equal\n return fbase_path\n\n return None",
"def _find_metadata_file_path(root, rel_path):\n for file in constants.ROLE_META_FILES:\n meta_path = os.path.join(root, rel_path, file)\n if os.path.exists(meta_path):\n return meta_path\n return None",
"def find(self, relative_path):\n found = list(self.grep(relative_path, lazy=True))\n if found:\n return found[0]\n\n return None",
"def _get_fpath(self, data, stats, filename=None, return_None=False):\n # figure out the filename. If disposition one was needed, pipeline should\n # have had it explicitly\n if filename is None:\n filename = data['filename'] if 'filename' in data else None\n stats.files += 1\n fpath = filename\n\n if filename is None:\n if return_None:\n return None\n stats.skipped += 1\n raise ValueError(\"No filename were provided\")\n elif isabs(filename):\n stats.skipped += 1\n raise ValueError(\"Got absolute filename %r\" % filename)\n\n path_ = data.get('path', None)\n if path_:\n # TODO: test all this handling of provided paths\n if isabs(path_):\n stats.skipped += 1\n raise ValueError(\"Absolute path %s was provided\" % path_)\n fpath = opj(path_, fpath)\n\n return fpath",
"def _detectInfoDir( self, path ):\n svnDir = os.path.join( path, '.svn' )\n gitDir = os.path.join( path, '.git' )\n\n if os.path.exists( svnDir ):\n return svnDir\n\n elif os.path.exists( gitDir ):\n return gitDir\n\n else:\n\n if path == '' or path == '/':\n return None\n else:\n return self._detectInfoDir( os.path.split( path )[0] )",
"def find(self, filename):\n if self.get_config_path() is None:\n return None\n full_path = filename\n if not os.path.sep in full_path:\n full_path = os.path.join(self.get_config_path(), filename)\n #if os.path.exists(full_path):\n return full_path\n #raise ValueError(\"could not locate path {}\".format(filename))",
"def extract_log_file_location(filename: str) -> Optional[Path]:\n # Neither getopt, optparse, or argparse will let me pick out\n # just one option from the command line and ignore the rest.\n # So we do it by hand.\n prefix = '--muscle-log-file='\n given_path_str = None\n for arg in sys.argv[1:]:\n if arg.startswith(prefix):\n given_path_str = arg[len(prefix):]\n\n if not given_path_str:\n return None\n\n given_path = Path(given_path_str)\n\n if given_path.is_dir():\n return given_path / filename\n return given_path",
"def pathStat (\n\n self,\n path = None\n ) :\n\n try :\n\n return os.stat( self.normalizePath( path, normalize = False ) )\n\n except Exception, exception :\n\n return None",
"def find_file_here_or_above(\n here_path: Path, target_name: str) -> Optional[Path]:\n start = here_path\n if here_path.is_dir():\n start = start / 'dummy'\n for dirname in start.resolve().parents:\n path = dirname / target_name\n if path.exists():\n return path\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the notices object as if the two paths were properly staged. analyze_diffs needs to be independent of staging: staging might not have been run, or might be out of date by the time analyze_diffs runs, so we make a best-effort attempt to reconstruct the notices that would exist post-staging. | def _compute_staged_notices(mods_path, third_party_path):
mods_notices = notices.Notices()
if mods_path:
mods_notices.add_sources([mods_path])
third_party_notices = notices.Notices()
if third_party_path:
third_party_notices.add_sources([third_party_path])
# If there are mods and third_party notices, pick the one that is more
# specific to the file, which is the one that has a deeper path.
if (_count_directory_levels_in_license_root(third_party_notices) >
_count_directory_levels_in_license_root(mods_notices)):
return third_party_notices
else:
return mods_notices | [
"def apply_decisions(base, decisions):\n\n merged = copy.deepcopy(base)\n prev_path = None\n parent = None\n last_key = None\n resolved = None\n diffs = None\n # clear_parent actions should override other decisions on same obj, so\n # we need to track it\n clear_parent_flag = False\n for md in decisions:\n path, line = split_string_path(merged, md.common_path)\n # We patch all decisions with the same path in one op\n if path == prev_path:\n # Same path as previous, collect entry\n if clear_parent_flag:\n # Another entry will clear the parent, all other decisions\n # should be dropped\n pass\n else:\n if md.action == \"clear_parent\":\n clear_parent_flag = True\n # Clear any exisiting decsions!\n diffs = []\n ad = resolve_action(resolved, md)\n if line:\n ad = push_path(line, ad)\n diffs.extend(ad)\n\n else:\n # Different path, start a new collection\n if prev_path is not None:\n # First, apply previous diffs\n if parent is None:\n # Operations on root create new merged object\n merged = patch(resolved, diffs)\n else:\n # If not, overwrite entry in parent (which is an entry in\n # merged). This is ok, as no paths should point to\n # subobjects of the patched object\n parent[last_key] = patch(resolved, diffs)\n\n prev_path = path\n # Resolve path in base and output\n resolved = merged\n parent = None\n last_key = None\n for key in path:\n parent = resolved\n resolved = resolved[key] # Should raise if key missing\n last_key = key\n diffs = resolve_action(resolved, md)\n if line:\n diffs = push_path(line, diffs)\n clear_parent_flag = md.action == \"clear_parent\"\n # Apply the last collection of diffs, if present (same as above)\n if prev_path is not None:\n if parent is None:\n merged = patch(resolved, diffs)\n else:\n parent[last_key] = patch(resolved, diffs)\n\n merged = nbformat.from_dict(merged)\n return merged",
"def extract_diff(self):\n\n if self.diff:\n target = self.path_1.split('\\\\')[1:-1] + ['Difference']\n path_1_miss = target + [\n 'Missing_from_path_1_({})'.format(self.path_1.split('\\\\')[-1])]\n path_2_miss = target + [\n 'Missing_from_path_2_({})'.format(self.path_2.split('\\\\')[-1])]\n p1p = cp(path_1_miss)\n p2p = cp(path_2_miss)\n\n if not os.path.exists(p1p):\n os.makedirs(p1p)\n\n if not os.path.exists(p2p):\n os.makedirs(p2p)\n\n for i in self.diff:\n if self.diff[i][0]:\n _dst = os.path.join(p2p,\n os.path.split(self.diff[i][0])[1])\n c_copy(self.diff[i][0], _dst)\n elif self.diff[i][1]:\n _dst = os.path.join(p1p,\n os.path.split(self.diff[i][1])[1])\n c_copy(self.diff[i][1], _dst)\n\n print('\\n{} - was created for files missing'.format(p1p))\n print('\\n{} - was created for files missing '.format(p2p))\n else:\n print('Directories are identical. No extraction was performed')",
"def preprocess(self):\n self.diff_paths = set()\n for label_id in self.diff_applier.diff:\n label_parts = tuple(label_id.split('-'))\n path = tuple()\n for part in label_parts:\n path = path + (part,)\n self.diff_paths.add(path)",
"def _transmit_differences(self, report):\n\n # This option really only makes sense on date range reports,\n # as updates hit older data than just 'yesterday'.\n if self.criteria.start_date == self.criteria.end_date:\n raise ValueError(\"difference calculation not supported on \"\\\n \"single day reports\")\n # See if we can find a similar report in the archive from\n # yesterday\n search_criteria = {'report_method':\n self.criteria.report_method,\n 'start_date': self.criteria.start_date -\n timedelta(days=1), 'end_date':\n self.criteria.end_date - timedelta(days=1)}\n old_doc = document_find(search_criteria, limit=1)\n if old_doc is None:\n logging.info(\"No comparable report found for difference \"\\\n \"generation\")\n self._transmit_report(report)\n else:\n target_filename = self.\\\n _generate_output_filename(start_date=self.criteria.start_date,\n end_date=self.criteria.end_date)\n # RemoveDuplicates not yet ported!!\n raise ValueError(\"RemoveDuplicates not ported\")\n #from pheme.essence.remove_duplicates import RemoveDuplicates\n #rd = RemoveDuplicates(new_report=report,\n # old_report=old_doc,\n # out=target_filename)\n #rd.generate_report()\n #logging.info(\"initiate upload of difference %s\", target_filename)\n #self._transport.transfer_file(target_filename)",
"def prepare_context(self, image_obj, context):\n\n base_img = image_obj.get_image_base()\n if not base_img or base_img.id == image_obj.id:\n return context # No diffs to compute\n\n img_files = image_obj.fs.files\n img_suid_files = {k: v for k, v in filter(lambda x: x[1], { path: meta.get('suid') for path, meta in img_files.items()}.items())}\n\n base_files = base_img.fs.files\n base_suid_files = {k: v for k, v in filter(lambda x: x[1], { path: meta.get('suid') for path, meta in base_files.items()}.items())}\n\n added_keys = set(img_suid_files.keys()).difference(set(base_suid_files.keys()))\n removed_keys = set(base_suid_files.keys()).difference(set(img_suid_files.keys()))\n common_keys = set(base_suid_files.keys()).intersection(set(img_suid_files.keys()))\n\n added = {k: v for k, v in filter(lambda x: x[0] in added_keys, img_suid_files.items())}\n removed = {k: v for k, v in filter(lambda x: x[0] in removed_keys, base_suid_files.items())}\n changed = {k: v for k, v in filter(lambda x: x[0] in common_keys and img_suid_files[x[0]] != base_suid_files[x[0]], img_suid_files.items())}\n\n context.data['added_suid_files'] = added\n context.data['base_suid_files'] = removed\n context.data['changed_suid_files'] = changed\n return context",
"def diff(self, **kargs):\n refs, count, objs = self.collect() ## refs contains the list of ALL objects\n \n ## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)\n delRefs = {}\n for i in list(self.startRefs.keys()):\n if i not in refs:\n delRefs[i] = self.startRefs[i]\n del self.startRefs[i]\n self.forgetRef(delRefs[i])\n for i in list(self.newRefs.keys()):\n if i not in refs:\n delRefs[i] = self.newRefs[i]\n del self.newRefs[i]\n self.forgetRef(delRefs[i])\n #print \"deleted:\", len(delRefs)\n \n ## Which refs have appeared since call to start() or diff()\n persistentRefs = {} ## created since start(), but before last diff()\n createRefs = {} ## created since last diff()\n for o in refs:\n if o not in self.startRefs: \n if o not in self.newRefs: \n createRefs[o] = refs[o] ## object has been created since last diff()\n else:\n persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)\n #print \"new:\", len(newRefs)\n \n ## self.newRefs holds the entire set of objects created since start()\n for r in self.newRefs:\n self.forgetRef(self.newRefs[r])\n self.newRefs.clear()\n self.newRefs.update(persistentRefs)\n self.newRefs.update(createRefs)\n for r in self.newRefs:\n self.rememberRef(self.newRefs[r])\n #print \"created:\", len(createRefs)\n \n ## self.persistentRefs holds all objects considered persistent.\n self.persistentRefs.clear()\n self.persistentRefs.update(persistentRefs)\n \n \n print(\"----------- Count changes since start: ----------\")\n c1 = count.copy()\n for k in self.startCount:\n c1[k] = c1.get(k, 0) - self.startCount[k]\n typs = list(c1.keys())\n typs.sort(key=lambda a: c1[a])\n for t in typs:\n if c1[t] == 0:\n continue\n num = \"%d\" % c1[t]\n print(\" \" + num + \" \"*(10-len(num)) + str(t))\n \n print(\"----------- %d Deleted since last diff: ------------\" % len(delRefs))\n self.report(delRefs, objs, **kargs)\n print(\"----------- %d Created since last diff: ------------\" % len(createRefs))\n self.report(createRefs, objs, **kargs)\n print(\"----------- %d Created since start (persistent): ------------\" % len(persistentRefs))\n self.report(persistentRefs, objs, **kargs)",
"def _pre_process(self):\n\n\n\n if self.verbosity >= 1:\n print(\"----------------Start pre-process\")\n\n #remove empty layers\n new_transformation = []\n for layer in range(len(self.transformation)):\n new_layer = [rule for rule in self.transformation[layer]]\n if new_layer:\n new_transformation.append(new_layer)\n self.transformation = new_transformation\n\n\n\n \n # now traceability is being built for all rules. We only need traceability for the rules that have no dependencies,\n # as the others are built by the combinators associated to the rule\n\n if self.verbosity >= 1:\n print(\"Start building traceability for rules\")\n # transformation to built traceability for rules\n\n\n for layerIndex in range(0, len(self.transformation)):\n for ruleIndex in range(0, len(self.transformation[layerIndex])):\n self.transformation[layerIndex][ruleIndex] = build_traceability(self.transformation[layerIndex][ruleIndex])\n\n\n if self.verbosity >= 1:\n print(\"Start changing rule names\")\n \n self.rule_names = {\"Em\":\"HEmptyPathCondition\"}\n # keep the original names around \n self.shortened_rule_names = {\"HEmptyPathCondition\":\"Em\"}\n # change rules names to be shorter\n\n rule_num = 0\n\n for layer in range(len(self.transformation)):\n i = 0\n subsumedRulesInLayer = []\n subsumingRulesInLayer = []\n\n for rule in self.transformation[layer]:\n \n new_name = \"\" + str(layer) + \"R\" + str(i)\n\n #new_name = str(self.generate_letters(rule_num))\n\n rule_num += 1\n\n i += 1\n self.rule_names[new_name] = rule.name\n self.shortened_rule_names[rule.name] = new_name\n \n self.ruleCombinators[new_name] = self.ruleCombinators[rule.name]\n del self.ruleCombinators[rule.name]\n \n self.ruleTraceCheckers[new_name] = self.ruleTraceCheckers[rule.name]\n del self.ruleTraceCheckers[rule.name]\n \n if rule.name in self.overlappingRules.keys():\n self.overlappingRules[new_name] = self.overlappingRules[rule.name]\n del self.overlappingRules[rule.name]\n subsumedRulesInLayer.append(new_name)\n \n #remove this when layer is ordered\n if rule.name in self.subsumption.keys():\n self.subsumption[new_name] = self.subsumption[rule.name]\n del self.subsumption[rule.name]\n subsumingRulesInLayer.append(new_name)\n \n rule.name = new_name\n\n # now that the layer renaming is complete, change the names of the subsuming rules for rules that need overlap treatment\n\n for subsumedRule in subsumedRulesInLayer:\n for subsumingRuleIndex in range(len(self.overlappingRules[subsumedRule])):\n ruleName = self.overlappingRules[subsumedRule][subsumingRuleIndex]\n short_rule_name = self.shortened_rule_names[ruleName]\n self.overlappingRules[subsumedRule][subsumingRuleIndex] = short_rule_name\n \n # remove this when layer is ordered \n for subsumedRule in subsumingRulesInLayer:\n for subsumingRuleIndex in range(len(self.subsumption[subsumedRule])):\n ruleName = self.subsumption[subsumedRule][subsumingRuleIndex]\n try:\n short_rule_name = self.shortened_rule_names[ruleName]\n except:\n #TODO: Fix this Exception\n short_rule_name = ruleName\n self.subsumption[subsumedRule][subsumingRuleIndex] = short_rule_name\n\n \n # change the names of the rules in a subsumption loop\n\n for loop in self.loopingRuleSubsumption:\n for ruleIndex in range(len(loop)):\n newRuleName = None\n for newRuleNameIter in self.rule_names.keys():\n if loop[ruleIndex] == self.rule_names[newRuleNameIter]:\n newRuleName = newRuleNameIter\n break\n loop[ruleIndex] = newRuleName\n\n\n if self.verbosity >= 2: \n print(\"------------------------------------\")\n 
print(\"Transformation: \" )\n for l in range(len(self.transformation)):\n print(\"Layer \" + str(l))\n for r in self.transformation[l]:\n print(\" \" + self.rule_names[r.name])\n print(\"------------------------------------\")\n print(\"\\n\")",
"def diff_report(self) -> str:\n graph_a = self.graph_a\n graph_b = self.graph_b\n\n graph_a_str = str(graph_a)\n graph_b_str = str(graph_b)\n\n if graph_a_str == graph_b_str:\n return \"\"\n\n graph_diff = difflib.ndiff(\n graph_a_str.splitlines(True), graph_b_str.splitlines(True)\n )\n graph_diff_report = [\"Graph diff:\", self._indent(\"\".join(graph_diff))]\n\n for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):\n if str(node_a) != str(node_b):\n graph_diff_report.append(\"First diverging operator:\")\n node_diff = difflib.ndiff(\n str(node_a).splitlines(True), str(node_b).splitlines(True)\n )\n source_printout = [\"node diff:\", self._indent(\"\".join(node_diff))]\n\n stack_a = node_a.sourceRange() if node_a else None\n if stack_a:\n source_printout.extend(\n [\"Former source location:\", self._indent(str(stack_a))]\n )\n stack_b = node_b.sourceRange() if node_b else None\n if stack_b:\n source_printout.extend(\n [\"Latter source location:\", self._indent(str(stack_b))]\n )\n\n graph_diff_report.extend(source_printout)\n\n break\n\n return \"\\n\".join(graph_diff_report)",
"def analyze(self) -> DiffReport:\n # analyzing does not update the file yet.\n if not os.path.isdir(self.dir_path):\n raise DiffError(\"Project path is empty: %s\" % self.dir_path)\n\n if not os.path.isdir(self.cache_path):\n os.mkdir(self.cache_path)\n\n # dict: key = file directory, value = hashcash list\n hashcashdic = dict()\n for filename in glob.iglob(self.dir_path + '**/**', recursive=True):\n if os.path.isfile(filename) and os.path.splitext(filename)[-1] == \".py\":\n with open(filename, 'r', encoding=\"UTF8\") as file:\n rel_path = os.path.relpath(filename, self.dir_path)\n hashcashdic[rel_path] = hashcash.hashcash(file.readlines())\n\n # dict: key = file directory (relative), value = hashcash list\n # What if cache file does not exists?\n prev_cache = pickle.load(\n open(os.path.join(self.cache_path, \"cache.pkl\"), \"rb\"))\n\n diff_formats = []\n for f in hashcashdic.keys():\n df = DiffFormat(f)\n # if previous cache exists for the file, use Diff.analyze to find difference.\n if f in prev_cache.keys():\n Diff.analyze(df, hashcashdic[f], prev_cache[f])\n\n # if no previous cache for the file was found, add all lines to added.\n else:\n with open(os.path.join(self.dir_path, f), 'r', encoding=\"UTF8\") as fl:\n df.added = range(1, len(fl.readlines()))\n diff_formats.append(df)\n return DiffReport(diff_formats)",
"def PostProcessDiff(self, diff):\r\n return diff",
"def _CompareStores(self, storage_reader, compare_storage_reader):\n stores_are_identical = True\n\n storage_counters = self._CalculateStorageCounters(storage_reader)\n compare_storage_counters = self._CalculateStorageCounters(\n compare_storage_reader)\n\n # Compare number of events.\n parsers_counter = storage_counters.get('parsers', collections.Counter())\n compare_parsers_counter = compare_storage_counters.get(\n 'parsers', collections.Counter())\n differences = self._CompareCounter(parsers_counter, compare_parsers_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences,\n column_names=['Parser (plugin) name', 'Number of events'],\n title='Events generated per parser')\n\n # Compare extraction warnings by parser chain.\n warnings_counter = storage_counters.get(\n 'extraction_warnings_by_parser_chain', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'extraction_warnings_by_parser_chain', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences,\n column_names=['Parser (plugin) name', 'Number of warnings'],\n title='Extraction warnings generated per parser')\n\n # Compare extraction warnings by path specification\n warnings_counter = storage_counters.get(\n 'extraction_warnings_by_path_spec', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'extraction_warnings_by_path_spec', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences, column_names=['Number of warnings', 'Pathspec'],\n reverse=True, title='Pathspecs with most extraction warnings')\n\n # Compare recovery warnings by parser chain.\n warnings_counter = storage_counters.get(\n 'recovery_warnings_by_parser_chain', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'recovery_warnings_by_parser_chain', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences,\n column_names=['Parser (plugin) name', 'Number of warnings'],\n title='Recovery warnings generated per parser')\n\n # Compare recovery warnings by path specification\n warnings_counter = storage_counters.get(\n 'recovery_warnings_by_path_spec', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'recovery_warnings_by_path_spec', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences, column_names=['Number of warnings', 'Pathspec'],\n reverse=True, title='Pathspecs with most recovery warnings')\n\n # Compare timelining warnings by parser chain.\n warnings_counter = storage_counters.get(\n 'timelining_warnings_by_parser_chain', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'timelining_warnings_by_parser_chain', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences,\n column_names=['Parser (plugin) name', 'Number 
of warnings'],\n title='Timelining warnings generated per parser')\n\n # Compare timelining warnings by path specification.\n warnings_counter = storage_counters.get(\n 'timelining_warnings_by_path_spec', collections.Counter())\n compare_warnings_counter = compare_storage_counters.get(\n 'timelining_warnings_by_path_spec', collections.Counter())\n differences = self._CompareCounter(\n warnings_counter, compare_warnings_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences, column_names=['Number of warnings', 'Pathspec'],\n reverse=True, title='Pathspecs with most timelining warnings')\n\n # Compare event labels.\n labels_counter = storage_counters.get('event_labels', collections.Counter())\n compare_labels_counter = compare_storage_counters.get(\n 'event_labels', collections.Counter())\n differences = self._CompareCounter(labels_counter, compare_labels_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences, column_names=['Label', 'Number of event tags'],\n title='Event tags generated per label')\n\n # Compare analysis reports.\n reports_counter = storage_counters.get(\n 'analysis_reports', collections.Counter())\n compare_reports_counter = compare_storage_counters.get(\n 'analysis_reports', collections.Counter())\n differences = self._CompareCounter(reports_counter, compare_reports_counter)\n\n if differences:\n stores_are_identical = False\n\n self._PrintCounterDifferences(\n differences, column_names=['Plugin name', 'Number of reports'],\n title='Reports generated per plugin')\n\n return stores_are_identical",
"def analyse_diffs(difftext, params):\n # print(\"-- analyse_diffs\")\n allitemdata = []\n line_number = 0\n for line in difftext[:]:\n line_number += 1\n line = clean_line(line)\n # print(\"\\n\" + line)\n pairs = create_pairs(line)\n # for pair in pairs:\n # print(pair)\n item_number = 0\n for item in pairs:\n item_number += 1\n item1, item2 = prepare_item(item)\n itemid = str(line_number)+\"-\"+str(item_number)\n itemdata = define_itemdata(itemid, item1, item2)\n itemdata = perform_itemanalysis(itemdata, item1, item2, params)\n # print(itemdata)\n allitemdata.append(itemdata)\n columns = define_columnorder()\n allitemdata = pd.DataFrame(allitemdata, columns=columns)\n # print(allitemdata)\n if allitemdata.shape[0] > 1:\n print(\"Looking good: \" + str(allitemdata.shape[0])\n + \" differences have been analysed.\")\n else:\n sys.exit(\"ERROR! No differences have been analysed. Stoppping.\")\n return allitemdata",
"def estimate_edit_overhead_ratio(self):\n \n self.edit_overhead_ratio = {}\n self.edit_overhead_ratio_d1 = {}\n self.edit_overhead_ratio_d2 = {}\n\n for idx, changes in self.results['log_changes'].items():\n self.edit_overhead_ratio[idx] = self._get_edit_overhead_ratio(changes)\n self.edit_overhead_ratio_d1[idx] = self._get_edit_overhead_ratio_d1(changes)\n self.edit_overhead_ratio_d2[idx] = self._get_edit_overhead_ratio_d2(changes)",
"def _prepare_diff_info(diff_file_contents, parent_diff_file_contents,\n repository, request, basedir, check_existence,\n get_file_exists=None, base_commit_id=None):\n if check_existence and get_file_exists is None:\n raise ValueError('Must provide get_file_exists when check_existence '\n 'is True')\n\n tool = repository.get_scmtool()\n parsed_diff = _parse_diff(tool, diff_file_contents)\n\n files = list(_process_files(\n parsed_diff=parsed_diff,\n basedir=basedir,\n repository=repository,\n base_commit_id=base_commit_id,\n request=request,\n check_existence=(check_existence and\n not parent_diff_file_contents),\n get_file_exists=get_file_exists))\n\n if len(files) == 0:\n raise EmptyDiffError(_('The diff is empty.'))\n\n # Sort the files so that header files come before implementation\n # files.\n files.sort(key=cmp_to_key(_compare_files))\n\n parsed_parent_diff = None\n parent_files = {}\n\n if parent_diff_file_contents:\n diff_filenames = {f.orig_filename for f in files}\n parsed_parent_diff = _parse_diff(tool, parent_diff_file_contents)\n\n # If the user supplied a base diff, we need to parse it and later\n # apply each of the files that are in main diff.\n parent_files = {\n f.modified_filename: f\n for f in _process_files(\n get_file_exists=get_file_exists,\n parsed_diff=parsed_parent_diff,\n basedir=basedir,\n repository=repository,\n base_commit_id=base_commit_id,\n request=request,\n check_existence=check_existence,\n limit_to=diff_filenames)\n }\n\n return {\n 'files': files,\n 'parent_files': parent_files,\n 'parsed_diff': parsed_diff,\n 'parsed_parent_diff': parsed_parent_diff,\n 'parser': parsed_diff.parser,\n }",
"def compare_old_and_new_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"statusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,\"status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"statusfiles\",recipedir,\"status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n myidx=0\n while myidx < len(scratchstatusfile.data):\n oldline = controlstatusfile.data[myidx]\n newline = scratchstatusfile.data[myidx]\n if \"#\" in oldline:\n pass\n else:\n ingred = oldline.split(\":\")[0].strip()\n oldstatus = oldline.split(\":\")[1].strip()\n newstatus = newline.split(\":\")[1].strip()\n if (oldstatus == \"P\") and (newstatus == \"P\"):\n rdict[recipedir][ingred]=\"AVOID\"\n elif (oldstatus == \"C\") and (newstatus == \"C\"):\n rdict[recipedir][ingred]=\"AVOID\"\n else:\n rdict[recipedir][ingred]=\"send\"\n myidx = myidx + 1\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict",
"def __simplify_analysis_results(self):\n simple_results = {}\n for key in (\"messages\", \"warnings\", \"errors\"):\n if key in self.__draft_analysis_result:\n issue_list = []\n issues = self.__draft_analysis_result[key]\n for ((message, code), layerlist) in issues.items():\n issue = {\n \"text\": message,\n \"code\": code,\n \"layers\": [layer.longName for layer in layerlist],\n }\n issue_list.append(issue)\n simple_results[key] = issue_list\n self.__draft_analysis_result = simple_results",
"def analyze_state_changes(self):\n graph = self._graph\n lost_chunks = set(self._lost_chunks)\n op_states = self._op_states\n\n # mark lost virtual nodes as lost when some preds are lost\n for n in graph:\n if not isinstance(n.op, VirtualOperand) \\\n or op_states.get(n.op.key) == OperandState.UNSCHEDULED:\n continue\n if any(pred.key in lost_chunks for pred in graph.iter_predecessors(n)):\n lost_chunks.add(n.key)\n\n # collect operands with lost data\n op_key_to_chunks = defaultdict(list)\n lost_ops = set()\n for n in graph:\n op_key_to_chunks[n.op.key].append(n)\n if n.key in lost_chunks:\n lost_ops.add(n.op.key)\n\n # check data on finished operands. when data lost, mark the operand\n # and its successors as affected.\n affected_op_keys = set()\n for op_key in lost_ops:\n affected_op_keys.add(op_key)\n for n in op_key_to_chunks[op_key]:\n affected_op_keys.update(succ.op.key for succ in graph.iter_successors(n))\n\n # scan the graph from bottom and reassign new states\n new_states = dict()\n for chunk in graph.topological_iter(reverse=True):\n op_key = chunk.op.key\n if chunk.op.key not in affected_op_keys:\n continue\n\n can_be_ready = True\n stop_spread_states = (OperandState.RUNNING, OperandState.FINISHED)\n for pred in graph.iter_predecessors(chunk):\n pred_op_key = pred.op.key\n # mark affected, if\n # 1. data of the operand is lost\n # 2. state does not hold data, or data is lost,\n # for instance, operand is freed.\n if pred.key in lost_chunks or op_states.get(pred_op_key) not in stop_spread_states:\n affected_op_keys.add(pred_op_key)\n can_be_ready = False\n\n # update state given data preservation of prior nodes\n chunk_op_state = op_states.get(op_key)\n if can_be_ready and chunk_op_state != OperandState.READY:\n new_states[op_key] = OperandState.READY\n elif not can_be_ready and chunk_op_state != OperandState.UNSCHEDULED:\n new_states[op_key] = OperandState.UNSCHEDULED\n\n op_states.update(new_states)\n return new_states",
"def printCantSolveCorruptDataTrend(annotators_answer):\n images_with_cant_solve = dict()\n images_with_corrut_data = dict()\n\n for annotator in annotators_answer: # I am iterating through the annotators\n\n # I am finding the indeces where either the cant_solve or corrupt_data options are used.\n intersection_cant_solve_corrupt_data = np.logical_or(annotators_answer[annotator]['cant_solve_list'] == 1,\\\n annotators_answer[annotator]['corrupt_data_list'] == 1)\n\n # I have split the cases where either the cant_solve or corrupt_data options are used\n corrupt_data_list_split = annotators_answer[annotator]['corrupt_data_list'][intersection_cant_solve_corrupt_data]\n cant_solve_list_split = annotators_answer[annotator]['cant_solve_list'][intersection_cant_solve_corrupt_data]\n\n image_id_cant_solve = annotators_answer[annotator]['image_id'][annotators_answer[annotator]['cant_solve_list'] == 1]\n image_id_corrupt_data = annotators_answer[annotator]['image_id'][annotators_answer[annotator]['corrupt_data_list'] == 1]\n\n for ids in image_id_cant_solve:\n if ids in images_with_cant_solve:\n images_with_cant_solve[ids] += 1\n else:\n images_with_cant_solve[ids] = 1\n\n for ids in image_id_corrupt_data:\n if ids in images_with_corrut_data:\n images_with_corrut_data[ids] += 1\n else:\n images_with_corrut_data[ids] = 1\n\n\n if np.sum(intersection_cant_solve_corrupt_data) != 0: \n # if there is use case of either options, we find the matching cases of the two options\n simple_matching = np.sum(cant_solve_list_split == corrupt_data_list_split)/len(intersection_cant_solve_corrupt_data)\n\n print(\"{} - Trend(cant solve-corrupt data): {}\\n\".format(annotator, simple_matching))\n\n else: # if there is no use case of either options, I print a message\n print(\"{} - didn't use cant_solve or corrupt_data options\\n\".format(annotator))\n \n for image_id in images_with_cant_solve:\n print('{} has {} cant_solve options'.format(image_id, images_with_cant_solve[image_id]))\n for image_id in images_with_corrut_data:\n print('{} has {} corrupt_data options'.format(image_id, images_with_corrut_data[image_id]))",
"def _post_analysis(self):\n\n # Filter replacements and remove all TOP values\n if self.model.replacements is not None:\n for codeloc in list(self.model.replacements.keys()):\n filtered_rep = {}\n for k, v in self.model.replacements[codeloc].items():\n if isinstance(v, claripy.ast.Base):\n # claripy expressions\n if not PropagatorState.is_top(v):\n filtered_rep[k] = v\n else:\n # AIL expressions\n if not PropagatorAILState.is_top(v):\n filtered_rep[k] = v\n self.model.replacements[codeloc] = filtered_rep\n\n if self._cache_results:\n self.kb.propagations.update(self.prop_key, self.model)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update dictionary from a collection of documents. Each document is a list of tokens. | def add_document_lists(self, docs):
for sent in docs:
sent = map(self.process_token, sent)
self._token_count.update(sent) | [
"def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)",
"def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token",
"def update(tokens):\n global TOKENS\n\n for token_id in tokens:\n\n if token_id not in TOKENS:\n TOKENS[token_id] = {}\n\n if isinstance(tokens, dict):\n token_info = tokens[token_id]\n if token_info is None:\n token_info = {}\n\n alias = token_info.get(\"alias\")\n if alias is not None:\n TOKENS[token_id][\"alias\"] = alias\n\n decimals = token_info.get(\"decimals\")\n if decimals is not None:\n TOKENS[token_id][\"decimals\"] = decimals",
"def __update_documents(self, tags_list):\n docs = [] # updated documents\n # sentence_number in the tags_list\n sentence_num = 0\n for document in self.documents:\n updated_sentences = []\n for doc_sentence in document.sentences:\n\n # get the word, tag list for the current sentence\n # and we have to align the sentence_num for each document\n word_tag_list = tags_list[sentence_num]\n\n # update current sentence with the tag list\n words, tags = zip(*word_tag_list)\n updated_sentence = self.__update_sentence_tags(doc_sentence, tags)\n updated_sentences.append(updated_sentence) # add the new sentence to the sentence list\n\n sentence_num+=1\n\n # create a new document with updated sentences\n new_document = BIODocument(document.docname, document.text, updated_sentences)\n docs.append(new_document)\n return docs",
"def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()",
"def _updateSurfaceFormsDictionary( self, sfDocuments ):\n\t\tprint( \"[*] Updating ned_dictionary collection... \", end=\"\" )\n\n\t\trequests = [] \t\t\t\t\t\t\t\t\t\t# We'll use bulk writes to speed up process.\n\t\tBATCH_SIZE = 10000\n\t\ttotalRequests = 0\n\t\tfor sfDoc in sfDocuments:\n\t\t\tif not sfDoc: continue\t\t\t\t\t\t\t# Skip empty sf dictionaries.\n\n\t\t\tfor sf in sfDoc:\t\t\t\t\t\t\t\t# Iterate over surface forms in current dict.\n\t\t\t\trequests.append( pymongo.UpdateOne( { \"_id\": sf }, { \"$inc\": sfDoc[sf] }, upsert=True ) )\n\t\t\t\ttotalRequests += 1\n\t\t\t\tif len( requests ) == BATCH_SIZE: \t\t\t# Send lots of update requests.\n\t\t\t\t\tself._mNed_Dictionary.bulk_write( requests )\n\t\t\t\t\trequests = []\n\n\t\tif requests:\n\t\t\tself._mNed_Dictionary.bulk_write( requests ) \t# Process remaining requests.\n\t\tprint( \"Done with\", totalRequests, \"requests sent!\" )",
"def __increment_token_count(self, tokens):\n\n #Loop through the document tokens and increment the token and document count\n for token_qty in tokens.iteritems():\n token = token_qty[1]['token']\n qty = token_qty[1]['qty']\n\n try:\n self.db[self.tokens_collection_name].insert({'_id': token, 'qty': 0, 'doc': 0 })\n except DuplicateKeyError:\n pass\n\n self.db[self.tokens_collection_name].update({'_id': token }, {'$inc': {'qty': qty, 'doc': 1 }})",
"def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)",
"def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''",
"def fit(self, documents):\n # Get a list of all the unique tokens that appear\n vocab = list({\n token for doc in documents\n for token in self.tokenizer(doc)\n if token not in self._word2index\n })\n\n # This is UNK, START, END, and PAD.\n nb_special_tokens = 4\n\n # First, we map token -> ID, leaving the first slots for special tokens\n self._word2index.update({\n word: idx\n for idx, word in enumerate(vocab, nb_special_tokens)\n })\n\n # Next, we invert this map, which we can do since it was built from\n # unique vocabulary elements and is by definition bijective.\n self._index2word.update({\n idx: word\n for word, idx in self._word2index.items()\n })\n\n return self",
"def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)",
"def store(self):\n\t\tfor word in self._index.keys():\n\t\t\tself._store_word(word, self._index[word])\n\n\t\tself._doc_collection.insert_many(self._docs.values())\n\t\tself.docs = {}\n\t\tself.index = {}\n\t\tself._counter = 0;",
"def add_doc_in_posting_list(word_posting_list, docs):\n for doc_score in docs:\n if doc_score[\"doc\"] in word_posting_list.keys():\n word_posting_list[doc_score[\"doc\"]] = int(doc_score[\"score\"]) + int(word_posting_list[doc_score[\"doc\"]])\n else:\n word_posting_list[doc_score[\"doc\"]] = doc_score[\"score\"]",
"def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document",
"def merge_tokens(self, second_token):\n self.tf += second_token.tf\n self.df += second_token.df\n self.doc_dict = {**self.doc_dict, **second_token.doc_dict}",
"def _create_doc_dict_from_string(self, list_of_docs_string):\n self.doc_dict = {}\n\n for string in list_of_docs_string:\n string = string.split()\n self.doc_dict[string[0]] = string[1:]",
"def apply(*functions):\n for record in tqdm(corpus.find(), total=corpus.count()):\n\n orig_record = record.copy()\n\n for func in functions:\n ret = func(**record)\n overwrite = False\n\n if isinstance(ret, tuple):\n ret, overwrite = ret\n\n if ret:\n if overwrite:\n record = ret\n else:\n record = dict(record, **ret)\n\n if record != orig_record:\n corpus.update({'_id': record['_id']}, record)",
"def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True",
"def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indices = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indices = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indices = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_urls = doc_as_list[12]\n retweet_quoted_indices = doc_as_list[13]\n\n term_dict = {}\n entities_dict={}\n tokenized_text = []\n \"\"\"--------parse url and quote--------\"\"\"\n if len(url) > 2:\n url_finished = str(self.parse_url(url))\n returned_token = self.check_url(url_finished) # check if the term is url\n check_spec = '2019'\n if check_spec in returned_token:\n returned_token.remove(check_spec)\n\n if len(returned_token) > 0:\n tokenized_text.extend(returned_token)\n else:\n tokenized_text.append(returned_token)\n\n to_insert_list_dict = self.parse_sentence(full_text)\n tokenized_text += to_insert_list_dict[0]\n for key in to_insert_list_dict[1]:\n if key in entities_dict:\n entities_dict[key] = entities_dict[key] + to_insert_list_dict[1][key]\n else:\n entities_dict[key] = to_insert_list_dict[1][key]\n\n if quote_text != None and len(quote_text) > 2:\n to_insert_list_dict = self.parse_sentence(quote_text)\n for term_from_quote in to_insert_list_dict[0]:\n if term_from_quote in tokenized_text:\n to_insert_list_dict[0].remove(term_from_quote)\n tokenized_text += to_insert_list_dict[0]\n for key in to_insert_list_dict[1]:\n if key in entities_dict:\n entities_dict[key] = entities_dict[key] + to_insert_list_dict[1][key]\n else:\n entities_dict[key] = to_insert_list_dict[1][key]\n\n doc_length = len(tokenized_text) # after text operations.\n\n for term in tokenized_text:\n\n if len(term) == 0 or term.lower() in self.stop_words or self.isAscii(term) == False or (term.isdigit() and len(term) > 15):\n continue\n\n\n if len(term) == 0:\n continue\n\n\n term_dict = self.upperCase_handler(term, term_dict)\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, entities_dict, doc_length)\n\n return document"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the list of token_id given doc. | def doc2id(self, doc):
if isinstance(doc, string_types):
raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")
doc = map(self.process_token, doc)
return [self.token_to_id(token) for token in doc] | [
"def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]",
"def get_word2ids_from_tokens(word2id,tokens):\r\n return [get_word2id(word2id,x) for x in tokens]",
"def get_tokens_for_doc(self, pid):\n end_offset = self.end_offsets[pid]\n start_offset = end_offset - self.doclens[pid]\n return self.emb2tid[start_offset:end_offset]",
"def get_tokens(self, document):\n raise NotImplementedError()",
"def get_word2ids_from_token_lists(word2id,token_lists):\r\n return [get_word2ids_from_tokens(word2id,x) for x in token_lists]",
"def get_list_ids():",
"def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results",
"def get_document_ids_from_dml_results(result):\n ret_val = list(map(lambda x: x.get('documentId'), result))\n return ret_val",
"def convert_tokens_to_ids(vocab, tokens):\n ids = []\n for token in tokens:\n ids.append(vocab[token])\n return ids",
"def tokenize(doc: str) -> list:\n tokens = [word for word in doc.split() if word not in STOP_WORDS]\n return tokens",
"def convert_ids_to_tokens(self, tok_ids):\n result = []\n for tok in tok_ids:\n word = self.itos(tok)\n result.append(word)\n return result",
"def doc2Id(self):\r\n \r\n self.docsID = []\r\n for doc in self.docsString:\r\n tempDoc = []\r\n for word in doc:\r\n tempDoc.append(self.word2IdVocabulary[word])\r\n self.docsID.append(tempDoc)",
"def get_token(self, id):\n return self.word_list[id]",
"def tokenizer(doc):\n\ttokens = word_tokenize(doc)\n\treturn tokens",
"def list_ids(token):\n\n init_tenant_context(token, db)\n\n data = []\n LOGGER.debug(f\" Fetching list with known devices\")\n for id in db.session.query(Device.id).all():\n data.append(id[0])\n return data",
"def extarct_id_tf(docs):\n\n if len(docs) == 0:\n return []\n docs = docs.split(',')\n ret = []\n for doc in docs:\n doc = doc.split('|')\n # doc_id, tf\n ret.append((int(doc[0]), int(doc[1])))\n return ret",
"def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)",
"def list(uid: int):\n\n return Token.list(uid)",
"def getReviewsWithToken(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.doc_to_words_path, 'rb') as bin:\n tup = []\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n docid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # count words:\n count = 0\n for i in range(frequency):\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n if wordid == wordid_in_file:\n count += 1\n tup.append(docid_in_file)\n tup.append(count)\n return tuple(tup)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the token_id of given token. | def token_to_id(self, token):
token = self.process_token(token)
return self.token2id.get(token, len(self.token2id) - 1) | [
"def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)",
"def get_id(self, token):\n\n if not isinstance(token, types.UnicodeType):\n raise TypeError(\"token must be Unicode\")\n\n if token not in self.token_ids:\n # Register the token, assigning the next available integer\n # as its id.\n token_id = varint.encode_one(len(self.tokens))\n\n self._put(token_id, token)\n self.token_log.append((token.encode(\"utf-8\"), token_id))\n\n return self.token_ids[token]",
"def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")",
"def TokenToId(self, token):\n if isinstance(token, CommandToken):\n return token.Id\n return self.vocab[token]",
"def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]",
"def get_user_id_by_token(token):\n data = mongo.db.tokens.find_one({'token': token})\n\n return data.get('user_id') if data else None",
"def get_id(self, token):\n if token in self.word_dict:\n return self.word_dict[token]\n else:\n return 0",
"def token_to_id(self,\n token):\n return self.sp_processor.PieceToId(token)",
"def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")",
"def get_app_token_id(self, token_name):\n self._update_header()\n\n response = self._ui_api_request(\n 'GET', APP_TOKEN, exp_resp_code=HTTPStatus.OK)\n\n for token in response['data']['tokens']:\n if token['token_name'] == token_name:\n token_id = token['token_id']\n return token_id\n\n return None",
"def get_token(self, token_id):\n return self.tokens[token_id]",
"def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))",
"def lookup_token(self, token):\n return self._token_to_idx[token]",
"def get_token(self, token_id: int) -> Token:",
"def token_to_auth_user_id(token: str) -> int:\n session_id = decode_token(token)\n for user in get_users():\n for session in user.get('session_list'):\n if session == session_id:\n return user.get('u_id')\n return None",
"def get_token(self, token_id):\n raise NotImplementedError()",
"def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover",
"def token_to_id(self):\n return lambda x: self.alphabet.tok_to_idx[x]",
"def try_get_user_id_from_token(token):\n dot_index = token.find('.')\n if (dot_index > 0):\n token_base64 = token[:dot_index]\n \n try:\n token_string = b64decode(token_base64)\n except Base64DecodeError:\n user_id = 0\n else:\n try:\n user_id = int(token_string)\n except ValueError:\n user_id = 0\n else:\n user_id = 0\n \n return user_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
tokenid to token (string). | def id_to_token(self, idx):
return self._id2token[idx] | [
"def id_to_token(self,\n id):\n return self.sp_processor.IdToPiece(id)",
"def convert_id_to_token(self, index: int) -> str:\n try:\n return self.tokens[index]\n except IndexError:\n raise IndexError(f\"Unrecognized index: '{index}'\")",
"def map_id_to_token(self, id: int):\n return self._id_to_token[id]",
"def get_token(self, token_id: int) -> Token:",
"def token_to_id(self, token):\n token = self.process_token(token)\n return self.token2id.get(token, len(self.token2id) - 1)",
"def _convert_id_to_token(self, index, return_unicode=True):\n return self.idx2token[index]",
"def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)",
"def TokenToId(self, token):\n if isinstance(token, CommandToken):\n return token.Id\n return self.vocab[token]",
"def id_to_token(self, index):\r\n return self.decoder.get(index)",
"def _convert_id_to_token(self, index):\n return self.reverse_vocab.get(index, self.unk_token)",
"def get_token(self, token_id):\n raise NotImplementedError()",
"def token_to_id(self,\n token):\n return self.sp_processor.PieceToId(token)",
"def tokens_to_string(self, tokens):",
"def token_id_hex(self) -> str: # this is *ALSO* a MINT property\n return self.token_id.hex()",
"def _subtoken_id_to_subtoken_string(self, subtoken):\n if 0 <= subtoken < self.vocab_size:\n return self._all_subtoken_strings[subtoken]\n return u\"\"",
"def _subtoken_id_to_subtoken_string(self, subtoken):\n if 0 <= subtoken < self.vocab_size:\n return self._all_subtoken_strings[subtoken]\n return u\"\"",
"def token_to_id(self):\n return lambda x: self.alphabet.tok_to_idx[x]",
"def _get_unique_token() -> str:\n return f\"0x{int(secrets.token_hex(4), base=16):X}\"",
"def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the current trigger. | def delete(self):
request = self.triggers_service.delete(path=self._path)
request.execute() | [
"def delete_trigger(self, Name: str) -> Dict:\n pass",
"def delete_trigger(self, trigger_id):\n self._delete(path=\"triggers/{}\".format(trigger_id))",
"def create_delete_trigger(self):\n self.execute(self.commands.delete_function(\n dest_table=self.name,\n pk_col=self.primary_key_column\n ))\n\n self.execute(self.commands.delete_trigger(\n self.triggers['DELETE'],\n self.source.name,\n self.name\n ))",
"def delete(self):\n\t\tdel self.scheduler.find(self)\n\t\tdel self",
"def delete(self):\n self['_deleted'] = True",
"def deleteOrDelay(self):\n self.delete()",
"def delete_build_trigger(\n self,\n trigger_id: str,\n project_id: str = PROVIDE_PROJECT_ID,\n retry: Retry | _MethodDefault = DEFAULT,\n timeout: float | None = None,\n metadata: Sequence[tuple[str, str]] = (),\n location: str = \"global\",\n ) -> None:\n client = self.get_conn(location=location)\n\n self.log.info(\"Start deleting build trigger: %s.\", trigger_id)\n\n client.delete_build_trigger(\n request={\"project_id\": project_id, \"trigger_id\": trigger_id},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n self.log.info(\"Build trigger has been deleted: %s.\", trigger_id)",
"def delete(event, context):\r\n return",
"def test_delete_trigger_item(self):\n canary = CanaryFileHelpers.create_canary_file(self.mu)\n\n id = canary.pk\n identifier = canary.identifier\n\n url = reverse('trigger_item', kwargs={'id': id})\n\n response = self.client.delete(url, **self.headers,\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertFalse(Trigger.objects.filter(\n trigger_identifier=identifier\n ).exists())",
"def __macroDelete(self):\n self.activeWindow().macroDelete()",
"def test_remove_trigger(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death, name='on_death')\n self.client.add_trigger(trigger)\n self.assertEqual(len(self.client.triggers), 1)\n self.client.remove_trigger('on_death')\n self.assertEqual(len(self.client.triggers), 0)\n with self.assertRaises(KeyError):\n self.client.remove_trigger('does_not_exist')",
"def drop_trigger(self, trig):\n self.vr_trig_queue.put((trig,'done'))",
"def deleteTableTrigger(self, trigger, table, schema=None):\n\t\tsql = u\"DROP TRIGGER %s ON %s\" % (self.quoteId(trigger), self.quoteId( (schema, table) ))\n\t\tself._exec_sql_and_commit(sql)",
"def delete(self):\n with self.locked():\n self.path.delete()",
"def delete(self):\n print(self.client.delete_tell(self.id))",
"def delete(self):\r\n with self.locked():\r\n self.path.delete()",
"def delete(self):\n\t\tself.table.delete()",
"def delete(self, alert):\n super(GAlertsManager, self).delete(alert.new_alert)",
"def delete(self):\r\n delete_tracks(self.project, [self])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and return a D > D0 pi Selection object. | def makeDstar2D0Pi( name
, config
, DecayDescriptor
, inputSel
) :
daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config']
combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config']
dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \
"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config']
_Dstar = CombineParticles( DecayDescriptor = DecayDescriptor
, DaughtersCuts = { "pi+" : daugCuts }
, CombinationCut = combCuts
, MotherCut = dstarCuts
)
return Selection( name+'Sel',
Algorithm = _Dstar,
RequiredSelections = inputSel
) | [
"def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )",
"def makeDstar2D0Pi(self, name, inputSel, decDescriptors):\n combCuts = '((AM - AM1) < {0[Dstar_AMDiff_MAX]})'.format(self.config)\n\n dstarCuts = '(VFASPF(VCHI2/VDOF) < {0[Dstar_VCHI2VDOF_MAX]})'.format(\n self.config\n )\n\n _Dstar = CombineParticles(\n name='Combine{0}'.format(name),\n DecayDescriptors=decDescriptors,\n CombinationCut=combCuts,\n MotherCut=dstarCuts\n )\n\n return Selection(name, Algorithm=_Dstar, RequiredSelections=inputSel)",
"def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )",
"def __create_selector(\n renderer: vtkRenderer, tolerance: float = 1e-6\n ) -> vtkSelectVisiblePoints:\n selector = vtkSelectVisiblePoints()\n selector.SetRenderer(renderer)\n selector.SetTolerance(tolerance)\n\n return selector",
"def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )",
"def PrimarySelection(self) -> object:",
"def get_random_choice(self):\n if self.p_manager.num_dtrees == 1:\n return 0\n\n prob = self.p_manager.get_prob()\n self._last_prob = prob # this will be used to paint particles\n try:\n choice = np.random.choice(range(self.p_manager.num_dtrees), p=prob)\n assert self.p_manager.particles[choice].tree is not None\n assert hasattr(self.p_manager.particles[choice].tree, \"poses\"), hex(\n id(self.p_manager.particles[choice].tree)\n )\n except ValueError as e:\n # NOTE dont know why the probability got out of sync... (not sums to 1)\n # probably because of underflow?\n # We will notify the use, then try re-sync the prob\n LOGGER.error(\n \"!! probability got exception '{}'... trying to re-sync prob again.\".format(\n e\n )\n )\n self.p_manager.resync_prob()\n prob = self.p_manager.get_prob()\n self._last_prob = prob\n choice = np.random.choice(range(self.p_manager.num_dtrees), p=prob)\n self.last_choice = choice\n return choice",
"def __init__(self,initial_v,v_select=0,max_dev_semitones=1):\n self.v=initial_v\n self.v_select=v_select\n self.max_dev_semitones=max_dev_semitones",
"def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)",
"def select(self):\n return Select(self)",
"def create_selection():\n operation = Forward()\n nested = Group(Suppress(\"(\") + operation + Suppress(\")\")).setResultsName(\"nested\")\n select_expr = Forward()\n functions = select_functions(select_expr)\n maybe_nested = functions | nested | Group(var_val)\n operation <<= maybe_nested + OneOrMore(oneOf(\"+ - * /\") + maybe_nested)\n select_expr <<= operation | maybe_nested\n alias = Group(Suppress(upkey(\"as\")) + var).setResultsName(\"alias\")\n full_select = Group(\n Group(select_expr).setResultsName(\"selection\") + Optional(alias)\n )\n return Group(\n Keyword(\"*\") | upkey(\"count(*)\") | delimitedList(full_select)\n ).setResultsName(\"attrs\")",
"def __Dplus__(self, conf):\n _dplus = AutomaticData(Location = 'Phys/StdLooseDplus2KPiPi/Particles')\n _filter_dplus = FilterDesktop(Code = self.__KpiCuts__(conf) +\" & \"+ self.__DplusCuts__(conf))\n _seldplus = Selection(\"Selection_\"+self.name+\"_dplus\",\n RequiredSelections = [ _dplus ] ,\n Algorithm = _filter_dplus)\n return _seldplus",
"def _make_select(self):\n conditions = []\n values = []\n picklist = None\n if self.selection_dict:\n select_d = self.selection_dict\n if 'ksize' in select_d and select_d['ksize']:\n conditions.append(\"sourmash_sketches.ksize = ?\")\n values.append(select_d['ksize'])\n if 'num' in select_d and select_d['num'] > 0:\n conditions.append(\"sourmash_sketches.num > 0\")\n if 'scaled' in select_d and select_d['scaled'] > 0:\n conditions.append(\"sourmash_sketches.scaled > 0\")\n if 'containment' in select_d and select_d['containment']:\n conditions.append(\"sourmash_sketches.scaled > 0\")\n if 'moltype' in select_d and select_d['moltype'] is not None:\n moltype = select_d['moltype']\n assert moltype in ('DNA', 'protein', 'dayhoff', 'hp'), moltype\n conditions.append(f\"sourmash_sketches.moltype = '{moltype}'\")\n\n picklist = select_d.get('picklist')\n\n return conditions, values, picklist",
"def make_pi():\n return [3, 1, 4]",
"def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()",
"def GetSelectionPoint(self):\n ...",
"def __create_fromnumeration_select(self):\n if not self.fromnumeration_label:\n self.__create_fromnumeration_label()\n from1=tk.Radiobutton(self.fromnumeration_label, variable=self.__from_system,\n value=converter.NUMERATION_BINARY, text=\"Binary system\")\n from2=tk.Radiobutton(self.fromnumeration_label, variable=self.__from_system,\n value=converter.NUMERATION_DECIMAL, text=\"Decimal system\")\n from3=tk.Radiobutton(self.fromnumeration_label, variable=self.__from_system,\n value=converter.NUMERATION_HEXADECIMAL, text=\"Hexadecimal system\")\n from1.grid()\n from2.grid()\n from3.grid()",
"def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)",
"def __Kshort__(self, conf): \n _ksdd = AutomaticData(Location = 'Phys/StdLooseKsDD/Particles')\n _ksll = AutomaticData(Location = 'Phys/StdLooseKsLL/Particles')\n _filter_ksdd = FilterDesktop(Code = self.__KsCuts__(conf))\n _filter_ksll = FilterDesktop(Code = self.__KsCuts__(conf)) \n _selksdd = Selection(\"Selection_\"+self.name+\"_Ksdd\",\n RequiredSelections = [ _ksdd ] ,\n Algorithm = _filter_ksdd)\n _selksll = Selection(\"Selection_\"+self.name+\"_Ksll\",\n RequiredSelections = [ _ksll ] ,\n Algorithm = _filter_ksll)\n\n _sel = MergedSelection(\"Selection_\"+self.name+\"_Kshort\",\n RequiredSelections = [ _selksdd, _selksll ])\n return _sel"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all quest handlers here | def load_quests(self):
raise NotImplementedError() | [
"def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0",
"def setup_default_handlers(self):\n msg_type = self.build_message_type('set')\n self.skill.add_event(msg_type, self.gui_set)",
"def _load_questions(self):\n logger.info(f\"[{self.id}] Loading questions\")\n self.questions_by_id = {}\n for q in self.lime_api.list_questions(self.id):\n question = Question(q)\n self.questions_by_id[question.question_id] = question\n self._process_question_relationships()\n self._create_question_title_mapping()",
"def loadTreeHandlers(self):\n #\n # Paths for key folders\n plugin_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\")\n self.handler_path = handler_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\", \"trees\")\n #\n if not g.os_path_isdir(handler_path):\n g.es(\"No tree handler folder found\", color=\"red\")\n else:\n g.es(\"Scanning for tree handlers\", color=\"blue\")\n #\n # Add folder locations to path\n old_path = sys.path[:]\n sys.path.insert(0, plugin_path)\n sys.path.insert(0, handler_path)\n #@+<< Get plugin manager module >>\n #@+node:ekr.20050329082101.135: *4* << Get plugin manager module >>\n # Get the manager\n try:\n self.plugin_manager = __import__(\"plugin_manager\")\n except ImportError as err:\n g.es(\"Autotrees did not load plugin manager: %s\" % (err,), color=\"red\")\n self.plugin_manager = None\n #@-<< Get plugin manager module >>\n #@+<< Find all handlers >>\n #@+node:ekr.20050329082101.136: *4* << Find all handlers >>\n # Find all handlers\n for filename in glob.glob(g.os_path_join(handler_path, \"*.py\")):\n handler_name = g.os_path_splitext(g.os_path_split(filename)[1])[0]\n g.es(\"... looking in %s\" % handler_name, color=\"blue\")\n try:\n self.loadHandlersFrom(handler_name)\n except BadHandler as err:\n g.es(\"... unable to load '%s' handler: %s\" % (handler_name, err), color=\"red\")\n #@-<< Find all handlers >>\n # Restore\n sys.path = old_path",
"def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))",
"def handle(self, *args, **options):\n factory = LoaderFactory()\n base_path = settings.QUESTIONS_BASE_PATH\n\n factory.difficulty_levels_loader(\n structure_filename='difficulty-levels.yaml',\n base_path=base_path\n ).load()\n\n factory.programming_concepts_loader(\n structure_filename='programming-concepts.yaml',\n base_path=base_path\n ).load()\n\n factory.question_contexts_loader(\n structure_filename='question-contexts.yaml',\n base_path=base_path\n ).load()\n\n factory.create_questions_loader(\n structure_filename='questions.yaml',\n base_path=base_path\n ).load()",
"def init_handlers(self):\n self.dispatcher.add_handler(MessageHandler(Filters.text, self.text_handler))",
"def _check_handlers(self):\n if self.check_chords:\n self._check_chord_handlers()\n if self.check_sequences:\n self._check_sequence_handlers()\n if self.check_chord_progressions:\n self._check_chord_progression_handlers()",
"def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)",
"def CompleteAll(self):\n\t\tfor EachQuest in self.GetQuestLog().GetQuests():\n\t\t\tself.CompleteQuest(EachQuest)",
"def runcommands(self):\n self.Globals()\n self.Position()\n self.Btn_Reset()\n self.checkdir()\n self.background()\n ################\n #Starting Quest#\n ################\n if self.room == \"13\" and self.Quests == []:\n self.Quests.append(\"GetNZTicket\")\n if \"GetNZTicket\" in self.Quests:\n self.Questcom()",
"def loadAllCommand(self, player):\n for eachCmd in self.commands.keys():\n player.addCommand(eachCmd, self.commands[eachCmd]())",
"def add_commands(self):\n self.dp.add_handler(CommandHandler(\"start\", self.start))\n self.dp.add_handler(CommandHandler(\"help\", self.help))\n self.dp.add_handler(CommandHandler(\"imprint\", self.imprint))\n self.dp.add_handler(InlineQueryHandler(self.inlinequery))",
"def on_register_event_handlers(self):\n pass",
"def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)",
"def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")",
"def get_handlers(self):\n return []",
"def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action",
"def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a quest handler to the aiohttp app | def add_quest(self, method: str, route: str, handler):
self.aiohttp.router.add_route(method, route, handler) | [
"def handle_telegram_request(app: Flask):\n pass",
"async def run(self, app: ASGIApp) -> None:\n try:\n await app(self.request.scope, self.receive, self.send)\n except BaseException as exc:\n self.logger.error(\"Exception in 'http' protocol.\", exc_info=exc)\n if self.state is HTTPCycleState.REQUEST:\n await self.send(\n {\n \"type\": \"http.response.start\",\n \"status\": 500,\n \"headers\": [[b\"content-type\", b\"text/plain; charset=utf-8\"]],\n }\n )\n await self.send(\n {\"type\": \"http.response.body\", \"body\": b\"Internal Server Error\"}\n )\n elif self.state is not HTTPCycleState.COMPLETE:\n self.response = Response(\n status=500,\n body=b\"Internal Server Error\",\n headers=[[b\"content-type\", b\"text/plain; charset=utf-8\"]],\n )",
"def on_startup():\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler",
"def ingredient_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = launch(handler_input, True)\n reprompt = speech_text\n\n attr = handler_input.attributes_manager.session_attributes\n attr[\"last_response\"] = speech_text\n handler_input.attributes_manager.session_attributes = attr\n\n handler_input.response_builder.speak(speech_text).ask(reprompt)\n return handler_input.response_builder.response",
"def apiai_hook():\n\n route = {\n 'artist_bio': artist_bio,\n 'artist_top_tracks': artist_top_tracks,\n 'artist_similar': artist_similar,\n 'track_similar': track_similar,\n }\n\n req = request.get_json(silent=True, force=True)\n response = {}\n try:\n response = route[req.get('result').get('action')](req)\n except (KeyError, AttributeError) as e:\n logger.error('Invalid action specified, error=\"{0}\".'.format(e))\n return jsonify(response)\n\n return response",
"def add_handler(self, handler):\r\n pass",
"def add_handler(self, handler):\n pass",
"async def start(ctx):\r\n if not char.char:\r\n await bot.say('I\\'m sorry but you may only request a quest if you are a registered hero.')\r\n return\r\n\r\n name = arg(ctx).capitalize()\r\n if check(cursor, 'quests', 'name', name): # Checks if quest exists\r\n await bot.say('Quest {} was not found.'.format(name))\r\n return\r\n\r\n qr = cursor.execute('SELECT requirement FROM quests WHERE name = ?', (name,)).fetchall()[0][0]\r\n cr = char.extra.split(', ')\r\n\r\n if char.curquest != 'Currently no pending quests': # Checks for already pending quests\r\n await bot.say('You already have a quest pending.')\r\n return\r\n elif qr.lower() != 'none':\r\n for i in qr.lower().split(', '):\r\n if i not in [x.lower() for x in cr]:# Checks if user has requirement for quest\r\n await bot.say('You do not have the requirements to do this.')\r\n return\r\n\r\n failure = 20\r\n ql, achv = cursor.execute('SELECT level, achievement FROM quests WHERE name = ?', (name,)).fetchall()[0] # Gets quest level\r\n cl = char.lvl\r\n if ql == 0:\r\n failure = 0\r\n elif achv != 'None' and ql > 30:\r\n failure = 40\r\n\r\n if ql > cl: # Changes failure rate\r\n failure += int((ql-cl) * 8.67)\r\n elif cl > ql:\r\n failure -= int((cl-ql) * 5)\r\n\r\n if char.curpet != 'None':\r\n failure -= char.pet['lvl']\r\n\r\n failure = min(max(failure, 0), 100)\r\n tim, exp, gold = cursor.execute('SELECT time, exp, gold FROM quests WHERE name = ?', (name,)).fetchall()[0] # Gets quest info\r\n cursor.execute('INSERT INTO logs(ID, time, duration, exp, gold, name, failure) VALUES (?, ?, ?, ?, ?, ?, ?)',\r\n (char.id, int(time.time()), tim, exp, gold, name, failure)) # Logs quest info\r\n db.commit() # commits changes\r\n if tim < 60:\r\n await bot.say('Your hero has started adventuring on quest {} for {}mins, {} exp and {}G, wish them luck!' .format(name, tim, exp, gold))\r\n # prints quest confirmation\r\n else:\r\n await bot.say('Your character has started adventuring on quest {} for {}h and {}mins, {} exp and {}G, wish them luck!'\r\n .format(name, (tim//60), (tim % 60), exp, gold))\r\n\r\n await record(ctx, '{} has started on the quest {}.'.format(char.username, name))",
"async def init_http_client(app: Application):\n logger.info('Initializing HTTP client')\n tcp_connector = aiohttp.TCPConnector(limit=TCP_CONNECTIONS_LIMIT, ttl_dns_cache=TTL_DNS_CACHE)\n app['http_client'] = ClientSession(connector=tcp_connector)",
"def make_new_handler(self, *args, **kwargs):",
"async def async_start_hermod():\n print('START HERMOD SERVICES')\n module_dir = os.getcwd()\n sys.path.append(module_dir)\n\n if ARGS.webserver:\n webservice_config = {\n 'certificates_folder': os.getenv('SSL_CERTIFICATES_FOLDER', '/app/certs'),\n 'domain_name': os.getenv('SSL_DOMAIN_NAME', 'localhost'),\n 'email': os.getenv('SSL_EMAIL', 'none@syntithenai.com'),\n }\n # dev mode rebuild web - (NEED docker rebuild with npm global watchify)\n # watchify index.js -v -o static/bundle.js\n CONFIG['services']['WebService'] = webservice_config\n\n if ARGS.actionserver > 0:\n CONFIG['services']['RasaActionsService'] = {}\n\n if ARGS.hermod:\n # admin mqtt connection\n CONFIG['mqtt_hostname'] = os.getenv('MQTT_HOSTNAME') or 'localhost'\n CONFIG['mqtt_hostname'] = os.getenv('MQTT_HOSTNAME') or 'localhost'\n CONFIG['mqtt_port'] = int(os.getenv('MQTT_PORT') or '1883')\n CONFIG['mqtt_user'] = os.getenv('MQTT_USER') or 'hermod_admin'\n CONFIG['mqtt_password'] = os.getenv('MQTT_PASSWORD') or 'talk2mebaby'\n\n # SET SOUND DEVICES\n CONFIG['services']['AudioService'] = {\n \"site\": CONFIG.get('mqtt_user'),\n \"inputdevice\": \"pulse\",\n \"outputdevice\": \"pulse\"}\n if os.getenv(\n 'SPEAKER_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['outputdevice'] = os.getenv(\n 'SPEAKER_DEVICE')\n if os.getenv(\n 'MICROPHONE_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['inputdevice'] = os.getenv(\n 'MICROPHONE_DEVICE')\n\n CONFIG['services']['DialogManagerService'] = {}\n CONFIG['services']['DataLoggerService'] = {}\n\n # HOTWORD\n # #,bumblebee,porcupine\"\n CONFIG['services']['PicovoiceHotwordService'] = {\n \"hotwords\": os.getenv(\n 'PICOVOICE_HOTWORDS',\n \"picovoice\"),\n \"sensitivity\": 0.9}\n\n # ASR\n # Deepspeech\n using_asr = None\n if os.getenv('DEEPSPEECH_MODELS') is not None and os.path.exists(\n os.getenv('DEEPSPEECH_MODELS')):\n if 'DeepspeechAsrService' not in CONFIG['services']:\n CONFIG['services']['DeepspeechAsrService'] = {}\n CONFIG['services']['DeepspeechAsrService']['model_path'] = os.getenv(\n 'DEEPSPEECH_MODELS')\n using_asr = 'Deepspeech'\n\n # disable deepspeech and enable IBM ASR\n if os.getenv('IBM_SPEECH_TO_TEXT_APIKEY', None) is not None and len(\n os.getenv('IBM_SPEECH_TO_TEXT_APIKEY', '')) > 0:\n CONFIG['services'].pop('DeepspeechAsrService', None)\n # 'language': os.environ.get('GOOGLE_APPLICATION_LANGUAGE','en-AU')}\n CONFIG['services']['IbmAsrService'] = {'vad_sensitivity': 1}\n using_asr = 'IBM'\n\n # disable deepspeech,ibm and enable google ASR\n if os.getenv('GOOGLE_ENABLE_ASR') == \"true\" and \\\n os.getenv('GOOGLE_APPLICATION_CREDENTIALS') \\\n and os.path.isfile(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')):\n CONFIG['services'].pop('DeepspeechAsrService', None)\n CONFIG['services'].pop('IbmAsrService', None)\n CONFIG['services']['GoogleAsrService'] = {\n 'language': os.environ.get(\n 'GOOGLE_APPLICATION_LANGUAGE', 'en-AU')}\n using_asr = 'Google'\n print(\"ASR ENABLED using {}\".format(using_asr))\n\n # require asr\n if not using_asr:\n print('ASR CONFIGURATION MISSING')\n sys.exit()\n\n # TTS\n if os.getenv('GOOGLE_ENABLE_TTS') == \"true\" and \\\n os.getenv('GOOGLE_APPLICATION_CREDENTIALS') and \\\n os.path.isfile(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')):\n print('TTS ENABLED USING GOOGLE')\n CONFIG['services'].pop('Pico2wavTtsService', False)\n CONFIG['services']['GoogleTtsService'] = {'language': os.environ.get(\n 'GOOGLE_APPLICATION_LANGUAGE', 'en-AU'), 
'cache': '/tmp/tts_cache'} # }\n else:\n CONFIG['services'].pop('GoogleTtsService', None)\n CONFIG['services']['Pico2wavTtsService'] = {\n 'binary_path': os.environ.get(\n 'TTS_BINARY',\n '/usr/bin/pico2wave'),\n 'cache_path': os.environ.get(\n 'TTS_CACHE',\n '/tmp/tts_cache')} # }\n print('TTS ENABLED USING PICO2WAV')\n\n if os.getenv('RASA_URL') and len(os.getenv('RASA_URL')) > 0:\n print('RASA ENABLED USING URL ' + os.getenv('RASA_URL'))\n rasa_service = CONFIG['services'].get('RasaService', {})\n rasa_service['rasa_server'] = os.getenv('RASA_URL')\n rasa_service['keep_listening'] = os.getenv(\n 'HERMOD_KEEP_LISTENING', 'false')\n CONFIG['services']['RasaService'] = rasa_service\n else:\n print('RASA ENABLED USING LOCAL ')\n rasa_service = CONFIG['services'].get('RasaServiceLocal', {})\n rasa_service['rasa_actions_url'] = os.getenv(\n 'RASA_ACTIONS_URL', '')\n rasa_service['keep_listening'] = os.getenv(\n 'HERMOD_KEEP_LISTENING', 'false')\n rasa_service['model_path'] = os.getenv(\n 'RASA_MODEL', '/app/rasa/models/model.tar.gz')\n CONFIG['services']['RasaServiceLocal'] = rasa_service\n\n # satellite mode restrict to audio and hotword services\n if ARGS.satellite:\n services = {\n 'AudioService': CONFIG['services']['AudioService'],\n 'PicovoiceHotwordService': CONFIG['services']['PicovoiceHotwordService']}\n CONFIG['services'] = services\n # no local audio/hotword\n if ARGS.nolocalaudio:\n if 'AudioService' in CONFIG['services']:\n del CONFIG['services']['AudioService']\n if 'PicovoiceHotwordService' in CONFIG['services']:\n del CONFIG['services']['PicovoiceHotwordService']\n\n # satellite mode\n if ARGS.satellite:\n services = {\n 'AudioService': CONFIG['services']['AudioService'],\n 'PicovoiceHotwordService': CONFIG['services']['PicovoiceHotwordService']}\n CONFIG['services'] = services\n # no local audio/hotword\n if ARGS.nolocalaudio:\n if 'AudioService' in CONFIG['services']:\n del CONFIG['services']['AudioService']\n if 'PicovoiceHotwordService' in CONFIG['services']:\n del CONFIG['services']['PicovoiceHotwordService']\n\n loop = asyncio.get_event_loop()\n # loop.set_debug(True)\n run_services = []\n for service in CONFIG['services']:\n # force dialog initialise if argument present\n full_path = os.path.join(module_dir, 'src', service + '.py')\n module_name = pathlib.Path(full_path).stem\n module = importlib.import_module(module_name)\n print(module_name)\n module_function = getattr(module, service)(CONFIG, loop)\n run_services.append(module_function.run())\n # extra event loop threads on init\n if hasattr(module_function, 'also_run'):\n for i in module_function.also_run:\n run_services.append(i())\n print('starting services')\n print(run_services)\n await asyncio.gather(*run_services, return_exceptions=True)",
"def on_intent(request, session):\n\n intent = request['intent']\n intent_name = request['intent']['name']\n\n print(\"on_intent \" +intent_name)\n \n if 'dialogState' in request:\n #delegate to Alexa until dialog sequence is complete\n if request['dialogState'] == \"STARTED\" or request['dialogState'] == \"IN_PROGRESS\":\n return dialog_response(\"\", False)\n\n # process the intents\n if intent_name == \"GetRandomQuote\":\n print(\"Calling do_random_quote\")\n return do_random_quote(request)\n elif intent_name == \"AMAZON.HelpIntent\":\n return do_help()\n elif intent_name == \"AMAZON.StopIntent\":\n return do_stop()\n elif intent_name == \"AMAZON.CancelIntent\":\n return do_stop()\n\n else:\n print(\"invalid intent reply with help\")\n return do_help()",
"def add_hoist(self, app: Flask, handle_errors: bool = True, auth: list = [\"\"], premade_pages: bool = True) -> Flask:\n if hasattr(app, 'HOIST_INTERNALSERVER'):\n raise HoistExistsError('hoist is already set up on app')\n\n app.HOIST_INTERNALSERVER = Server(app, handle_errors)\n\n @app.route('/hoist/send', methods=['POST'])\n def hoist_send() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALSERVER._received, 'msg')\n\n if premade_pages:\n @app.route('/hoist', methods=['POST', 'GET'])\n def hoist_home() -> str:\n if request.method == 'POST':\n return jsonify({'RESPONSE': f'Version {__version__}'})\n\n # done with html instead of flask.render_template so i dont have to touch the apps template_folder property\n \n html = HTML.replace('{{ version }}', __version__).replace('{{ serverUrl }}', request.base_url)\n\n return html\n \n\n return app",
"def main():\n updater = Updater(token, use_context = True)\n disp = updater.dispatcher\n conv_handler = ConversationHandler(\n entry_points = [CommandHandler('start', start)],\n states = {\n \"CHOICE\": [MessageHandler(Filters.text, choice)],\n \"STOCK\": [MessageHandler(Filters.text, stock_query)]\n },\n fallbacks = [CommandHandler('cancel', cancel)]\n )\n disp.add_handler(conv_handler)\n updater.start_webhook(listen = \"0.0.0.0\",\n port = int(PORT),\n url_path = token,\n webhook_url='https://appname.herokuapp.com/' + token)\n updater.idle()",
"def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())",
"def quantity_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = quanity_for_ingredient(handler_input)\n reprompt = speech_text\n\n attr = handler_input.attributes_manager.session_attributes\n attr[\"last_response\"] = speech_text\n handler_input.attributes_manager.session_attributes = attr\n\n handler_input.response_builder.speak(speech_text).ask(reprompt)\n return handler_input.response_builder.response",
"def start_http_server(app: Application, config: Dict[str, Any]) -> None:\n http_config = Config()\n http_config.bind = [f\"{config['host']}:{config['port']}\"]\n asyncio.run(serve(app, http_config))",
"def _add_io_handler(self, handler):\r\n self._handlers.append(handler)",
"async def app_aclient(app_token):\n sender = tk.RetryingSender(sender=tk.AsyncSender())\n yield tk.Spotify(app_token, sender=sender)\n await sender.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Representation of the linked list | def __repr__(self):
return "LinkedList([{}],{}/{})".format(self.cur_node, self.cur_pos, self.length) | [
"def __repr__(self):\n\n return \"LinkedList created\"",
"def __repr__(self):\r\n return \"ListNode({})\".format(self.data)",
"def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll",
"def __init__(self, linked_list: object):\n self.current_node = linked_list._head",
"def l1():\n head = l1 = ListNode(3)\n l1.next = ListNode(4)\n l1.next.next = ListNode(5)\n return head",
"def __init__(self):\n self.head = None\n self.tail = None\n self.current_node = None",
"def __init__(self):\r\n self.head = None\r\n self.tail = None",
"def __init__(self, lst=[]):\r\n self.__length = 0 # current length of the linked list\r\n self.__head = None # pointer to the first node in the list\r\n self.__last = None # pointer to the last node in the list\r\n lst.reverse() # reverse to ensure elements will appear in same order\r\n for e in lst: # add elements of input list lst one by one\r\n self.add(e)",
"def __init__(self, lst=[]):\n self.__length = 0 # current length of the linked list\n self.__head = None # pointer to the first node in the list\n for e in lst: # initialize the list,\n self.add(e) # by adding elements one by one",
"def __str__(self):\n if self._size > 0:\n result = []\n initial = self._head\n result.append(str(initial._element))\n for i in range(self._size-1):\n initial = initial._next\n result.append(str(initial._element))\n result = ', '.join(result)\n else:\n result = ''\n return 'Linked list: head [' + result + '] tail'",
"def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList",
"def __str__(self):\r\n output = \"linkedlist[\"\r\n first = True\r\n for value in self:\r\n if first:\r\n first = False\r\n else:\r\n output += \", \"\r\n output += str(value)\r\n output += \"]\"\r\n return output",
"def show(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = self.head\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return",
"def l2():\n head = l2 = ListNode(2)\n l2.next = ListNode(4)\n l2.next.next = ListNode(5)\n return head",
"def list_print(self):\n node = self.cur_node # cant point to ll!\n while node:\n print(node.data)\n node = node.next",
"def create_linked_list(input_list):\n head=None\n for value in input_list:\n if head is None:\n head=Node(value)\n else:\n current_node=head\n while current_node.next:\n current_node=current_node.next\n current_node.next=Node(value)\n# printlist(head)\n# print('------')\n return head",
"def print_list(self):\n\n current = self.head\n\n while current:\n\n print current.data\n\n current = current.next",
"def __init__(self):\n self.__head = None",
"def print_list_and_index(self):\r\n current_node = self.head\r\n index = 0\r\n while current_node:\r\n print(str(index) + \", \" + str(current_node.data))\r\n index += 1\r\n current_node = current_node.next"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the linked list | def list_print(self):
        node = self.cur_node  # can't point to the ll (linked list) object itself; walk a local node reference
while node:
print(node.data)
node = node.next | [
"def print_list(self):\n\t\tcur_node = self.head\n\t\twhile cur_node:\n\t\t\tprint(cur_node.data)\n\t\t\tcur_node = cur_node.next",
"def print_list(self):\n\n current = self.head\n\n while current:\n\n print current.data\n\n current = current.next",
"def show(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = self.head\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return",
"def print_list(l):\n if not l.head:\n print(\"List Empty!\")\n\n print(' -> '.join([str(curr.value) for curr in l.get_next()]))",
"def printList(self, node):\n print( \"\\nTraversal in forward direction:\")\n while(node is not None):\n print(\" % d\" %(node.data))\n last = node\n node = node.next\n\n print( \"\\nTraversal in reverse direction:\")\n while(last is not None):\n print(\" % d\" %(last.data))\n last = last.previous",
"def print_list_and_index(self):\r\n current_node = self.head\r\n index = 0\r\n while current_node:\r\n print(str(index) + \", \" + str(current_node.data))\r\n index += 1\r\n current_node = current_node.next",
"def display(self):\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tprint pointer.state + \"\\t\" + pointer.info\t\n\t\t\tpointer = pointer.next",
"def print_linked_list(head):\n while head != None:\n print head.val, \n head = head.sibling\n print",
"def print_nodes(self) -> None:\r\n ll= self.head\r\n while ll != None:\r\n print(\"Score:\", ll.score, end= ' ')\r\n print(\"ID:\", ll.ids, end= ' ')\r\n print(\"Positions:\", ll.position, end= ' ')\r\n print(\"->\\t\", end= \"\")\r\n ll= ll.next\r\n print(\"None\")",
"def display(self):\n current = self\n while current is not None:\n print(current, end=' -> ')\n current = current.next_node\n\n print('END')",
"def displayNode(self):\n for x in self.__node:\n print(x)",
"def _printNodes(self):\n print(self.nodes)",
"def printLR(headNode):\n node = headNode\n \n while node is not None:\n print(node.item, end = \"\\t\")\n node = node.rightL\n\n print(\"end of linked list\")",
"def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1",
"def __str__(self):\r\n output = \"linkedlist[\"\r\n first = True\r\n for value in self:\r\n if first:\r\n first = False\r\n else:\r\n output += \", \"\r\n output += str(value)\r\n output += \"]\"\r\n return output",
"def __str__(self):\n if self._size > 0:\n result = []\n initial = self._head\n result.append(str(initial._element))\n for i in range(self._size-1):\n initial = initial._next\n result.append(str(initial._element))\n result = ', '.join(result)\n else:\n result = ''\n return 'Linked list: head [' + result + '] tail'",
"def print_songs(self):\n if self.head == None:\n print(\"The playlist is empty!\")\n else:\n current_node = self.head\n while current_node:\n print(current_node.data)\n current_node = current_node.next",
"def __str__(self) -> str:\n content = ''\n if self.head is not None:\n content = str(self.head)\n cur = self.head.next\n while cur is not None:\n content += ' -> ' + str(cur)\n cur = cur.next\n return 'SLL [' + content + ']'",
"def traverse(self):\n current = self.head\n while current is not None:\n print current.value\n current = current.next"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the data of the next node | def get_next(self):
return self.cur_node.next.data | [
"def get_next(node):\n return node['next']",
"def data(self):\n return self.first_node.data",
"def get_data(node):\n return node['data']",
"def node_data(self):\n return self.node_data_",
"def get_next(self): \n return self.nextval",
"def get_next(self):\n return self.next_node",
"def next_data(self):\n self.current_index += 1\n if self.current_index == len(self.hawk_data):\n self.current_index = 0\n\n return self.hawk_data[ self.current_index ]",
"def next_node(self):\n return self._next_node",
"def next_node(self):\n return self.__next_node",
"def get_next_data_after(self, addr):\n\t\treturn core.BNGetNextDataAfterAddress(self.handle, addr)",
"def get_next(self, known_node):\n return known_node.next_node",
"def get_next(self):\n return self.next",
"def get_next_data_after(self, addr: int) -> int:\n\t\treturn core.BNGetNextDataAfterAddress(self.handle, addr)",
"def get_next(node, offset):\n row, column = node\n row_offset, column_offset = offset\n return row + row_offset, column + column_offset",
"def getNodeRRDData(self,node):\n data = self.connect('get','nodes/%s/rrddata' % (node),None)\n return data",
"def _get_next_nodes(self):\n next_nodes = self.data[5] if not is_nan(self.data[5]) else \"eos\"\n if is_nan(next_nodes):\n next_nodes = \"eos\"\n return next_nodes",
"def next(self):\n next_data = self.load_next()\n if next_data is None:\n raise StopIteration\n return next_data",
"def next(self):\n if self._next_value:\n value = self._next_value[0]\n self._next_value = None\n return value\n \n return next(self.inner)",
"def _data(self):\n if self.is_multigraph():\n return self._graph[self.src_id][self.dst_id][self.ekey]\n\n return self._graph[self.src_id][self.dst_id]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Representation of the spinlock | def __repr__(self):
return "Spinlock({})".format(self.stepforward) | [
"def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')",
"def spinlocks(self):\n return self._spinlocks",
"def acquire_lock(self, object_id):",
"def lock(self):\n raise NotImplementedError",
"def SynchronizingObject(self) -> _n_1_t_3:",
"def __get_lock(self):\n self.core.get_lock()\n self.locked = True",
"def lock_blocks(self) -> int:",
"def read_lock(self):\n pass",
"def locked(self, state=None):\n pass",
"def get_lock(self):\n return self.lock",
"def lock(self, item_type):",
"def _lock_key(self):\n return hash_string_64bit('dirbs-listgen')",
"def lock_status(self):\n return self._lock_status",
"def lock_type(self):\n return self._lock_type",
"def lock_handler(self):\n print(\"Lock requested!\")\n # TODO: Call lock function.",
"def getSpinControl(*args):",
"def _obtain_lock(self):\r\n return self._obtain_lock_or_raise()",
"def _refill_lock_key(self):\n # type: () -> str\n return \"lock:{}\".format(self.__key)",
"def lock (self):\n self.locked = True\n self._changed = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the tile location (x,y) and zoom level z, fetch the corresponding tile from the server and save it to the location specified in fpath. Note, this saves just one tile; usually, you want to use `positive_dataset` instead. | def save_tile(x,y,z,fpath):
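    # Browser-style User-Agent header sent with the tile request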
UA = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0"
tile_url = f"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png"
# cmd = f"wget --user-agent='please download' -O {fpath} {url}"
if os.path.exists(fpath):
print(f"Already have tile {fpath}!")
return 0
if os.path.isdir(fpath):
raise ValueError(f"requested path {fpath} exists and is a directory!")
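    # Fetch the tile; on HTTP 200 write the PNG bytes to fpath and return 0, otherwise return the status code (or 1 on exception)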
try:
res = rq.get(
url=tile_url,
headers={'User-Agent': UA}
)
status = res.status_code
if status == 200:
with open(fpath,'wb') as of:
of.write(res.content)
return 0
else:
print(f"Error: response {status} from server:\n{res.reason}")
return status
except Exception as e:
print(f"Error getting tile: {e}")
return 1 | [
"def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.tiles_url.format(**locals())\n except KeyError, e:\n raise DownloadError(_(\"Unknown keyword %s in URL\") % e)\n logger.debug(_(\"Retrieve tile at %s\") % url)\n r = DOWNLOAD_RETRIES\n sleeptime = 1\n while r > 0:\n try:\n request = urllib2.Request(url)\n for header, value in self.headers.items():\n request.add_header(header, value)\n stream = urllib2.urlopen(request)\n assert stream.getcode() == 200\n return stream.read()\n except (AssertionError, IOError), e:\n logger.debug(_(\"Download error, retry (%s left). (%s)\") % (r, e))\n r -= 1\n time.sleep(sleeptime)\n # progressivly sleep longer to wait for this tile\n if (sleeptime <= 10) and (r % 2 == 0):\n sleeptime += 1 # increase wait\n raise DownloadError(_(\"Cannot download URL %s\") % url)",
"def download_tile(self, xtile, ytile):\n location = 'http://maps.six.nsw.gov.au/arcgis/rest/services/public/NSW_Imagery/MapServer/tile/'\n destination = 'downloaded_tiles/'\n save_name = str(self.zoom_level) + '_' + str(xtile) + '_' + str(ytile)\n tile_url = location + save_name.replace('_', '/')\n tile = requests.get(tile_url, stream=True)\n with open(destination + save_name + '.png', 'wb') as out_file:\n tile.raw.decode_content = True\n shutil.copyfileobj(tile.raw, out_file)\n tilepng = png.Reader(file=tile.raw)\n # shutil.copyfileobj(tilepng, out_file)\n del tile",
"def fetch_tile(x, y, z):\n\n url = TILE_SERVER.format(x=x, y=y, z=z)\n\n print(\"Fetching tile from {}\".format(url))\n\n req = Request(url, data=None, headers={'User-Agent': 'smopy'})\n png = BytesIO(urlopen(req).read())\n img = Image.open(png)\n img.load()\n\n return img",
"def fetch_tile(x, y, z, tileserver):\n\n url = get_url(x, y, z, tileserver)\n req = Request(url, data=None, headers={'User-Agent': 'smopy'})\n png = BytesIO(urlopen(req).read())\n img = Image.open(png)\n img.load()\n return img",
"def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output",
"def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))",
"def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret",
"def download(self):\n\n self.status = MapTileStatus.DOWNLOADING\n\n try:\n url = MapTile.tile_url_template.format(x=self.x, y=self.y, zoom=self.zoom)\n r = requests.get(url, headers={'User-Agent': USER_AGENT})\n except requests.exceptions.ConnectionError:\n self.status = MapTileStatus.ERROR\n return\n\n # error handling (note that a warning is appropriate here – if this tile\n # is one of a tiles used in imagery quality testing, an error is not an\n # unexpected outcome and should thus not be thrown)\n if r.status_code != 200:\n LOGGER.warning(f\"Unable to download {self}, status code {r.status_code}.\")\n self.status = MapTileStatus.ERROR\n return\n\n # convert response into an image\n data = r.content\n self.image = Image.open(io.BytesIO(data))\n\n # sanity check\n assert self.image.mode == \"RGB\"\n assert self.image.size == (TILE_SIZE, TILE_SIZE)\n\n # save original data (not: re-encoded via image.save) in tile store if\n # enabled (and create the directory first if it doesn't already exist)\n if self.filename is not None:\n d = os.path.dirname(self.filename)\n if not os.path.isdir(d):\n os.makedirs(d)\n with open(self.filename, 'wb') as f:\n f.write(data)\n\n self.status = MapTileStatus.DOWNLOADED",
"def tile_to_url(tile_x, tile_y, tile_z):\n subdomain = random.choice([\"a\", \"b\", \"c\"])\n resource_url = \"https://{0}.tile.openstreetmap.org/{1}/{2}/{3}.png\"\n return resource_url.format(subdomain, tile_z, tile_x, tile_y)",
"def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))",
"def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n output = self.reader.tile(z, x, y)\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output",
"def make_tile(self, coord=(0, 0, 0), timestamp=None):\n tile_dir = os.path.join(self.dir, 'cache/one_EPSG4326/%02d/000/000/%03d/000/000/' %\n (coord[2], coord[0]))\n\n ensure_directory(tile_dir)\n tile = os.path.join(tile_dir + '%03d.png' % coord[1])\n open(tile, 'wb').write(b'')\n if timestamp:\n os.utime(tile, (timestamp, timestamp))\n return tile",
"def get_tile(self, tile, as_png=False, overwrite=True):\n zoom, row, col = tile\n output_path = self.config[\"output_name\"]\n zoomdir = os.path.join(output_path, str(zoom))\n rowdir = os.path.join(zoomdir, str(row))\n image_path = os.path.join(rowdir, str(col)+\".png\")\n if os.path.isfile(image_path):\n return send_file(image_path, mimetype='image/png')\n else:\n try:\n self.save_tile(tile)\n except:\n print \"tile not available\", tile\n size = self.tile_pyramid.tile_size\n empty_image = Image.new('RGBA', (size, size))\n return empty_image.tobytes()\n return send_file(image_path, mimetype='image/png')",
"def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)",
"def tile_coords_zoom_and_tileserver_to_url(\n tile_x: int, tile_y: int, tile_z: int, tile_server: dict\n) -> str:\n\n if tile_server[\"name\"] == \"bing\":\n quadKey = tile_coords_and_zoom_to_quadKey(tile_x, tile_y, tile_z)\n url = quadKey_to_Bing_URL(quadKey, tile_server[\"apiKey\"])\n elif tile_server[\"name\"] == \"sinergise\":\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n layer=tile_server[\"wmtsLayerName\"],\n )\n elif \"maxar\" in tile_server[\"name\"]:\n # maxar uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n elif \"{-y}\" in tile_server[\"url\"]:\n # this uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].replace(\"{-y}\", \"{y}\")\n url = url.format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n else:\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n\n return url",
"def import_tile(self, tileset, zoom, col, row, compressor=None):\n\n key = (zoom, col, row)\n\n url = self.get_tile_url(zoom, col, row)\n\n print('Importing {}x{}x{}: {}'.format(zoom, col, row, url))\n\n res = requests.get(url)\n\n if res.status_code == requests.codes.ok:\n if compressor is not None:\n content = compressor.compress(res.content)\n else:\n content = res.content\n\n tileset[key] = content\n\n return content\n else:\n print('Warning. This failed.')",
"def get_map_features_points_tiles(\n x: float,\n y: float,\n z: float,\n ) -> str:\n\n return (\n f\"https://tiles.mapillary.com/maps/vtp/mly_map_feature_point/2\"\n f\"/{z}/{x}/{y}/\"\n )",
"async def tilelayer(request):\n logger.debug(f\"doing tilelayer request at {request.path_params['path']}\")\n logger.debug(f\"requesting tile (tczyx): {[request.path_params[a] for a in 'tczyx']}\")\n p = request[\"app\"].serve_rootpath / request.path_params[\"path\"]\n\n external_path, internal_path, is_h5_n5 = handle_path(p)\n logger.debug(f\"external_path: {external_path}, internal_path: {internal_path}, is_h5_n5: {is_h5_n5}\")\n\n if not external_path.exists():\n return JSONResponse({\"external_path\": str(external_path), \"internal_path\": str(internal_path), \"b\": is_h5_n5})\n\n if not is_h5_n5:\n return JSONResponse({\"external_path\": str(external_path), \"internal_path\": str(internal_path), \"b\": is_h5_n5})\n\n z5dataset = H5N5DataSource(f\"{external_path}{internal_path}\")\n tile_shape = z5dataset.tile_shape()\n tile_shape = Chunk5D(x=tile_shape.x, y=tile_shape.y, z=1, c=1, t=1)\n bc = Blocking5D(tile_shape)\n block_coords = Point5D(**{k: request.path_params[k] for k in \"tczyx\"})\n slicing = bc.get_slice(block_coords)\n logger.debug(f\"getting data with slicing: {slicing} - shape: {z5dataset.shape}\")\n data = z5dataset.do_retrieve(slicing)\n logger.debug(f\"data.shape: {data.shape}\")\n img = Image.fromarray(data.raw(\"yx\"), mode=\"P\")\n bo = io.BytesIO()\n img.save(bo, format=\"PNG\")\n bo.seek(0)\n return StreamingResponse(bo, media_type=\"image/png\")",
"def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save the tiles whose coordinates are in the input DataFrame, defined by columns x, y, and z | def save_tiles(df,output_dir,namefunc = None):
if not isinstance(df,pd.core.frame.DataFrame):
raise TypeError("df must be a pandas DataFrame!")
if any(e not in df.columns for e in ('z','x','y')):
raise ValueError("df must have columns x, y, and z")
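    # Default file name: "<z>_<x>_<y>.png" inside output_dir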
if namefunc is None:
def namefunc(x,y,z):
return f'{z}_{x}_{y}.png'
opath = os.path.abspath(os.path.expanduser(output_dir))
Path(opath).mkdir(parents=True, exist_ok=True)
L = df.shape[0]
flocs = [''] * L
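    # Download each tile, pausing briefly between requests, and record where it was saved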
for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):
x,y,z = xyz
print(f"({i+1} of {L})...")
sleep(0.75)
outloc = os.path.join(opath,namefunc(x,y,z))
if save_tile(x,y,z,outloc) == 0:
flocs[i] = outloc
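    # Keep only rows whose tile downloaded successfully (non-empty file_loc)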
df = df.assign(file_loc = flocs)
return df[df['file_loc'] != ''] | [
"def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))",
"def batch_save_tile_mask(tiles_gdf, label_poly_series, tile_size, region, zone, save_path, channels=3, display=False):\n \n import warnings; warnings.simplefilter('ignore')\n\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n tile_poly = get_specific_tile(idx, tiles_gdf)\n save_tile_mask(label_poly_series, tile_poly, tile['xyz'], tile_size, dataset,\n region, zone, save_path, channels, display)",
"def find_tiles(x_index = None, y_index = None):\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(fc_dataset_id)s\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'fc_dataset_id': dataset_info['fc_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info",
"def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)",
"def xyz_from_grid(x,y,z, pnts_out):\n\tx_flt=x.flatten()\n\ty_flt=y.flatten()[::-1]\n\tz_flt=z.flatten()\n\n\tutil.check_output_dir(pnts_out)\n\tfout = open(pnts_out, 'w')\n\tfout.write(\"x,y,z\\n\")\n\n\tprint(\"Writing out %i xyz triples to %s\" %(len(z_flt),pnts_out))\n\tfor i in range(0, len(z_flt)):\n\t\tif not np.isnan(z_flt[i]):\n\t\t\tfout.write(\"%.6f,%.6f,%.2f\\n\" %(x_flt[i], y_flt[i], z_flt[i]))\n\n\tfout.close()",
"def _get_tiles(self,X,Y,Z,C,T):\n \n self._buffer_supertile(X[0][0],X[0][1])\n \n if X[-1][0] - self._tile_x_offset > self._TILE_SIZE:\n shift_buffer = True\n split_ind = 0\n while X[split_ind][0] - self._tile_x_offset < self._TILE_SIZE:\n split_ind += 1\n else:\n shift_buffer = False\n split_ind = len(X)\n \n # Tile the data\n num_rows = Y[0][1] - Y[0][0]\n num_cols = X[0][1] - X[0][0]\n num_tiles = len(X)\n images = np.zeros((num_tiles,num_rows,num_cols,1),dtype=self.pixel_type())\n \n for ind in range(split_ind):\n images[ind,:,:,0] = self._pixel_buffer[Y[ind][0]-self._tile_y_offset:Y[ind][1]-self._tile_y_offset,\n X[ind][0]-self._tile_x_offset:X[ind][1]-self._tile_x_offset]\n \n if split_ind != num_tiles:\n self._buffer_supertile(X[-1][0],X[-1][1])\n for ind in range(split_ind,num_tiles):\n images[ind,:,:,0] = self._pixel_buffer[Y[ind][0]-self._tile_y_offset:Y[ind][1]-self._tile_y_offset,\n X[ind][0]-self._tile_x_offset:X[ind][1]-self._tile_x_offset]\n \n return images",
"def save_positions(path, results):\n with open(path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([\"x [px]\", \"y [px]\", \"Frame\"])\n\n for i, positions in enumerate(results):\n if np.any(positions):\n for coord in positions:\n writer.writerow(\n [coord[1], coord[0], i]\n )",
"def xyz_by_id(self, idx):\n \n ## TODO: make it working for unfinished scans\n \n x_raw, y_raw, z_raw = self.xy_by_id(idx)\n\n x = np.unique( x_raw )\n y = np.unique( y_raw )\n\n N_x = len( x )\n N_y = len( y )\n \n z = np.reshape( z_raw, (N_x, N_y) )\n return x, y, z",
"def read_cells(filename):\n\n import pandas as pd\n\n min_x = -1.77\n min_y = 174.0\n min_z = -183.0\n\n size_x = 0.972\n size_y = 3.69\n size_z = 0.976\n\n frame = pd.read_csv(filename, skiprows=3)\n # frame = pd.read_csv(filename)\n\n# print(\"X range:\",min(frame['Position X']), max(frame['Position X']), \"dynamic range:\", max(frame['Position X'])-min(frame['Position X']))\n# print(\"Y range:\",min(frame['Position Y']), max(frame['Position Y']), \"dynamic range:\", max(frame['Position Y'])-min(frame['Position Y']))\n# print(\"Z range:\",min(frame['Position Z']), max(frame['Position Z']), \"dynamic range:\", max(frame['Position Z'])-min(frame['Position Z']))\n#\n # will need to check IMARIS for correspondence between exported um files and pixel values\n # X and Z on csv files are my X and Y on resliced images\n\n frame[\"Pixel X\"] = (frame['Position X'] - min_x) / size_x\n frame[\"Pixel X\"] = frame[\"Pixel X\"].round().astype(int)\n\n frame[\"Pixel Y\"] = (frame['Position Z'] - min_z) / size_z\n frame[\"Pixel Y\"] = frame[\"Pixel Y\"].round().astype(int)\n\n frame[\"Pixel Z\"] = (frame['Position Y'] - min_y) / size_y\n frame[\"Pixel Z\"] = frame[\"Pixel Z\"].round().astype(int)\n\n print(\"X pixel range:\", min(frame[\"Pixel X\"]), max(\n frame[\"Pixel X\"]), \"dynamic range:\", max(frame[\"Pixel X\"]) - min(frame[\"Pixel X\"]))\n print(\"Y pixel range:\", min(frame[\"Pixel Y\"]), max(\n frame[\"Pixel Y\"]), \"dynamic range:\", max(frame[\"Pixel Y\"]) - min(frame[\"Pixel Y\"]))\n print(\"Z pixel range:\", min(frame[\"Pixel Z\"]), max(\n frame[\"Pixel Z\"]), \"dynamic range:\", max(frame[\"Pixel Z\"]) - min(frame[\"Pixel Z\"]))\n# print(frame)\n frame.to_csv(\"frame.csv\")\n return frame",
"def save_tile_mask(label_poly_series, tile_poly, xyz, tile_size, dataset, region, zone, save_path, channels = 3, display=False):\n \n \n\n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n \n cropped_polys = [poly for poly in label_poly_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n \n fbc_mask = burn_mask(cropped_polys_gdf, tfm, tile_size, channels)\n # fbc_mask = sol.vector.mask.df_to_px_mask(df=cropped_polys_gdf,\n # channels=['footprint', 'boundary', 'contact'],\n # affine_obj=tfm, shape=(tile_size,tile_size),\n # boundary_width=5, boundary_type='inner', contact_spacing=5, meters=True)\n \n if display: \n plt.imshow(fbc_mask); plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}_mask.png',fbc_mask, check_contrast=False)",
"def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1",
"def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)",
"def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))",
"def saveForceSamples(self,directory,forcevolumes,points):\n out = open(\"%s/wrench_slices.csv\"%(directory,),'w')\n out.write(\"#index,x,y,z\\n\")\n \n for i,(V,pt) in enumerate(zip(forcevolumes,points)):\n if len(V.convex_decomposition) > 1:\n raise NotImplementedError(\"Convert nonconvex geometry to Geom\")\n geom = polytope.hull_to_klampt_geom(V.convex_hull)\n out.write(\"%d,%f,%f,%f\\n\"%(i+1,pt[0],pt[1],pt[2]))\n geom.saveFile(\"%s/wrench_slice_%d.obj\"%(directory,i+1))\n out.close()",
"def griddatafindz (pts,X,Y,Z):\n\n z=[]\n\n for i in range(len(pts)):\n x=pts[i][0]\n y=pts[i][1]\n\n if ((x<X[0,0])|(x>X[0,-1])|(y<Y[-1,0])|(y>Y[0,0])):\n print('WARNING: point %i is outside data file: (x,y)= (%g,%g)' % (i+1,x,y))\n print('**file corners are (X0,Y0)= (%g,%g), and (X1,Y1) = (%g,%g)' % (X[0,0],Y[-1,-1],X[-1,-1],Y[0,0]))\n z.append(-9999)\n else:\n\n #find indices of four corners\n #some corners might be the same, if x or y happen to intersect X or Y\n i0 = np.where(X[0,:]<=x)[0][-1]\n i1 = np.where(X[0,:]>=x)[0][0]\n\n j0 = np.where(Y[:,0]<=y)[0][0]\n j1 = np.where(Y[:,0]>=y)[0][-1]\n\n #find height of four corners\n Z00=Z[j0,i0]\n Z01=Z[j0,i1]\n Z10=Z[j1,i0]\n Z11=Z[j1,i1]\n\n X00=X[j0,i0]\n X01=X[j0,i1]\n X10=X[j1,i0]\n X11=X[j1,i1]\n\n Y00=Y[j0,i0]\n Y01=Y[j0,i1]\n Y10=Y[j1,i0]\n Y11=Y[j1,i1]\n\n #find slopes of opposing lines.\n if i0==i1:\n dzdx0=0.0\n dzdx1=0.0\n else:\n dzdx0 = (Z01-Z00)/(X11-X00)\n dzdx1 = (Z11-Z10)/(X11-X00)\n\n #find height of points on lines\n zy0 = Z00 + (x-X00)*dzdx0\n zy1 = Z10 + (x-X10)*dzdx1\n\n if j0==j1:\n dzdy=0.0\n else:\n dzdy = (zy1-zy0)/(Y11-Y00)\n\n z.append( zy0 + (y-Y00)*dzdy)\n\n z=np.array(z)\n return z",
"def save_geotiff(df, data_col, crs, x_col='x', y_col='y', time_col=None, nfiles='many', export_path='geotiff.tif', grid_res=None):\n\n ### create the xy coordinates\n if time_col is None:\n xy1 = df[[x_col, y_col]]\n else:\n time = df[time_col].sort_values().unique()\n xy1 = df.loc[df[time_col] == time[0], [x_col, y_col]]\n if any(xy1.duplicated()):\n raise ValueError('x and y coordinates are not unique!')\n\n ### Determine grid res\n if grid_res is None:\n res_df1 = (xy1.loc[0] - xy1).abs()\n res_df2 = res_df1.replace(0, nan).min()\n x_res = res_df2[x_col]\n y_res = res_df2[y_col]\n elif isinstance(grid_res, int):\n x_res = y_res = grid_res\n else:\n raise ValueError('grid_res must either be None or an integer.')\n\n ### Make the affline transformation for Rasterio\n trans2 = transform.from_origin(xy1[x_col].min() - x_res/2, xy1[y_col].max() + y_res/2, x_res, y_res)\n\n ### Make the rasters\n if time_col is None:\n z = df.set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs, pass_str=True), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()\n else:\n if nfiles == 'one':\n new_dataset = ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=len(time), dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n for i in range(1, len(time)+1):\n z = df.loc[df[time_col] == time[i - 1]].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset.write(z, i)\n new_dataset.close()\n elif nfiles == 'many':\n file1 = path.splitext(export_path)[0]\n for i in time:\n str_date = to_datetime(i).strftime('%Y-%m-%d_%H')\n file2 = file1 + '_' + str_date + '.tif'\n z = df.loc[df[time_col] == i].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(file2, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()",
"def _getTileData(self, rtable, t):\n db = dbio.connect(self.dbname)\n cur = db.cursor()\n var = rtable.split(\".\")[0]\n sql = \"select gid,fdate,st_value(rast,x,y) from {0},{1}_xy where rid=tile and tile={8} and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' order by gid,fdate\".format(\n rtable, var, self.startyear, self.startmonth, self.startday, self.endyear, self.endmonth, self.endday, t)\n cur.execute(sql)\n data = cur.fetchall()\n return data",
"def dump_tile(tileno, trow, tcol, jpeg_tables_bytes, tile_offset, tile_length):\n global group_file_count\n global zoomno\n global tile_group\n global total_tiles\n\n if group_file_count >= tiles_per_group:\n # last group is full already\n tile_group += 1\n group_file_count = 0\n\n group_file_count += 1\n total_tiles += 1\n\n dirname = dir_template % dict(\n outdir = outdir,\n groupno = tile_group\n )\n\n if not os.path.exists(dirname):\n # create tile group dir on demand\n os.makedirs(dirname, mode=0755)\n\n outname = tile_template % dict(\n outdir = outdir,\n groupno = tile_group,\n zoomno = zoomno,\n tcolno = tcol,\n trowno = trow\n )\n \n outfile = open(outname, 'wb')\n outfile.write( jpeg_assemble(jpeg_tables_bytes, load_tile(tile_offset, tile_length)) )\n outfile.close()",
"def query_image_tile(self, coord):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add latitude/longitude values, derived from the x, y, z tile coordinates, to a dataframe | def add_latlon(df):
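    # num2deg (defined elsewhere) converts each (x, y, z) tile index to a (latitude, longitude) pair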
LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]
LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])
return pd.concat([df.reset_index(drop=True),LLdf],axis = 1) | [
"def add_lat_lon(df):\r\n df[\"lat\"] = df['geohash6'].apply(lambda x: geohash2.decode_exactly(x)[0])\r\n df[\"lon\"] = df['geohash6'].apply(lambda x: geohash2.decode_exactly(x)[1])",
"def add_coord_to_grid_data_frames(grid):\n bus2coord = (\n pd.merge(grid.bus2sub[[\"sub_id\"]], grid.sub[[\"lat\", \"lon\"]], on=\"sub_id\")\n .set_index(grid.bus2sub.index)\n .drop(columns=\"sub_id\")\n .to_dict()\n )\n\n def get_lat(idx):\n return [bus2coord[\"lat\"][i] for i in idx]\n\n def get_lon(idx):\n return [bus2coord[\"lon\"][i] for i in idx]\n\n extra_col_bus = {\"lat\": get_lat(grid.bus.index), \"lon\": get_lon(grid.bus.index)}\n add_column_to_data_frame(grid.bus, extra_col_bus)\n\n extra_col_plant = {\n \"lat\": get_lat(grid.plant.bus_id),\n \"lon\": get_lon(grid.plant.bus_id),\n }\n add_column_to_data_frame(grid.plant, extra_col_plant)\n\n extra_col_branch = {\n \"from_lat\": get_lat(grid.branch.from_bus_id),\n \"from_lon\": get_lon(grid.branch.from_bus_id),\n \"to_lat\": get_lat(grid.branch.to_bus_id),\n \"to_lon\": get_lon(grid.branch.to_bus_id),\n }\n add_column_to_data_frame(grid.branch, extra_col_branch)",
"def read_csv_point_data(df, lat_col='lat', lon_col='lon', crs='epsg:4326'):\n df['geometry'] = [geometry.Point(y, x) \\\n for x, y in zip(df[lat_col],\n df[lon_col])\n ]\n crs = {'init': crs}\n gdf = gpd.GeoDataFrame(df, crs=crs, geometry=\"geometry\")\n return gdf",
"def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df",
"def make_latlon(df):\n lat_list=['Latitude Degrees',\n 'Latitude Minutes',\n 'Latitude Seconds']\n\n lon_list=['Longitude Degrees',\n 'Longitude Minutes',\n 'Longitude Seconds']\n msk1=df['Latitude Degrees'].astype(str).str.contains(r'[0-9]')\n lons=df.loc[msk1,lon_list].values.astype(float)\n lats=df.loc[msk1,lat_list].values.astype(float)\n msk=(lons[:,1]==lons[:,1])\n lons=dec_degree(lons[msk])\n lats=dec_degree(lats[msk])\n return lats,lons",
"def make_geo(df: pd.DataFrame) -> gpd.GeoDataFrame:\n df = df.loc[~(np.isnan(df.d) | np.isnan(df.e))]\n gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.d, df.e))\n gdf.set_crs(epsg=5514)\n\n return gdf",
"def sort_by_latlon(self, df, lat_column='lat', lon_column='lon'):\n\n return df.assign(f = df[lat_column] + df[lon_column]).sort_values(by=['f', 'time']).drop('f', axis=1)",
"def insert_location(clean_df, station_id):\n lat, lon, elev = get_location(station_id)\n nrows = clean_df.shape[0]\n clean_df.insert(0, \"STATION\", [station_id]*nrows)\n clean_df.insert(1, \"LAT\", [lat]*nrows)\n clean_df.insert(2, \"LON\", [lon]*nrows)\n clean_df.insert(3, \"ELEV\", [elev]*nrows)\n return clean_df",
"def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df",
"def add_loc_ocean2df(df=None, LatVar='lat', LonVar='lon'):\n from geopandas.tools import sjoin\n # Get the shapes for the ocean\n featurecla = 'ocean'\n group = get_shapes4oceans(rtn_group=True, featurecla=featurecla)\n # Turn the dataframe into a geopandas dataframe\n gdf = geopandas.GeoDataFrame(\n df, geometry=geopandas.points_from_xy(df[LonVar], df[LatVar]))\n # Work out if any of the points are within the polygons\n pointInPolys = sjoin(gdf, group, how='left')\n # Check how many were assigned to a region\n Nnew = float(pointInPolys['name'].dropna().shape[0])\n N = float(df.shape[0])\n if N != Nnew:\n pstr = 'WARNING: Only {:.2f}% assigned ({} of {})'\n print(pstr.format((Nnew/N)*100, int(Nnew), int(N)))\n # Add the ocean assingnment back into the orginal dataframe\n df[featurecla] = pointInPolys['name'].values\n return df",
"def addUbigeoAndLocation(df_obj, s_option,\n s_amb,\n s_depart,\n s_prov,\n s_dist ):\n\n df_obj['ubigeo'] = s_option\n df_obj['ambito'] = s_amb\n df_obj['departamento'] = s_depart\n df_obj['provincia'] = s_prov\n df_obj['distrito'] = s_dist\n return (df_obj)",
"def makeGeoDataFrame(data):\n df = pd.DataFrame(data)\n gdf = gpd.GeoDataFrame(df, geometry=\"Coordinates\", crs=\"EPSG:3395\")\n\n return gdf",
"def add_shortest_route(df):\n\n df['gmaps_dist'] = df.apply(lambda row: gmaps.getTotDist((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)\n df['gmaps_dur'] = df.apply(lambda row: gmaps.getTotDur((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)",
"def point_to_coord(gdf):\n\n new_gdf = gpd.GeoDataFrame(columns=[\"centroid\"])\n new_gdf[\"centroid\"] = gdf\n coordinates = []\n\n for row in new_gdf[\"centroid\"]:\n coordinates.append(np.array(row.coords[0]))\n new_gdf[\"coordinates\"] = coordinates\n\n return new_gdf",
"def build_geoseries(self, dataframe):\n geo_list = []\n with click.progressbar(dataframe.iterrows(), label='Pulling site plans and geographic title data', length=len(dataframe)) as d:\n for index, row in d:\n geo_list.append(self.map_property(row['linc']))\n\n geo_series = gpd.GeoSeries([Point(mark) for mark in geo_list], index=dataframe.index)\n\n return geo_series",
"def map_points(df, lat_col='latitude', lon_col='longitude', zoom_start=11, \\\n plot_points=False, pt_radius=15, \\\n draw_heatmap=False, heat_map_weights_col=None, \\\n heat_map_weights_normalize=True, heat_map_radius=15):\n\n ## center map in the middle of points center in\n middle_lat = df[lat_col].median()\n middle_lon = df[lon_col].median()\n\n curr_map = folium.Map(location=[middle_lat, middle_lon],\n zoom_start=zoom_start)\n\n # add points to map\n if plot_points:\n for _, row in df.iterrows():\n folium.CircleMarker([row[lat_col], row[lon_col]],\n radius=pt_radius,\n popup=row['name'],\n fill_color=\"#3db7e4\", # divvy color\n ).add_to(curr_map)\n\n # add heatmap\n if draw_heatmap:\n # convert to (n, 2) or (n, 3) matrix format\n if heat_map_weights_col is None:\n cols_to_pull = [lat_col, lon_col]\n else:\n # if we have to normalize\n if heat_map_weights_normalize:\n df[heat_map_weights_col] = \\\n df[heat_map_weights_col] / df[heat_map_weights_col].sum()\n\n cols_to_pull = [lat_col, lon_col, heat_map_weights_col]\n\n stations = df[cols_to_pull].as_matrix()\n curr_map.add_children(plugins.HeatMap(stations, radius=heat_map_radius))\n\n return curr_map",
"def insert_row(self, row_value, index):\n row = pd.DataFrame(row_value, columns=['lat', 'long', 'alt', 'descr'])\n self.df = pd.concat([self.df.iloc[:index], row, self.df.iloc[index:]]).reset_index(drop=True)",
"def convert_and_append_geohash(data,\n cluster_c='cluster',\n lat_c='latitude',\n lon_c='longitude'):\n for idx, row in data.iterrows():\n if pd.isnull(row[cluster_c]):\n continue\n lat, lon = convert_geohash_to_gps(row[cluster_c])\n data.loc[idx, lat_c] = lat\n data.loc[idx, lon_c] = lon\n return data",
"def conversion_function(self):\n conversion = gpd.GeoDataFrame(gpd.GeoSeries(self)) # convert the geoseries into a geodataframe\n # rename the geometry column from '0' to 'geometry'\n conversion = conversion.rename(columns={0: 'geometry'}).set_geometry('geometry')\n return conversion"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns first n values from the given sequence. | def take(n, seq):
seq = iter(seq)
result = []
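    # Collect up to n items; stop early if the sequence is exhausted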
try:
for i in range(n):
result.append(next(seq))
except StopIteration:
pass
return result | [
"def take(n, seq):\n return list(itertools.islice(seq, 0, n))",
"def take(n, seq):\n return itertools.islice(seq, n)",
"def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(seq.next())\n except StopIteration:\n pass\n return result",
"def FirstNItems(iterable, n):\n def FNI(iterable, n):\n source = iter(iterable)\n while n > 0:\n yield next(source)\n n -= 1\n\n if n is None:\n pass\n elif n <= 0:\n iterable = []\n else:\n iterable = FNI(iterable, n)\n return iterable",
"def first_N(num): \n i = 0\n r = 0.0\n for v in gen_seq():\n if i >= num:\n break\n r = r + v\n i += 1\n return r",
"def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))",
"def first(s, n=1):\n return s[:n]",
"def take(iterable, n):\n return list(itertools.islice(iterable, n))",
"def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)",
"def take(n, iterable):\r\n return list(itertools.islice(iterable, n))",
"def take(n, iterable):\n return list(itertools.islice(iterable, n))",
"def first_n(n):\r\n return Quantifier(\"first_{}\".format(n),\r\n isom=False, cons=True, lcons=False, rmon=True, lmon=None,\r\n fn=lambda seq: first_n_ver(seq, n),\r\n gen_fn=lambda verify_fn, truth_value, max_length: first_n_gen(n, verify_fn, truth_value, max_length))",
"def take(n, iterable):\n return list(islice(iterable, n))",
"def take(n, iterable):\r\n return list(islice(iterable, n))",
"def first(self, n=1):\r\n res = self.obj[0:n]\r\n if len(res) is 1:\r\n res = res[0]\r\n return self._wrap(res)",
"def take_every(n, iterable):\n return islice(iterable, 0, None, n)",
"def take_spread(sequence, num_to_take):\n length = float(len(sequence))\n return [sequence[int(math.ceil(i * length / num_to_take))] for i in range(num_to_take)]",
"def nth(iterable, n, default=None):\n return next(itertools.islice(iterable, n, None), default)",
"def take(iterable, nitems):\n return itertools.islice(iterable, 0, nitems)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Report Method to Get Work Order Details. | def get_work_order_detail(self, date_range):
work_order_obj = self.env["task.line"]
start = datetime.strptime(date_range.get("date_from"), "%Y-%m-%d")
end = datetime.strptime(date_range.get("date_to"), "%Y-%m-%d")
step = timedelta(days=1)
workorder_detail = []
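        # walk the requested date range one day at a time, collecting parts issued per day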
while start <= end:
sdate = str(
datetime.strptime(
str(start.date()) + " 00:00:00", DEFAULT_SERVER_DATETIME_FORMAT
)
)
edate = str(
datetime.strptime(
str(start.date()) + " 23:59:59", DEFAULT_SERVER_DATETIME_FORMAT
)
)
work_order_ids = work_order_obj.search(
[("date_issued", ">=", sdate), ("date_issued", "<=", edate)]
)
if work_order_ids:
parts_data = {}
parts_value = []
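                # only report lines whose work order has been completed (state == 'done')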
for parts_line in work_order_ids:
if (
parts_line.fleet_service_id
and parts_line.fleet_service_id.state == "done"
):
parts_dict = {
"wo_name": parts_line.fleet_service_id
and parts_line.fleet_service_id.name
or "",
"vehicle_id": parts_line.fleet_service_id
and parts_line.fleet_service_id.vehicle_id
and parts_line.fleet_service_id.vehicle_id.name
or "",
"part_no": parts_line.product_id
and parts_line.product_id.default_code
or "",
"part_name": parts_line.product_id
and parts_line.product_id.name
or "",
"vehicle_make": parts_line.vehicle_make_id
and parts_line.vehicle_make_id.name
or "",
"qty": parts_line.qty or 0.0,
"uom": parts_line.product_uom
and parts_line.product_uom.name
or "",
"old_part_return": parts_line.old_part_return
and "Yes"
or "No",
"issued_by": parts_line.issued_by
and parts_line.issued_by.name
or "",
"remarks": parts_line.fleet_service_id
and parts_line.fleet_service_id.note
or "",
}
parts_value.append(parts_dict)
if parts_value:
parts_value = sorted(parts_value, key=lambda k: k["wo_name"])
parts_data = {"date": start.date(), "value": parts_value}
workorder_detail.append(parts_data)
start += step
return workorder_detail | [
"def get_work_order_detail_by_advance_search(self):\n self.ensure_one()\n return {\n \"name\": _(\"Work Order\"),\n \"view_type\": \"form\",\n \"view_mode\": \"tree,form\",\n \"res_model\": \"fleet.vehicle.log.services\",\n \"type\": \"ir.actions.act_window\",\n \"domain\": [(\"id\", \"=\", self.work_order_id.id)]\n if self.work_order_id\n else [],\n \"context\": self._context,\n \"target\": \"current\",\n }",
"def test_get_order_report_by_order_id(self):\n pass",
"def test_get_trade_report_by_order_id(self):\n pass",
"def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass",
"def test_get_order_reports(self):\n pass",
"def get_buy_order_details(self):\n return self.buy_order",
"def get_order_detail(orderid): \n data = order_obj.get_order_detail(orderid)\n return data",
"def open_workorders(self, cr, uid, ids, context=None):\n context = context or {}\n models_data = self.pool.get('ir.model.data')\n data = self.browse(cr, uid, ids[0])\n wo_ids = self._make_query_result(cr, uid, data, context=context)\n\n # Get workorder views\n dummy, form_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_form_cost_report')\n dummy, tree_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_tree_view_cost_report')\n\n context.update({'group_by':'production_id'})\n\n return {\n 'domain': \"[('id','in',[\"+','.join(map(str, wo_ids))+\"])]\",\n 'name': _('WorkOrder Cost Analysis'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'context':context,\n 'res_model': 'mrp.production.workcenter.line',\n 'views': [(tree_view or False, 'tree'), (form_view or False, 'form')],\n 'type': 'ir.actions.act_window',\n }",
"def getOrderInfo(self):\n return self.__orderinfo",
"def get_all_order_detail(self):\r\n return self.order_detail",
"def order_item_details(self) -> 'outputs.OrderItemDetailsResponse':\n return pulumi.get(self, \"order_item_details\")",
"def get_an_order_details(cls, account_id, order_id):\n return cls().requests.get(f'users/{account_id}/orders/{order_id}')",
"def get_order_info(self, exchange,orderID):\n my_trade_keys = [\"timestamp\",\"datetime\",\"id\",\"order\",\"amount\",\"price\",\"cost\",'fee']\n trade_book_columns = ['timestamp','datetime','tradeID','orderID','amount','price','cost','fee']\n trade_info = None\n status=None\n if self.exchanges[exchange].has['fetchOrder']:\n order_info = self.exchanges[exchange].fetch_order(orderID)\n status = order_info['status']\n if not status: raise ValueError('The exchange does not return order status')\n\n elif status !='closed': return {'status':status, 'info':trade_info}\n else:\n my_trades = self.exchanges[exchange].fetch_my_trades(order_info['symbol'])\n right_trades = list(filter(lambda a: a['order']==orderID,my_trades))\n trade_info = [{info[0]:trade[info[1]] for info in zip(trade_book_columns,my_trade_keys)} for trade in right_trades]\n return {'status': status,'info':trade_info}\n tm.sleep(self.exchanges[exchange].rateLimit / 1000)",
"def test_retrieve_order(self):\n pass",
"def test_order_info(self):\n pass",
"def retrieve_work_arrangement(self, wa_pk):\n pass",
"def order_details(self, request):\n customer_id = request.query_params.get('customerId')\n order = Customers.objects.customer_orders(customer_id)\n\n return Response(order)",
"def __str__(self):\n return self.order_number",
"def get_sell_order_details(self):\n return self.sell_order"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate xlsx format print report. | def generate_xlsx_report(self, workbook, data, parts_data):
worksheet = workbook.add_worksheet("daily_parts_issuance_wizard")
worksheet.set_column(0, 0, 10)
worksheet.set_column(1, 1, 15)
worksheet.set_column(2, 2, 20)
worksheet.set_column(3, 3, 15)
worksheet.set_column(4, 4, 10)
worksheet.set_column(5, 5, 12)
worksheet.set_column(6, 6, 10)
worksheet.set_column(7, 7, 10)
worksheet.set_column(8, 8, 15)
worksheet.set_column(9, 9, 10)
worksheet.set_column(10, 10, 15)
worksheet.set_column(11, 11, 10)
worksheet.set_column(12, 12, 20)
worksheet.set_column(13, 13, 5)
worksheet.set_column(14, 14, 5)
worksheet.set_column(15, 15, 5)
bold = workbook.add_format(
{"bold": True, "font_name": "Arial", "font_size": "10"}
)
tot = workbook.add_format(
{"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"}
)
border = workbook.add_format(
{"border": 2, "font_name": "Arial", "font_size": "10"}
)
merge_format = workbook.add_format({"border": 2, "align": "center"})
format1 = workbook.add_format(
{"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"}
)
format1.set_bg_color("gray")
date = workbook.add_format({"num_format": "dd/mm/yy"})
worksheet.merge_range("C3:F3", "Merged Cells", merge_format)
row = 0
row += 1
row += 1
worksheet.write(row, 2, "DAILY PARTS ISSUANCE", tot)
row += 1
worksheet.write(row, 2, "Date From:", tot)
worksheet.write(row, 3, data["form"]["date_from"] or "", border)
worksheet.write(row, 4, "To:", tot)
worksheet.write(row, 5, data["form"]["date_to"] or "", border)
row += 2
worksheet.write(row, 0, "CMF", bold)
row = 3
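        # one block per issuance date: date header, column headers, then a row per issued part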
for objec in self.get_work_order_detail(data["form"]):
row += 3
worksheet.write(row, 0, "DATE ISSUED :", bold)
worksheet.write(row, 1, objec.get("date") or "", date)
row += 2
worksheet.write(row, 0, "NO.", format1)
worksheet.write(row, 1, "WO NO.", format1)
worksheet.write(row, 2, "VEHICLE ID", format1)
worksheet.write(row, 3, "PART NO.", format1)
worksheet.write(row, 4, "PART NAME", format1)
worksheet.write(row, 5, "VEHICLE MAKE", format1)
worksheet.write(row, 6, "USED", format1)
worksheet.write(row, 7, "UNIT TYPE", format1)
worksheet.write(row, 8, "OLD PART RETURND", format1)
worksheet.write(row, 9, "ISSUED BY", format1)
worksheet.write(row, 10, "REMARKS", format1)
line_row = row + 1
line_col = 0
counter = 1
for obj in objec.get("value"):
worksheet.write(line_row, line_col, counter, border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("wo_name") or "", border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("vehicle_id") or "", border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("part_no") or "", border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("part_name") or "", border)
line_col += 1
worksheet.write(
line_row, line_col, obj.get("vehicle_make") or "", border
)
line_col += 1
worksheet.write(line_row, line_col, obj.get("qty") or "", border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("uom") or "", border)
line_col += 1
worksheet.write(
line_row, line_col, obj.get("old_part_return") or "", border
)
line_col += 1
worksheet.write(line_row, line_col, obj.get("issued_by") or "", border)
line_col += 1
worksheet.write(line_row, line_col, obj.get("remarks") or "", border)
line_col = 0
line_row += 1
counter += 1
worksheet.write(line_row, line_col, "********", border) | [
"def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }",
"def generate_excel_report(dashes):\r\n pass",
"def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n 
ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)",
"def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])",
"def download_report_as_xlsx(self):\n self.ensure_one()\n self.inventory_coverage = True\n self.get_products_for_requisition()\n report_action = self.download_xlsx_report_with_text()\n return report_action",
"def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True",
"def make_report(self):\n\n #\n response = self.create_response()\n\n self.workbook.add_worksheet(_('Statistics'))\n self.fillup_sheet_statistics()\n\n # adding needed worksheets and to fill up it\n if 'polls' in self.subjects:\n self.workbook.add_worksheet(_('Polls'))\n self.fillup_sheet_polls()\n if 'choices' in self.subjects:\n self.workbook.add_worksheet(_('Choices'))\n self.fillup_sheet_choices()\n if 'votes' in self.subjects:\n self.workbook.add_worksheet(_('Votes'))\n self.fillup_sheet_votes()\n if 'voters' in self.subjects:\n self.workbook.add_worksheet(_('Voters'))\n self.fillup_sheet_voters()\n if 'results' in self.subjects:\n self.workbook.add_worksheet(_('Results'))\n self.fillup_sheet_results()\n\n logger.debug('Added worksheets to the workbook')\n\n # close the workbook, as well as to write the Excel document in the response and to return it\n self.workbook.close()\n response.write(self.output.getvalue())\n logger.debug('Wrote a PDF in the response')\n logger.info('Succefully created a report about polls in Excel for user {0}'.format(self.author))\n return response",
"def print_report(stocks_to_print):\n\n print(\"=========== REPORT ============\")\n for stock in stocks_to_print:\n stock.print_one_line_report()",
"def _generate_excel_file(self):\n\n if len(self._data) > 1048576:\n raise ValueError(\"Excel has a maximum row limit of 1,048,576 rows, but the output would contain %s rows. Please update your configutation to output in another data format (for example, a CSV) or reduce the number of rows to be processed.\" % len(self._data))\n\n import xlsxwriter\n\n output_stream = io.BytesIO()\n workbook = xlsxwriter.Workbook(output_stream, {'constant_memory': True, 'in_memory': True}) # in_memory option is used to prevent 1GB+ files from being written to disk. Writing to disk has constant memory, but is SLOW. We can specify the tmp directory if needed.\n worksheet = workbook.add_worksheet(self.sheet_name)\n\n output = self._data.to_dict('split')\n if self.has_header:\n for col in range(0, len(output['columns'])):\n worksheet.write(0, col + 1 if self.index_rows else col, output['columns'][col])\n\n for row in range(0, len(output['data'])):\n if self.index_rows:\n worksheet.write(row + 1 if self.has_header else row, 0, row + 1)\n\n for col in range(0, len(output['columns'])):\n worksheet.write(row + 1 if self.has_header else row, col + 1 if self.index_rows else col, output['data'][row][col])\n\n workbook.close()\n output_stream.seek(0)\n\n self._write_data(output_stream.getvalue(), \"wb\")",
"def xlsx_to_pdf(src_file):\n\tcurrent_work_dir = os.getcwd() + '/printer'\n\twb_path = src_file\n\twb = EXCEL.Workbooks.Open(wb_path)\n\n\tws_index_list = [1] # say you want to print these sheets\n\n\ttmp_file = 'print_total' if re.match(r'.*_total.*', src_file) else 'print'\n\tpath_to_pdf = current_work_dir + '/tmp/' + tmp_file +'.pdf'\n\n\twb.WorkSheets(ws_index_list).Select()\n\t# ExportAsFixedFormat(type, filename, quality=0-good,1-bad, IncludeDocProperties, IgnorePrintAreas, From, To, OpenAfterPublish)\n\twb.ActiveSheet.ExportAsFixedFormat(0, path_to_pdf, 0, False, False, 1, 1)\n\n\twb.Close()\n\n\treturn path_to_pdf\n\t# excel.Quit()",
"def generate_service_odometer_xlsx_report(self, res, next_service):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"next_service_by_odometer\")\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 12500\n worksheet.col(2).width = 10000\n worksheet.col(3).width = 6000\n worksheet.col(4).width = 7500\n worksheet.col(5).width = 7500\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 7500\n worksheet.col(8).width = 10000\n\n font = xlwt.Font()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n border = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"Scheduled Maintenance By Mileage\", format1)\n row += 3\n worksheet.write(row, 7, \"Date :\", format1)\n worksheet.write(row, 8, time.strftime(\"%d-%B-%Y\"), format1)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"VEHICLE ID\", format1)\n worksheet.write(row, 2, \"VIN NO.\", format1)\n worksheet.write(row, 3, \"MAKE\", format1)\n worksheet.write(row, 4, \"MODEL\", format1)\n worksheet.write(row, 5, \"LAST SERVICE DATE\", format1)\n worksheet.write(row, 6, \"LAST MILEAGE\", format1)\n worksheet.write(row, 7, \"NEXT MILEAGE\", format1)\n worksheet.write(row, 8, \"REGISTRATION STATE\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in next_service:\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.name or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.vin_sn or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.f_brand_id and obj.f_brand_id.name or \"\", border\n )\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.model_id and obj.model_id.name or \"\", border\n )\n line_col += 1\n date = \"\"\n if obj.last_service_date:\n date = format_date(\n self.env,\n obj.last_service_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(line_row, line_col, date or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.odometer or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.due_odometer or \"\", border)\n line_col += 1\n # worksheet.write(line_row, line_col,\n # obj.vechical_location_id and\n # obj.vechical_location_id.name or '', border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res",
"def generate_waiter_financial_report_excel_file(self, staff_info, period, month_report, path):\n try:\n workbook = xlw.Workbook(path)\n worksheet = workbook.add_worksheet()\n\n file_header_format = workbook.add_format({\n 'font_size':20,\n 'align': 'center',\n 'valign': 'vcenter'\n })\n table_header_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'font_size': 12,\n 'fg_color': '#C0C0C0'})\n cell_format = workbook.add_format({\n 'font_size': 12,\n 'align':'center',\n 'valign':'vcenter'\n })\n sum_format = workbook.add_format({\n 'font_size': 12,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#99FF99'\n })\n\n worksheet.set_column('A:A', 10)\n worksheet.set_column('B:B', 30)\n worksheet.set_column('C:C', 20)\n worksheet.set_column('D:D', 20)\n worksheet.set_column('E:E', 20)\n worksheet.set_column('F:F', 10)\n worksheet.set_column('G:G', 15)\n\n worksheet.merge_range('A1:G2', f'{staff_info[3]} {staff_info[1]} {period}', file_header_format)\n\n row = 4\n column = 0\n\n for line in month_report:\n for item in line:\n if row == 4:\n worksheet.write(row, column, item.__str__(), table_header_format)\n else:\n if month_report.index(line) == len(month_report)-1 and line.index(item) == len(line)-1:\n worksheet.write(row, column, item.__str__(), sum_format)\n else:\n worksheet.write(row, column, item.__str__(), cell_format)\n column += 1\n row += 1\n column = 0\n\n workbook.close()\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response",
"def output_excel(self):\n\n self.logger.info(\"writing excel\")\n self.logger.debug(self)\n\n #client = self.data_bucket.client\n isin = self.data_bucket.shareclass_isin\n date = self.data_bucket.date\n sc_name = self.data_bucket.get_shareclass_infos(\"shareclass_name\")\n\n # open template excel\n template_file_name = 'AO_TPT_V5.0_Template.xlsx'\n output_file_name = f\"AO_TPT_V5.0_{sc_name}_{isin}_{date}.xlsx\"\n template = openpyxl.load_workbook(self.source_dir / template_file_name)\n report = template.get_sheet_by_name('Report')\n rows = dataframe_to_rows(self.report, index=False)\n\n # map dataframe columns to excel columns \n column_map = {}\n for row_idx, row in enumerate(rows):\n if row_idx == 0:\n assert report.max_column == len(row), \"Number of columns in report and template are different.\"\n for col_idx_pd, column_name in enumerate(row):\n for i in range(len(row)):\n if report.cell(row=1, column=i+1).value == column_name:\n #print(column_name, report.cell(row=1, column=i+1).value)\n column_map[col_idx_pd] = i+1\n assert col_idx_pd in column_map.keys(), f\"Missing {column_name} in template\"\n assert report.cell(row=1, column=column_map[col_idx_pd]).value == row[col_idx_pd]\n \n else:\n for col_idx, value in enumerate(row):\n if value == \"nan\":\n report.cell(row=row_idx+1, column=column_map[col_idx], value=\"\")\n report.cell(row=row_idx+1, column=column_map[col_idx]).alignment = Alignment(horizontal='center')\n else:\n report.cell(row=row_idx+1, column=column_map[col_idx], value=value)\n report.cell(row=row_idx+1, column=column_map[col_idx]).alignment = Alignment(horizontal='center')\n \n # fill SCR sheet\n scr_sheet = template.get_sheet_by_name('SCR')\n scr_sheet.cell(row=2, column=2, value=self.data_bucket.get_shareclass_infos(\"shareclass_name\"))\n \n # shareclass' infos\n scr_sheet.cell(row=6, column=3, value=self.data_bucket.date)\n scr_sheet.cell(row=7, column=3, value=self.data_bucket.get_shareclass_infos(\"shareclass_name\"))\n scr_sheet.cell(row=8, column=3, value=self.data_bucket.get_shareclass_infos().name)\n scr_sheet.cell(row=9, column=3, value=self.data_bucket.get_shareclass_infos(\"shareclass_currency\"))\n scr_sheet.cell(row=10, column=3, value=self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n\n # sub-module detail\n submodules = [\"interest_rate_risk\", \n \"equity_risk\",\n \"property_risk\",\n \"spread_risk\",\n \"currency_risk\"]\n\n for i, submodule in enumerate(submodules):\n weight_capreq = getattr(self.data_bucket.scr_module, f\"compute_{submodule}_submodule\")()\n scr_sheet.cell(row=16 + i, column=3, value=weight_capreq * self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n scr_sheet.cell(row=16 + i, column=3).number_format = '#,##0.00'\n scr_sheet.cell(row=16 + i, column=4, value=weight_capreq)\n scr_sheet.cell(row=16 + i, column=4).number_format = '#,##0.000 %'\n \n categories = [\"Interest_rate_risk_Up\",\n \"Interest_rate_risk_Down\",\n \"Equity_Risk_Type_1\",\n \"Equity_Risk_Type_2\",\n \"Property\",\n \"Spread_risk_of_bonds\",\n \"Credit_risk_Structured_Products\",\n \"Credit_risk_Derivatives_Up\",\n \"Credit_risk_Derivatives_Down\",\n \"Currency_risk_Up\",\n \"Currency_risk_Down\"]\n\n # risks specific detail\n for i, category in enumerate(categories):\n weight_capreq = getattr(self.data_bucket.scr_module, category)\n print(category)\n print(weight_capreq)\n scr_sheet.cell(row=23 + i, column=3, value=weight_capreq * 
self.data_bucket.get_shareclass_nav(\"shareclass_total_net_asset_sc_ccy\"))\n scr_sheet.cell(row=23 + i, column=3).number_format = '#,##0.00'\n scr_sheet.cell(row=23 + i, column=4, value=weight_capreq)\n scr_sheet.cell(row=23 + i, column=4).number_format = '#,##0.000 %'\n\n # total SCR market risk\n scr_sheet.cell(row=13, column=3, value= self.data_bucket.scr_module.compute_total_scr_market_risk())\n scr_sheet.cell(row=13, column=3).number_format = '#,##0.000 %'\n # save produced report\n template.save(self.output_dir / output_file_name)",
"def excel_generator(products, file_name, columns):\n try:\n workbook = xlsxwriter.Workbook(f\"{file_name}_results.xlsx\")\n worksheet = workbook.add_worksheet(f\"{file_name.capitalize()} Sheet\")\n cols = 0\n first_row = 0\n\n # Name header as 'columns' var\n for header in columns:\n cols = columns.index(header)\n worksheet.write(first_row, cols, header)\n rows = 1\n for product in products:\n for key, value in product.items():\n cols = columns.index(key)\n worksheet.write(rows, cols, value)\n rows += 1\n except IOError:\n raise IOError(\"Could not build file due to IOError.\")\n workbook.close()",
"def create_optimization_report(self, filename, objOpt):\n # Add a workbook and a worksheet.\n name = '%s.xlsx' % (filename)\n workbook = xlsxwriter.Workbook(name)\n worksheet1 = workbook.add_worksheet('Sweep Method')\n\n # Add a bold format to use to highlight cells.\n bold = workbook.add_format({'bold': True, 'text_wrap': True})\n # Add a bold format with a bottom border for table titles.\n tabletitle = workbook.add_format({'bold': True, 'bottom': True,\n 'align': 'center', 'valign': 'vcenter',})\n\n # Add numerical formats\n oneDec = workbook.add_format()\n twoDec = workbook.add_format()\n threeDec = workbook.add_format()\n # Set numerical formats to x decimals\n oneDec.set_num_format('0.0')\n twoDec.set_num_format('0.00')\n threeDec.set_num_format('0.000')\n\n \"\"\"____________________WORKSHEET 1_______________________\"\"\"\n # Adjust the column width.\n worksheet1.set_column('A:A', 8.5)\n worksheet1.set_column('B:B', 12.5)\n worksheet1.set_column('C:C', 12.5)\n worksheet1.set_column('D:D', 8.0)\n worksheet1.set_column('E:E', 12.0)\n worksheet1.set_column('F:F', 4.5)\n worksheet1.set_column('G:G', 4.5)\n worksheet1.set_column('H:H', 6.5)\n worksheet1.set_column('I:I', 6.5)\n worksheet1.set_column('J:J', 5.5)\n worksheet1.set_column('K:K', 5.5)\n worksheet1.set_column('L:L', 7.0)\n worksheet1.set_column('M:M', 6.0)\n worksheet1.set_column('N:N', 6.0)\n\n \"\"\" Extrusions \"\"\"\n # Write some data headers for optimization data\n worksheet1.merge_range('A1:N1', 'Extruded Profiles', tabletitle)\n worksheet1.merge_range('A2:E2', 'Input Data', tabletitle)\n worksheet1.write('A3', 'Number of Stiffeners', bold)\n worksheet1.write('B3', 'Plating Material', bold)\n worksheet1.write('C3', 'Profile Material', bold)\n worksheet1.write('D3', 'Plating Thickness (mm)', bold)\n worksheet1.write('E3', 'Profile (mm)', bold)\n worksheet1.merge_range('F2:G2', 'Offered', tabletitle)\n worksheet1.write('F3', 'SM (cm3)', bold)\n worksheet1.write('G3', 'Aw (cm2)', bold)\n worksheet1.merge_range('H2:I2', 'Required', tabletitle)\n worksheet1.write('H3', 'SM Min (cm3)', bold)\n worksheet1.write('I3', 'Aw Min (cm2)', bold)\n worksheet1.merge_range('J2:N2', 'Results', tabletitle)\n worksheet1.write('J3', 'SM ratio', bold)\n worksheet1.write('K3', 'Aw Ratio', bold)\n worksheet1.write('L3', 'Stiffener Weight (kg)', bold)\n worksheet1.write('M3', 'Plating Weight (kg)', bold)\n worksheet1.write('N3', 'Total Weight (kg)', bold)\n\n # optimization data we want to write to the worksheet.\n inputList = objOpt.sweep[0]\n\n\n # Start from the first cell below the headers.\n row = 3\n col = 0\n\n # Iterate over the data and write it out row by row.\n for (nStiff, panMat, stiffMat, tp, profile, SM, Aw, SMMin, AwMin, SMRat,\n AwRat, platWeight, stiffWeight, totWeight) in (inputList):\n worksheet1.write(row, col, nStiff)\n worksheet1.write(row, col + 1, panMat)\n worksheet1.write(row, col + 2, stiffMat)\n worksheet1.write(row, col + 3, tp)\n worksheet1.write(row, col + 4, profile)\n worksheet1.write(row, col + 5, SM, threeDec)\n worksheet1.write(row, col + 6, Aw, threeDec)\n worksheet1.write(row, col + 7, SMMin, threeDec)\n worksheet1.write(row, col + 8, AwMin, threeDec)\n worksheet1.write(row, col + 9, SMRat, threeDec)\n worksheet1.write(row, col + 10, AwRat, threeDec)\n worksheet1.write(row, col + 11, platWeight, twoDec)\n worksheet1.write(row, col + 12, stiffWeight, twoDec)\n worksheet1.write(row, col + 13, totWeight, twoDec)\n row += 1\n\n row_e = row + 4 # used in formatting for machined\n\n # format cells with 
colors\n\n red = workbook.add_format({'bg_color': '#E74C3C'})\n yellow = workbook.add_format({'bg_color': '#F7DC6F'})\n green = workbook.add_format({'bg_color': '#82E0AA'})\n\n # calculate the minimum weight with a section modulus ratio > 1\n # TODO: This only checks the SM ratio, add Aw ratio later.\n secMs = []\n for i in range(0, objOpt.sweep[0].__len__()):\n # Gather all SMs ratios that are > 1\n if objOpt.sweep[0][i][9] > 1:\n secMs = secMs + [[i] + [objOpt.sweep[0][i][9]] + [objOpt.sweep[0][i][-1]]]\n else:\n pass\n\n allW = []\n for j in range(0, secMs.__len__()):\n # Gather all the weights from the SMs list above and find the minimum\n allW = allW + [secMs[j][-1]]\n minw = min(allW)\n indices = [ind for ind, val in enumerate(allW) if val == minw]\n\n minWs = []\n for k in indices:\n # Gather all the property combinations that give the minimum weight\n minWs = minWs + [secMs[k][0]]\n\n for ind in range(0, minWs.__len__()):\n # fill the background color of all the minimums\n rowmin = minWs[ind] + 4\n cellMin = 'N$%d' % rowmin\n worksheet1.conditional_format(cellMin, {'type': 'cell',\n 'criteria': '==',\n 'value': cellMin,\n 'format': green})\n\n\n # Write a conditional format over a range.\n rowlenJ = 'J4:J%d' % row # SM ratio\n rowlenK = 'K4:K%d' % row # Aw ratio\n\n worksheet1.conditional_format(rowlenJ, {'type': 'cell',\n 'criteria': '<',\n 'value': 1,\n 'format': red})\n worksheet1.conditional_format(rowlenK, {'type': 'cell',\n 'criteria': '<',\n 'value': 1,\n 'format': red})\n worksheet1.conditional_format(rowlenJ, {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 1,\n 'maximum': 1.15,\n 'format': yellow})\n\n \"\"\" Machined \"\"\"\n # Write some data headers for optimization data\n TT = 'A%d:N%d' % (row+2, row+2)\n T1 = 'A%d:E%d' % (row+3, row+3)\n H1 = 'A%d' % (row + 4)\n H2 = 'B%d' % (row + 4)\n H3 = 'C%d' % (row + 4)\n H4 = 'D%d' % (row + 4)\n H5 = 'E%d' % (row + 4)\n T2 = 'F%d:G%d' % (row+3, row+3)\n H6 = 'F%d' % (row + 4)\n H7 = 'G%d' % (row + 4)\n T3 = 'H%d:I%d' % (row+3, row+3)\n H8 = 'H%d' % (row + 4)\n H9 = 'I%d' % (row + 4)\n T4 = 'J%d:N%d' % (row+3, row+3)\n H10 = 'J%d' % (row + 4)\n H11 = 'K%d' % (row + 4)\n H12 = 'L%d' % (row + 4)\n H13 = 'M%d' % (row + 4)\n H14 = 'N%d' % (row + 4)\n\n\n worksheet1.merge_range(TT, 'Machined Profiles', tabletitle)\n worksheet1.merge_range(T1, 'Input Data', tabletitle)\n worksheet1.write(H1, 'Number of Stiffeners', bold)\n worksheet1.write(H2, 'Plating Material', bold)\n worksheet1.write(H3, 'Profile Material', bold)\n worksheet1.write(H4, 'Plating Thickness (mm)', bold)\n worksheet1.write(H5, 'Profile (mm)', bold)\n worksheet1.merge_range(T2, 'Offered', tabletitle)\n worksheet1.write(H6, 'SM (cm3)', bold)\n worksheet1.write(H7, 'Aw (cm2)', bold)\n worksheet1.merge_range(T3, 'Required', tabletitle)\n worksheet1.write(H8, 'SM Min (cm3)', bold)\n worksheet1.write(H9, 'Aw Min (cm2)', bold)\n worksheet1.merge_range(T4, 'Results', tabletitle)\n worksheet1.write(H10, 'SM ratio', bold)\n worksheet1.write(H11, 'Aw Ratio', bold)\n worksheet1.write(H12, 'Stiffener Weight (kg)', bold)\n worksheet1.write(H13, 'Plating Weight (kg)', bold)\n worksheet1.write(H14, 'Total Weight (kg)', bold)\n\n # optimization data we want to write to the worksheet.\n inputList = objOpt.sweep[1]\n\n # Start from the first cell below the headers.\n row = row + 4\n col = 0\n\n # Iterate over the data and write it out row by row.\n for (nStiff, panMat, stiffMat, tp, profile, SM, Aw, SMMin, AwMin, SMRat,\n AwRat, platWeight, stiffWeight, totWeight) in 
(inputList):\n worksheet1.write(row, col, nStiff)\n worksheet1.write(row, col + 1, panMat)\n worksheet1.write(row, col + 2, stiffMat)\n worksheet1.write(row, col + 3, tp)\n worksheet1.write(row, col + 4, profile)\n worksheet1.write(row, col + 5, SM, threeDec)\n worksheet1.write(row, col + 6, Aw, threeDec)\n worksheet1.write(row, col + 7, SMMin, threeDec)\n worksheet1.write(row, col + 8, AwMin, threeDec)\n worksheet1.write(row, col + 9, SMRat, threeDec)\n worksheet1.write(row, col + 10, AwRat, threeDec)\n worksheet1.write(row, col + 11, platWeight, twoDec)\n worksheet1.write(row, col + 12, stiffWeight, twoDec)\n worksheet1.write(row, col + 13, totWeight, twoDec)\n row += 1\n\n\n # calculate the minimum weight with a section modulus ratio > 1\n # TODO: This only checks the SM ratio, add Aw ratio later.\n secMs2 = []\n for i in range(0, objOpt.sweep[1].__len__()):\n # Gather all SMs ratios that are > 1\n if objOpt.sweep[1][i][9] > 1:\n secMs2 = secMs2 + [[i] + [objOpt.sweep[1][i][9]] + [objOpt.sweep[1][i][-1]]]\n else:\n pass\n\n if secMs2 != []:\n allW2 = []\n for j in range(0, secMs2.__len__()):\n # Gather all the weights from the SMs list above and find the minimum\n allW2 = allW2 + [secMs2[j][-1]]\n minw2 = min(allW2)\n indices2 = [ind for ind, val in enumerate(allW2) if val == minw2]\n\n minWs2 = []\n for k in indices2:\n minWs2 = minWs2 + [secMs2[k][0]]\n\n for ind in range(0, minWs2.__len__()):\n # fill the background color of all the minimums\n rowmin2 = minWs2[ind] + row_e + 1\n cellMin2 = 'N$%d' % rowmin2\n worksheet1.conditional_format(cellMin2, {'type': 'cell',\n 'criteria': '==',\n 'value': cellMin2,\n 'format': green})\n\n else:\n pass\n\n # Write a conditional format over a range.\n rowlenJ = 'J%d:J%d' % (row_e, row) # SM ratio\n rowlenK = 'K%d:K%d' % (row_e, row) # Aw ratio\n\n worksheet1.conditional_format(rowlenJ, {'type': 'cell',\n 'criteria': '<',\n 'value': 1,\n 'format': red})\n worksheet1.conditional_format(rowlenK, {'type': 'cell',\n 'criteria': '<',\n 'value': 1,\n 'format': red})\n worksheet1.conditional_format(rowlenJ, {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 1,\n 'maximum': 1.15,\n 'format': yellow})",
"def main():\n # Get connection object for FTP queries.\n connection = mal.setup_FTP()\n\n # Get week start and end dates for report based on today's date.\n dates = setup_dates()\n\n # Set up Excel file and name it.\n name = str(dates[\"This Week\"][1]) + \" Weekly Usage Report.xlsx\"\n path = os.path.join(os.getcwd(), \"Usage Reports\", name)\n if os.path.exists(path):\n os.remove(path)\n writer = setup_dest_file(path)\n\n # Grab reference to the workbook and add some formats.\n workbook = writer.book\n f_week = workbook.add_format({\n 'bold': True,\n 'font_color': 'white',\n 'bg_color': '#4f81bd'\n })\n f_purple = workbook.add_format({\n 'bold': True,\n 'font_color': 'white',\n 'bg_color': '#8064a2'\n })\n f_header = workbook.add_format({\n 'bold': True,\n 'border': 0\n })\n f_percent = workbook.add_format({'num_format': '0.00%'})\n f_header_percent = workbook.add_format({\n 'bold': True,\n 'border': 0,\n 'num_format': '0%'\n })\n\n # Create a dictionary to label our weeks.\n weekname = [\"This Week\", \"Last Week\", \"Last Year\"]\n # Initialize a bunch of counters for column placement.\n weekcount, C1, C2, C3, C4, C5, C6 = 0, 0, 0, 0, 0, 0, 0\n # Initialize a bunch of offsets for row placement.\n R1A, R1B = 1, 17\n R2 = 1\n R3 = 1\n R4A, R4B = 2, 14\n R5 = 2\n R6A, R6B = 2, 14\n\n # Store some pre-written chunks of excel formula for later.\n weekly_change = [\"Weekly Change\", \"=(B10-G10)/G10\",\n \"=(C10-H10)/H10\", \"=(D10-I10)/I10\"]\n yearly_change = [\"Yearly Change\", \"=(B10-L10)/L10\",\n \"=(C10-M10)/M10\", \"=(D10-N10)/N10\"]\n\n # Some bits of SQL code, saved to variables to keep our code cleaner\n sql_1A = \"\"\"---- # of Test Results by Date\n select CONVERT(varchar(10), tr.UpdatedDate,101) as Date,\n COUNT(tr.testresultid) as Total,\n sum(case when tr.qtionlinetestsessionid is not null then 1\n else 0 end) as OnlineTests, sum(case when tr.BubbleSheetID\n is not null then 1 else 0 end) as BubbleSheets\n from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart\n and tr.UpdatedDate<@weekend and d.Name not like '%demo%'\n and (tr.BubbleSheetID is not null\n or tr.QTIOnlineTestSessionID is not null)\n group by CONVERT(varchar(10), tr.UpdatedDate, 101)\n order by CONVERT(varchar(10), tr.UpdatedDate, 101)\"\"\"\n sql_1B = \"\"\"\n select CONVERT(varchar(10), tr.UpdatedDate,101) as Date,\n COUNT(tr.testresultid) as Total, sum(case when\n tr.qtionlinetestsessionid is not null then 1 else 0 end)\n as OnlineTests, sum(case when tr.BubbleSheetID is not null\n then 1 else 0 end) as BubbleSheets from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart and\n tr.UpdatedDate<@weekend and d.Name not like '%demo%'\n and (tr.BubbleSheetID is not null or\n tr.QTIOnlineTestSessionID is not null) and d.DistrictID\n not in (2680, 2479) and d.DistrictGroupID not in (112,114)\n and d.name not like '%frog street%'\n group by CONVERT(varchar(10), tr.UpdatedDate, 101)\n order by CONVERT(varchar(10), tr.UpdatedDate, 101)\"\"\"\n sql_2 = \"\"\"---- # of Test Results by Client\n select st.Name as State, d.Name as District, count(1) TotalResults,\n sum(case when tr.qtionlinetestsessionid is not null then 1\n else 0 end) as OnlineTests, 
sum(case when tr.BubbleSheetID is not\n null then 1 else 0 end) as BubbleSheets from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart and tr.UpdatedDate<@weekend\n and d.Name not like '%demo%'\n and (tr.BubbleSheetID is not null\n or tr.QTIOnlineTestSessionID is not null)\n group by st.Name, d.Name\n order by count(1) desc\"\"\"\n sql_3 = \"\"\"---- # of LinkIt Benchmarks by Client\n select st.Name as State, d.Name as District, count(1) TotalResults,\n sum(case when tr.qtionlinetestsessionid is not null then 1 else 0\n end) as OnlineTests, sum(case when tr.BubbleSheetID is not null\n then 1 else 0 end) as BubbleSheets from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart and tr.UpdatedDate<@weekend\n and d.Name not like '%demo%' and (tr.BubbleSheetID is not null\n or tr.QTIOnlineTestSessionID is not null)\n and vt.Name like '%linkit%form%'\n group by st.Name, d.Name\n order by count(1) desc\"\"\"\n sql_4A = \"\"\"---- # of Online Test Sessions by Start Time ---\n select CONVERT(varchar(10), qots.startdate,101) as [Date Started],\n count(1) as [Total # of Online Tests], sum(case when qots.statusid=1\n then 1 else 0 end) as [# of Created], sum(case when qots.statusid=2\n then 1 else 0 end) as [# of Started], sum(case when qots.statusid=3\n then 1 else 0 end) as [# of Paused], sum(case when qots.statusid=5\n then 1 else 0 end) as [# of Pending Review],\n sum(case when qots.statusid=4 then 1 else 0 end) as [# of Completed]\n from QTIOnlineTestSession qots With (nolock)\n join student s With (nolock) on s.studentid=qots.studentid\n join district d With (nolock) on d.DistrictID=s.districtid\n where d.name not like '%demo%' and qots.StartDate>@weekstart\n and qots.StartDate<@weekend\n group by CONVERT(varchar(10), qots.startdate,101)\n order by CONVERT(varchar(10), qots.startdate,101)\"\"\"\n sql_4B = \"\"\"---- # of Online Test Sessions by Last Log In Time ---\n select CONVERT(varchar(10), qots.LastLoginDate,101) as\n [Date Last Log In], count(1) as [Total # of Online Tests],\n sum(case when qots.statusid=1 then 1 else 0 end) as [# of Created],\n sum(case when qots.statusid=2 then 1 else 0 end) as [# of Started],\n sum(case when qots.statusid=3 then 1 else 0 end) as [# of Paused],\n sum(case when qots.statusid=5 then 1 else 0 end) as\n [# of Pending Review], sum(case when qots.statusid=4 then 1 else 0\n end) as [# of Completed]\n from QTIOnlineTestSession qots With (nolock)\n join student s With (nolock) on s.studentid=qots.studentid\n join district d With (nolock) on d.DistrictID=s.districtid\n where d.name not like '%demo%' and qots.LastLoginDate>@weekstart\n and qots.LastLoginDate<@weekend\n group by CONVERT(varchar(10), qots.LastLoginDate,101)\n order by CONVERT(varchar(10), qots.LastLoginDate,101)\"\"\"\n sql_5 = \"\"\"-- # of Online Test Sessions by Hour by Last Log In Time\n select CONVERT(varchar(13), dateadd(hour, -4,qots.LastLoginDate),\n 120) as [Hour], count(1) as [Number of Sessions],\n SUM(case when d.DistrictID=2479 then 1 else 0 end) as [A Beka],\n sum(case when d.districtgroupid=112 then 1 else 0 end) as BEC,\n sum(case when d.name like '%frog street%' then 1 else 0 end) as\n [Frogstreet] from 
QTIOnlineTestSession qots With (nolock)\n join student s With (nolock) on s.studentid=qots.studentid\n join district d With (nolock) on d.DistrictID=s.districtid\n where d.name not like '%demo%' and qots.LastLoginDate>@weekstart\n and qots.LastLoginDate<@weekend\n group by CONVERT(varchar(13),\n dateadd(hour, -4,qots.LastLoginDate),120)\n order by count(1) desc\"\"\"\n sql_6A = \"\"\"---- # of Results Entry by Date\n select CONVERT(varchar(10), tr.UpdatedDate,101) as Date,\n COUNT(tr.testresultid) as Total from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart and tr.UpdatedDate<@weekend\n and d.Name not like '%demo%' and vt.virtualtestsourceid=3\n and vt.virtualtesttype in (1,5)\n group by CONVERT(varchar(10), tr.UpdatedDate, 101)\n order by CONVERT(varchar(10), tr.UpdatedDate, 101)\"\"\"\n sql_6B = \"\"\"---- # of Results Entry by District\n select st.Name as State, d.Name as District,\n COUNT(tr.testresultid) as Total from TestResult tr\n join VirtualTest vt on vt.VirtualTestID=tr.VirtualTestID\n join Student s on s.StudentID=tr.StudentID\n join District d on d.DistrictID=s.DistrictID\n join State st on st.StateID=d.StateID\n where tr.UpdatedDate>@weekstart and tr.UpdatedDate<@weekend\n and d.Name not like '%demo%' and vt.virtualtestsourceid=3\n and vt.virtualtesttype in (1,5)\n group by st.Name, d.Name\n order by COUNT(tr.testresultid) desc\"\"\"\n\n for week in dates.values():\n # Part 1A\n sql = sql_1A\n R = R1A\n C = C1\n N = \"# of Results by Date\"\n\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n\n # Write the data to the file\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n\n # Then formatted headers\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n # Add week label above table\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n # Add 'Total' row at bottom of table\n worksheet.write_string(R+8, C, 'Total', f_header)\n worksheet.write(R+8, C+1, df['Total'].sum(), f_header)\n worksheet.write(R+8, C+2, df['OnlineTests'].sum(), f_header)\n worksheet.write(R+8, C+3, df['BubbleSheets'].sum(), f_header)\n\n # Add weekly/yearly change\n if weekcount is 0:\n for i in range(4):\n worksheet.write(R+10, C+i, weekly_change[i], f_header_percent)\n worksheet.write(R+11, C+i, yearly_change[i], f_header_percent)\n\n # Part 1B\n sql = sql_1B\n R = R1B\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n # Add week label above table\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n\n # Add 'Total' row at bottom of table\n worksheet.write_string(R+8, C, 'Total', f_header)\n worksheet.write(R+8, C+1, df['Total'].sum(), f_header)\n worksheet.write(R+8, C+2, df['OnlineTests'].sum(), f_header)\n worksheet.write(R+8, C+3, df['BubbleSheets'].sum(), f_header)\n\n # Add weekly/yearly change\n if weekcount is 0:\n for i in range(4):\n worksheet.write(R+10, C+i,\n weekly_change[i].replace(\"10\", \"26\"),\n f_header_percent)\n worksheet.write(R+11, C+i,\n 
yearly_change[i].replace(\"10\", \"26\"),\n f_header_percent)\n\n if weekcount is 2:\n # Not counted for column widths, so we do it at the end\n long_cell = \"Without BEC, A Beka, A List, CEE, Frog Street\"\n worksheet.write(R1B-2, 0, long_cell, f_purple)\n worksheet.write(R1B-2, 1, \"\", f_purple)\n worksheet.write(R1B-2, 2, \"\", f_purple)\n worksheet.write(R1B-2, 3, \"\", f_purple)\n C1 = C1 + 5\n\n # Part 2\n sql = sql_2\n R = R2\n C = C2\n N = \"# of Results by Client\"\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Add '%' column\n tr_sum = df.TotalResults.sum(axis=0)\n df['%'] = (df['TotalResults']/tr_sum)\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n # Apply percent format to '%' column\n worksheet.set_column(C+5, C+5, None, f_percent)\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n C2 = C2 + 7\n\n # Part 3\n sql = sql_3\n R = R3\n C = C3\n N = \"# of LinkIt Benchmarks\"\n # worksheet = writer.sheets[N]\n\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Add 'Total' row at top\n df.loc[-1] = ['Total', '', df['TotalResults'].sum(),\n df['OnlineTests'].sum(), df['BubbleSheets'].sum()]\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n worksheet.set_row(R+1, None, f_header)\n\n C3 = C3 + 6\n # Part 4A\n sql = sql_4A\n R = R4A\n C = C4\n N = \"# of Online by Date\"\n\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-2, C, weekname[weekcount], f_week)\n worksheet.write_string(R-1, C, \"By Start Date\", f_header)\n\n # Part 4B\n sql = sql_4B\n R = R4B\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-2, C, weekname[weekcount], f_week)\n worksheet.write_string(R-1, C, \"By Last Login Date\", f_header)\n\n C4 = C4 + 8\n\n # Part 5\n sql = sql_5\n R = R5\n C = C5\n N = \"# of Online by Hour\"\n\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Add some columns at the end\n df['Others'] = (df['Number of Sessions']\n - (df['A Beka'] + df['BEC'] + df['Frogstreet']))\n df['% of A Beka'] = df['A Beka'] / df['Number of Sessions']\n df['% of BEC'] = df['BEC'] / df['Number of Sessions']\n df['% of Frog Street'] = df['Frogstreet'] / df['Number of Sessions']\n df['% of Others'] = df['Others'] / df['Number of Sessions']\n\n # Write the data to 
the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-2, C, weekname[weekcount], f_week)\n worksheet.write_string(R-1, C, \"By Last Login Date\", f_header)\n C5 = C5 + 11\n\n # Part 6A\n sql = sql_6A\n R = R6A\n C = C6\n N = \"# of Data Locker\"\n\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n worksheet = writer.sheets[N]\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n\n # Add Total to bottom\n worksheet.write_string(R+8, C, \"Total\", f_header)\n worksheet.write(R+8, C+1, df[\"Total\"].sum(), f_header)\n\n if weekcount == 0:\n worksheet.write_string(R-2, C, \"By Date\", f_week)\n\n # Part 6B\n sql = sql_6B\n R = R6B\n # Get data from database\n df = pd.read_sql(sql_week(sql, week), connection)\n # Write the data to the file, then formatted headers\n df.to_excel(writer, sheet_name=N, index=False, header=False,\n startrow=R+1, startcol=C)\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(R, C+col_num, value, f_header)\n\n worksheet.write_string(R-1, C, weekname[weekcount], f_week)\n if (weekcount == 0):\n worksheet.write_string(R-2, C, \"By Client\", f_week)\n # Adjust column widths\n C6 = C6 + 4\n\n weekcount = weekcount + 1\n writer.save()\n # Handle some special formatting by hijacking Excel.\n excel = win32.DispatchEx('Excel.Application')\n wb = excel.Workbooks.Open(path)\n for ws in wb.Worksheets:\n ws.Columns.AutoFit()\n ws = wb.Worksheets(\"# of Results by Date\")\n ws.Columns(1).ColumnWidth = 15\n ws = wb.Worksheets(\"# of Online by Hour\")\n ws.Range('G:J,R:U,AC:AF').NumberFormat = '0%'\n ws = wb.Worksheets(\"# of Data Locker\")\n ws.Range(\"A4:J10\")\n wb.Save()\n excel.Application.Quit()",
"def pdf_report_generate(self, cnx, mysql=False, postgres=False):\n\n # Instantiating the controller helper class.\n aux = ControllerHelper()\n\n ret = aux._EXIT_SUCCESS\n\n # Instantiating the model class.\n model = ReporterModel()\n\n # Retrieving a list of all data items stored in the database.\n# (hdr_set, row_set) = model.get_all_data_items(cnx, mysql)\n\n # Retrieving a list of data items for a given date period.\n (hdr_set, row_set) = model.get_data_items_by_date(self.FROM, self.TO,\n cnx, mysql, postgres)\n\n # In case of getting an empty result set, informing the user.\n if (not(row_set)):\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + aux._ERROR_NO_DATA)\n\n return ret\n\n # ---------------------------------------------------------------------\n # --- Debug output - Begin --------------------------------------------\n # ---------------------------------------------------------------------\n dbg_output = PrettyTable(hdr_set)\n\n # Populating table rows.\n # Note: For PostgreSQL and SQLite databases the following simple loop\n # between dash separators is quite sufficient,\n # but for MySQL database it needs to decode\n # row_set cells.\n i = 0\n\n # ---------------------------------------------------------------------\n if (not(mysql)):\n # Simply traversing through row_set rows.\n while (i < len(row_set)):\n dbg_output.add_row(row_set[i])\n\n i += 1\n # ---------------------------------------------------------------------\n else:\n # Traversing through row_set rows with cells post-processing.\n while (i < len(row_set)):\n row_ary = row_set[i]\n\n j = 0\n\n # Decoding row_set cells.\n while (j < len(hdr_set)):\n if ((j != 4) and (j != 5)):\n row_ary[j] = row_ary[j].decode()\n\n j += 1\n\n dbg_output.add_row(row_ary)\n\n i += 1\n\n # Left-aligning table columns.\n dbg_output.align=\"l\"\n\n print(dbg_output)\n\n print(str(len(row_set)) + self._ROWS_IN_SET_FOOTER + aux._NEW_LINE)\n # ---------------------------------------------------------------------\n # --- Debug output - End ----------------------------------------------\n # ---------------------------------------------------------------------\n\n time.sleep(1) # <== Waiting one second... just for fun... :-)... 
-- OK.\n\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - Begin -------------------------------\n # ---------------------------------------------------------------------\n pdf_report_path = self._get_pdf_report_path(__file__, aux)\n\n report = canvas.Canvas(pdf_report_path,\n pagesize=A4, # <== 210 x 297 mm.\n pdfVersion=(1, 4), # <== PDF version 1.4.\n # --- Page boxes ------------------------------------------------------\n# cropBox=( (10 / self.MM), (10 / self.MM), (200 / self.MM), (287 / self.MM)),\n# artBox=( (15 / self.MM), (15 / self.MM), (195 / self.MM), (282 / self.MM)),\n# trimBox=((210 / self.MM), (297 / self.MM) ),\n#bleedBox=( (5 / self.MM), (5 / self.MM), (205 / self.MM), (292 / self.MM))\n )\n\n # --- Report metadata -------------------------------------------------\n report.setTitle (self._REPORT_TITLE )\n report.setAuthor (self._REPORT_AUTHOR )\n report.setSubject (self._REPORT_SUBJECT )\n report.setKeywords(self._REPORT_KEYWORDS)\n report.setCreator (self._REPORT_CREATOR )\n\n # --- Page body (data) x MAX_PAGES ------------------------------------\n i = 0\n\n while (i < self.MAX_PAGES):\n ret = self._page_body_draw(report, hdr_set, row_set)\n\n if (ret == aux._EXIT_FAILURE):\n print(__name__ + aux._COLON_SPACE_SEP+aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP+aux._ERROR_NO_REPORT_GEN)\n\n return ret\n\n report.showPage()\n\n i += 1\n\n # Trying to save the report.\n try:\n report.save()\n except Exception as e:\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + str(e))\n\n return ret\n\n print(self._PDF_REPORT_SAVED_MSG + aux._COLON_SPACE_SEP\n + pdf_report_path)\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - End ---------------------------------\n # ---------------------------------------------------------------------\n\n return ret"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Here we define the configuration settings needed for all ingestion plugins with reasonable defaults. | def vdk_configure(self, config_builder: ConfigurationBuilder) -> None:
# Plugin-related configurations
config_builder.add(
key="INGEST_METHOD_DEFAULT",
default_value=None,
description="Default Ingestion method to be used.",
)
config_builder.add(
key="INGEST_TARGET_DEFAULT",
default_value=None,
description="Default Ingestion target to be used.",
)
# Configure ingestion specific environment variables
ingester_configuration.add_definitions(config_builder=config_builder) | [
"def _configure_plugin(self):\n # The execution setting.\n if 'Execution' in self.configuration:\n self.workflow.config['execution'] = self.configuration['Execution']\n self.logger.debug(\n \"Workflow %s execution parameters: %s.\" %\n (self.workflow.name, self.workflow.config['execution'])\n )\n\n # The Nipype plug-in parameters.\n if self.plug_in and self.plug_in in self.configuration:\n plug_in_opts = self.configuration[self.plug_in]\n opts = dict(plugin=self.plug_in, **plug_in_opts)\n self.logger.debug(\"Workflow %s %s plug-in parameters: %s.\" %\n (self.workflow.name, self.plug_in, opts))\n else:\n opts = {}\n\n return opts",
"def configure(self):",
"def get_default_config(self):\n config = super(MySQLCollector, self).get_default_config()\n config.update({\n 'path': 'mysql',\n # Connection settings\n 'hosts': [],\n\n # Which rows of 'SHOW GLOBAL STATUS' you would like to publish.\n # http://dev.mysql.com/doc/refman/5.1/en/show-status.html\n # Leave unset to publish all\n # 'publish': '',\n\n 'slave': False,\n 'master': False,\n 'innodb': False,\n })\n return config",
"def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config",
"def configure(plugins):\n global __plugins\n __plugins = plugins",
"def define_user_config(self) -> None:\n\n self.add_standard_metadata(\"infiles\")\n self.add_standard_metadata(\"outfile\")\n\n self.add_custom_metadata(name=\"output_format\",\n default=\"readable\",\n type=str)\n self.add_custom_metadata(name=\"brief\",\n short_name=\"b\",\n default=False,\n type=bool,\n action=\"store_const\",\n const=True)\n self.add_custom_metadata(name=\"read_limit\",\n default=-1,\n type=int)\n self.add_custom_metadata(name=\"column\",\n short_name=\"c\",\n type=str)\n self.add_custom_metadata(name=\"max_freq\",\n type=int)\n self.add_custom_metadata(name=\"column_types\",\n type=str)\n self.add_custom_metadata(name=\"metadata\",\n type=bool,\n action=\"store_const\",\n const=True)\n self.add_custom_metadata(name=\"schema_id\",\n type=int)\n self.add_custom_metadata(name=\"collection_id\",\n type=int)\n\n self.add_standard_metadata(\"verbosity\")\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()",
"def build_config(self):\n config = agent_config.Plugins()\n parameters = {'name': self.CHECK_NAME}\n if self.args:\n for arg in ('metrics_files', 'subcommands', 'suppress_ok'):\n if arg in self.args:\n parameters[arg] = self.args.get(arg)\n\n # set service and component\n dimensions = _get_dimensions('object-storage', None)\n if len(dimensions) > 0:\n parameters['dimensions'] = dimensions\n\n config[self.CHECK_NAME] = {'init_config': None,\n 'instances': [parameters]}\n return config",
"def _load_config(self, config):\n\n\n # now config for this class\n # first the defaults\n this_config = {}\n this_config.update(DEFAULT_MAKER_CONFIG)\n\n # now override\n if config is not None:\n this_config.update(config)\n\n # first load the defaults from the parent\n super(DMMedsMaker,self)._load_config(this_config)",
"def configure(self, config):\n pass",
"def _process_config(self):\n self._user_agent_adding_config = botocore.config.Config(user_agent_extra=USER_AGENT_SUFFIX)\n\n if self.config.region_names:\n self.add_regional_clients_from_list(self.config.region_names)\n self.default_region = self.config.region_names[0]\n else:\n self.default_region = self.config.botocore_session.get_config_variable(\"region\")\n if self.default_region is not None:\n self.add_regional_client(self.default_region)\n\n if self.config.key_ids:\n self.add_master_keys_from_list(self.config.key_ids)",
"def process_config(self):\n pass",
"def define_user_config(self) -> None:\n self.add_standard_metadata('infiles')\n\n self.add_custom_metadata(name='key_cols',\n short_name='k',\n required=True,\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='compare_cols',\n short_name='c',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='ignore_cols',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='col_names',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='variables',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='already_sorted',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='already_uniq',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='temp_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='out_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='assignments',\n default=[],\n type=list)\n\n self.add_standard_metadata('verbosity')\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()",
"def define_user_config(self) -> None:\n\n self.add_standard_metadata('infiles')\n self.add_standard_metadata('outfile')\n\n self.add_custom_metadata(name='errfile',\n short_name='e',\n default='-',\n type=str)\n\n self.add_custom_metadata(name='valid_schema',\n type=str)\n\n self.add_custom_metadata(name='random_out',\n type=float,\n default=1.0)\n\n self.add_custom_metadata(name='field_cnt',\n short_name='f',\n type=int)\n\n self.add_custom_metadata(name='err_out_fields',\n type=bool,\n default=False,\n action='store_const',\n const=True)\n\n self.add_custom_metadata(name='err_out_text',\n type=bool,\n default=False,\n action='store_const',\n const=True)\n\n self.add_standard_metadata('verbosity')\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()",
"def _configure(self):\n path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'config.yml'\n )\n\n with open(path) as file:\n defaultconfig = yaml.load(file)\n\n self.config = merge_dict(self.config, defaultconfig)\n\n if 'logging' in self.config:\n logging.config.dictConfig(self.config['logging'])\n else:\n logging.getLogger('sirbot').setLevel('INFO')",
"def setIngestConfig(config, retarget=True):\n if retarget:\n config.parse.retarget(PfsParseTask)\n config.register.retarget(PfsRegisterTask)\n config.register.columns = {'site': 'text', # J: JHU, L: LAM, X: Subaru offline, I: IPMU, A: ASIAA,\n # S: Summit, P: Princeton, F: simulation (fake)\n 'category': 'text', # A: science, B: NTR, C: Meterology, D: HG\n 'field': 'text', # Observation name\n 'visit': 'int', # Required because hard-coded in LSST's CameraMapper\n 'ccd': 'int', # [0-11]\n 'filter': 'text', # b: blue, r: red, n: nir, m: medium resolution red\n 'arm': 'text', # b: blue, r: red, n: nir, m: medium resolution red\n 'spectrograph': 'int', # Spectrograph module: 1-4\n 'dateObs': 'text', # Date of observation; used for filenames\n 'expTime': 'double', # Exposure time\n 'dataType': 'text', # Type of exposure\n 'taiObs': 'text', # Date+time of observation; used for finding calibs\n 'pfsDesignId': 'int', # Configuration of the top-end\n 'slitOffset': 'double', # Horizontal slit offset; kept for backwards compat.\n 'dither': 'double', # Slit offset in spatial dimension\n 'shift': 'double', # Slit offset in spectral dimension\n 'focus': 'double', # Focus offset\n 'lamps': 'text', # Lamps that are lit\n 'attenuator': 'double', # Attenuator setting\n 'photodiode': 'double', # Photodiode reading (cd/m^2)\n }\n config.register.unique = ['site', 'category', 'visit', 'filter', 'arm', 'spectrograph',\n 'pfsDesignId']\n config.register.visit = ['visit', 'field', 'dateObs', 'taiObs', 'pfsDesignId',\n 'slitOffset', 'dither', 'shift', 'focus',\n 'lamps', 'attenuator', 'photodiode',\n ]\n\n config.parse.translation = {'expTime': 'EXPTIME',\n }\n config.parse.defaults = {'ccdTemp': \"0\", # Added in commissioning run 3\n }",
"async def _configure_plugins(self) -> None:\n logger.debug('Configuring plugins')\n funcs = [\n info['plugin'].configure(\n config=info['config'],\n session=self._session,\n router=self.app.router\n )\n for info in self._plugins.values()\n ]\n\n if funcs:\n await asyncio.gather(*funcs, loop=self._loop)\n logger.debug('Plugins configured')",
"def _initialize_default_config(self) -> NoReturn:\n self[AcceptedKey.CONFIG][\n ConfigKey.ENABLE_SAMPLING\n ] = DefaultConfig.ENABLE_SAMPLING\n self[AcceptedKey.CONFIG][\n ConfigKey.SAMPLING_DATASET_SIZE_THRESHOLD\n ] = DefaultConfig.SAMPLING_DATASET_SIZE_THRESHOLD\n self[AcceptedKey.CONFIG][\n ConfigKey.SAMPLING_WITH_REPLACEMENT\n ] = DefaultConfig.SAMPLING_WITH_REPLACEMENT\n self[AcceptedKey.CONFIG][\n ConfigKey.SAMPLING_FRACTION\n ] = DefaultConfig.SAMPLING_FRACTION\n self[AcceptedKey.CONFIG][ConfigKey.N_JOBS] = DefaultConfig.N_JOBS",
"def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)",
"def __init__(self, *args, **kwargs):\n self.plugin_configs = kwargs.pop('plugin_configs', None)\n super().__init__(*args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a single TW task as an Albert Item. | def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore
field = get_as_subtext_field
task_id = tw_side.get_task_id(task)
actions = [
FuncAction(
"Complete task",
lambda args_list=["done", task_id]: run_tw_action(args_list),
),
FuncAction(
"Delete task",
lambda args_list=["delete", task_id]: run_tw_action(args_list),
),
FuncAction(
"Start task",
lambda args_list=["start", task_id]: run_tw_action(args_list),
),
FuncAction(
"Stop task",
lambda args_list=["stop", task_id]: run_tw_action(args_list),
),
FuncAction(
"Edit task interactively",
lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True),
),
FuncAction(
"Fail task",
lambda task_id=task_id: fail_task(task_id=task_id),
),
ClipAction("Copy task UUID", f"{task_id}"),
]
found_urls = url_re.findall(task["description"])
if "annotations" in task.keys():
found_urls.extend(url_re.findall(" ".join(task["annotations"])))
for url in found_urls[-1::-1]:
actions.insert(0, UrlAction(f"Open {url}", url))
if reminders_tag_path.is_file():
global reminders_tag
reminders_tag = load_data(reminders_tag_path)
else:
save_data("remindme", str(reminders_tag_path))
actions.append(
FuncAction(
f"Add to Reminders (+{reminders_tag})",
lambda args_list=[
"modify",
task_id,
f"+{reminders_tag}",
]: run_tw_action(args_list),
)
)
actions.append(
FuncAction(
"Work on next (+next)",
lambda args_list=[
"modify",
task_id,
"+next",
]: run_tw_action(args_list),
)
)
urgency_str, icon = urgency_to_visuals(task.get("urgency"))
text = task["description"]
due = None
if "due" in task:
due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore
return get_as_item(
text=text,
subtext="{}{}{}{}{}".format(
field(urgency_str),
"ID: {}... | ".format(tw_side.get_task_id(task)[:8]),
field(task["status"]),
field(task.get("tags"), "tags"),
field(due, "due"),
)[:-2],
icon=[str(icon)],
completion=f'{curr_trigger}{task["description"]}',
actions=actions,
urgency=task.get("urgency"),
) | [
"async def get_task(self, task_id: str) -> Task:",
"def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)",
"def _get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)",
"def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")",
"def get_item(self, item_id):\n url = self._make_item_endpoint(item_id)\n story = self._request(url)\n if story != None and story.get(\"by\"):\n by = str(story[\"by\"])\n return self._build_hnitem(story)",
"def get_item(self, id: str, user: User) -> Optional[T]:",
"def get(self, guid):\n results = j.sal.fs.find(self._root, '*_%s' % guid)\n if len(results) <= 0:\n raise TaskNotFoundError(\"task %s not found\" % guid)\n if len(results) > 1:\n raise RuntimeError(\"found 2 tasks with same guid, this should not happen\")\n return self._deserialize_task(j.sal.fs.readFile(results[0]))",
"def get_specific_task(self, taskID):\n route = f\"wiki/rest/api/longtask/{taskID}\"\n return self.get(route=route)",
"def get_task_by_tid(self, tid):\n return self.task_controller.get_task(tid)",
"def getTask():\n content = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\n print(\"Task call %s\\n\" % content)\n if content == \"null\":\n return None\n else:\n return json.loads(content)",
"def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError",
"def get_by_name(task_name):\n return task_service.get_by_name(request, task_name)",
"def get_by_name(task_name):\n return tasks.find_one({'name': task_name})",
"def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)",
"def get_item(self):\n item = self.queue.get()\n print(f'Item {item.id} received by {self.name}')\n return item",
"def get_task(self):\n return self.task",
"def get_item_by_id(item_id):\n print('-------------------- Item - get_item_by_id')\n return Item.query.filter_by(id=item_id).first()",
"def read(self, request, task_id=None):\n base = Task.objects\n\n if task_id:\n return base.get(pk= task_id)\n\n else:\n return base.all() # or base.filter(...) can be used here",
"def alias_item(self, alias):\n ident = self.alias[alias]\n return self.items[ident]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether the current query is for a subcommand. If so, return the corresponding SubcommandQuery object. | def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:
if not query_str:
return None
    # split:
# "subcommand_name rest of query" -> ["subcommand_name", "rest of query""]
query_parts = query_str.strip().split(None, maxsplit=1)
if len(query_parts) < 2:
query_str = ""
else:
query_str = query_parts[1]
subcommand = get_subcommand_for_name(query_parts[0])
if subcommand:
return SubcommandQuery(subcommand=subcommand, query=query_str) | [
"def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False",
"def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None",
"def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None",
"def subcmd(self) -> Optional[str]:\n return self._subcmd",
"def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)",
"def _appears_as_command(self, path):\n return self._is_driver_command(path) or self._is_regular_command(path)",
"def is_sub_operation(operation_id):\n return get_mother_operation() is not None",
"def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise",
"def __Ancestor(self, flag):\n command = self._parent\n while command:\n if flag in command.flags:\n return True\n command = command._parent # pylint: disable=protected-access\n return False",
"def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False",
"def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass",
"def test_subcommand_help():\n help_check(SUBCOMMAND)",
"def is_command(self):\n return self.element_type == ElementType.COMMAND",
"def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass",
"def get_subcommand(\n parser: \"ArgumentParser\",\n cfg: Namespace,\n prefix: str = \"\",\n fail_no_subcommand: bool = True,\n ) -> Tuple[Optional[str], Optional[\"ArgumentParser\"]]:\n subcommands, subparsers = _ActionSubCommands.get_subcommands(\n parser,\n cfg,\n prefix=prefix,\n fail_no_subcommand=fail_no_subcommand,\n )\n return subcommands[0] if subcommands else None, subparsers[0] if subparsers else None",
"def is_command_response(schema_obj):\n\n return isinstance(schema_obj, schema.CommandResponse)",
"def is_Q(self):\n return isinstance(self,Q)",
"def is_device_command(self):\n if len(self.params) == 0:\n return False\n\n first_param = self.params[0]\n # See: https://cgit.freedesktop.org/mesa/mesa/tree/src/intel/vulkan/anv_entrypoints_gen.py#n434\n return first_param.type.type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')",
"def subcommand(cls, name, subapp = None):\r\n def wrapper(subapp):\r\n attrname = \"_subcommand_%s\" % (subapp if isinstance(subapp, str) else subapp.__name__,)\r\n setattr(cls, attrname, Subcommand(name, subapp))\r\n return subapp\r\n return wrapper(subapp) if subapp else wrapper"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens and reads the parameters in the [SUBCATCHMENTS] and [SUBAREAS] headers within the SWMM input file. Adds these parameters (as floats) to a numpy array. | def read_initial_parameters(inputfilename):
subc_params = []
subarea_params = []
global subc_names
subc_names = []
subcatchment_parameters = []
inputfile = open(inputfilename, 'r')
for line in inputfile:
if(line.find("[SUBCATCHMENTS]") != -1):
line = inputfile.readline()
for i in range(count):
templine = list(line)
if templine[0] == ";" or templine[0] == " " or len(templine) < 10:
line = inputfile.readline()
continue
elif (line.find("[") != -1):
break
else:
linesplit = line.split()
subc_params.append(linesplit[4:7])
subc_names.append(linesplit[0])
line = inputfile.readline()
if (line.find("[SUBAREAS]") != -1):
line = inputfile.readline()
for i in range(count):
templine = list(line)
if templine[0] == ";" or templine[0] == " " or len(templine) < 10:
line = inputfile.readline()
continue
elif (line.find("[") != -1):
break
else:
linesplit = line.split()
subarea_params.append(linesplit[1:6])
line = inputfile.readline()
inputfile.close()
#Part of the function that experiments with np array. Potentially removes the need for the list transformation
# functions that chew up a lot of time. Each subcatchment has a row, each parameter type has a column.
global subcatchment_parameters_np
    subcatchment_parameters_np = np.empty((len(subc_params), len(subc_params[0]) + len(subarea_params[0])), dtype=float)
for row in range(len(subc_params)):
for col in range(len(subc_params[0])):
subcatchment_parameters_np[row, col] = float(subc_params[row][col])
for row in range(len(subarea_params)):
for col in range(len(subarea_params[0])):
subcatchment_parameters_np[row, col + len(subc_params[0])] = float(subarea_params[row][col])
#Old string code
# for i in range(len(subc_params)):
# for j in range(len(subarea_params[i])):
# subc_params[i].append(subarea_params[i][j])
# subcatchment_parameters.append(subc_params[i])
    return(subcatchment_parameters_np) | [
"def subcatch(ini_file='subcatch.ini'):\n config.read(ini_file)\n print 'Read the file ', ini_file\n\n file_in = config.get('file_in', 'file_in')\n\n file_out = config.get('file_out', 'file_out')\n\n picture_out = config.get('picture_out', 'picture_out')\n\n Xoutlet = config.getfloat('coord_outlet', 'Xoutlet')\n Youtlet = config.getfloat('coord_outlet', 'Youtlet')\n\n nb_param = config.getfloat('flags', 'nb_param')\n X = config.getfloat('flags', 'X')\n\n #Reading of parameter file\n print 'Reading parameter file'\n ar_cell_label, ar_coorx, ar_coory, ar_lambda, ar_Xc, ar_dam, ar_tan_beta, \\\n ar_tan_beta_channel, ar_L, ar_Ks, ar_theta_r, ar_theta_s, ar_n_o, ar_n_c, \\\n ar_cell_down, ar_pVs_t0, ar_Vo_t0, ar_Qc_t0, ar_kc \\\n = pm.read_cell_parameters(file_in)\n\n #Search for the cell close to the coordinates\n print 'Search for the outlet cell'\n cell_outlet = find_cell_coordinates(ar_cell_label, Xoutlet,\n Youtlet, ar_coorx, ar_coory, ar_lambda)\n\n #Search for the catchment cells\n print 'Search for the catchment cells'\n subcatch_label = all_up_cell(cell_outlet, ar_cell_down, ar_cell_label)\n\n #Select the subcatchmnent parameters\n print 'Select the subcatchmnent parameters'\n tab_param = np.zeros((len(subcatch_label),nb_param))\n new_label = np.arange(len(subcatch_label))\n\n tab_param[:,0] = new_label#ar_cell_label[subcatch_label]\n tab_param[:,1] = ar_coorx[subcatch_label]\n tab_param[:,2] = ar_coory[subcatch_label]\n tab_param[:,3] = ar_lambda[subcatch_label]\n tab_param[:,4] = ar_Xc[subcatch_label]\n tab_param[:,5] = ar_dam[subcatch_label]\n tab_param[:,6] = ar_tan_beta[subcatch_label]\n tab_param[:,7] = ar_tan_beta_channel[subcatch_label]\n tab_param[:,8] = ar_L[subcatch_label]\n tab_param[:,9] = ar_Ks[subcatch_label]\n tab_param[:,10] = ar_theta_r[subcatch_label]\n tab_param[:,11] = ar_theta_s[subcatch_label]\n tab_param[:,12] = ar_n_o[subcatch_label]\n tab_param[:,13] = ar_n_c[subcatch_label]\n for i in range(len(subcatch_label)):\n if i == 0:\n tab_param[i,14] = -9999.0\n else:\n ind = np.where(ar_cell_label[subcatch_label]\n == ar_cell_down[subcatch_label][i])\n\n tab_param[i,14] = new_label[ind]\n\n tab_param[:,15]=ar_pVs_t0[subcatch_label]\n tab_param[:,16]=ar_Vo_t0[subcatch_label]\n tab_param[:,17]=ar_Qc_t0[subcatch_label]\n tab_param[:,18]=ar_kc[subcatch_label]\n\n #~~~~~~Write parameter file~~~~~~#\n np.savetxt(file_out, tab_param)\n\n ar_image=ar_cell_label*0.\n ar_image[subcatch_label]=1.\n ar_image[ar_lambda==1.]=10.\n ar_image[cell_outlet]=5.\n field_map(ar_image, ar_coorx, ar_coory, X, picture_out, 'Subcatchment')",
"def read_subarray_description(filename, subarray_name='LST-1'):\n tel_pos = read_telescopes_positions(filename)\n tel_descrp = read_telescopes_descriptions(filename)\n return SubarrayDescription(subarray_name, tel_positions=tel_pos, tel_descriptions=tel_descrp)",
"def read_spectra(study_identifier):\r\n\tfile_list = read_file_list(study_identifier)\r\n\tzip_file = ZipFile(file_list[0,1], 'r')\r\n\tlocal = file_list[0,0]+'/10/pdata/1/1r'\r\n\tfile_path = zip_file.extract(local)\r\n\tarr = read_intensities(file_path)\r\n\tarr_c = np.zeros((len(file_list),len(arr)))\r\n\tarr_c[0,:] = arr\r\n\tfor ix in range(1,len(file_list)):\r\n\t\tzip_file = ZipFile(file_list[ix,1], 'r')\r\n\t\tlocal = file_list[ix,0]+'/10/pdata/1/1r'\r\n\t\tfile_path = zip_file.extract(local)\r\n\t\tarr_c[ix,:] = read_intensities(file_path)\r\n\treturn arr_c",
"def getParametersfromHDR(self, inHeaderFile):\n if(os.path.isfile(inHeaderFile)):\n datatype = 'INTEGER*2'\n try:\n parFile = open(inHeaderFile, 'rU') \n for eachLine in parFile:\n #print eachLine\n count = eachLine.count('=')\n #print 'count = ' + str(count)\n if(count == 1):\n elements = eachLine.split('=', count)\n elements[0] = elements[0].strip()\n elements[1] = elements[1].strip()\n if elements[0] == 'samples':\n self.headerparameters.append(elements[1])\n elif elements[0] == 'lines':\n self.headerparameters.append(elements[1])\n elif elements[0] == 'data type': \n datatypeENVI = int(elements[1])\n elif elements[0] == 'map info':\n elements[1] = re.sub('\\{','',elements[1])\n elements[1] = re.sub('\\}','',elements[1])\n elements[1] = re.sub('\\s+','',elements[1])\n \n count = elements[1].count(',')\n mapElements = elements[1].split(',', count)\n i = 0\n while i < count:\n self.headerparameters.append(mapElements[i])\n i = i + 1\n \n parFile.close()\n \n # Convert ENVI to Gamma data type\n datatype = self.envi2gammaDataType(datatypeENVI)\n \n self.headerparameters.append(datatype)\n \n except IOError as e:\n print('\\nCould not open file: ', e)\n raise IOError(e)\n else:\n raise BaseException",
"def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set",
"def read_Hrrr_spec_loc(filename, parameters = [''],loc = [-97.485,36.605], max = False):\n \n myfile = pygrib.open(filename) \n parameterlist = ['Geopotential Height','Temperature','Relative humidity','Dew point temperature',\n 'Specific humidity','Vertical velocity','U component of wind','V component of wind',\n 'Absolute vorticity','Cloud mixing ratio','Cloud Ice','Rain mixing ratio','Snow mixing ratio',\n 'Graupel (snow pellets)'] \n \n if parameters != ['']:\n for i in range(len(parameters)):\n x = parameterlist.count(parameters[i])\n if x == 0: \n print 'requested parameter not in list'\n print parameters[i] \n parameterlist = parameters[:]\n \n \n data = []\n grb = myfile.select(name = parameterlist[0]) \n grb_cube = grb_to_grid(grb)\n dataloc = np.array(grb[0].latlons())\n datah = grb_cube['levels']\n units = []\n \n x = abs(dataloc[0]-loc[0])\n y = abs(dataloc[1]-loc[1])\n xy = x+y\n xymin = min(xy.flatten())\n xy2 = xy.flatten().tolist()\n xyflatindex = xy2.index(xymin)\n [ysize,xsize] = dataloc[0].shape\n zsize = len(grb_cube['levels'])\n xyindex = [xyflatindex/xsize, xyflatindex%xsize]\n \n \n for p in parameterlist:\n grb = myfile.select(name = p)\n grb_cube = grb_to_grid(grb)\n if not max:\n newshape = grb_cube['data'].reshape([ysize,xsize,zsize])\n data.append(newshape[xyindex[0]][xyindex[1]][:])\n else:\n newshape = grb_cube['data'].reshape([ysize,xsize,zsize])\n data.append(newshape[xyindex[0]][xyindex[1]][:].max(axis=0))\n units.append(grb_cube['units'])\n \n \n return [data,parameterlist,datah,loc,units]",
"def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! 
I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. %s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])",
"def loadCwEPRSpec(fileName,doNormalize = True, resample=False): #{{{\n # Open the spc and par files and pull the data and relevant parameters\n #try:\n expDict = returnEPRExpDict(fileName)\n specData = fromfile(fileName+'.spc','<f') # read the spc\n sizeY = expDict.get('SSY')\n xU = 'field'\n if sizeY: # this is a two dimensional data set\n sizeY = int(sizeY)\n sizeX = int(expDict.get('SSX'))\n yU = expDict.get('XYUN')\n specData = specData.reshape((sizeY,sizeX))\n if expDict.get('HCF'):\n centerSet = float(expDict.get('HCF'))\n elif expDict.get('XXLB'):\n lowBound = float(expDict.get('XXLB'))\n width = float(expDict.get('XXWI'))\n centerSet = lowBound + width/2.\n else:\n centerSet = float(expDict.get('GST'))\n \n sweepWidth = float(expDict.get('HSW'))\n if doNormalize:\n numScans = expDict.get('JNS') # I'm not sure if this is right\n if numScans:\n numScans = float(numScans)\n else:\n numScans = 1\n specData /= numScans # normalize by number of scans\n if expDict.get('RRG'):\n rg = float(expDict.get('RRG'))\n modAmp = float(expDict.get('RMA'))\n specData /= modAmp # normalize by modulation amplitude\n specData /= rg # normalize by receiver gain\n normalized = 'good'\n else:\n normalized = 'bad'\n else:\n normalized = 'None'\n #except:\n # expDict = returnEPRExpDictDSC(fileName)\n # specData = fromfile(fileName+'.DTA','>c') # or if it is a DTA file read that instead\n # centerSet = float(expDict.get('CenterField').split(' ')[0])\n # sweepWidth = float(expDict.get('SweepWidth').split(' ')[0])\n # numScans = float(expDict.get('NbScansAcc')) # Yea bruker just changes things...\n # rg = float(expDict.get('RCAG'))\n # if doNormalize:\n # specData /= rg\n # normalized = 'good'\n # sizeY = False\n\n # calculate the field values and normalize by the number of scans and the receiver gain and return an nddata\n # The data is two dimensional so find second dimension and \n if sizeY:\n fieldVals = pys.r_[centerSet-sweepWidth/2.:centerSet+sweepWidth/2.:sizeX*1j]\n LB = float(expDict.get('XYLB'))\n width = float(expDict.get('XYWI'))\n yDim = pys.r_[LB : LB + width : sizeY*1j]\n if yU == 'dB': # Change it to power mW.\n yDim = 197.9 * 10**(-1*yDim / 10)\n yU = 'mW'\n\n dataShape = pys.ndshape([sizeY, sizeX],[yU, xU])\n data = dataShape.alloc(dtype='float')\n data.data = specData\n spec = data\n spec.labels([yU, xU],[yDim, fieldVals])\n else:\n fieldVals = pys.r_[centerSet-sweepWidth/2.:centerSet+sweepWidth/2.:len(specData)*1j]\n spec = pys.nddata(specData).rename('value',xU).labels(xU,fieldVals)\n if resample:\n # down sample the data to 512. This is for output to the multicomponent fitting program.\n newField = pys.r_[centerSet-sweepWidth/2.:centerSet+sweepWidth/2.:512*1j]\n spec = spec.interp(xU,newField)\n spec.other_info = expDict\n return spec,normalized #}}}",
"def read_iai_param(self):\n\n shear_mod_file = self.directory + 'EXTRA_current_shear_modulus_sem2d.dat'\n dev_stress_file = self.directory + 'EXTRA_deviatoric_stress_sem2d.dat'\n s_param_file = self.directory + 'EXTRA_S_parameter_sem2d.dat'\n\n if os.path.isfile(shear_mod_file):\n with open(shear_mod_file, 'rb') as sid:\n shear_mod = np.fromfile(sid,np.float32)\n with open(dev_stress_file, 'rb') as sid:\n deviatoric_stress = np.fromfile(sid, np.float32)\n with open(s_param_file, 'rb') as sid:\n s_param = np.fromfile(sid,np.float32)\n\n l = len(shear_mod)\n\n assert self.npts == (l/self.xsta), 'Recording error'\n\n self.shear_mod = np.zeros( (self.npts,self.xsta) )\n self.deviatoric_stress = np.zeros( (self.npts,self.xsta) )\n self.s_param = np.zeros( (self.npts,self.xsta) )\n\n for i in range( int(l/self.xsta) ):\n limit1 = i * self.xsta\n limit2 = (i+1) * self.xsta\n\n self.shear_mod[i,:] = shear_mod[limit1:limit2]\n self.deviatoric_stress[i,:] = deviatoric_stress[limit1:limit2]\n self.s_param[i,:] = s_param[limit1:limit2]\n\n else :\n print('No Iai model parameter files found')",
"def read_ised(self,filename):\n\n with open(filename,'rb') as f:\n check = array.array('i')\n check.fromfile(f,2)\n \n if check[1] == 221:\n ksl, ksi = 2, 1\n F_l, F_i = 3, 2\n else:\n ksl, ksi = 3, 2\n F_l, F_i = 5, 4\n \n with open(filename,'rb') as f:\n ks = array.array('i')\n ks.fromfile(f,ksl)\n\n ta = array.array('f')\n ta.fromfile(f,ks[ksi])\n self.ta = numpy.array(ta)\n\n tmp = array.array('i')\n tmp.fromfile(f,3)\n self.ml,self.mul,iseg = tmp\n\n if iseg > 0:\n tmp = array.array('f')\n tmp.fromfile(f,iseg*6)\n\n tmp = array.array('f')\n tmp.fromfile(f,5)\n self.totm, self.totn, self.avs, self.jo, self.tauo = tmp\n\n\n self.ids= array.array('c')\n self.ids.fromfile(f,80)\n\n tmp = array.array('f')\n tmp.fromfile(f,4)\n self.tcut = tmp[0]\n self.ttt = tmp[1:]\n\n ids = array.array('c')\n ids.fromfile(f,80)\n\n self.ids = array.array('c')\n self.ids.fromfile(f,80)\n\n self.igw = array.array('i')\n self.igw.fromfile(f,1)\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.iw = array.array('i')\n self.iw.fromfile(f,1)\n\n wave = array.array('f')\n wave.fromfile(f,self.iw[0])\n self.wave = numpy.array(wave)\n\n #SED Section\n self.F = array.array('i')\n self.F.fromfile(f,F_l)\n self.iw = self.F[F_i] #Number of wavelength elements\n\n self.sed = numpy.zeros((self.iw,ks[ksi]),dtype=numpy.float32)\n G = array.array('f')\n G.fromfile(f,self.iw)\n self.sed[:,0] = G\n ik = array.array('i')\n ik.fromfile(f,1)\n\n self.h = numpy.empty((ik[0],ks[ksi]),'f')\n H = array.array('f')\n H.fromfile(f,ik[0])\n self.h[:,0] = H\n\n for i in range(1,ks[ksi]): #Fill rest of array with SEDs\n F = array.array('i')\n F.fromfile(f,F_l)\n iw = F[F_i]\n\n G = array.array('f')\n G.fromfile(f,iw)\n self.sed[:,i] = G\n ik = array.array('i')\n ik.fromfile(f,1)\n\n H = array.array('f')\n H.fromfile(f,ik[0])\n self.h[:,i] = H\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.bflx = array.array('f')\n self.bflx.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n strm = array.array('f')\n strm.fromfile(f,tmp[F_i])\n self.strm = numpy.array(strm)\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.evf = array.array('f')\n self.evf.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.evf = array.array('f')\n self.evf.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.snr = array.array('f')\n self.snr.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.pnr = array.array('f')\n self.pnr.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.sn = array.array('f')\n self.sn.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.bh = array.array('f')\n self.bh.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.wd = array.array('f')\n self.wd.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n rmtm = array.array('f')\n rmtm.fromfile(f,tmp[F_i])\n self.rmtm = numpy.array(rmtm)",
"def load_parameters(self): \r\n \r\n file_choices = \"TXT (*.txt)|*.txt\"\r\n input_file = QFileDialog.getOpenFileName(self, 'open file', '', file_choices)\r\n with open(input_file[0]) as f:\r\n parameters = f.readlines()\r\n interv = parameters[0].split('_')\r\n self.interval.setText(interv[0])\r\n ps = parameters[1].split('_')\r\n self.pixel_size.setText(ps[0])\r\n smo = parameters[2].split('_')\r\n self.smoothing.setText(smo[0])\r\n ce = parameters[3].split('_')\r\n self.collapse_limit.setText(ce[0])\r\n self.on_draw()",
"def read(self, run):\n # read the file\n self['run'] = run[0:run.rfind('.xml')]\n f = open(run)\n for line in f:\n \n if line.find('SDSU Exec') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n self['application'] = line[n1:n2]\n\n elif line.find('<detector_status') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n if line[n1:n2] != 'Ultraspec':\n raise Exception, 'Run ' + run + ' is not an Ultraspec file.'\n \n elif line.find('SPEED') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['speed'] = line[n1:n2]\n \n elif line.find('X_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x_bin'] = line[n1:n2]\n \n elif line.find('Y_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y_bin'] = line[n1:n2]\n \n # first window \n \n elif line.find('X1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_start'] = line[n1:n2]\n \n elif line.find('X1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_size'] = line[n1:n2]\n \n elif line.find('Y1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_start'] = line[n1:n2]\n \n elif line.find('Y1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_size'] = line[n1:n2]\n \n # second window\n \n elif line.find('X2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_start'] = line[n1:n2]\n \n elif line.find('X2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_size'] = line[n1:n2]\n \n elif line.find('Y2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_start'] = line[n1:n2]\n \n elif line.find('Y2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_size'] = line[n1:n2]\n \n elif line.find('<target>') >= 0:\n n1 = line.index('target') + 7\n n2 = line.index('<', n1)\n self['target'] = line[n1:n2]\n\n elif line.find('<grating>') >= 0:\n n1 = line.index('grating') + 8\n n2 = line.index('<', n1)\n self['grating'] = line[n1:n2]\n\n elif line.find('<slit_width>') >= 0:\n n1 = line.index('slit_width') + 11\n n2 = line.index('<', n1)\n self['slit_width'] = line[n1:n2]\n\n elif line.find('<slit_angle>') >= 0:\n n1 = line.index('slit_angle') + 11\n n2 = line.index('<', n1)\n self['slit_angle'] = line[n1:n2]\n \n elif line.find('<filters>') >= 0:\n n1 = line.index('filters') + 8\n n2 = line.index('<', n1)\n self['filters'] = line[n1:n2]\n\n elif line.find('<ID>') >= 0:\n n1 = line.index('ID') + 3\n n2 = line.index('<', n1)\n self['ID'] = line[n1:n2]\n\n elif line.find('<PI>') >= 0:\n n1 = line.index('PI') + 3\n n2 = line.index('<', n1)\n self['PI'] = line[n1:n2]\n\n elif line.find('<comment>') >= 0:\n n1 = line.index('comment') + 8\n n2 = line.index('<', n1)\n self['comment'] = line[n1:n2]\n \n\n # check that we have found what we expected to find\n if 'application' not in self:\n raise Exception, 'Failed to find application name in ' + run\n\n if self.is_not_power_onoff():\n\n if 'x_bin' not in self:\n raise Exception, 'Failed to find X_BIN in ' + run\n\n if 'y_bin' not in self:\n raise Exception, 'Failed to find Y_BIN in ' + run\n\n if 'x1_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x1_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y1_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 
'y1_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'x2_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x2_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y2_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y2_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'target' not in self:\n self['target'] = 'UNKNOWN'\n\n if 'filters' not in self:\n self['filters'] = '---'\n\n if 'grating' not in self:\n self['grating'] = '---'\n\n if 'slit_width' not in self:\n self['slit_width'] = '---'\n\n if 'slit_angle' not in self:\n self['slit_angle'] = '---'\n\n if 'ID' not in self:\n self['ID'] = 'UNKNOWN'\n\n if 'PI' not in self:\n self['PI'] = 'UNKNOWN'",
"def __read_header(self):\n\n filename = self.directory + 'SeisHeader_sem2d.hdr'\n try :\n f = open(filename, 'r')\n except:\n msg = 'No Header file <SeisHeader_sem2d.hdr> in directory'\n print(msg)\n answer = input(\"Do you want to continue [Y/N] : \")\n if answer.upper() == 'Y':\n return\n else:\n sys.exit()\n\n f.readline()\n string = f.readline()\n header_line = string.rstrip(\" \").split()\n\n self.dt = float(header_line[0])\n self.npts = int(header_line[1])\n self.nsta = int(header_line[2])\n\n # Seismos\n f.readline()\n self.rcoord = np.zeros((self.nsta,2))\n for reciever in np.arange(self.nsta):\n string = f.readline()\n reciever_line = string.rstrip(\" \").split()\n # x-coord\n self.rcoord[reciever,0] = float(reciever_line[0])\n # z-coord\n self.rcoord[reciever,1] = float(reciever_line[1])\n\n #extra station\n try:\n xsta = int(f.readline())\n self.xsta = xsta\n f.readline()\n self.x_rcoord = np.zeros((xsta,2))\n\n for ex_reciever in range(xsta):\n xtra = f.readline()\n x_reciever_line = xtra.rstrip(\" \").split()\n self.x_rcoord[ex_reciever,0] = float(x_reciever_line[0])\n self.x_rcoord[ex_reciever,1] = float(x_reciever_line[0])\n except :\n print(\"No Extra recievers\")\n self.x_rcoord = None\n\n f.close()\n return self.dt, self.npts, self.nsta, self.rcoord, self.x_rcoord",
"def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', '];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data",
"def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw",
"def open_gains(fname, snver=1):\n\n hdu = get_hdu(fname, extname='AIPS SN', ver=snver)\n\n nif = hdu.header['NO_IF']\n npol = hdu.header['NO_POL']\n nant = hdu.header['NO_ANT']\n # set ``nif'' from dtype of hdu.data\n _data = np.zeros(hdu.header['NAXIS2'], dtype=[('start', '<f8'),\n ('stop', '<f8'),\n ('antenna', 'int'),\n ('gains', 'complex',\n (nif, npol,)),\n ('weights', '<f8',\n (nif, npol,))])\n\n time = hdu.data['TIME']\n dtime = hdu.data['TIME INTERVAL']\n antenna = hdu.data['ANTENNA NO.']\n\n # Constructing `gains` field\n rgains = hdu.data['REAL1'] + 1j * hdu.data['IMAG1']\n # => (466, 8)\n lgains = hdu.data['REAL2'] + 1j * hdu.data['IMAG2']\n rgains = np.expand_dims(rgains, axis=2)\n # => (466, 8, 1)\n lgains = np.expand_dims(lgains, axis=2)\n gains = np.dstack((rgains, lgains))\n # => (466, 8, 2)\n\n # Constructing `weights` field\n rweights = hdu.data['WEIGHT 1']\n # => (466, 8)\n lweights = hdu.data['WEIGHT 2']\n rweights = np.expand_dims(rweights, axis=2)\n # => (466, 8, 1)\n lweights = np.expand_dims(lweights, axis=2)\n weights = np.dstack((rweights, lweights))\n # => (466, 8, 2)\n\n # Filling structured array by fields\n _data['start'] = time - 0.5 * dtime\n _data['stop'] = time + 0.5 * dtime\n _data['antenna'] = antenna\n _data['gains'] = gains\n _data['weights'] = weights\n\n gains = list()\n for ant in set(_data['antenna']):\n idx = _data['antenna'] == ant\n gains.append(GainCurve(ant, nif, npol, _data[idx][['start', 'stop',\n 'gains',\n 'weights']]))\n return gains",
"def read(shortcode):\r\n \r\n url = \"https://www.hep.ph.ic.ac.uk/~ms2609/CompPhys/neutrino_data/\"+str(shortcode)+\".txt\"\r\n urllib.request.urlretrieve(url, str(shortcode)+\".txt\")\r\n # Request teh datafile from url\r\n data = urllib.request.urlopen(url)\r\n count = 0\r\n pos = []\r\n for line in data:\r\n count += 1\r\n if line == b'\\n': # Finds where data starts and ends form empty line\r\n pos.append(count)\r\n \r\n fit_data = np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[0],\r\n max_rows=(pos[1]-pos[0]))\r\n u_flux = np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[2])\r\n return fit_data, u_flux",
"def getSubParamLine(self,subname, numNodesSub, subParamInfo,dir_name):\n #nodeSubInterface = []\n subOptionInfo_p = []\n subSchemInfo_p = []\n filename_t = subname + '.sub'\n filename_t = os.path.join(dir_name, filename_t)\n data_p = self.readNetlist(filename_t)\n subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p)\n \n if len(subOptionInfo_p) > 0:\n newline = subOptionInfo_p[0]\n newline = newline.split('.subckt '+ subname) \n intLine = newline[1].split()\n print \"numNodesSub Index---------->\",numNodesSub\n newindex = numNodesSub[subname]\n appen_line = intLine[newindex:len(intLine)]\n appen_param = ','.join(appen_line)\n paramLine = 'parameter Real ' + appen_param + ';'\n paramLine = paramLine.translate(maketrans('{}', ' '))\n subParamInfo.append(paramLine)\n return subParamInfo",
"def readRSPData(self) -> CalibrationData:\n with open(self.datafile, \"r\") as f:\n lines = f.readlines()\n numLines = len(lines)\n\n serial: int = 1\n staticGain: float = 1\n sensor: str = \"\"\n dataReadFrom: int = 0\n for il in range(0, numLines):\n # remove whitespace and new line characters\n lines[il] = lines[il].strip()\n # find serial\n if \"induction coil no\" in lines[il]:\n split1 = lines[il].split(\":\")[1]\n serial = int(split1.split(\"-\")[0].strip())\n # find sensor\n if \"SensorType\" in lines[il]:\n sensor = lines[il].split()[1]\n # find static gain value\n if \"StaticGain\" in lines[il]:\n staticGain = float(lines[il].split()[1])\n if \"FREQUENCY\" in lines[il]:\n dataReadFrom = il\n dataLines = []\n il = dataReadFrom + 2\n # get part of file desired\n while il < numLines and lines[il] != \"\":\n # save line then increment\n dataLines.append(lines[il])\n il = il + 1\n\n # get the data as an array\n data = self.linesToArray(dataLines)\n # unit manipulation - change phase to radians and apply static gain\n data[:, 1] = data[:, 1] * staticGain\n data[:, 2] = data[:, 2] * (math.pi / 180)\n # sort and extend\n data = self.sortCalData(data)\n if self.extend:\n data = self.extendCalData(data)\n\n return CalibrationData(\n self.datafile,\n data[:, 0],\n data[:, 1],\n data[:, 2],\n staticGain,\n chopper=self.chopper,\n sensor=sensor,\n serial=serial,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets parameters for rigs. rig_ids_str: comma-separated string with rig ids "1,2,3,4". miner: miner to set ("claymore", "claymorez", "ewbf", ...); leave it null if you do not want to change it. miner2: second miner to set; leave it null if you do not want to change it, "0" if you want to unset it. id_wal: ID of wallet; leave it null if you do not want to change it. id_oc: ID of OC profile; leave it null if you do not want to change it. Returns bool|mixed. | def multiRocket(self, rig_ids_str, miner, miner2, id_wal, id_oc):
if rig_ids_str is not None:
self.log("Rigs ids required")
exit()
params = {
'method': 'multiRocket',
'rig_ids_str': rig_ids_str,
'miner': miner,
'miner2': miner2,
'id_wal': id_wal,
'id_oc': id_oc
}
result = self.request(params)
if 'error' in result:
return False
return result | [
"def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rgbsids[:, 1] = g\n self.rgbsids[:, 2] = b",
"def set_rstr(self, rstr_entry):\n\n self.rstr_type = ''\n self.rstr_atoms = []\n self.rstr_atoms_crds = []\n self.rstr_entry = rstr_entry\n #ugly hack...trying to be more general\n self.rstr_atoms = self.rstr_entry.split('iat')[1].split('r')[0].replace('=',' ').replace(',',' ').strip().split()\n if len(self.rstr_atoms) == 2: self.rstr_type = 'BOND'\n elif len(self.rstr_atoms) == 3: self.rstr_type = 'ANGLE'\n elif len(self.rstr_atoms) == 4:\n if 'rstwt' in self.rstr_entry: self.rstr_type = 'GENCRD'\n else: self.rstr_type = 'DIHEDRAL'\n for atom in self.rstr_atoms:\n if int(atom) % 2: self.rstr_atoms_crds.append(self.crd_data[int(atom)/2+2][:36].split())\n else: self.rstr_atoms_crds.append(self.crd_data[int(atom)/2+1][37:].strip().split())",
"def id_str(self, id_str: str):\n\n self._id_str = id_str",
"def buildRigFromSelection():\n # Get Selection\n sel = cmds.ls(sl=1)\n iso = cmds.filterExpand(sel, sm=45)\n if not iso: iso = []\n # Adjust Selection\n sel = list(set(sel) - set(iso))\n\n # Build Surface Rigs\n for surface in sel:\n\n # Check Surface\n if glTools.utils.surface.isSurface(surface):\n minU = cmds.getAttr(surface + '.minValueU')\n maxU = cmds.getAttr(surface + '.maxValueU')\n midU = minU + ((maxU - minU) * 0.5)\n buildRig(surface, uValue=midU)\n\n # Build Isoparm Rigs\n for crv in iso:\n surface = cmds.ls(crv, o=True)[0]\n uValue = float(crv.split('[')[-1].split(']')[0])\n buildRig(surface, uValue)",
"def __set_receivers_id(self, receivers_id):\n if not isinstance(receivers_id, list):\n raise TypeError('Receivers id should be a list')\n if not all(isinstance(receiver_id, int) for receiver_id in receivers_id): # Check if all elements are int\n raise TypeError('All elements in the receivers id list should be integer')\n if any(receiver_id < 0 for receiver_id in receivers_id): # If any elements is negative\n raise ValueError('An element is negative, there can not be negative ids')\n self.__receivers_id = receivers_id",
"def reasoner_ids(self, reasoner_ids: List[str]):\n\n self._reasoner_ids = reasoner_ids",
"def init_armor_set(self, armor_set):\n \n if armor_set:\n for armor_build in armor_set:\n armor = armor_build(self)\n self.armor_set.append(armor)\n armor.activate()",
"def set_ships(self, dictionary):\n for key, value in dictionary.items():\n if value < 0:\n raise SettingsError(\"No negative ships\")\n self._parser.set(\"settings\", \"carriers\", str(dictionary[CARRIER]))\n self._parser.set(\"settings\", \"battleships\", str(dictionary[BATTLESHIP]))\n self._parser.set(\"settings\", \"cruisers\", str(dictionary[CRUISER]))\n self._parser.set(\"settings\", \"destroyers\", str(dictionary[DESTROYER]))\n self._save()",
"def reviewer_id(self, reviewer_id: int):\n\n self._reviewer_id = reviewer_id",
"def rmg_reaction_from_str(self, reaction_string):\n reactants, products = reaction_string.split(self.arrow)\n reactants = [Species().fromSMILES(str(smiles)) for smiles in reactants.split(self.plus)]\n products = [Species().fromSMILES(str(smiles)) for smiles in products.split(self.plus)]\n self.rmg_reaction = Reaction(reactants=reactants, products=products)",
"def init_dna_strands_set():\n strand_index = -1\n for line in fileinput.input(argv[1]):\n if line.startswith('>'):\n data = dict()\n data[\"id\"] = line.replace('>', '').replace('\\n', '')\n data[\"strand\"] = \"\"\n dna_strands_set.append(data)\n strand_index += 1\n elif len(dna_strands_set) == strand_index + 1:\n data = dna_strands_set[strand_index]\n data[\"strand\"] += line.replace('\\n', '')\n length = len(data[\"strand\"])\n\n global shortest_strand_length\n shortest_strand_length\n for strand in dna_strands_set:\n length = len(strand[\"strand\"])\n if 0 < length < shortest_strand_length:\n shortest_strand_length = length\n elif shortest_strand_length == 0:\n shortest_strand_length = length",
"def setModemInitString(self, initString, unitCode=0):\n resp = self.XAPCommand('MINIT', initString, unitCode=unitCode)\n return resp",
"def contributor_researcher_id_names(self, contributor_researcher_id_names):\n\n self._contributor_researcher_id_names = contributor_researcher_id_names",
"def _setisbn(self, isbn):\n if isinstance(isbn, str):\n if len(isbn) in (9, 10):\n self._isbn = isbn\n else:\n raise ValueError(\"isbn must have an lenght of 9 or 10\")\n else:\n raise TypeError(\"isbn must be a string\")",
"async def set_supergroup_sticker_set(\n self,\n supergroup_id: int,\n sticker_set_id: int,\n *,\n request_id: str = None,\n request_timeout: int = None,\n skip_validation: bool = False\n ) -> Ok:\n _constructor = SetSupergroupStickerSet.construct if skip_validation else SetSupergroupStickerSet\n\n return await self.client.request(\n _constructor(\n supergroup_id=supergroup_id,\n sticker_set_id=sticker_set_id,\n ),\n request_id=request_id,\n request_timeout=request_timeout,\n )",
"def set_selected(self, idstr: str) -> None:\n opt = self._optdct.get(idstr, None)\n if opt is None:\n print(\"select: no option with idstr: {}\".format(idstr))\n return\n for iidstr, opt in self._optdct.items():\n opt.set_selected(iidstr == idstr)",
"def krsedg(self, krsedg):\n if (self.local_vars_configuration.client_side_validation and\n krsedg is not None and len(krsedg) > 32):\n raise ValueError(\"Invalid value for `krsedg`, length must be less than or equal to `32`\") # noqa: E501\n\n self._krsedg = krsedg",
"async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],\n sticker_set_name: base.String) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)\n\n return result",
"def set_num_rs(self, num_rs: t.Union[str, int]) -> None:\n if isinstance(num_rs, str):\n self.run_args[\"nrs\"] = num_rs\n else:\n self.run_args[\"nrs\"] = int(num_rs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump utils image template.py as a Dict. The key is like "simnet/lndbtc" | def _dump_template(self, utils_image) -> Dict[str, str]:
cmd = f"docker run -i --rm --entrypoint python {utils_image}"
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
out, _ = p.communicate(input=SCRIPT.encode())
output = out.decode()
if p.returncode != 0:
self._logger.error("Failed to dump %s template.py\n%s", utils_image, output)
raise RuntimeError("Failed to dump %s template.py" % utils_image)
lines = output.splitlines()
result = {}
for line in lines:
key, value = line.split()
result[key] = value
return result | [
"def generate_image(config):\n scripts = dict()\n count = 0\n for controller in config['nodes']:\n if 'controller' not in controller['roles']:\n continue\n # Add function to create directories\n script = BytesIO()\n\n # Add environment information\n script.write(b'HOSTNAME=%s\\n' % controller['name'].encode())\n script.write(b'DOMAIN=%s\\n' % config['environment']['subdomain'].encode())\n script.write(b'MASK=%s\\n' % config['network']['management']['network']\n .split('/', 1)[1].encode())\n script.write(b'MGMTVLAN=%s\\n' % str(config['network']['management']['vlan']).encode())\n script.write(b'STORVLAN=%s\\n' % str(config['network']['storage']['vlan']).encode())\n pubip, mask = controller['fallback']['ipaddress'].split('/', 1)\n script.write(b'PUBIP=%s\\n' % pubip.encode())\n script.write(b'PUBMASK=%s\\n' % mask.encode())\n script.write(b'PUBGW=%s\\n' % controller['fallback']['gateway'].encode())\n script.write(b'PUBVLAN=%s\\n' % str(config['network']['public']['vlan']).encode())\n script.write(b'MGMTIP=%s\\n' % construct_ip(config['network']['management']['network'],\n controller['ip-lsb']))\n script.write(b'IPMIIP=%s\\n' % construct_ip(config['network']['ipmi']['network'],\n controller['ip-lsb']))\n script.write(b'STORIP=%s\\n' % construct_ip(config['network']['storage']['network'],\n controller['ip-lsb']))\n script.write(b'UNTAGIP=%s\\n' % construct_ip(config['network']['backplane']['network'],\n controller['ip-lsb']))\n script.write(b'GIGPWD=%s\\n' % str(config['environment']['password']).encode())\n count += 1\n scripts['/etc/ctrl-0%s' % count] = script\n pk = config['ssh']['private-key'].strip()\n buf = StringIO(pk)\n buf.seek(0)\n k = paramiko.RSAKey.from_private_key(buf)\n key = \"ssh-rsa %s root@meneja.gig.tech\\n\" % k.get_base64()\n scripts['/etc/id_rsa.pub'] = BytesIO(key.encode('utf8'))\n return scripts",
"def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF",
"def _get_image_type_templates():\n yaml_file = os.path.join(ROOT_DIR, 'docker', 'image_types.yaml')\n all_templates = yaml_utils.read(yaml_file)\n return all_templates",
"def genConvOnboardingInfoJsonFile( sztpOnboardingInfo, onboardingFileJson ):\n template = {\n \"boot-image\": {\n \"os-name\": str,\n \"os-version\": str,\n \"download-uri\": list, # of uri strings\n \"image-verification\": [ {\n \"hash-algorithm\": str,\n \"hash-value\": str } ],\n },\n \"configuration-handling\": str,\n \"pre-configuration-script\": str,\n \"configuration\": str,\n \"post-configuration-script\": str\n }\n\n def verifyBootImage( template, sztpBootImage ):\n \"\"\"Verify boot image is correct\"\"\"\n def verifyImageVerification( imageVerification ):\n \"\"\"Verify instance of image-verification is correct\"\"\"\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"\n\n def verifyImageVerificationList( template, sztpImageVerification ):\n \"\"\"Verify image-verification list is correct\"\"\"\n assert isinstance( sztpImageVerification, list ), \\\n \"Expected list\"\n for imageVer in sztpImageVerification:\n assert verifyDictTypes( template, imageVer ), \"Unexpected value types\"\n assert set( imageVer.keys() ).issubset( set( template.keys() ) ), \\\n \"Unexpected keys in dict\"\n verifyImageVerification( imageVer )\n\n mandatory = [ \"download-uri\" ]\n assert isinstance( sztpBootImage, dict ), \"Expected dict\"\n assert set( sztpBootImage.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpBootImage ), \\\n \"Unexpected value types\"\n assert set( mandatory ).issubset( sztpBootImage ), \\\n \"Mandatory keys not present\"\n if \"image-verification\" in sztpBootImage:\n verifyImageVerificationList( template[ \"image-verification\" ][ 0 ],\n sztpBootImage[ \"image-verification\" ] )\n\n # verify onboarding-info dict is correctly constructed\n assert isinstance( sztpOnboardingInfo, dict ), \"Expected dict\"\n assert set( sztpOnboardingInfo.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpOnboardingInfo ), \\\n \"Unexpected values types\"\n assert sztpOnboardingInfo[ \"configuration-handling\" ] == \"replace\", \\\n \"Unsupported configuration-handling value\"\n if \"boot-image\" in sztpOnboardingInfo:\n verifyBootImage( template[ \"boot-image\" ],\n sztpOnboardingInfo[ \"boot-image\" ] )\n\n # construct outer dictionary and convert to json\n ietfOnboardingInfo = { \"ietf-sztp-conveyed-info:onboarding-information\":\n sztpOnboardingInfo }\n jsonIetfOnboardingInfo = json.dumps( ietfOnboardingInfo, indent=4 )\n\n # save to file\n with open( onboardingFileJson, \"w\" ) as tmpFile:\n tmpFile.write( jsonIetfOnboardingInfo )",
"def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)",
"def get_templates(grey=False):\n templates = {}\n for name, t in LED_TEMPLATES.items():\n temp = {'active': t['active']}\n if grey:\n temp['img'] = cv2.cvtColor(cv2.imread('templates/' + t['file']), cv2.COLOR_BGR2GRAY)\n else:\n temp['img'] = cv2.imread('templates/' + t['file'])\n\n templates[name] = temp\n return templates",
"def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }",
"def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data",
"def dockerfile_template(self) -> str:\n return f\"{self.os_name}.dockerfile\"",
"def createToolImages( self ):",
"def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if 
IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data",
"def get_nginx_template_dict(self):\n template_dict = {\n 'upstream_server': self.upstream_server,\n 'service_port': self.service_port,\n 'ping_port': self.ping_port\n }\n return template_dict",
"def export_project_dump(self, key):",
"def img_path(self):\n return 'images/key.png'",
"def create_base_image(self, builder, template, parameters):",
"def return_template_output(base_dir,filename,data_dict):\n templateLoader = jinja2.FileSystemLoader( searchpath=base_dir)\n templateEnv = jinja2.Environment( loader=templateLoader )\n template = templateEnv.get_template(filename)\n output = template.render(data_dict)\n return output",
"def do_createTemplate(gDict, args):\n\n (doThis, todo) = splitArgs(args, 1)\n fail = checkPresence(doThis, [\"type\"], [])\n kind = fail[\"type\"] # todo[0]\n if fail[\"truth\"]:\n print kind, \" makes no sense\"\n sys.exit()\n n = 1\n if \"n\" in doThis:\n n = int(doThis[\"n\"])\n if kind != \"sensor\" and kind != \"site\":\n print kind, \" must be either sensor or site\"\n sys.exit()\n\n fields = templateObjs[kind]\n examples = {\n \"operator\" : \"id:xx|contact:xx|tel:xxxyyyyzzz|email:xxx@yyy\",\n \"provider\" : \"id:xx|contact:xx|tel:xxxyyyyzzz|email:xxx@yyy\",\n \"firstdate\": \"DD-MM-YYYY\",\n \"energysupply\": \"solar|mains\",\n }\n hide = [\"listOfDetectors\", \"detector\", \"epoch.f\", \"date.l\", \"epoch.l\", \"history\", \"username\"]\n for i in range(0,n):\n print \"begin.asset\"\n for uf in fields:\n uk = uf.keys()\n for f in uk:\n uff = uf[f]\n if uff in hide:\n continue\n if uff == \"n-detectors\":\n print \"# add more detectors if necessary\"\n for i in range(0,5):\n print \"detector=name:xxx|unit:xx|epsilon:xx\"\n break\n if uff in examples:\n print \"{}={}\".format(uff, examples[uff])\n else:\n print \"{}=\".format(uff)\n print \"end.asset\\n\"\n sys.exit()",
"def raw_image(self):\n\t\treturn FstabEntry([f\"{self.mount_point}_image\", \"emmc\", self.device])",
"def export_template_geo(self):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send detection data and return status | def send_detection_data(self, image_width, image_height,
image, detection_result):
if self._send_buffer.full() is True:
log_error("Send detection data failed for buffer is full")
return False
image_data = None
if isinstance(image, AclImage):
image_data = DataBuf(image.data(), image.size).copy_to_local()
elif isinstance(image, np.ndarray):
image_data = image
else:
log_error("Invalid data to send")
return False
request_msg = pm.image_frame_request(image_width, image_height,
image_data.tobytes(),
detection_result)
self.send_message(request_msg)
self._send_buffer.put(image_data)
self._release_send_success_data()
return True | [
"def send_image(self, image_width, image_height, image):\n detection_result = []\n return self.send_detection_data(image_width, image_height, image, detection_result)",
"def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)",
"def detect(self, listener):\n\n # Define the co-routine to be synchronously run on the event loop.\n @gen.coroutine\n def __connect():\n\n # Form the websocket URL and connect to it\n url = 'ws://{0}:{1}/api/detection'.format(self.server, self.port)\n socket = yield websocket.websocket_connect(url)\n\n # The loop which waits for a response (a web socket message), a stop signal, or a break in the connection\n while not self.__stop_detection:\n # Obtain a potential message (a \"future\") from the server and wait for something to happen\n message_future = socket.read_message()\n while True:\n # If the socket is unexpectedly closed, send an alarm to shutdown\n if message_future.done() and message_future.result() is None :\n self.__stop_detection = True\n print '\\nConnection with aerial Devkit lost.\\n'\n signal.alarm(1) # send SIGALRM to shutdown via our signal handler;\n break;\n # If a stop signal is received, simply break out\n if self.__stop_detection :\n break\n # If a response is received, parse it and call the callback function\n elif message_future.done():\n detection_result = json.loads(message_future.result())\n listener(detection_result)\n break\n # Otherwise, check again in half a second\n yield gen.sleep(0.5)\n\n # After the loop is finished, close the socket.\n if socket is not None:\n socket.close()\n\n # Create the thread to run the event loop, start it, and return it\n loop_thread = Thread(target = lambda: IOLoop.current().run_sync(__connect))\n loop_thread.start()\n return loop_thread",
"def status_check_callback(self, req, res):\n try:\n res.single_camera_status = 1\n res.stereo_camera_status = 1\n res.lidar_status = 1\n if self.camera_buffer.read_buffer is not None \\\n and isinstance(self.camera_buffer.read_buffer, list):\n if len(self.camera_buffer.read_buffer) == 2:\n res.stereo_camera_status = 0\n elif len(self.camera_buffer.read_buffer) == 1:\n res.single_camera_status = 0\n if self.lidar_buffer.read_buffer is not None:\n res.lidar_status = 0\n return res\n except Exception as ex:\n self.get_logger().error(f\"Failed to get sensor data status: {ex}\")",
"def send_reception_image(self):\n\n self.socket.sendall(pack('B', codes['image_received']))",
"def post(self):\n try:\n person_id = self.get_argument('person_id')\n coordinate = eval(self.get_argument('coordinate'))\n picture = self.get_argument('picture')\n description = self.get_argument('description')\n user_id = int(self.get_secure_cookie('user_id'))\n pic_type = self.get_argument('pic_type')\n except tornado.web.MissingArgumentError as e:\n raise MyMissingArgumentError(e.arg_name) \n try:\n binary_picture = base64.b64decode(picture)\n except TypeError as e:\n raise ArgumentTypeError('picture')\n message_mapping = [\n 'find high confidence person',\n 'the person maybe not the missing one or you upload a low quality picture'\n ]\n result =ReturnStruct(message_mapping)\n event_happen_date = self.get_even_happen_data()\n # 1. get person's std picture. personid--> -->face_token\n # std_face_token = self.person_model.get_person_std_pic(person_id)\n # # 2. detect picture --> face_token2\n # result_detect_struct = yield self.background_task(self.face_model.detect_img_list, [binary_picture], True)\n # result.merge_info(result_detect_struct)\n # # 3. compare face_token.\n # if result_detect_struct.code == 0:\n # # the result just one element\n # detect_result = result_detect_struct.data['detect_result_list']\n\n # -----[base64]不需要检测人脸,只要比较person_id和base64;不需要std_face_token;不需要detect_result\n confidence = yield self.background_task(self.face_model.compare_face, person_id, picture)\n result.data = confidence\n logging.info(\"[compare]the confidence is %s\"%confidence)\n # [change] \n if confidence['errorcode'] == 0 and confidence['ismatch'] and confidence['level'] >= self.confirm_level: \n # 4. update info\n result.code = 0\n #[change] 这里也不需要detect_result\n pic_key_list = yield self.background_task(self.picture_model.store_pictures,[binary_picture], \"user\"+str(user_id), pic_type)\n # 4. update track and person information\n shooter_info = {\n 'user_id':user_id,\n 'description':description\n }\n event_info = {\n 'coordinate':coordinate,\n 'confidence':confidence['confidence'],\n 'pic_key':pic_key_list[0],\n 'person_id':person_id,\n 'date':event_happen_date\n }\n self.person_model.update_person_status(self.person_model.PERSON, event_info, shooter_info)\n # 5. send message.\n message_data = {\n 'spot':coordinate,\n 'date':event_happen_date,\n 'person_id':person_id,\n 'upload_user_id':user_id,\n 'confidence':confidence['confidence'],\n 'pic_key':pic_key_list[0]\n }\n self.message_model.send_message_factory(self.message_model.COMPARE, message_data)\n else:\n result.code = 1\n result.data = confidence\n self.return_to_client(result)\n self.finish()",
"def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result",
"def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)",
"def handle_detect(req):\n\n '''\n obj = req.obj\n\n # get current frame\n frame = get_camera()\n\n # show the frame in a popup window\n cv2.imshow(\"frame\", frame)\n cv2.waitKey(5)\n\n # detect object position in frame\n (x, y), r = detect_ocv(frame, obj)\n '''\n\n (x, y), r = ((0.5, 0.4), 16)\n return DetectResponse([x, y, r])",
"def test_detect_fields_error_in_detection(self):\n\n with open('snapshotServer/tests/data/replyDetectionNoData.json', 'rb') as fp:\n response = self.client.post(reverse('detect'), data={'image': fp, 'task': 'error'})\n self.assertEqual(response.status_code, 500)\n self.assertEqual(json.loads(response.content.decode('UTF-8')), \"Model error in detection\")",
"def process_image(self):\n for i in range(0,3):\n return_code,resolution,dataimage=vrep.simxGetVisionSensorImage(self.client_id,self.visionsensor[i],1,vrep.simx_opmode_buffer)\n if dataimage:\n self.cv_image[i] = dataimage\n \n h = np.array(self.cv_image)\n self.binary_image = ((h > self.color_line[0]) & (h < self.color_line[1]))\n self.stop_robot = np.all((h > self.color_stop[0]) & (h < self.color_stop[1]))",
"def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)",
"def check_status(self):\n #looks like byte range is currently not accepted\n fetch_headers = {'accept':'application/json', 'range':'bytes=0-2'} \n results = self.fetch_results(fetch_headers)\n if len(results) > 0: #lolhax\n return 'Stopped'\n\n #fall back to status request\n status_list = list()\n headers = {'accept': 'application/json'}\n \n req_url = '%s/%s/%s' % (Retrieve.URL, self.measurement_id, '?fields=status')\n if self.key:\n req_url += '&key=%s' % self.key\n \n response = self.sess.get(req_url, headers=headers)\n response_str = response.text\n\n results = json.loads(response_str)\n status = results['status']['name']\n\n return status",
"def _send(self, data):\n return 0",
"def test_detect_fields(self):\n\n with open('snapshotServer/tests/data/replyDetection.json.png', 'rb') as fp:\n response = self.client.post(reverse('detect'), data={'image': fp, 'task': 'field'})\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.content.decode('UTF-8'))\n\n # check content of data\n self.assertEqual(len(data['fields']), 21)\n self.assertEqual(len(data['labels']), 11)\n self.assertEqual(data['fileName'], 'replyDetection.json.png')\n self.assertIsNone(data['error'])\n self.assertEqual(data['version'], \"afcc45\")\n\n # check files are copied in \"detect\" media folder\n self.assertTrue(Path(self.media_dir, 'replyDetection.json.json').is_file())\n self.assertTrue(Path(self.media_dir, 'replyDetection.json.png').is_file())",
"def _executefacerecognition(self, session, image, max_results=4):\n\n # convert cv2 to .jpg to make it compatible for google vision api\n image_buf = cv2.imencode('.jpg', image)[1]\n image_str = np.array(image_buf).tostring()\n\n request = google_utils.buildrequest(image_str, max_results, 'FACE_DETECTION')\n\n response = yield from session.post(self._GOOGLE_VISION_API_ENDPOINT, data=json.dumps(request))\n response_json = yield from response.json()\n self._logger.debug('response: {}'.format(response_json))\n\n return response_json, image",
"def vh_request(self, data):\n self.__stop()\n try:\n self.get_connection().write(data.encode('ascii') + b\"\\n\\n\")\n except NotConnected:\n self.connection = None\n self.connected = False\n return None\n\n self.__start()",
"def _simulate_detection(self, aug_obj_data, aug_obj_metadata):\n # Calculate the S/N of the observations\n s2n = np.abs(aug_obj_data[\"flux\"]) / aug_obj_data[\"flux_error\"]\n\n # Apply the S/N threshold\n prob_detected = (erf((s2n - 5.5) / 2) + 1) / 2.0\n aug_obj_data[\"detected\"] = self._rs.rand(len(s2n)) < prob_detected\n pass_detection = np.sum(aug_obj_data[\"detected\"]) >= 2\n\n return aug_obj_data, pass_detection",
"def test_get_status(self):\n # Index of status bit to flip\n for app_num, servo_type in app_nr.items():\n self.cmd_num += 1\n # Retrieve the positions directly from the server (without ACS)\n command = headers[0] + commands[6] + ':%d=' %self.cmd_num + str(app_num) + closers[0]\n\n found = False\n while(not found):\n self.sockobj.sendall(command)\n data = \"\"\n while(True):\n data += self.sockobj.recv(1)\n if closers[0] in data:\n if ':%d=' %self.cmd_num in data:\n found = True\n break\n else:\n data = \"\"\n\n if data.startswith(\"!NAK\"):\n continue\n status_obj = self.__dict__[servo_type]._get_status()\n acs_status, completion = status_obj.get_sync()\n\n if(completion.code):\n print \"\\nError code found in status...\"\n continue\n try:\n # Retrieve the message header\n sent, answer = data.split(\">\")\n status = int(answer.strip())\n except:\n continue\n\n self.assertAlmostEqual(acs_status, status, places=1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
send detection image data | def send_image(self, image_width, image_height, image):
detection_result = []
return self.send_detection_data(image_width, image_height, image, detection_result) | [
"def send_detection_data(self, image_width, image_height,\n image, detection_result):\n if self._send_buffer.full() is True:\n log_error(\"Send detection data failed for buffer is full\")\n return False\n\n image_data = None\n if isinstance(image, AclImage):\n image_data = DataBuf(image.data(), image.size).copy_to_local()\n elif isinstance(image, np.ndarray):\n image_data = image \n else:\n log_error(\"Invalid data to send\") \n return False \n\n request_msg = pm.image_frame_request(image_width, image_height,\n image_data.tobytes(),\n detection_result) \n self.send_message(request_msg) \n self._send_buffer.put(image_data) \n self._release_send_success_data()\n\n return True",
"def send_reception_image(self):\n\n self.socket.sendall(pack('B', codes['image_received']))",
"def sendOpenCV(self, image):\n h, w, c = image.shape\n \n # resize input if necessary\n if h != self.imageHeight or w != self.imageWidth:\n image = cv2.resize(image, (self.imageWidth, self.imageHeight))\n \n if c == 1:\n self.array[:,:] = np.squeeze(image)\n \n else:\n # and convert image back to something the yarpview can understand\n self.array[:,:] = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # Send the result to the output port\n self.write(self.image)",
"def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)",
"def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)",
"def img_callback(self, img_msg):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(img_msg, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n boxes_list = self.detector.return_predict(cv_image, self.threshold)\n\n msg_to_pub = self.msg_constructor(img_msg, boxes_list)\n self.bboxes_pub.publish(msg_to_pub)",
"def test_get_fields_from_previous_detection_format_image(self):\n \n # trigger detection\n with open('snapshotServer/tests/data/replyDetection.json.png', 'rb') as fp:\n response = self.client.post(reverse('detect'), data={'image': fp, 'task': 'field'})\n self.assertEqual(response.status_code, 200)\n \n response = self.client.get(reverse('detect'), data={'image': 'replyDetection.json.png', 'output': 'image'})\n self.assertEqual(response.status_code, 200)\n data2 = response.content\n \n # check we get image file\n self.assertEqual(data2[:15], b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00')",
"def process_image(self):\n for i in range(0,3):\n return_code,resolution,dataimage=vrep.simxGetVisionSensorImage(self.client_id,self.visionsensor[i],1,vrep.simx_opmode_buffer)\n if dataimage:\n self.cv_image[i] = dataimage\n \n h = np.array(self.cv_image)\n self.binary_image = ((h > self.color_line[0]) & (h < self.color_line[1]))\n self.stop_robot = np.all((h > self.color_stop[0]) & (h < self.color_stop[1]))",
"def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result",
"def rcv_image(msg): \n\n rospy.loginfo(\"Image received\")\n global arrived\n arrived += 1 #count how many images are already arrived\n\n # convert image to numpy array\n image = ros_numpy.numpify(msg.image)\n\n # if model isn't already loaded, store (pose, image) in the queue\n if mydetector is None:\n rospy.loginfo(\"Model not already loaded, saving image in queue\")\n queue[msg.pose.data] = image\n # else run inference\n else:\n detect(msg.pose.data, image)\n \n # once image acquire is finished, scan the queue for processing remaining elements\n if arrived == MAX_IMAGES: \n while mydetector is None: #detector not already loaded, cannot do inference\n #rospy.loginfo(\"Model not already loaded\")\n pass\n for pose,img in queue.items():\n detect(pose, img)\n queue.clear()",
"def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()",
"def send_image(self, path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n # removed by alice\n #rospy.sleep(1)",
"def handle_image_data(data):\n \n #Get the incoming RGB image from the Kinect\n D.image = D.bridge.imgmsg_to_cv(data, \"bgr8\")\n\n if D.created_images == False:\n #Initialize the additional images we need for processing\n ImageProcessing.initialize(D)\n D.created_images = True\n\n # Recalculate threshold image\n ImageProcessing.threshold_image(D)\n\n # Recalculate blob in main image\n ImageProcessing.find_biggest_region(D)\n\n # Check on the display of dragged section\n ImageProcessing.mouse_section(D)\n\n #Display target circle\n #ImageProcessing.target_coord(D)\n \n #Display info box on image\n ImageProcessing.draw_on_image(D)\n \n #Handle incoming key presses\n key_press = cv.WaitKey(5) & 255\n if key_press != 255:\t\t\t#Handle only if it's a real key\n check_key_press(D, key_press)\t\t#(255 = \"no key pressed\")\n\n #Update the displays:\n #Show main image in the image window\n #cv.ShowImage('Image', D.image)\n\n #Show threshold image in the threshold window 3currentThreshold = getattr(D, D.current_threshold)\n cv.ShowImage('Threshold', currentThreshold)",
"def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass",
"def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)",
"def recognize(self, image):\n pass",
"def raw_image_callback(self, msg):\n if self.pictures_to_take and not self.detection_to_receive:\n self.pictures_to_take -= 1\n # so let's analyse it here and then delete the subscription\n rows = msg.height\n step = msg.step\n cols = msg.width\n dim = int(step / cols)\n pixels = msg.data # of size (steps, nrows)\n # save the image (later we will need to analyse it)\n vision_utils.save_picture(pixels, rows, cols, dim, self.name, FOLDER)",
"def on_msg_image_stream(self, msg):\n img = self.bridge.imgmsg_to_cv2(msg.data, 'bgr8')\n img_msg = Message(img, msg.timestamp)\n self.get_output_stream(self.output_name).send(img_msg)",
"def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get channel presenter_server_ip, port, channel_name, content_type | def get_channel_config(config_file):
config = configparser.ConfigParser()
config.read(config_file)
presenter_server_ip = config['baseconf']['presenter_server_ip']
port = int(config['baseconf']['presenter_server_port'])
channel_name = config['baseconf']['channel_name']
content_type = int(config['baseconf']['content_type'])
log_info("presenter server ip %s, port %d, channel name %s, "
"type %d " % (presenter_server_ip, port, channel_name, content_type))
return presenter_server_ip, port, channel_name, content_type | [
"def getChannel(self):\r\n return self.channel",
"def getchannelinfo(self, channelid):\n requestquery = {}\n requestquery.update(Commonquery)\n requestquery['credential'] = self.credential or \"None\"\n request = self.session.get(self.apiaddress+'/live/channels/'+channelid, params=requestquery)\n return request.json()",
"async def get_channel_info(app_channel_id: Optional[str] = Header(None)):\n try:\n response = fetch_channel(app_channel_id)\n return response\n except ReturnExceptions as err:\n raise HTTPException(\n status_code=Code.error_enum_http[err.error_code], detail=str(err)\n )",
"def getChannelResponse(self):\n \n \n return self.channel_response",
"def get_channel(self, channel_id):\n uri = 'channels/' + channel_id\n return self.make_request(uri)",
"def channel(self,ch,*args):\n return self.__getattr__('_'.join(['channel%02d'%(ch+self.head._channel_offset[self.site-1],)]+list(args)))",
"def test_get_channel_helper(self):\n\n c = self.rmt.create_project(self.coll)\n\n cf = self.rmt.create_project(self.coord)\n\n e = self.rmt.create_project(self.exp)\n\n chan = self.rmt.create_project(self.source_chan)\n\n actual = self.rmt.get_channel(chan.name, chan.coll_name, chan.exp_name)\n\n # This is not an exhaustive list of attributes, but they are the\n # important ones for correct interaction with the volume service.\n self.assertTrue(actual.cutout_ready)\n self.assertEqual(chan.datatype, actual.datatype)\n self.assertEqual(chan.default_time_sample, actual.default_time_sample)\n self.assertEqual(chan.base_resolution, actual.base_resolution)\n self.assertEqual(chan.downsample_status, actual.downsample_status)\n self.assertEqual(chan.type, actual.type)\n self.assertEqual(chan.name, actual.name)\n self.assertEqual(chan.coll_name, actual.coll_name)\n self.assertEqual(chan.exp_name, actual.exp_name)",
"def channel_details(self):\n return self._channel_details",
"def get_channel(self, channel_name):\n try:\n cm = self.__core.get_service(\"channel_manager\")\n cdb = cm.channel_database_get()\n channel = cdb.channel_get(channel_name)\n return channel.get()\n except Exception:\n traceback.print_exc()",
"def get_channel(driver, team_name, channel_name):\n response = driver.channels.get_channel_by_name_and_team_name(\n team_name, channel_name)\n return response",
"def get_channel_info(self, channel_id: str):\n return chat.ChannelInfo(\n id=self.channels.loc[channel_id]['yt_channel_id'],\n name=self.channels.loc[channel_id]['name'],\n thumbnail_url=self.channels.loc[channel_id]['photo'],\n )",
"def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )",
"def channel_name(self):\n return self._channel_name_dict",
"def get_livechat_channel_info(self):\n self.ensure_one()\n if self.channel_id:\n return self.channel_id.sudo().get_livechat_info()\n return {}",
"def _RetrieveChannelData():\n # Get the necessary data.\n data = memcache.get(_CHANNEL_MEMCACHE_KEY)\n if not data:\n data = _StoreNewChannelData()\n return data",
"def get_channel_details(self):\n channel_id = self.connect_api.get_video_details()[0]['snippet']['channelId']\n response = self.connect_api.get_channel_details(channel_id)[0]\n\n return {\n 'title':response['snippet']['title'],\n 'subs':response['statistics']['subscriberCount'],\n 'views':response['statistics']['viewCount'],\n 'profile_image_link':response['snippet']['thumbnails']['default']['url']\n }",
"def get_portchannel_members(self, pc_name):\n cmd = \"show interfaces portchannel\"\n ret = self.sonichost.show_and_parse(cmd)\n for pc in ret:\n if pc[\"team dev\"] == pc_name:\n return pc[\"ports\"].split()\n return []",
"def get_channel_info(channel_name):\n\n utility_channels = ['A12A','A12B','A12C','A12D','AU12', \n 'EKG','EKG1','EKG2','EKG3','EKG4',\n 'EMG1','EMG2',\n 'EOG1','EOG2','EOG3','EOG4',\n 'EVT','SDT','CAL','EEG2','EAS',\n 'foto','mic','eA12','fd1']\n\n scalp_channels = ['Cz','Fz','MS1','MS2','Ms1','Ms2','Pz','Oz']\n\n \n\n if channel_name in scalp_channels:\n channel_info = 'scalp'\n elif channel_name in utility_channels:\n channel_info = 'util'\n elif 'm' in channel_name:\n if 'Rf' in channel_name:\n channel_info = 'util'\n else:\n channel_info = 'micro'\n else:\n channel_info = 'macro'\n \n return channel_info",
"def channel_from_snippet(resp):\n return {\n 'name': resp['snippet']['title'],\n 'image': resp['snippet']['thumbnails']['high']['url'],\n 'description': resp['snippet']['description'],\n 'channel_id': resp['snippet']['resourceId']['channelId'],\n }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the caller of this Dial. | def caller(self, caller):
self._caller = caller | [
"def caller_user(self, caller_user):\n \n self._caller_user = caller_user",
"def caller_name(self, caller_name):\n \n self._caller_name = caller_name",
"def caller_address(self, caller_address):\n \n self._caller_address = caller_address",
"def caller_cnam(self, caller_cnam):\n\n self._caller_cnam = caller_cnam",
"def caller_id(self, caller_id):\n\n self._caller_id = caller_id",
"def dialer(self, dialer):\n allowed_values = [\"DEFAULT\", \"SHOONYA\"]\n if dialer not in allowed_values:\n raise ValueError(\n \"Invalid value for `dialer` ({0}), must be one of {1}\"\n .format(dialer, allowed_values)\n )\n\n self._dialer = dialer",
"def set_scripts_caller(self, caller):\n self._scripts_caller = caller",
"def set_caller_id(self, number):\n cmd = 'SET CALLERID %s' % number\n utils.agi_send(cmd)\n\n return True",
"def dialer(self):\n return self._dialer",
"def getCaller(self):\n return self.caller",
"def caller_id(self):\n return self._caller_id",
"def caller_address(self):\n return self._caller_address",
"def caller_user(self):\n return self._caller_user",
"def call_number(self):\n print \"[INFO] Calling number\"\n self.sipclient.dial(self.entered_digits)\n # Log call to call log.",
"def caller_id(self, caller_id):\n if caller_id is not None and not re.search('calling_number|called_number', caller_id):\n raise ValueError(\"Invalid value for `caller_id`, must be a follow pattern or equal to `/calling_number|called_number/`\")\n\n self._caller_id = caller_id",
"def caller_name(self):\n return self._caller_name",
"def dialstring(self, dialstring):\n\n self._dialstring = dialstring",
"def called_number(self, called_number):\n\n self._called_number = called_number",
"def call_root(self, call_root):\n\n self._call_root = call_root"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dialstatus of this Dial. | def dialstatus(self, dialstatus):
if dialstatus is None:
raise ValueError("Invalid value for `dialstatus`, must not be `None`") # noqa: E501
self._dialstatus = dialstatus | [
"def setPeerStatus(self, status):\n self.status = status",
"def SetStatus(self, status):\r\n self.status = status",
"def setStatus(self, status):\n self.battleDelegate.status = status",
"def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")",
"def status(self, status):\n self._status = status",
"def connection_status(self, connection_status):\n\n self._connection_status = connection_status",
"def set_status(self, status):\n self.set_attr('procstatus', status)",
"def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return",
"def status(self, status):\n\n self._status = status",
"def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status",
"def pwd_status(self, pwd_status):\n self._pwd_status = pwd_status",
"def set_status(self, status):\n if status == \"offline\":\n self._status.set_message(\"N\")\n self._status.set_foreground_color(\"red\")\n \n elif status == \"online\":\n self._status.set_message(\"Y\")\n self._status.set_foreground_color(\"Green\")\n \n elif status == \"away\":\n self._status.set_message(\"A\")\n self._status.set_foreground_color(\"Grey\")\n \n elif status == \"busy\":\n self._status.set_message(\"B\")\n self._status.set_foreground_color(\"Yellow\")",
"def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)",
"def status(self, status):\n self.__status = status",
"def SetConnectionStatus(self, state, info):\n self.connection_state = state\n self.connection_info = info",
"def set_status(self, status):\n self.status = status\n self.save()",
"def phone_number_status(self, phone_number_status):\n allowed_values = [\"INVALID\", \"ACTIVE\", \"PORTING\", \"PENDING\", \"PENDING_CANCELLATION\"]\n if phone_number_status.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for phone_number_status -> \" + phone_number_status)\n self._phone_number_status = \"outdated_sdk_version\"\n else:\n self._phone_number_status = phone_number_status"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the dialstring of this Dial. | def dialstring(self, dialstring):
self._dialstring = dialstring | [
"def dialer(self, dialer):\n allowed_values = [\"DEFAULT\", \"SHOONYA\"]\n if dialer not in allowed_values:\n raise ValueError(\n \"Invalid value for `dialer` ({0}), must be one of {1}\"\n .format(dialer, allowed_values)\n )\n\n self._dialer = dialer",
"def string_value(self, string_value):\n\n self._string_value = string_value",
"def set_cli(self, the_string):\n\n print(the_string, end='')\n return self",
"def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string",
"def SetStrName(self, name):\n self._str_name = name",
"def dni(self, dni: str):\n\n self._dni = dni",
"def setDni(self, dni):\r\n self.dni = dni",
"def set_datstr(self):\n\t\tself.datstr = str(self.dat)",
"def measurement_unit_string(self, measurement_unit_string):\n\n self._measurement_unit_string = measurement_unit_string",
"def dialstatus(self, dialstatus):\n if dialstatus is None:\n raise ValueError(\"Invalid value for `dialstatus`, must not be `None`\") # noqa: E501\n\n self._dialstatus = dialstatus",
"def setHint( self, hint ):\n self._urlEdit.setHint(hint)",
"def acct_statement_seq_str(self, acct_statement_seq_str):\n\n self._acct_statement_seq_str = acct_statement_seq_str",
"def custom_string(self, custom_string):\n\n self._custom_string = custom_string",
"def setDialsToPreset(self, presetText):\n d1 = presetText[0] # First dial (kHz)\n d2 = presetText[1]\n d3 = presetText[2]\n d4 = presetText[3] # Last dial (Hz)\n\n self.thousands.setValue(int(d1))\n self.hundreds.setValue(int(d2))\n self.tens.setValue(int(d3))\n self.ones.setValue(int(d4))",
"def command(self, command_string):\n\n # Query the instrument over serial. If serial is not configured, use TCP.\n with self.dut_lock:\n # Send command to the instrument over serial. If serial is not configured, send it over TCP.\n if self.device_serial is not None:\n self._usb_command(command_string)\n elif self.device_tcp is not None:\n self._tcp_command(command_string)\n elif self.user_connection is not None:\n self._user_connection_command(command_string)\n else:\n raise InstrumentException(\"No connections configured\")\n\n self.logger.info('Sent command to %s: %s', self.serial_number, command_string)",
"def dev_name(self, dev_name):\n self._dev_name = dev_name",
"def set_dispute_contact_state(self, state):\n if state == \"\":\n state = self.random_string_generator(6, string.ascii_uppercase)\n self.set_value_into_input_field(self.dispute_contact_state_textbox_locator, state)",
"def id_str(self, id_str: str):\n\n self._id_str = id_str",
"def set_adapter_name(self, sAdapterName):\n\t\tcall_sdk_function('PrlVirtNet_SetAdapterName', self.handle, sAdapterName)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the forward of this Dial. | def forward(self, forward):
self._forward = forward | [
"def move_forward(self):\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)",
"def forward(self):\n pass",
"def forward(self):\r\n pass",
"def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def go_forward() -> None:\n set_motors_speed(FULL_SPEED, FULL_SPEED)",
"def forwarded(self, forwarded):\n\n self._forwarded = forwarded",
"def balance_forward(self, balance_forward):\n\n self._balance_forward = balance_forward",
"def move_forward_for_angle(self, angle, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = FORWARD\n\t\tself._move(self._calculate_steps(angle))",
"def forward_one_zero():\n knob = nuke.thisKnob()\n knob.setAnimated()\n knob.setValueAt(1, previous_current_next_frame()[1])\n knob.setValueAt(0, previous_current_next_frame()[2])",
"def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)",
"def forward(self,distance):\n assert (type(distance) in [int, float]), \"parameter distance:%s is not a valid number\" % `distance`\n self._turtle.forward(distance)",
"def forward(self, distance):\n self._go(distance)",
"def forward_zero_one():\n knob = nuke.thisKnob()\n knob.setAnimated()\n knob.setValueAt(0, previous_current_next_frame()[1])\n knob.setValueAt(1, previous_current_next_frame()[2])",
"def move_forward(self, steps, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = FORWARD\n\t\tself._move(steps)",
"def forward(self):\n raise NotImplementedError",
"def move_forward(self, distance):\r\n return self.move('forward', distance)",
"def forward(\n self\n ) -> None:\n if not self._forward_page_history_stack:\n # Do nothing if there is no forward page history.\n return\n\n self._back_page_history_stack.append(self._current_page)\n self._current_page = self._forward_page_history_stack.pop()",
"def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))",
"def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the forwarded of this Dial. | def forwarded(self, forwarded):
self._forwarded = forwarded | [
"def forward(self, forward):\n\n self._forward = forward",
"def forwarder(self, forwarder: ICNForwarder):\n self._forwarder = forwarder",
"def indirect_forwarding_flag(self, indirect_forwarding_flag):\n\n self._indirect_forwarding_flag = indirect_forwarding_flag",
"def forward(self):\n pass",
"def forward(self):\r\n pass",
"def indirect_forwarding_flag(self):\n return self._indirect_forwarding_flag",
"def handle_forward(bot, event):\n if not event.args:\n event.missing(\"<JID>\")\n return\n forward.data.channels[event.channel.lower()] = event.args\n for jid in event.args:\n forward.data.outs[jid] = event.user.data.name\n if not jid in event.chan.data.forwards: event.chan.data.forwards = event.args\n if event.args: event.chan.save()\n forward.save()\n event.done()",
"def forward_to(self):\n if \"forwardTo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"forwardTo\"], OneDriveObjectBase):\n return self._prop_dict[\"forwardTo\"]\n else :\n self._prop_dict[\"forwardTo\"] = Recipient(self._prop_dict[\"forwardTo\"])\n return self._prop_dict[\"forwardTo\"]\n\n return None",
"def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))",
"def forward(self):\n raise NotImplementedError",
"def set_forwarded_remote_consul_once(self, set_to=True):\n self.FORWARDED_CONSUL_ONCE_ALREADY = set_to",
"def forwarder(self) -> ICNForwarder:\n return self._forwarder",
"def forward(self):\n for edge in self.outgoing:\n edge.forward()",
"def update_forwards(self):\n # replace self._args with self._args's forwarded op\n args_forward = [op.forward for op in self.args]\n if any(forward is not None for forward in args_forward):\n new_args = tuple([op.forwarded for op in self.args])\n if self._args != new_args:\n self._args = new_args\n self.invalidate_property_cache('all_deps')\n\n # replace self._control_deps with self._control_deps's forwarded op\n control_deps_forward = [op.forward for op in self.control_deps]\n if any(forward is not None for forward in control_deps_forward):\n new_control_deps = OrderedSet([op.forwarded for op in self.control_deps])\n if self._control_deps != new_control_deps:\n self._control_deps = new_control_deps\n self.invalidate_property_cache('all_deps')",
"def __window_forward(self):\n pass",
"def set_follow(self, follow):\n self.follow = follow",
"def transfer(self, transfer):\n \n self._transfer = transfer",
"def referer(self, referer):\n\n self._referer = referer",
"def follow_origin(self, follow_origin):\n self._follow_origin = follow_origin"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the peer of this Dial. | def peer(self, peer):
if peer is None:
raise ValueError("Invalid value for `peer`, must not be `None`") # noqa: E501
self._peer = peer | [
"def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()",
"def connect(self, peer: \"LightningCommandsGenerator\", peer_listen_port: int):\n pass",
"def peer(self, value: Optional[MicrobitPeer]) -> None:\n if self.__peer is not None:\n self.__peer.remove_listener(self.__execute)\n\n if value is not None:\n value.add_listener(self.__execute)\n\n self.__peer = value\n self.__sync_x()\n self.__sync_y()\n self.__sync_z()\n self.__sync_current_gesture()",
"def connect_to_peer(self):\n pass",
"def set_local_peer(url: str, peer: Peer) -> Optional[Peer]:\n init_peers()\n if url in threadlocal.peers:\n old = threadlocal.peers[url]\n else:\n old = None\n threadlocal.peers[url] = peer\n return old",
"def set_peer_working(self, peer_id):\n self.peers[peer_id].set_working_state()",
"def set_peer_type(self, peer_type):\n self.__block_manager.set_peer_type(peer_type)",
"def send_peer(self, peer, sent_to):\n peer = Peer(peer, sent_to)\n self.__sent_peers.append(peer)",
"def set_peer_callback(self, callback):\n self._peer_callback = callback",
"def adopt_peer(self, peer):\n if isinstance(peer, peers_pb2.UserOutPeer):\n outpeer = peers_pb2.OutPeer(type=peers_pb2.PEERTYPE_PRIVATE, id=peer.uid, access_hash=peer.access_hash)\n self.peers_to_outpeers[peer_hasher(private_peer(peer.uid))] = outpeer\n elif isinstance(peer, peers_pb2.GroupOutPeer):\n outpeer = peers_pb2.OutPeer(type=peers_pb2.PEERTYPE_GROUP, id=peer.group_id, access_hash=peer.access_hash)\n self.peers_to_outpeers[peer_hasher(group_peer(peer.group_id))] = outpeer\n else:\n raise RuntimeError(\"Unknown peer type\")",
"def torrent_add_peer(self, torrent, peer, seeder):\n raise NotImplementedError, \"torrent_add_peer has not been defined\"",
"def record_peer(self, peer, torrent, user, active):\n raise NotImplementedError, \"record_peer has not been defined\"",
"def set_peer_attr(self, publickey, key, value, comment=None):\n self.del_peer_attr(publickey, key)\n self.add_peer_attr(publickey, key, value, comment)",
"def updatePeer(self, p):\n self.removePeer(p)\n self.addPeer(p)",
"def received_peer(self, peer, source):\n peer = Peer(peer, source)\n self.__received_peers.append(peer)",
"def add_peer(self, peer_id, peer_ip):\n self.peers.update({peer_id: peer.Peer(peer_ip)})",
"def set_port(self, party_port) -> None:\n\n self._port = party_port",
"def __init__( self, maxPeers=7, serverPort=3000, myID=None, serverHost = None, peerType = SIMPLE, mySuperPeer = NULL ):",
"def addPeerAt( self, loc, peerID, host, port,super ):\n \tself.getPeers()[ loc ] = (peerID, host, int(port),super)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Provide video codec vcodec = "h264" acodec = "copy" extra = "" split_cmd = "ffmpeg -i '%s' -vcodec %s -acodec %s -y %s" % (file_path, vcodec, acodec, extra) s_cmd = " -i '%s' -vcodec %s -acodec %s"%(file_path, vcodec, acodec) | def split_video_random(file_path, start_pos, split_length, out_path):
s_cmd = " -i '%s'"%(file_path) #use default CODEC
try:
fileext = file_path.split(".")[-1]
except IndexError as e:
raise IndexError("No ext. in filename. Error: " + str(e))
split_start = start_pos
split_length = split_length
head, tail = os.path.split(file_path)
name, ext = tail.split('.')
filebase=name+'_'+str(start_pos)+'-'+str(split_length)
dstfilebase = out_path + '/' + filebase # create output file base
#split_str = ""
#split_str += " -ss " + str(split_start) + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'"
s_str = ""
#s_str += "ffmpeg"+" -ss "+str(split_start)+" -t "+str(split_length) + s_cmd + " '"+dstfilebase + "." + fileext + "'"
s_str += "ffmpeg" + " -ss " + str(split_start) + s_cmd + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'"
print("########################################################")
#print "About to run: "+split_cmd+split_str
print("About to run: "+s_str)
print("########################################################")
#output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read() | [
"def getCommandLine(self, vcodec, filename):\n\t\ttarget = \"\"\n\t\tpattern = self.getPattern()\n\t\tif self.preset != 0:\n\t\t\t(x, y), fps, br, target = self.presets[self.preset]\n\t\t\n\t\tcmdLine = []\n\t\tif not scripting.main_is_frozen():\n\t\t\tffmpegs = {\"linux\": \"bin/ffmpeg\", \"linux2\": \"bin/ffmpeg\", \"win32\": \"bin\\\\ffmpeg.exe\", \"darwin\": \"bin/ffmpeg.osx\"}\n\t\telse:\n\t\t\tffmpegs = {\"linux\": \"bin/ffmpeg\", \"linux2\": \"bin/ffmpeg\", \"win32\": \"bin\\\\ffmpeg.exe\", \"darwin\": \"../Resources/Bin/ffmpeg.osx\"}\n\t\tffmpeg = \"ffmpeg\"\n\t\tfor i in ffmpegs.keys():\n\t\t\tif i == sys.platform:\n\t\t\t\tffmpeg = ffmpegs[i]\n\t\t\t\tbreak\n\t\t\t\n\t\t\t\n\t\tbindir = scripting.get_main_dir()\n\t\tffmpeg = os.path.join(bindir, ffmpeg)\n\t\tcmdLine.append(ffmpeg)\n\t\t# scale the quality into the range understood by ffmpeg\n\t\tquality = 11 - self.quality\n\t\tquality = math.ceil(1 + (3.22222 * (quality - 1)))\n\t\tframeRate = self.fps\n\t\twidth, height = self.getSize()\n\n\t\tif not target:\n\t\t\tcmdLine.append(\"-y\")\n\t\t\tcmdLine.append(\"-qscale\")\n\t\t\tcmdLine.append(\"%d\"%quality)\n\t\t\tcmdLine.append(\"-r\")\n\t\t\tcmdLine.append(\"%.2f\"%frameRate)\n\t\t\tcmdLine.append(\"-s\")\n\t\t\tcmdLine.append(\"%dx%d\"%(width, height))\n\t\t\tcmdLine.append(\"-i\")\n\t\t\tcmdLine.append('%s'%pattern)\n\t\t\tcmdLine.append('-vcodec')\n\t\t\tcmdLine.append('%s'%vcodec)\n\t\t\tcmdLine.append('%s'%filename)\n\t\t\t#commandLine = \"\\\"%s\\\" -y -qscale %d -r %.2f -s %dx%d -i \\\"%s\\\" -vcodec %s \\\"%s\\\"\" \\\n\t\t\t#\t\t\t\t% (ffmpeg, quality, frameRate, width, height, pattern, vcodec, file)\n\t\telse:\n\t\t\t#commandLine = \"\\\"%s\\\" -y -qscale %d -s %dx%d -i \\\"%s\\\" -target %s \\\"%s\\\"\" % (ffmpeg, quality, width, height, pattern, target, file)\n\t\t\tcmdLine.append(\"-y\")\n\t\t\tcmdLine.append(\"-qscale\")\n\t\t\tcmdLine.append(\"%d\"%quality)\n\t\t\tcmdLine.append(\"-s\")\n\t\t\tcmdLine.append(\"%dx%d\"%(width, height))\n\t\t\tcmdLine.append(\"-i\")\n\t\t\tcmdLine.append('%s'%pattern)\n\t\t\tcmdLine.append('-target')\n\t\t\tcmdLine.append('%s'%target)\n\t\t\tcmdLine.append('%s'%filename)\n\t\tLogging.info(\"Command line for ffmpeg=\"+str(cmdLine), kw = \"animator\")\n\t\treturn cmdLine",
"def split_by_manifest(filename, manifest, vcodec=\"copy\", acodec=\"copy\",\n extra=\"\", **kwargs):\n if not os.path.exists(manifest):\n raise SystemExit\n\n with open(manifest) as manifest_file:\n manifest_type = manifest.split(\".\")[-1]\n if manifest_type == \"json\":\n config = json.load(manifest_file)\n elif manifest_type == \"csv\":\n config = csv.DictReader(manifest_file)\n else:\n raise SystemExit\n\n split_cmd = \"ffmpeg -i '%s' -vcodec %s -acodec %s -y %s\" % (filename,\n vcodec,\n acodec,\n extra)\n split_count = 1\n split_error = []\n try:\n fileext = filename.split(\".\")[-1]\n except IndexError as e:\n raise IndexError(\"No . in filename. Error: \" + str(e))\n for video_config in config:\n split_str = \"\"\n try:\n split_start = video_config[\"start_time\"]\n split_length = video_config.get(\"end_time\", None)\n if not split_length:\n split_length = video_config[\"length\"]\n filebase = video_config[\"rename_to\"]\n if fileext in filebase:\n filebase = \".\".join(filebase.split(\".\")[:-1])\n\n split_str += \" -ss \" + str(split_start) + \" -t \" + \\\n str(split_length) + \\\n \" '\"+ filebase + \".\" + fileext + \\\n \"'\"\n output = subprocess.Popen(split_cmd+split_str,\n shell = True, stdout =\n subprocess.PIPE).stdout.read()\n except KeyError as e:\n raise SystemExit",
"def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()",
"def compress_video(video_file_path):\n \"\"\"\n :param video_file_path:\n :return:\n \"\"\"\n log.info(\"Compression video...\")\n command = [\"ffmpeg\", \"-i\", video_file_path, \"-vcodec\", \"libx264\", \"-crf\", \"20\", video_file_path + \".out.mp4\"]\n execute(command, True)\n os.rename(video_file_path + \".out.mp4\", video_file_path)",
"def make_video(self,namein,nameout):\n import os\n os.system(f'ffmpeg -framerate 24 -pattern_type glob -i \"{namein}*.png\" {self.respath}/{self.date}/{nameout}.mp4')",
"def generate_vis_video(file_path, folder_path, op):\n if which('ffmpeg') is None:\n raise Exception('No ffmpeg found in path')\n if op == mv_str:\n cmd = 'ffmpeg -flags2 +export_mvs -export_side_data +venc_params -i ' + file_path + ' -vf codecview=mv=pf+bf+bb -y'\n else:\n cmd = 'ffmpeg -export_side_data +venc_params -i ' + file_path + ' -vf codecview=' + op + '=true -y'\n args = shlex.split(cmd)\n args.append(folder_path + '/report/' + op + '_vis.mp4')\n proc = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n proc.communicate()\n if proc.returncode != 0:\n raise Exception(op + ' video generates failed, please check the version of your ffmpeg')\n print(op + ' visualization video generated')",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks",
"def insert_buf_video(self):\n base_cmd = self._get_base_cmd()\n\n vfilters = []\n if self.disable_spinner:\n vfilters = [f\"[0:v]{self.vloop_cmd}[outv]\"]\n else:\n if self.black_frame and self.enable_black_cmd:\n vfilters.extend(\n [\n f\"[0:v]{self.vloop_cmd}[stallvid]\",\n f\"color=c=black:r={self.fps}[black]\",\n f\"[black][stallvid]scale2ref[black2][stallvid]\",\n f\"[stallvid][black2]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2:shortest=1:enable='{self.enable_black_cmd}'[stallvid2]\",\n ]\n )\n else:\n vfilters.append(f\"[0:v]{self.vloop_cmd}[stallvid2]\",)\n vfilters.extend(\n [\n f\"[stallvid2]avgblur={self.blur}:enable='{self.venable_cmd}',eq=brightness={self.brightness}:enable='{self.venable_cmd}'[stallvidblur]\",\n f\"movie=filename={self.spinner}:loop=0,setpts=N/(FRAME_RATE*TB)*{self.speed},fps=fps={self.fps}[spinner]\",\n f\"[stallvidblur][spinner]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2:shortest=1:enable='{self.venable_cmd}'[outv]\",\n ]\n )\n\n filters = [\";\".join(vfilters)]\n\n base_cmd.extend([\"-filter_complex\", \";\".join(filters)])\n base_cmd.extend([\"-map\", \"[outv]\"])\n base_cmd.extend([\"-c:v\", self.vcodec, \"-pix_fmt\", self.pixfmt, \"-vsync\", \"cfr\"])\n base_cmd.append(self._get_tmp_filename(\"video\"))\n\n self.run_command(base_cmd)",
"def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath",
"def loadVideo( iFileName, iFrameSize = (576, 720) ):\n import sys\n import subprocess as sp\n # ustvari klic ffmpeg in preusmeri izhod v cevovod\n command = [ 'ffmpeg',\n '-i', iFileName,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vcodec', 'rawvideo', '-']\n pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)\n # definiraj novo spremeljivko\n oVideo = np.array([])\n iFrameSize = np.asarray( iFrameSize )\n frameCount = 0\n # zacni neskoncno zanko\n while True:\n frameCount += 1\n# print( 'Berem okvir %d ...' % frameCount )\n print(\"\\rBerem okvir %d ...\" % frameCount, end=\"\")\n # preberi Y*X*3 bajtov (= 1 okvir)\n raw_frame = pipe.stdout.read(np.prod(iFrameSize)*3)\n # pretvori prebrane podatke v numpy polje\n frame = np.fromstring(raw_frame, dtype='uint8') \n # preveri ce je velikost ustrezna, sicer prekini zanko\n if frame.size != (np.prod(iFrameSize)*3):\n print(\" koncano!\\n\")\n break;\n # preoblikuj dimenzije in pretvori v sivinsko sliko\n frame = colorToGray( frame.reshape((iFrameSize[0],iFrameSize[1],3)) )\n # sprazni medpomnilnik \n pipe.stdout.flush() \n # vnesi okvir v izhodno sprememnljivko\n if oVideo.size == 0:\n oVideo = frame\n oVideo = oVideo[...,None]\n else:\n oVideo = np.concatenate((oVideo,frame[...,None]), axis=2)\n # zapri cevovod\n pipe.terminate()\n # vrni izhodno spremenljivko\n return oVideo",
"def split_video_ffmpeg(input_video_paths, scene_list, output_file_template, video_name,\n arg_override='-c:v libx264 -preset fast -crf 21 -c:a aac',\n hide_progress=False, suppress_output=False):\n # type: (List[str], List[Tuple[FrameTimecode, FrameTimecode]], Optional[str],\n # Optional[str], Optional[bool], Optional[bool]) -> None\n\n if not input_video_paths or not scene_list:\n return\n\n logging.info(\n 'Splitting input video%s using ffmpeg, output path template:\\n %s',\n 's' if len(input_video_paths) > 1 else '', output_file_template)\n\n if len(input_video_paths) > 1:\n # TODO: Add support for splitting multiple/appended input videos.\n # https://trac.ffmpeg.org/wiki/Concatenate#samecodec\n # Requires generating a temporary file list for ffmpeg.\n logging.error(\n 'Sorry, splitting multiple appended/concatenated input videos with'\n ' ffmpeg is not supported yet. This feature will be added to a future'\n ' version of PySceneDetect. In the meantime, you can try using the'\n ' -c / --copy option with the split-video to use mkvmerge, which'\n ' generates less accurate output, but supports multiple input videos.')\n raise NotImplementedError()\n\n arg_override = arg_override.replace('\\\\\"', '\"')\n\n ret_val = None\n arg_override = arg_override.split(' ')\n filename_template = Template(output_file_template)\n scene_num_format = '%0'\n scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'\n\n try:\n progress_bar = None\n total_frames = scene_list[-1][1].get_frames() - scene_list[0][0].get_frames()\n if tqdm and not hide_progress:\n progress_bar = tqdm(\n total=total_frames,\n unit='frame',\n miniters=1,\n dynamic_ncols=True)\n processing_start_time = time.time()\n for i, (start_time, end_time) in enumerate(scene_list):\n duration = (end_time - start_time)\n call_list = ['ffmpeg']\n if suppress_output:\n call_list += ['-v', 'quiet']\n elif i > 0:\n # Only show ffmpeg output for the first call, which will display any\n # errors if it fails, and then break the loop. We only show error messages\n # for the remaining calls.\n call_list += ['-v', 'error']\n call_list += [\n '-nostdin',\n '-y',\n '-ss',\n str(start_time.get_seconds()),\n '-i',\n input_video_paths[0],\n '-t',\n str(duration.get_seconds())\n ]\n call_list += arg_override\n call_list += [\n '-sn',\n filename_template.safe_substitute(\n VIDEO_NAME=video_name,\n SCENE_NUMBER=scene_num_format % (i + 1))\n ]\n ret_val = invoke_command(call_list)\n if not suppress_output and i == 0 and len(scene_list) > 1:\n logging.info(\n 'Output from ffmpeg for Scene 1 shown above, splitting remaining scenes...')\n if ret_val != 0:\n break\n if progress_bar:\n progress_bar.update(duration.get_frames())\n if progress_bar:\n print('')\n logging.info('Average processing speed %.2f frames/sec.',\n float(total_frames) / (time.time() - processing_start_time))\n\n except CommandTooLong:\n logging.error(COMMAND_TOO_LONG_STRING)\n except OSError:\n logging.error('ffmpeg could not be found on the system.'\n ' Please install ffmpeg to enable video output support.')\n if ret_val is not None and ret_val != 0:\n logging.error('Error splitting video (ffmpeg returned %d).', ret_val)",
"def avi2mpg(filename):\n assert filename.endswith('.avi')\n ofile = '%s.mpg' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 1 %s' % (filename, ofile), ignore=True)\n return ofile",
"def __init__(self, output_filename, frame_width, frame_height,\n output_fps=30, vcodec='libx264', qp=15, preset='medium',\n input_pix_fmt='gray', output_pix_fmt='yuv420p', \n write_stderr_to_screen=False):\n # Open an ffmpeg process\n cmdstring = ('ffmpeg', \n '-y', '-r', '%d' % output_fps,\n '-s', '%dx%d' % (frame_width, frame_height), # size of image string\n '-pix_fmt', input_pix_fmt,\n '-f', 'rawvideo', '-i', '-', # raw video from the pipe\n '-pix_fmt', output_pix_fmt,\n '-vcodec', vcodec,\n '-qp', str(qp), \n '-preset', preset,\n output_filename) # output encoding\n \n if write_stderr_to_screen:\n self.ffmpeg_proc = subprocess.Popen(cmdstring, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n else:\n self.ffmpeg_proc = subprocess.Popen(cmdstring, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))",
"def mpg2avi(filename):\n assert filename.endswith('.mpg')\n ofile = '%s.avi' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 2 %s' % (filename, ofile), ignore=True)\n return ofile",
"def decode(src, dst):\n cmd = f\"\"\"\nffmpeg -y -hide_banner -nostats -v warning\n -i {src}\n -c:v rawvideo -an\n {dst}\n\"\"\"\n try:\n subprocess.check_call(shlex.split(cmd))\n except subprocess.CalledProcessError as err:\n raise DecodeFailed(f\"Failed to decode '{src}' - {err}\")",
"def extraer(origen, codec):\n\n if not os.path.exists(origen):\n print \"No se encontró:\", origen\n return\n\n if not os.path.isfile(origen):\n print origen, \"no es un archivo\"\n return\n\n print \"Extrayendo audio de:\", origen\n print \"Guardando en Formato:\", codec[1]\n\n destino = \"%s.%s\" % (origen, codec[1])\n\n comando = \"gst-launch-1.0 filesrc location=\\\"%s\\\" !\" % origen\n comando = \"%s decodebin name=t !\" % comando\n comando = \"%s queue ! audioconvert ! %s !\" % (comando, codec[0])\n comando = \"%s filesink location=\\\"%s\\\"\" % (comando, destino)\n comando = \"%s t. ! queue ! autovideosink\" % comando\n\n commands.getoutput(comando)",
"def main():\r\n\r\n # check command line for original file and track list file\r\n if len(sys.argv) != 3:\r\n print('usage: split <original_track> <track_list>')\r\n exit(1)\r\n\r\n # record command line args\r\n original_track = sys.argv[1]\r\n fileExt = '.'+original_track.split('.')[-1]\r\n track_list = sys.argv[2]\r\n # create a template of the ffmpeg call in advance\r\n cmd_string = 'ffmpeg -i {tr} -ss {st} -to {en} {nm}'\r\n\r\n # read each line of the track list and split into start, end, name\r\n with open(track_list, encoding='utf-8') as f:\r\n for line in f:\r\n # skip comment and empty lines\r\n if line.startswith('#') or len(line) <= 1:\r\n continue\r\n # create command string for a given track\r\n splitted = line.strip().split()\r\n start, end = splitted[0:2]\r\n name = '\"' + ' '.join(splitted[2:]) + fileExt + '\"'\r\n command = cmd_string.format(tr=original_track, st=start, en=end, nm=name)\r\n print(command)\r\n # use subprocess to execute the command in the shell\r\n subprocess.call(command, shell=True)\r\n\r\n return None",
"def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")",
"def merge_audio_video(self):\n\n if self.force_framerate:\n # FIXME: this seems to be necessary sometimes\n output_codec_options = [\n \"-c:v\",\n self.vcodec,\n \"-filter:v\",\n \"fps=fps=\" + str(self.fps),\n \"-c:a\",\n \"copy\",\n ]\n else:\n output_codec_options = [\"-c\", \"copy\"]\n\n combine_cmd = [\n \"ffmpeg\",\n self.overwrite_spec,\n ]\n\n if self.has_video:\n combine_cmd.extend([\n \"-i\",\n self._get_tmp_filename(\"video\"),\n ])\n\n if self.has_audio:\n combine_cmd.extend([\n \"-i\",\n self._get_tmp_filename(\"audio\"),\n ])\n\n combine_cmd.extend([\n *output_codec_options,\n self.output_file,\n ])\n\n self.run_command(combine_cmd)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get path to the PubChem template if it exists. | def _get_pubchem_template_path(self, het_id):
path = os.path.join(self.pubchem_templates, f"{het_id}.sdf")
return path if os.path.isfile(path) else "" | [
"def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path",
"def get_template_path(self):\r\n return self.application.settings.get(\"template_path\")",
"def template_path(cls):\n if cls.template:\n return cls.template\n else:\n return \"%s/%s/%s_tpl.html\" % (cls.platform, cls.target, cls.lang)",
"def template_path(path=\"\"):\n return join(current_app.config['TEMPLATE_PATH'], path)",
"def template_path(self):\n return self.get_config(\"templates\")",
"def template_path(self):\r\n return os.path.join(self.temp_path, 'template/django_project_template-master/')",
"def find_template(self):\n template_path = self.env[\"template_path\"]\n if os.path.exists(template_path):\n return template_path\n elif not template_path.startswith(\"/\"):\n recipe_dir = self.env.get(\"RECIPE_DIR\")\n search_dirs = [recipe_dir]\n if self.env.get(\"PARENT_RECIPES\"):\n # also look in the directories containing the parent recipes\n parent_recipe_dirs = list(\n {os.path.dirname(item) for item in self.env[\"PARENT_RECIPES\"]}\n )\n search_dirs.extend(parent_recipe_dirs)\n for directory in search_dirs:\n test_item = os.path.join(directory, template_path)\n if os.path.exists(test_item):\n return test_item\n raise ProcessorError(f\"Can't find {template_path}\")",
"def get_template_path(self):\n return os.path.join(os.path.abspath(os.path.dirname(\n inspect.getfile(self.__class__))), 'templates')",
"def template_path(self):\n return super().template_path+[os.path.join(os.path.dirname(__file__), \"templates\")]",
"def getTemplatePath(self, component):\n return 'site/templates/%s.php' % component.name",
"def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)",
"def _get_template_path(base_path, path):\n chromium_path = _get_path_components_following_mark(path, 'chromium')\n template_path = _GYP_TEMPLATE_PATH_MAP[chromium_path[0]]\n name = os.path.splitext(os.path.join(*chromium_path[1:]))[0] + '.template'\n return os.path.join(base_path, '..', template_path, name)",
"def _get_project_template(self): # suppress(no-self-use)\n parent = os.path.realpath(os.path.join(os.path.dirname(__file__),\n \"..\"))\n assert \"sample\" in os.listdir(parent)\n assert project_type in os.listdir(os.path.join(parent, \"sample\"))\n return os.path.join(parent, \"sample\", project_type)",
"def paper_root(dname):\n root_doc = None\n\n for fname in _list_files(dname):\n #Template metadata only exists in root\n if get_template(fname):\n root_doc = fname\n break\n\n return os.path.basename(root_doc) if root_doc else None",
"def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)",
"def template_file(self) -> str:\n return jsii.get(self, \"templateFile\")",
"def _get_path(experiment_template, hyperparameter):\n return experiment_template.format(\n std=hyperparameter.std, lr=hyperparameter.lr)",
"def get_firenado_template_path(self):\n return self.application.settings.get('firenado_template_path')",
"def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Counts number of collisions among all bonds. Can be used for estimations of how 'wrong' the depiction is. | def count_bond_collisions(self):
errors = 0
for i in range(0, len(self.bonds)):
for a in range(i + 1, len(self.bonds)):
result = self._intersection(self.bonds[i], self.bonds[a])
if result:
errors += 1
return errors | [
"def collisions(self) -> int:\n return self.num_collisions",
"def numberOfCollisions(self):\n c = 0\n for i in range(self.n):\n for j in range(self.n):\n if self.pairManagerX[i][j] and self.pairManagerY[i][j]:\n c+=1\n return c//2",
"def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)",
"def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n",
"def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count",
"def get_number_of_bulls(self):\n list_of_bulls = [i for i, j in zip(self.puzzle, self.guess) if i == j]\n bulls = len(list_of_bulls)\n return bulls",
"def count_balls(self, **kwargs):\n return 0",
"def n_bonds(self):\n return len(self.bonds)",
"def number_of_containing_bags(self) -> int:\n\n bag_count = 0\n for sub_bag_count, sub_bag_color in self.containing_bags:\n bag_count += sub_bag_count\n bag_count += (\n sub_bag_count * bag_registry[sub_bag_color].number_of_containing_bags\n )\n return bag_count",
"def how_many_collisions(buckets, loops = 1):\n results = []\n\n for i in range(loops):\n tries = 0\n tried = set()\n\n while True:\n random_key = str(random.random())\n hash_index = hash(random_key) % buckets\n\n if hash_index not in tried:\n tried.add(hash_index)\n tries += 1\n else:\n break\n result = tries / buckets * 100\n results.append(result)\n\n print(f\"{buckets} buckets, {tries} hases before collision.({result:.1f}%)\")\n\n print(statistics.mean(results))",
"def nClumps(self):\n \n return len(self)",
"def get_num_bonds(self) -> int:\n\n return len(self._bonds)",
"def count_surviving_particles(particles):\n\n #figure out which particles will intersect, ever, storing the information\n #as a list of tuples of the form (collision time, particleA, particleB)\n collisions = []\n for particleA, particleB in combinations(particles, 2):\n\n # print \"comparing\", particleA, particleB\n\n #figure out if the particles will ever collide, and if so, when\n collision_time = find_overall_collision_time(particleA, particleB)\n\n #if they do, store this information\n if collision_time:\n # print \"collision found:\", particleA, particleB, collision_time\n collisions.append((collision_time, particleA, particleB))\n\n\n #now that we've figured out all the possible particle collisions, apply\n #them to the particles, in the order in which they happen (this guarantees\n #that if a particle has already been destroyed by an earlier collision,\n #we'll know about it)\n collisions.sort()\n for collision_time, particleA, particleB in collisions:\n #check each of the particles - if either was destroyed before this\n #moment, the collision doesn't count\n if ((particleA.destruction_time and\n particleA.destruction_time < collision_time) or\n (particleB.destruction_time and\n particleB.destruction_time < collision_time)):\n continue\n\n #otherwise, mark each as having been destroyed in this collision\n particleA.destruction_time = collision_time\n particleB.destruction_time = collision_time\n\n #finally, count how many particles survive\n return len([particle for particle in particles\n if not particle.destruction_time])",
"def bucket_counts(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:\n pass",
"def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)",
"def diversity(population):\n damages = defaultdict(int)\n for ch in population:\n damages[ch.total_damage] += 1\n return len(damages)",
"def length(self):\n # TODO: Loop through all buckets\n # TODO: Count number of key-value entries in each bucket\n count = 0\n\n for bucket in self.buckets:\n for item in bucket.items():\n count += 1\n return count",
"def get_destroyed_ships_count(self):\n destroyed_ships_count = 0\n for row_index in range(self.rows):\n for column_index in range(self.columns):\n cell = self.grid[row_index][column_index]\n if cell.has_destroyed_ship():\n destroyed_ships_count += 1\n\n return destroyed_ships_count",
"def heavy_count(mol,idxs):\n count = 0\n for num, bonds in enumerate(mol.GetBonds()):\n if mol.GetBondWithIdx(num).GetBeginAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetEndAtomIdx()).GetSymbol() != 'H':\n count += 1\n elif mol.GetBondWithIdx(num).GetEndAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetBeginAtomIdx()).GetSymbol() != 'H':\n count += 1\n return count"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get batch generator `batch_generator` must define a `shape` property that returns the shape of generated sequences as a (n_samples, n_features) tuple. `batch_generator` must define a method called `get_steps_per_epoch` with the signature `def get_steps_per_epoch(self, protocol, subset)` that returns the number of batches to generate before ending an epoch. `batch_generator` may optionally define a method called `callbacks` with the signature `def callbacks(self, extract_embedding=None)` that is expected to return a list of Keras callbacks that will be added to the list of callbacks during training. This might come in handy in case the `batch_generator` depends on the internal state of the model currently being trained. | def get_generator(self, file_generator, batch_size=None, **kwargs):
raise NotImplementedError('') | [
"def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError",
"def create_keras_generator(self, part='train', batch_size=1, shuffle=True,\n reshape=None):\n from tensorflow.keras.utils import Sequence\n\n if self.supports_random_access():\n class KerasGenerator(Sequence):\n def __init__(self, dataset, part, batch_size, shuffle,\n reshape=None):\n self.dataset = dataset\n self.part = part\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.reshape = reshape or (\n (None,) * self.dataset.get_num_elements_per_sample())\n self.data_shape = self.dataset.get_shape()\n self.on_epoch_end()\n\n def __len__(self):\n return ceil(self.dataset.get_len(self.part) /\n self.batch_size)\n\n def __getitem__(self, idx):\n indexes = self.indexes[idx*self.batch_size:\n (idx+1)*self.batch_size]\n # for last batch, indexes has len <= batch_size\n n_elem = self.dataset.get_num_elements_per_sample()\n arrays = []\n for i in range(n_elem):\n array = np.empty(\n (len(indexes),) + self.data_shape[i],\n dtype=self.dataset.space[i].dtype)\n arrays.append(array)\n for j, ind in enumerate(indexes):\n out = tuple([array[j] for array in arrays])\n self.dataset.get_sample(ind, part=self.part, out=out)\n for i in range(n_elem):\n if self.reshape[i] is not None:\n arrays[i] = arrays[i].reshape(\n (len(indexes),) + self.reshape[i])\n return tuple(arrays) if n_elem > 1 else arrays[0]\n\n def on_epoch_end(self):\n self.indexes = np.arange(self.dataset.get_len(self.part))\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n generator = KerasGenerator(self, part, batch_size=batch_size,\n shuffle=shuffle, reshape=reshape)\n\n else:\n def keras_generator(dataset, part, batch_size, reshape=None):\n generator = dataset.generator(part)\n n_elem = dataset.get_num_elements_per_sample()\n num_steps_per_epoch = ceil(dataset.get_len(part) / batch_size)\n if reshape is None:\n reshape = (None,) * n_elem\n data_shape = dataset.get_shape()\n while True:\n for k in range(num_steps_per_epoch):\n batch_size_ = (batch_size if k < num_steps_per_epoch-1\n else dataset.get_len(part) % batch_size)\n arrays = []\n for i in range(n_elem):\n array = np.empty(\n (batch_size_,) + data_shape[i],\n dtype=dataset.space[i].dtype)\n arrays.append(array)\n for j in range(batch_size_):\n sample = next(generator)\n if n_elem == 1:\n sample = (sample,)\n for i, array in enumerate(arrays):\n array[j, :] = sample[i]\n for i in range(n_elem):\n if reshape[i] is not None:\n arrays[i] = arrays[i].reshape(\n (batch_size_,) + reshape[i])\n yield tuple(arrays) if n_elem > 1 else arrays[0]\n\n generator = keras_generator(self, part, batch_size=batch_size,\n reshape=reshape)\n\n return generator",
"def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras",
"def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )",
"def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)",
"def get_generator_batch_size(self):\n\n return self.generator_batch_size",
"def minibatch_generator(data, nb_epochs, gen_batch_size, gen_seq_len):\n data = np.array(data)\n data_len = data.shape[0]\n # using (data_len-1) because we must provide for the sequence shifted by 1 too\n steps_per_epoch = (data_len - 1) // (gen_batch_size * gen_seq_len)\n\n assert steps_per_epoch > 0, \"Not enough data, even for a single batch. Try using a smaller batch_size.\"\n\n rounded_data_len = steps_per_epoch * gen_batch_size * gen_seq_len\n xdata = np.reshape(data[0:rounded_data_len], [gen_batch_size, steps_per_epoch * gen_seq_len]) # [....####] => [....,####]\n ydata = np.reshape(data[1:rounded_data_len + 1], [gen_batch_size, steps_per_epoch * gen_seq_len])\n\n # batch generator\n for epoch in range(nb_epochs):\n for step in range(steps_per_epoch):\n x = xdata[:, step * gen_seq_len:(step + 1) * gen_seq_len]\n y = ydata[:, step * gen_seq_len:(step + 1) * gen_seq_len]\n\n # this will circulate shift UP for epoch > 0\n x = np.roll(x, -epoch, axis=0) # to continue continue continue the text from epoch to epoch (do not reset rnn state! except the last bottom sample)\n y = np.roll(y, -epoch, axis=0)\n\n yield x, y, epoch",
"def generate_validation_batch(self):\n assert self.validation_dataset is not None\n assert self.data_tags is not None\n \n # Sample indices and get data\n index_array = np.random.choice(self.num_validation_samples, self.p.trainer.batch_size)\n return self.get_data_from_indices(self.validation_dataset, index_array)",
"def list_batch_kwargs_generators(self):\n generators = []\n\n if \"batch_kwargs_generators\" in self._datasource_config:\n for key, value in self._datasource_config[\n \"batch_kwargs_generators\"\n ].items():\n generators.append({\"name\": key, \"class_name\": value[\"class_name\"]})\n\n return generators",
"def fit_generator(self, generator, nb_epochs=20, **kwargs):\n from art.data_generators import KerasDataGenerator\n\n # Try to use the generator as a Keras native generator, otherwise use it through the `DataGenerator` interface\n if isinstance(generator, KerasDataGenerator) and not hasattr(self, 'defences'):\n try:\n self._model.fit_generator(generator.generator, epochs=nb_epochs, **kwargs)\n except ValueError:\n logger.info('Unable to use data generator as Keras generator. Now treating as framework-independent.')\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)\n else:\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)",
"def get_batch_kwargs_generator(self, name):\n if name in self._batch_kwargs_generators:\n return self._batch_kwargs_generators[name]\n elif (\n \"batch_kwargs_generators\" in self._datasource_config\n and name in self._datasource_config[\"batch_kwargs_generators\"]\n ):\n generator_config = copy.deepcopy(\n self._datasource_config[\"batch_kwargs_generators\"][name]\n )\n else:\n raise ValueError(\n f\"Unable to load batch kwargs generator {name} -- no configuration found or invalid configuration.\"\n )\n generator = self._build_batch_kwargs_generator(**generator_config)\n self._batch_kwargs_generators[name] = generator\n return generator",
"def get_batch_inputs(self, inputs, batch_size=None):\n total_num = inputs.shape[0]\n batch_size = batch_size or self.batch_size\n for i in range(0, total_num, batch_size):\n yield inputs[i:i + batch_size]",
"def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))",
"def add_batch_kwargs_generator(\n self, datasource_name, batch_kwargs_generator_name, class_name, **kwargs\n ):\n datasource_obj = self.get_datasource(datasource_name)\n generator = datasource_obj.add_batch_kwargs_generator(\n name=batch_kwargs_generator_name, class_name=class_name, **kwargs\n )\n return generator",
"def fit_generator(self, generator,\n steps_per_epoch,\n layer_min_delta=1e-3,\n patience=5,\n fine_tune=True,\n epochs=1,\n verbose=1,\n callbacks=[],\n validation_data=None,\n validation_steps=None,\n class_weight=None,\n max_q_size=10,\n workers=1,\n pickle_safe=False,\n initial_epoch=0):\n if verbose:\n print('Beginning greedy training')\n\n greedycb = keras.callbacks.EarlyStopping(min_delta=layer_min_delta, patience=patience)\n\n def greedymodel(layername):\n l = self.get_layer(layername)\n # workaround, input shape always there\n conf = l.get_config()\n conf['batch_input_shape'] = l.input_shape\n return Sequential([\n keras.layers.Dense.from_config(conf),\n # keras.layers.BatchNormalization(),\n keras.layers.Dropout(self.do),\n keras.layers.Dense(l.input_shape[-1], activation='linear', name='OUT')\n ])\n\n def trainlayer(model, train, val):\n model.compile(optimizer=self.optimizer, loss=keras.losses.mse, metrics=[keras.metrics.mae])\n if verbose:\n model.summary()\n model.fit_generator(train, steps_per_epoch, epochs=epochs, verbose=verbose,\n callbacks=[greedycb, *callbacks], validation_data=val,\n validation_steps=validation_steps, class_weight=class_weight, max_q_size=max_q_size,\n workers=workers, pickle_safe=pickle_safe, initial_epoch=initial_epoch)\n return model\n\n if verbose:\n print('\\nLayer 1:\\n')\n # layer 1 does not need its data transformed\n # l0 = greedymodel('IN')\n # l0 = trainlayer(l0, StackedAutoEncoder.LayerWiseGenWrapper(generator, lambda x: x),\n # StackedAutoEncoder.LayerWiseGenWrapper(validation_data, lambda x: x))\n # # Setting weights\n # self.get_layer('IN').set_weights(l0.get_layer('IN').get_weights())\n # self.get_layer('OUT').set_weights(l0.get_layer('OUT').get_weights())\n\n for i, layer in enumerate(self.lunits):\n if verbose:\n print('Pre-training layer {0}'.format(i+1))\n in_id = 'IN{0}'.format(i+1)\n # out_id = 'DEC{0}'.format(i)\n model = greedymodel(in_id)\n if i > 0:\n encode = keras.backend.function(inputs=[self.input, keras.backend.learning_phase()],\n outputs=[self.get_layer(in_id).input])\n else:\n encode = None\n gtrain = StackedAutoEncoder.LayerWiseGenWrapper(generator, encode)\n geval = StackedAutoEncoder.LayerWiseGenWrapper(validation_data, encode)\n\n model = trainlayer(model, gtrain, geval)\n\n self.get_layer(in_id).set_weights(model.get_layer(in_id).get_weights())\n # self.get_layer(out_id).set_weights(model.get_layer(out_id).get_weights())\n print('\\nCompleted layer', i+1)\n print()\n\n if fine_tune:\n print('Fine Tuning...')\n super().fit_generator(generator, steps_per_epoch, epochs=epochs, verbose=verbose,\n callbacks=callbacks, validation_data=validation_data,\n validation_steps=validation_steps, class_weight=class_weight,\n max_q_size=max_q_size, workers=workers,\n pickle_safe=pickle_safe, initial_epoch=initial_epoch)",
"def get_batch_gen(self, config):\n\n ################\n # Def generators\n ################\n\n def random_balanced_gen():\n print('trying to generate batch series with ', self.num_train, 'shapes')\n\n # Initiate concatenation lists\n tp_list = [] # points\n tev_list = [] # eigen vectors\n tevt_list = [] # transposed eigen vectors\n tv_list = [] # eigen values\n tevf_list = [] # full eigen vectors for ground truth maps\n ti_list = [] # cloud indices\n\n batch_n = 0\n i_batch = 0\n\n gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator\n # if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices\n # print(gen_indices.shape, config.batch_num)\n # if config.split == 'test':\n # print('test setting here not fully supported')\n # n_shapes = self.num_test # has to be defined\n # gen_indices = []\n # for i in range(n_shapes - 1):\n # for j in range(i + 1, n_shapes):\n # gen_indices += [i, j] # put all the pairs in order\n # gen_indices = np.array(gen_indices)\n\n\n # Generator loop\n for p_i in gen_indices:\n\n # Get points and other input data\n new_points = self.input_points[p_i]\n new_evecs = self.input_evecs[p_i][:, :self.neig]\n new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]\n new_evals = self.input_evals[p_i][:self.neig]\n\n new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]\n\n n = new_points.shape[0]\n\n if i_batch == config.batch_num:\n\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n tp_list = []\n tev_list = []\n tevt_list = []\n tv_list = []\n tevf_list = []\n ti_list = []\n\n batch_n = 0\n i_batch = 0\n\n # Add data to current batch\n tp_list += [new_points]\n tev_list += [new_evecs]\n tevt_list += [new_evecs_trans]\n tv_list += [new_evals]\n tevf_list += [new_evecs_full]\n ti_list += [p_i]\n\n # Update batch size\n batch_n += n\n i_batch += 1\n\n # yield the rest if necessary (it will not be a full batch and could lead to mistakes because of\n # shape matching needing pairs !!!!)\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n ##################\n # Return generator\n ##################\n\n # Generator types and shapes\n gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)\n gen_shapes = ([None, 3], [None, self.neig],\n [self.neig, None], [self.neig, None], [None, self.neig], [None], [None])\n\n return random_balanced_gen, gen_types, gen_shapes",
"def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)",
"def build_validation_iterator(dataset_name, batch_size, prepro_fn):\n dataset, dataset_info = tfds.load(\n dataset_name,\n split=tfds.Split.VALIDATION,\n as_supervised=True,\n with_info=True\n )\n n_samples = dataset_info.splits['validation'].num_examples\n steps_per_epoch = int(math.ceil(n_samples / batch_size))\n if prepro_fn is not None:\n dataset = dataset.map(prepro_fn, num_parallel_calls=AUTOTUNE)\n\n # Batch\n batched_dataset = dataset.padded_batch(\n batch_size,\n get_output_shapes(dataset),\n padding_values=get_padding_values(get_output_types(dataset)),\n drop_remainder=False\n )\n return batched_dataset, steps_per_epoch",
"def build_validation_iterator(dataset_name, batch_size, prepro_fn):\n dataset, dataset_info = tfds.load(\n dataset_name,\n split=tfds.Split.VALIDATION,\n as_supervised=True,\n with_info=True\n )\n n_samples = dataset_info.splits['validation'].num_examples\n steps_per_epoch = int(math.ceil(n_samples / batch_size))\n if prepro_fn is not None:\n dataset = dataset.map(\n prepro_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Batch\n batched_dataset = dataset.padded_batch(\n batch_size,\n get_output_shapes(dataset),\n padding_values=get_padding_values(get_output_types(dataset)),\n drop_remainder=False\n )\n return batched_dataset, steps_per_epoch"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract embedding from internal Keras model | def extract_embedding(self, from_model):
return from_model | [
"def gensim_to_keras(model):\n return model.wv.get_keras_embedding()",
"def gensim_to_keras(model):\n emebed_layer = model.wv.get_keras_embedding(train_embeddings=False)\n return emebed_layer",
"def get_embedding(self, model):\n embedding = []\n for node in range(len(self.graph.nodes())):\n embedding.append(list(model[str(node)]))\n embedding = np.array(embedding)\n return embedding",
"def embedding(self, input):\r\n return self.params[self.id + \"word_embed\"][input, :]",
"def get_embeddings(self, img):\n return self.model(img)",
"def concept_embedding(concept_model: ConceptDetectionModel2D):\n return concept_model.to_embedding()",
"def getEmbedding(model, data, n):\n # batch optimizing to fit the model\n model.fit(data)\n\n # Retrieve the embeddings\n node_onehot = np.eye(n)\n res = model.feedforward_autoencoder(node_onehot)\n return res",
"def get_embeddings(self):\n return self.model.input_embeddings.weight.cpu().data.numpy()",
"def get_embedding_output(self):\n return self.embedding_output",
"def model_extract_document_embedding(self):\n input_ids = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"ids\")\n attention_mask = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"att\")\n token = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"tok\")\n\n # Embedding :\n if self.method_embedding == 'CamemBERT':\n Camembert_model = transformers.TFCamembertModel.from_pretrained(\"jplu/tf-camembert-base\")\n x = Camembert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'FlauBERT':\n # lr = 0.00001\n Flaubert_model = transformers.TFFlaubertModel.from_pretrained(\"jplu/tf-flaubert-base-uncased\")\n x = Flaubert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'XLM-RoBERTa':\n # lr = 0.00001\n XLMRoBERTa_model = transformers.TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n x = XLMRoBERTa_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'RoBERTa':\n # Experience Test path weights :\n PATH = '/kaggle/input/tf-roberta/'\n config = transformers.RobertaConfig.from_pretrained(PATH + 'config-roberta-base.json')\n Roberta_model = transformers.TFRobertaModel.from_pretrained(PATH + 'pretrained-roberta-base.h5',\n config=config)\n # Sinon :\n # Roberta_model = transformers.TFRobertaModel.from_pretrained('roberta-base')\n x = Roberta_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'BERT':\n BERT_model = transformers.TFBertModel.from_pretrained('bert-base-uncased')\n x = BERT_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n else:\n logger.critical(\"unknown embedding method name : '{}'\".format(self.method_embedding))\n\n # word vectors shape : (None, maxlen, 768)\n x = x[0]\n cls_token = x[:, 0, :]\n\n model = tf.keras.models.Model(inputs=[input_ids, attention_mask, token], outputs=cls_token)\n return model",
"def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_",
"def extract_torch_models_embeddings(dataloader, model, cuda, embedding_size=512):\n # model.eval()\n # embeddings = np.zeros((len(dataloader.dataset), embedding_size))\n #\n # one_embedding = torch.zeros(batch_size, embedding_size, 1, 1)\n #\n # def copy_data(m, i, o):\n # one_embedding.copy_(o.data)\n #\n # layer = model._modules.get('avgpool')\n # h = layer.register_forward_hook(copy_data)\n #\n # meshcodes = []\n # k = 0\n # for images, _, meshcode in tqdm(dataloader):\n # if cuda:\n # images = images.cuda()\n # _ = model(images)\n # embeddings[k:k + images.shape[0]] = one_embedding.numpy()[:, :, 0, 0] # batchsize x 512 x 1 x 1\n # k += images.shape[0]\n # meshcodes += list(meshcode)\n #\n # h.remove()\n # return embeddings, meshcodes\n\n model.eval()\n # 1D embedding, dataset_size by embedding_size\n embeddings = np.zeros((len(dataloader.dataset), embedding_size))\n labels = []\n k = 0\n for images, _, label in tqdm(dataloader):\n if cuda:\n images = images.cuda()\n embeddings[k:k + images.shape[0]] = model.get_embedding(images).data.cpu().numpy()\n k += images.shape[0]\n labels += list(label)\n\n return embeddings, labels",
"def word_embeddings(self):\n return self.embed.params",
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n\n ### START CODE HERE ###\n # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n emb_matrix = np.zeros((vocab_len, emb_dim))\n\n # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n for word, index in word_to_index.items():\n emb_matrix[index, :] = word_to_vec_map[word]\n\n # Define Keras embedding layer with the correct output/input sizes, make it trainable.\n # Use Embedding(...). Make sure to set trainable=False.\n embedding_layer = Embedding(vocab_len,emb_dim, trainable=False)\n ### END CODE HERE ###\n\n # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.build((None,))\n\n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n\n return embedding_layer",
"def get_embedding(self, layer):\n assert(hasattr(self, layer))\n return getattr(self, layer).get_embedding()",
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer",
"def get_embedding(self, input):\n if type(input) is str:\n input = torch.tensor(self.word2index[input], dtype=torch.long).to(self.device).unsqueeze(0)\n return self.model.predict(input)",
"def __glove_embed__(sequence, model):\n embedded = []\n for word in sequence:\n embedded.append(model[word])\n return embedded",
"def get_embedding(self,extracted_face):\n # scale pixel values\n extracted_face = extracted_face.astype('float32')\n # standardize pixel values across channels (global)\n mean, std = extracted_face.mean(), extracted_face.std()\n extracted_face = (extracted_face - mean) / std\n # transform face into one sample\n extracted_face = np.expand_dims(extracted_face, axis=0)\n # make prediction to get embedding\n with graph_facenet.as_default():\n with sess1.as_default():\n yhat = self.model.predict(extracted_face)\n \n # print(len(sess.graph._nodes_by_name.keys()))\n clear_session()\n return yhat[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a logger that produces reasonable output. | def _get_logger():
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)8s] %(message)s"))
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
return logger | [
"def get_logger() -> Logger:\n global __logger# pylint: disable=global-statement\n verbosity = config.verbosity\n log_path = config.log_filename\n noisy = config.noisy\n if __logger is None:\n name = 'default'\n log_levels = ['ERROR', 'WARNING', 'INFO', 'DEBUG']\n level = log_levels[verbosity]\n cLevel = log_levels[verbosity] if noisy else 'ERROR'\n handlers = ['file', 'console']\n logging_config.dictConfig({\n 'version': 1,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s - %(levelname)s - %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'}\n },\n 'handlers': {\n 'console': {\n 'level': cLevel,\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n 'stream': 'ext://sys.stdout'\n },\n 'file': {\n 'level': level,\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'default',\n 'filename': log_path,\n 'maxBytes': (10*1024*1024),\n 'backupCount': 3,\n 'mode': 'a'\n }\n },\n 'loggers': {\n 'default': {\n 'level': level,\n 'handlers': handlers\n }\n },\n 'disable_existing_loggers': False\n })\n __logger = logging.getLogger(name)\n return __logger",
"def get_logger():\n return logger",
"def get_logger():\n global initialized\n logger = logging.getLogger(\"mlonmcu\")\n if len(logger.handlers) == 0:\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(get_formatter(minimal=True))\n # stream_handler.setLevel(?)\n logger.addHandler(stream_handler)\n logger.propagate = False\n initialized = True\n return logger",
"def get_logger():\n global _logger\n\n # if already created, don't acquire lock\n if _logger:\n return _logger\n\n _logger_lock.acquire()\n\n try:\n if _logger is not None:\n return _logger\n\n # Get scoped Foreshadow logger.\n my_logger = logging.getLogger(\"foreshadow\")\n\n # interactive = False\n # if hasattr(sys, \"ps1\"):\n # interactive = True\n # # check python -i\n # elif hasattr(sys.flags, \"interactive\"):\n # interactive = sys.flags.interactive\n\n # if interactive:\n # my_logger.setLevel(LEVELS[\"info\"])\n # else:\n # my_logger.setLevel(LEVELS[\"warning\"])\n my_logger.setLevel(LEVELS[\"info\"])\n stream_target = sys.stderr\n\n # Add Stream Handler based on if interactive or not.\n handler = logging.StreamHandler(stream_target)\n # handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))\n handler.setFormatter(LOGGING_FORMATTER)\n my_logger.addHandler(handler)\n\n _logger = my_logger\n return _logger\n\n finally:\n _logger_lock.release()",
"def build_logger():\n return log",
"def _verbose_logger():\n if len(_verbose_loggers) > 0:\n return _verbose_loggers[-1]\n else:\n return _null_logger()",
"def get_logger(set_info=False):\n\n logging.basicConfig(format=\"%(message)s\", stream=sys.stdout)\n logger = logging.getLogger(\"pythonanywhere\")\n if set_info:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARNING)\n return logger",
"def get_logger(name='main'):\n config = load_config()\n\n name = '%s.%s' % (PROGRAM_NAME, name)\n\n logger = logging.Logger(name)\n\n formatter = logging.Formatter(name + '[%(process)d] %(levelname)s: %(message)s')\n\n handler = SysLogHandler(facility=syslog.LOG_AUTH)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n if config['logging']['to_stderr']:\n handler = StreamHandler(sys.stderr)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger",
"def get_logger(cls):\n if hasattr(cls, '__module__') and hasattr(cls, '__name__'):\n logger = logging.getLogger(cls.__module__ + '.' + cls.__name__)\n else:\n logger = logging.getLogger(str(cls))\n \n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(levelname)s] %(message)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger",
"def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )",
"def get_main_logger():\n\n # Use verbose debug logging for now.\n console_loglevel = VERBOSITY_LEVELS[2]\n file_loglevel = VERBOSITY_LEVELS[2]\n\n console_fmt = logging.Formatter(\n '%(name)s: %(levelname)s %(message)s')\n file_fmt = logging.Formatter(\n '%(asctime)s - %(name)s: %(levelname)s %(message)s')\n\n log = logging.getLogger('toggledarkly')\n\n console_log = logging.StreamHandler()\n console_log.setFormatter(console_fmt)\n console_log.setLevel(console_loglevel)\n log.addHandler(console_log)\n\n file_log = handlers.RotatingFileHandler(\n LOG_FILE_PATH, maxBytes=(1048576*5), backupCount=5\n )\n file_log.setFormatter(file_fmt)\n file_log.setLevel(file_loglevel)\n log.addHandler(file_log)\n\n if SYSTEMD_SUPPORT:\n journald_log = JournalHandler()\n journald_log.setLevel(file_loglevel)\n journald_log.setFormatter(console_fmt)\n log.addHandler(journald_log)\n \n log.setLevel(VERBOSITY_LEVELS[2])\n\n return log",
"def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))",
"def get_logger(\n facility=None,\n level='warning',\n name=None,\n logfmt='%(name)s[%(process)d] %(levelname).1s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S ',\n child=None\n):\n\n dc(f\"facility={facility}, level={level}, name={name!r}, logfmt={logfmt!r}, datefmt={datefmt!r}, child={child!r}\")\n\n # If no name is provided, use the name of the current program (minus\n # any file extension).\n if not name:\n name=os.path.basename(sys.argv[0]).rsplit('.',1)[0]\n\n if facility is None:\n if child:\n # Initialize this log as a child of the main logger.\n dc(f\"Setting up child logger {child!r}.\")\n log=logging.getLogger().getChild(child)\n else:\n # Assume this process has already set up a logger (or just wants to\n # use the default logger), and return that.\n dc(f\"No facility, so getting root logger.\")\n log=logging.getLogger()\n if not log.handlers:\n # Suppress \"No handlers could be found ...\" message, in case our\n # root logger hasn't been set up. NullHandler is a bit bucket.\n log.addHandler(logging.NullHandler)\n if name:\n log.name=name\n dc(f\"Returning with logger {log!r}\")\n return log\n\n if not child:\n # Child loggers use the parent logger's facility, handler, and\n # formatting.\n h=None\n if isinstance(facility,logging.Handler):\n dc(f\"facility is logging.Handler {facility!r}\")\n # The caller has provided a handler for us.\n h=facility\n if isinstance(h,logging.StreamHandler):\n # Prefix our log format with the date and time.\n if 'asctime' in logfmt:\n logfmt='%(asctime)s '+logfmt\n f=logging.Formatter(logfmt,datefmt=datefmt)\n else:\n if isinstance(facility,str):\n dc(f\"facility is string {facility!r}\")\n if facility in syslog_facilities:\n # It looks like we're logging to syslog.\n facility=logging.handlers.SysLogHandler.facility_names[facility]\n else:\n # This string must be a filename, so open it for appending.\n dc(f\"Treating facility={facility!r} as a filename.\")\n facility=os.path.expanduser(os.path.expandvars(facility))\n dc(f\"Expanded filename is {facility!r}.\")\n if os.path.isfile(facility):\n mode='a'\n elif not os.path.exists(facility):\n mode='w'\n else:\n raise ValueError('\"%s\" exists but is not a regular file.'%(facility,))\n facility=open(facility,mode)\n\n if isinstance(facility,int):\n dc(f\"facility is integer {facility!r}\")\n # This is a syslog facility number, or had better be.\n system=platform.system()\n if system=='Darwin':\n h=logging.handlers.SysLogHandler(address='/var/run/syslog',facility=facility)\n elif system=='Linux':\n h=logging.handlers.SysLogHandler(address='/dev/log',facility=facility)\n else:\n dc(f\"Createing SysLogHandler for this logger.\")\n h=logging.handlers.SysLogHandler(\n address=('localhost',logging.handlers.SYSLOG_UDP_PORT),\n facility=facility\n )\n dc(f\"Createing logging.Formatter from logfmt={logfmt!r}\")\n f=logging.Formatter(logfmt)\n elif isinstance(facility,IOBase):\n dc(f\"facility is {facility!r}\")\n # This is a stream, so add date and time to the start of our log format.\n h=logging.StreamHandler(facility)\n logfmt='%(asctime)s'+logfmt\n dc(f\"Createing logging.Formatter from logfmt={logfmt!r}, datefmt={datefmt!r}\")\n f=logging.Formatter(logfmt,datefmt=datefmt)\n else:\n raise ValueError('bad log facility value: %r'%(facility,))\n\n if isinstance(level,str):\n # If level is a string, make sure it is upper case.\n level=level.upper()\n dc(f\"level is string {level!r}\")\n elif isinstance(level,int) and level in _nameToLevel:\n dc(f\"level is int {level!r}\")\n level=_nameToLevel[level]\n 
dc(f\"converted level is int {level!r}\")\n else:\n raise ValueError('bad log level value: %r'%(level,))\n\n # Now create the new logger, and return it to the caller.\n if not child:\n dc(f\"Applying formatter {f!r} to handler {h!r}\")\n h.setFormatter(f)\n log=logging.getLogger(name)\n dc(f\"Adding handler to logger\")\n log.addHandler(h)\n l=_nameToLevel[level]\n dc(f\"_nameToLevel[{level!r}]{_nameToLevel[level]!r}\")\n log.setLevel(_nameToLevel[level])\n dc(f\"Returning with logger {log!r}\")\n return log",
"def get_logger(name='root', log_file=None, log_level=logging.DEBUG):\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n stream_handler = logging.StreamHandler(stream=sys.stdout)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n if log_file is not None and dist.get_rank() == 0:\n log_file_folder = os.path.split(log_file)[0]\n os.makedirs(log_file_folder, exist_ok=True)\n file_handler = logging.FileHandler(log_file, 'a')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger_initialized[name] = True\n return logger",
"def _loggerInit(self):\n logger = logging.getLogger(__name__)\n\n formatter = logging.Formatter(\"%(asctime)s %(name)-24s[%(process)d]: %(levelname)-7s %(message)s\")\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.setFormatter(formatter)\n\n logger.addHandler(hdlr)\n logger.setLevel('INFO')\n\n return logger",
"def create_logger():\n logger = logging.getLogger('robodj')\n logger.setLevel(logging.DEBUG)\n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh = logging.StreamHandler()\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # add handler to logger object\n return logger",
"def get_standard_logger():\n standard_logger = logging.getLogger(\"instana\")\n\n ch = logging.StreamHandler()\n f = logging.Formatter('%(asctime)s: %(process)d %(levelname)s %(name)s: %(message)s')\n ch.setFormatter(f)\n standard_logger.addHandler(ch)\n standard_logger.setLevel(logging.DEBUG)\n return standard_logger",
"def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!",
"def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
import count or FPKM table | def import_countOrFPKMTable(
self,filename_I):
#import and format the data
io = base_importData();
io.read_tab(filename_I);
countOrFPKMTable = self.format_countOrFPKMTable(io.data);
return countOrFPKMTable; | [
"def load_counttable(filename, small=False):\n if small:\n counttable = _SmallCounttable(1, [1])\n counttable.load(filename)\n\n else:\n counttable = _Counttable(1, [1])\n counttable.load(filename)\n\n return counttable",
"def count(self):\n ans = self.execute(self.commands.table_count(self.name))\n return ans[0][0]",
"def count_entries(self,tablename):\r\n\t\tquery=\"Select count(*) from \"+tablename\r\n\t\ttry:\r\n\t\t\tself.__cur.execute(query)\r\n\t\texcept Exception as e:\r\n\t\t\tself.__conn.rollback()\r\n\t\t\traise e\r\n\t\tfetcheddata = self.__cur.fetchone()\r\n\t\treturn fetcheddata[0]",
"def database_count():\n a = len(_os.listdir(_datapath())) - 3\n b = _os.walk(_datapath()) # generator\n c = [1]\n c = len([c[0] + 1 for root, dirs, files in b for name in files]) - 6\n print(\"Total number of companies contained: {}\".format(a))\n print(\"Total number of detailed sheets: {}\".format(c))\n _gc.collect()",
"def Count(table):\n\n Log(f'Counting {type(table)}...')\n if isinstance(table, hl.Table):\n ht = table\n cnt = munchify({'rows': ht.count()})\n elif isinstance(table, hl.MatrixTable):\n mt = table\n cnt = munchify(dict(zip(['variants', 'samples'], mt.count())))\n else:\n LogException(f'Counting is not implemented for type {type(table)}.')\n\n Log(f'Counts: {JsonDumps(cnt)}.')\n\n return cnt",
"def count(self):\n return Env.backend().execute(ir.TableCount(self._tir))",
"def parse_count_tracking(self, fp, cursor, table):\n\t\tpass",
"def get_table_count(table_name, query, headers, base_url, maxpagesize):\n logging.info(\"Running get_table_count() . . . \")\n\n #task_instance = context['task_instance']\n #headers = task_instance.xcom_pull('build_auth_headers', key='auth_headers')\n\n r_count = requests.get('{0}/ws/schema/table/{1}/count?{2}'.format(base_url, table_name, query), headers=headers)\n r_status = r_count.status_code\n if r_status != 200:\n logging.info('Response NOT successful. I got code {} '.format(r_status))\n raise ValueError('Response NOT successful. I got code {} '.format(r_status))\n else:\n logging.info('Response successful! I got code {} '.format(r_status))\n\n count_json = r_count.json()\n row_count = count_json['count']\n\n pages = int(math.ceil(row_count / maxpagesize))\n\n return row_count, pages",
"def count_entries(self, tablename):\n query = \"Select count(*) from \" + tablename\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchone()\n return fetcheddata[0]",
"def count(self,table):\n if self.non_char.search(table):\n #not a table\n count_sql = \"select count(1) from ({}) as tmp_a \".format(table)\n else:\n #table or view\n count_sql = \"select count(1) from \\\"{}\\\"\".format(table)\n return self.get(count_sql)[0]",
"def RP_raw_countTables(self):\n\t\traw_countTables_cmnd = \"python2 %s/riboseq/riboseq_build_exp_RAW_countTables.py --rootDir %s --libSetFile %s --threadNumb %s\" % (\n\t\t\tself.rootDir, self.rootDir, self.libSetFile, self.threadNumb)\n\t\tsubprocess.Popen(raw_countTables_cmnd, shell=True).wait()",
"def getCounts(conn, showEmpty = True):\n\n with conn.cursor() as cur:\n cur.execute(\"select table_name from information_schema.tables where table_schema='public' order by table_schema, table_name;\")\n tables = cur.fetchall()\n results = {}\n\n for table in tables:\n table = table[0]\n\n cur.execute(\"select count(*) from %s\" % table)\n count = cur.fetchone()[0]\n\n if showEmpty or count > 0:\n results[table] = count\n # prettySummaryTable.add_row([table, count])\n # print \"%s: %s\" % (table, count)\n\n return results",
"def get_db_line_count(self):\n# substatements = [\"SELECT COUNT(*) a FROM {0}\".format(tbname) for tbname in self.get_all_tablenames()]\n# cmd = \"SELECT sum(a) FROM ({0})\".format(' union all '.join(substatements))\n# self._execute(cmd)\n# return self.fetchone()[0]\n\n count = 0\n for sub_tbnames in _divide(self.get_all_tablenames(), 300):\n substatements = [\"SELECT COUNT(*) a FROM {0}\".format(tbname) for tbname in sub_tbnames]\n cmd = \"SELECT sum(a) FROM ({0})\".format(' union all '.join(substatements))\n self._execute(cmd)\n count += self.fetchone()[0]\n \n return count",
"def cfindex(path='./'):\n # Get files\n cnt_files = [f for f in os.listdir(path) if f.endswith('.cnt')]\n cnt_files.sort()\n # Get descriptions\n descr = [readcounts(path+f,silent=True).par.description for f in cnt_files]\n # Print left aligned table\n t = Table([cnt_files,descr],names=['File','Description'])\n t.pprint(align='<',show_dtype=False,max_width=400)",
"def countTable(self, in_table_name):\n self.cursor.execute('SELECT COUNT(*) FROM {};'.format(in_table_name))\n return self.cursor.fetchone()[0]",
"def import_all():\n years = range(2557, 2561)\n for year in years:\n path = 'import%d.txt' % year\n file = open(path)\n data = csv.reader(file)\n table = [row for row in data]\n table[:4]\n\n thai_baht_import = groupby(table, lambda x: x[1])\n for key, group in thai_baht_import:\n total_baht = 0\n for item in group:\n total_baht += int(item[2])\n print(item[1], total_baht)",
"def exp_calculator_with_count(count_table_file):\n count_table = pd.read_table(count_table_file, index_col=0)\n columns = count_table.columns\n\n gene_len = count_table[columns[0]]\n rpkm_dict = dict()\n tpm_dict = dict()\n for sample in columns[1:]:\n # Divide the read counts by the length of each gene in kilobases.\n # This gives you reads per kilobase (RPK)\n rpk = count_table[sample]/gene_len\n # get rpkm/fpkm\n total_counts = sum(count_table[sample])/1000\n \"\"\"\n rpkm = (count_table[sample]/gene_len)/(sum(count_table[sample])/1000)*1000000\n \"\"\"\n rpkm = rpk/total_counts*1000000\n # get tpm\n norm_gene_len_total_counts = sum(rpk)\n tpm = rpk/norm_gene_len_total_counts*1000000\n \"\"\"\n tpm = (count_table[sample]/gene_len)/sum(count_table[sample]/gene_len)*1000000\n \"\"\"\n # save\n rpkm_dict[sample] = rpkm\n tpm_dict[sample] = tpm\n # save results\n df_rpkm = pd.DataFrame(rpkm_dict, index=count_table.index)\n df_tpm = pd.DataFrame(tpm_dict, index=count_table.index)\n df_rpkm.to_csv(count_table_file+'.fpkm.xls', sep='\\t')\n df_tpm.to_csv(count_table_file+'.tpm.xls', sep='\\t')\n #\n return rpkm_dict, tpm_dict",
"def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion",
"def get_source_records_count(self, tap_type, table):\n run_query_method = getattr(self, f'run_query_{tap_type.lower()}')\n result = run_query_method(f'SELECT count(1) FROM {table}')\n return result[0][0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat attr tables into a dictionary for rapid alignment of attr table with tracking_id | def reformat_attrTable(
self):
#format into a dictionary of rows for quick aligning with the tracking_id
if self.attrTable: attrTable = self.attrTable[:];
else: attrTable = [];
attrTable_dict = {};
for row in attrTable:
attrTable_dict[row['tracking_id']] = row;
return attrTable_dict; | [
"def _organize_attributes(self, row, existing_entities, ignore=[]):\n output = {\"attributes\": {}, \"relationshipAttributes\": {},\n \"root\": {}, \"custom\": {}}\n for column_name, cell_value in row.items():\n # Remove the required attributes so they're not double dipping.\n if column_name in ignore:\n continue\n # Remove any cell with a None / Null attribute\n elif cell_value is None:\n continue\n # If the Attribute key starts with [Relationship]\n # Move it to the relation\n elif column_name.startswith(\"[Relationship]\"):\n cleaned_key = column_name.replace(\"[Relationship]\", \"\").strip()\n\n if cleaned_key == \"meanings\":\n\n terms = self._splitField(cell_value)\n reference_object = [\n {\"typeName\": \"AtlasGlossaryTerm\",\n \"uniqueAttributes\": {\n \"qualifiedName\": \"{}@Glossary\".format(t)\n }\n } for t in terms\n ]\n\n output[\"relationshipAttributes\"].update(\n {cleaned_key: reference_object}\n )\n else:\n # If there is a value separator in the cell value\n # assuming it's trying to make an array of relationships\n if self.config.value_separator in cell_value:\n relationships = self._splitField(cell_value)\n all_references = []\n\n for rel in relationships:\n reference_object = self._parse_relationship_value(\n rel, existing_entities)\n all_references.append(reference_object)\n output[\"relationshipAttributes\"].update(\n {cleaned_key: all_references}\n )\n # There is no value separator in the cell value\n # Thus it's a single string that needs to be parsed\n else:\n reference_object = self._parse_relationship_value(\n cell_value, existing_entities)\n output[\"relationshipAttributes\"].update(\n {cleaned_key: reference_object}\n )\n\n # TODO: Add support for Business\n elif column_name.startswith(\"[root]\"):\n # This is a root level attribute\n cleaned_key = column_name.replace(\"[root]\", \"\").strip()\n output_value = cell_value\n if self.config.value_separator in cell_value:\n # There's a delimiter in here\n output_value = self._splitField(cell_value)\n\n # This seems like a poor place to add business logic like this\n if cleaned_key == \"classifications\":\n output_value = [output_value] if not isinstance(\n output_value, list) else output_value\n output_value = [AtlasClassification(\n c).to_json() for c in output_value]\n elif cleaned_key == \"labels\" and not isinstance(output_value, list):\n output_value = [output_value]\n\n output[\"root\"].update({cleaned_key: output_value})\n\n elif column_name.startswith(\"[custom]\"):\n cleaned_key = column_name.replace(\"[custom]\", \"\").strip()\n\n output[\"custom\"].update({cleaned_key: cell_value})\n else:\n output[\"attributes\"].update({column_name: cell_value})\n\n return output",
"def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs",
"def _ToDynamoAttributes(self, table_def, attrs):\r\n dyn_attrs = dict()\r\n for k, v in attrs.items():\r\n dyn_attrs[k] = self._ToDynamoValue(table_def.GetColumnByKey(k), v)\r\n return dyn_attrs",
"def _reformat_attr_dict(self,atr_dict):\n flat = {}\n c = map(lambda x:{x['customLabel']:x},atr_dict)\n for val in c:\n flat[val.keys()[0]] = val[val.keys()[0]]\n return flat",
"def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed",
"def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}",
"def get_relations_attributes(self):\n cursor = self.conn.cursor()\n q = (\n \"SELECT c.table_name, c.column_name FROM information_schema.columns c \"\n \"INNER JOIN information_schema.tables t ON c.table_name = t.table_name \"\n \"AND c.table_schema = t.table_schema \"\n \"AND t.table_type = 'BASE TABLE' \"\n \"AND t.table_schema = 'public' \"\n \"AND c.table_name != 'queries'\"\n )\n cursor.execute(q)\n rows = cursor.fetchall()\n cursor.close()\n\n tables_attributes = {}\n for table, attribute in rows:\n if table in tables_attributes:\n tables_attributes[table].append(attribute)\n else:\n tables_attributes[table] = [attribute]\n\n tables = list(tables_attributes.keys())\n relations_attributes = {}\n relations = []\n relations_tables = {}\n x = self.get_queries_incremental(target=\"\")\n for group in x:\n for q in group:\n for r in q[\"moz\"][\"from\"]:\n if r[\"name\"] not in relations:\n relations.append(r[\"name\"])\n relations_attributes[r[\"name\"]] = tables_attributes[r[\"value\"]]\n relations_tables[r[\"name\"]] = r[\"value\"]\n return tables, relations, relations_attributes, relations_tables",
"def _convert_table_to_dict(self, data_table):\n column_names = ['star_name', 'distance', 'brightness', 'luminosity']\n stars = {}\n for line in data_table:\n stars[line[0]] = {column_names[i] : line[i] for i in range(1, len(column_names))}\n return stars",
"def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result",
"def make_attribute_table(gems: List[DataConcepts]) -> List[Mapping[str, BaseValue]]:\n flattened_gems = recursive_flatmap(\n obj=gems, func=lambda x: [x], unidirectional=False\n )\n types_with_attributes = (\n ProcessSpec,\n ProcessRun,\n MaterialSpec,\n MeasurementSpec,\n MeasurementRun,\n )\n all_rows = []\n attributed_gems = [\n x for x in flattened_gems if isinstance(x, types_with_attributes)\n ]\n for gem in attributed_gems:\n row_dict = {\"object\": gem, \"object_type\": type(gem).__name__}\n if hasattr(gem, \"conditions\"):\n for cond in gem.conditions:\n row_dict[f\"CONDITION: {cond.name}\"] = cond.value\n if hasattr(gem, \"parameters\"):\n for param in gem.parameters:\n row_dict[f\"PARAMETER: {param.name}\"] = param.value\n if hasattr(gem, \"properties\"):\n for prop in gem.properties:\n if isinstance(prop, PropertyAndConditions):\n row_dict[f\"PROPERTY: {prop.property.name}\"] = prop.property.value\n for cond in prop.conditions:\n row_dict[f\"CONDITION: {cond.name}\"] = cond.value\n else:\n row_dict[f\"PROPERTY: {prop.name}\"] = prop.value\n all_rows.append(row_dict)\n return all_rows",
"def aggregate_by_primary_attribute(table):\n result = {}\n for row in table:\n for attribute_to_aggregate_by in row[1].split(','):\n attribute_to_aggregate_by.strip()\n attribute_data = row[0]\n if attribute_to_aggregate_by not in result:\n result[attribute_to_aggregate_by] = [attribute_data]\n else:\n result[attribute_to_aggregate_by] += [attribute_data]\n return result",
"def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", \"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl",
"def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)",
"def genotype_to_dict(all_genotypes):\n\n rows = {}\n\n for rs, individuals in all_genotypes.items():\n rows.setdefault(rs, {})\n for individual, genotypes in individuals.items():\n rows[rs].setdefault(individual, \"\")\n for genotype in genotypes:\n rows[rs][individual] =\\\n genotype[\"genotype\"].replace(\"|\", \"\")\n break\n\n return rows",
"def _createAttributeFormattingMap(self, scanf_list, reformat=True):\n\n order = []\n scanf_map = {}\n for entry in scanf_list:\n\n # grab attribute\n attribute = re.split('\\s', entry)[0]\n\n # add to order\n if attribute.startswith('_') or (not attribute in order):\n order.append(attribute)\n\n # reformat entry since sscanf doesn't support %g\n if reformat:\n entry = entry.replace('%g', '%f')\n\n # make format entry into list if multiple formats exist\n if attribute in scanf_map:\n formats = scanf_map[attribute]\n if not isinstance(formats, list):\n scanf_map[attribute] = [formats]\n scanf_map[attribute].append(entry)\n else:\n scanf_map[attribute] = entry\n\n return scanf_map, order",
"def _collect_attributes(self):\n\n self.attributes = collections.OrderedDict()\n\n for attr in self.cube.all_attributes:\n self.attributes[self.logical(attr)] = attr",
"def revealed_attrs(proof: dict) -> dict:\n\n rv = {}\n for referent in proof['proof']['proofs']:\n revealed = proof['proof']['proofs'][referent]['primary_proof']['eq_proof']['revealed_attrs']\n rv[referent] = {attr: decode(revealed[attr]) for attr in revealed}\n return rv",
"def parse_distmat_to_dict(table):\r\n\r\n col_headers, row_headers, data = parse_matrix(table)\r\n assert(col_headers == row_headers)\r\n\r\n result = defaultdict(dict)\r\n for (sample_id_x, row) in zip(col_headers, data):\r\n for (sample_id_y, value) in zip(row_headers, row):\r\n result[sample_id_x][sample_id_y] = value\r\n return result",
"def gfa_table_to_dict(gfa_data):\n gfa_target = gfa_data[\"TARGETID\"]\n gfa_loc = gfa_data[\"GFA_LOC\"]\n gfa_gmag = gfa_data[\"GAIA_PHOT_G_MEAN_MAG\"]\n gfa = dict()\n for lid, tgid, mag in zip(gfa_loc, gfa_target,gfa_gmag):\n print(zip(gfa_loc, gfa_target,gfa_gmag))\n if lid in gfa:\n gfa[lid].append(mag)\n else:\n gfa[lid] = list([mag])\n gfa = {f: np.array(av) for f, av in gfa.items()}\n return gfa"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat count table into a flattened table of sample_names/values | def reformat_countTable(
self,analysis_id_I=None,sna2experimentID_I=None,
sna2sns_I=None):
if self.countTable: countTable = self.countTable[:];
else: countTable = [];
countTable_flat = self.reformat_countOrFPKMTable(
countOrFPKMTable_I=countTable,
analysis_id_I=analysis_id_I,
sna2experimentID_I=sna2experimentID_I,
sna2sns_I=sna2sns_I,
count_or_FPKM = 'count');
return countTable_flat; | [
"def build_contingency_table(\n sample_data: pd.Series, batch_data: pd.Series\n) -> pd.DataFrame:\n categorical_values = pd.concat([sample_data, batch_data])\n data_origins = np.array([\"sample\"] * len(sample_data) + [\"batch\"] * len(batch_data))\n\n return pd.crosstab(index=categorical_values, columns=data_origins)",
"def init_sample_table(vocab_counts):\r\n count = [ele for ele in vocab_counts]\r\n pow_frequency = np.array(count)**0.75\r\n power = sum(pow_frequency)\r\n ratio = pow_frequency / power\r\n table_size = 1e8\r\n count = np.round(ratio*table_size)\r\n sample_table = []\r\n for idx, x in enumerate(count):\r\n sample_table += [idx]*int(x)\r\n return np.array(sample_table)",
"def table(_list):\n _list = pd.Series(_list)\n counts = _list.value_counts()\n counts = counts.rename('freq')\n counts.index.name = 'values'\n counts = counts.reset_index(drop=False)\n return counts",
"def prettyPrintDataCounts(counts):\n\n print 'OBJECT COUNTS\\n'\n\n consoleTable = generatePrettyPrintTable(['Table', 'Entries'])\n\n for table in sorted(counts.keys()):\n consoleTable.add_row([table, counts[table]])\n\n print consoleTable.draw()",
"def combine_counts(\n fns,\n define_sample_name=None,\n):\n counts = []\n for fn in fns:\n df = pd.read_table(fn, skiprows=1, index_col=0)\n counts.append(df[df.columns[-1]])\n combined_counts = pd.DataFrame(counts).T\n if define_sample_name:\n names = [define_sample_name(x) for x in fns]\n combined_counts.columns = names\n combined_counts.index.name = ''\n return combined_counts",
"def make_all_count_tables(folderName):\r\n\tallSamples = get_all_files(folderName)\r\n\t\r\n\tallCountDicts = []\r\n\tfor fileName in allSamples:\r\n\t\tsampleDict = makeCountDict(fileName, folderName)\r\n\t\tif sampleDict == None:\r\n\t\t\tprint (\"Warning: \"+fileName+\" doesn't have read counts. Ignored.\")\r\n\t\telse:\r\n\t\t\tallCountDicts.append(sampleDict)\r\n\r\n\tconditions = get_conditions(allSamples)\r\n\t#combos = get_file_combinations(conditions)\r\n\r\n\tallCountDicts = check_count_dicts(allCountDicts)\r\n\r\n\tmake_count_table(allCountDicts, folderName+\"All_Counts.tsv\")\r\n\r\n\tfor s in allCountDicts:\r\n\t\tmake_count_table([s], folderName+s[\"name\"])\r\n\r\n\t#for c in combos:\r\n\t#\tmake_count_table(get_dicts_for_comparison(c,allCountDicts), folderName+make_output_name(c))\r",
"def biom_to_table(table: biom.Table):\n return (table.matrix_data.toarray().astype(int).tolist(),\n table.ids('observation').tolist(),\n table.ids('sample').tolist(),\n list(map(dict, table.metadata(axis='observation') or ())))",
"def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;",
"def write_count_tables2( count_data, id_dict, file_name ):\n\ttaxa = count_data[0]\n\tcounts = count_data[1]\n\tappended_counts = count_data[2]\n\t\n\t#open our two output files. \n\traw = open( file_name + \"_raw_cnt.txt\", 'w' )\n\tappended = open( file_name + \"_full_cnt.txt\", 'w' )\n\t\n\t#write header lines. \n\theader = [\"taxa\", \"taxon_level\", \"count\"]\n\traw.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\tappended.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\t\n\t#iterate over numerically sorted taxon list. \n\t#we store the indexes of the sort so we can rapidly access taxa and count data types. \n\tfor tax_idx in np.argsort( taxa.astype( np.int ) ):\n\t\ttax = taxa[ tax_idx ] \n\t\ttax_dat = id_dict[tax][0]\n\t\tlevel = str(int( tax_dat[1] )) #extra shit to turn this into easy int for sorting/filteirng.\n\t\tname = tax_dat[2] + \";\" + tax\n\t\tprint_list = [ name, level ] \n\t\t\n\t\t#now send the results to outfile. \n\t\tcnt = str( counts[ tax_idx ] )\n\t\traw.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )\n\t\tcnt = str( appended_counts[ tax_idx ] )\n\t\tappended.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )",
"def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table",
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def print_name_count_matrix(names, count_matrix):\n table = [[name] + counts for name, counts in zip(names, count_matrix)]\n print(tabulate(table, names, tablefmt='psql'))",
"def table(x):\n c = Counter(x)\n return list(c), list(c.values())",
"def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = number\n num_table[number] = name\n\n return name_table, num_table",
"def add_counts(counter,df,name=\"count_\"):\r\n\r\n counts = counter.transform(df[\"text\"].tolist())\r\n\r\n # Create the column names\r\n num_words = counts.shape[1]\r\n column_names = [name+str(i) for i in range(num_words)]\r\n\r\n # Add in the count matrices\r\n counts = pd.DataFrame(counts.toarray(),columns=column_names)\r\n return pd.concat([df.reset_index(drop=True),counts.reset_index(drop=True)],axis=1)",
"def write_count_tables( count_data, id_dict, file_name ):\n\ttaxa = count_data[0]\n\tcounts = count_data[1]\n\tappended_counts = count_data[2]\n\n\t#open our two output files. \n\traw = open( file_name + \"_raw_cnt.txt\", 'w' )\n\tappended = open( file_name + \"_full_cnt.txt\", 'w' )\n\n\t#write header lines. \n\theader = [\"taxa\", \"taxon_level\", \"count\"]\n\traw.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\tappended.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\n\t#iterate over numerically sorted taxon list. \n\tfor tax in sorted(taxa, key=lambda item: int(item)):\n\t\ttax_dat = id_dict[tax][0]\n\t\tlevel = tax_dat[1] #extra shit to turn this into easy int for sorting/filteirng.\n\t\tname = tax_dat[2]\n\t\tprint_list = [ name, level ] \n\t\n\t\t#now send the results to outfile. \n\t\ttax_bool = taxa == tax #bool array for where tax id resided in data fields. \n\t\tcnt = str( counts[ tax_bool ][0] )\n\t\traw.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )\n\t\tcnt = str( appended_counts[ tax_bool ][0] )\n\t\tappended.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )",
"def normalize_counts(df):\n # Normalize counts by dividing kmer counts in each row by the number of bases\n df = df.apply(normalize_row, axis = \"columns\")\n return df",
"def make_taxon_table(result_together, samples):\n ##get a named list\n ##result = dict(zip(taxon,SB_100)) #continue from here\n pathogens = pd.Series()\n for sample in samples:\n pathogens = pathogens.append(result_together[sample]['species']['species'])\n\n # Get the unique genera \n pathogens = pathogens.unique()\n d = {'pathogens': pathogens}\n taxon_table = pd.DataFrame(d)\n\n # Remove the non detected pathogens\n taxon_table = taxon_table[taxon_table['pathogens'] != 'non_detected']\n # Create a dataframe with the sample names with values set at zero\n zeros_dataframe = pd.DataFrame(data=0, index=np.arange(len(taxon_table.index)),\\\n columns= samples)\n # Set the index of the zeros dataframe\n zeros_dataframe.index = taxon_table.index\n # Create a frame list\n frame = [taxon_table,zeros_dataframe]\n # Concatenate the dataframes along the columns\n taxon_table = pd.concat(frame, axis=1)\n # Set the index of the dataframe to the names of the pathogens\n taxon_table = taxon_table.set_index('pathogens')\n\n # Loop through every sample while getting the frequency for each pathogen\n for sample in samples:\n #print(sample)\n # Get the detect/pathogens for each sample\n detect = result_together[sample]['species']['species']\n # Get the index in a list form\n index = detect.index.tolist()\n # Get all the frequencies for the dtected pathogens\n frequency = result_together[sample]['species']['freq']\n # Loop\n for pathogen in taxon_table.index.tolist():\n for i in index:\n if (pathogen == detect[i]):\n taxon_table.loc[pathogen,sample] = frequency[i]\n \n return(taxon_table)",
"def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat fpkm table into flattened table of sample_names/values | def reformat_fpkmTable(
self,analysis_id_I=None,sna2experimentID_I=None,
sna2sns_I=None):
if self.fpkmTable: fpkmTable = self.fpkmTable[:];
else: fpkmTable = [];
fpkmTable_flat = self.reformat_countOrFPKMTable(
countOrFPKMTable_I=fpkmTable,
analysis_id_I=analysis_id_I,
sna2experimentID_I=sna2experimentID_I,
sna2sns_I=sna2sns_I,
count_or_FPKM = 'fpkm');
return fpkmTable_flat; | [
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def biom_to_table(table: biom.Table):\n return (table.matrix_data.toarray().astype(int).tolist(),\n table.ids('observation').tolist(),\n table.ids('sample').tolist(),\n list(map(dict, table.metadata(axis='observation') or ())))",
"def format_data_to_table(items):\n table = '| Field Name | Value |\\n'\n for key, value in items:\n table += f'| {key} | {value} |\\n'\n return table",
"def from_multicsv(self,input_data):\n reformatted_data = []\n for (i,row) in enumerate(input_data):\n if i==0:\n headers = row\n else:\n data_row = {}\n for (j,h) in enumerate(headers):\n if j<len(row):\n data_row.update({h : row[j]})\n else:\n data_row.update({h : 0})\n reformatted_data.append(data_row)\n return reformatted_data",
"def data2table(data_list, table):\n take_Data(data_list, table, depth=0)",
"def _reorganize_data(self) -> None:\n\n metadata = self.metadata\n\n self.features = []\n self.labels = []\n\n for i in range(len(metadata)):\n try:\n if isinstance(metadata[i][\"hpt_res\"], str):\n hpt = ast.literal_eval(metadata[i][\"hpt_res\"])\n else:\n hpt = metadata[i][\"hpt_res\"]\n\n if isinstance(metadata[i][\"features\"], str):\n feature = ast.literal_eval(metadata[i][\"features\"])\n else:\n feature = metadata[i][\"features\"]\n\n self.features.append(feature)\n self.labels.append(hpt[metadata[i][\"best_model\"]][1])\n except Exception as e:\n logging.exception(e)\n self.labels = (np.array(self.labels) > self.threshold).astype(int)\n self.features = pd.DataFrame(self.features, copy=False)\n self.features.fillna(0, inplace=True)\n self.features_mean = np.average(self.features.values, axis=0)\n\n self.features_std = np.std(self.features.values, axis=0)\n\n self.features_std[self.features_std == 0] = 1.0\n\n return",
"def early_table(filename):\n dir_name = globals.main_path + \"\\\\pkl tables\"\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n pkl_name = filename+\".pkl\"\n file_path = os.path.join(dir_name, pkl_name)\n if filename == \"summary_table\":\n for i in range(len(globals.summary_table.index)):\n for j in globals.header_summary_table[3:len(globals.header_summary_table)]:\n globals.summary_table.at[i, j] = round(globals.summary_table.at[i, j], 4) # 4 ספרות אחרי הנקודה\n globals.summary_table.to_pickle(file_path) # כאן שמרתי פיקל של הטבלה !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n summary_table_list = globals.summary_table.values.tolist()\n summary_table_int = [list(map(int, x)) for x in summary_table_list]\n for i in range(len(summary_table_list)):\n summary_table_list[i][0] = summary_table_int[i][0]\n summary_table_list[i][1] = summary_table_int[i][1]\n summary_table_list[i][2] = summary_table_int[i][2]\n summary_table_list[i][3] = summary_table_int[i][3]\n summary_table_list = [list(map(str, x)) for x in summary_table_list] # make str list\n return summary_table_list\n else:\n globals.data_quality_table.to_pickle(file_path) # כאן שמרתי פיקל של הטבלה !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n dq_table_list = globals.data_quality_table.values.tolist()\n dq_table_int = [list(map(int, x)) for x in dq_table_list]\n for i in range(len(dq_table_list)):\n dq_table_list[i][0] = dq_table_int[i][0]\n dq_table_list[i][1] = dq_table_int[i][1]\n dq_table_list[i][2] = dq_table_int[i][2]\n dq_table_list[i][3] = dq_table_int[i][3]\n dq_table_list[i][9] = str(dq_table_list[i][9]) + ' %'\n dq_table_list[i][15] = str(dq_table_list[i][15]) + ' %'\n dq_table_list = [list(map(str, x)) for x in dq_table_list] # make str list\n return dq_table_list",
"def tree2OTU_table(mvp_tree):\n series = []\n for terminal in mvp_tree.feature_tree.get_terminals():\n try:\n series.append(terminal.sample_series)\n except:\n print('there is no sample series in tree2OTU ')\n df = pd.dataframe(series)\n return df",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_keys = self.metadata.get_foreign_keys(parent_name, table_name)\n for foreign_key in foreign_keys:\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(\n table_name, parent_name, foreign_key, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n dtypes = self.metadata.get_dtypes(table_name, ids=True)\n for name, dtype in dtypes.items():\n table_rows[name] = table_rows[name].dropna().astype(dtype)\n\n final_data[table_name] = table_rows[list(dtypes.keys())]\n\n return final_data",
"def to_frame(self):\n # Create a set of dictionaries/lists for each column\n data = dict([(i_var.name, []) for i_var in self.inputs])\n data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})\n\n # A very ugly loop to produce all the probabilities in a nice way.\n # Note that this just reproduces what is already in `self.lookup`.\n # Honestly, I just haven't thought of a better way to get nice output.\n for i_index, i_state in enumerate(self.input_states):\n for o_var, results in zip(self.outputs, self.per_state_results):\n for o_state, o_p in enumerate(results[i_index]):\n for i_var, s in zip(self.inputs, i_state):\n data[i_var.name].append(s)\n data[self.OUTPUT_LABEL].append(o_var.name)\n data[self.INPUT_LABEL].append(o_state)\n data[self.name].append(o_p)\n all_data = pd.DataFrame(data=data)\n\n # The magnificent pivot table function does all the work\n return pd.pivot_table(data=all_data, values=[self.name],\n index=[i_var.name for i_var in self.inputs],\n columns=[self.OUTPUT_LABEL, self.INPUT_LABEL])",
"def show_pivot_table(value):\n\n pivot_df = pd.pivot_table(\n data=df, values=value, columns=\"pos_simple\", index=\"ht_bins\"\n )\n pivot_df = pivot_df.reset_index()\n pivot_df = pivot_df.astype({\"ht_bins\": \"str\"})\n cols = [{\"name\": col, \"id\": col} for col in pivot_df.columns]\n data = pivot_df.to_dict(\"records\")\n\n return data, cols",
"def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()",
"def test_format_unifrac_sample_mapping(self):\n a = [[1,0,0], [0,2,4], [7,0,9.0]]\n otu_ids = ['OTUa','OTUb','OTUc']\n sample_ids = ['Sa','Sb','Sc']\n result = format_unifrac_sample_mapping(sample_ids, otu_ids, a)\n self.assertEqual(result, ['OTUa\\tSa\\t1', 'OTUb\\tSb\\t2', 'OTUb\\tSc\\t4', 'OTUc\\tSa\\t7', 'OTUc\\tSc\\t9.0'])",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data",
"def _make_new_table(self, features):\n\n categorical_features = np.array(features[:, self.categorical_ids])\n transformed_categorical = self.encoder.transform(categorical_features).toarray()\n\n # If there are non-categorical features in the data\n if len(self.non_categorical_ids) == 0:\n transformed_features = transformed_categorical\n else:\n # Stack transformed categorical and non-categorical data\n non_categorical_features = np.array(features[:, self.non_categorical_ids])\n frames = (non_categorical_features, transformed_categorical)\n transformed_features = np.hstack(frames)\n\n return transformed_features",
"def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df",
"def test_format_unifrac_sample_mapping(self):\r\n a = [[1, 0, 0], [0, 2, 4], [7, 0, 9.0]]\r\n otu_ids = ['OTUa', 'OTUb', 'OTUc']\r\n sample_ids = ['Sa', 'Sb', 'Sc']\r\n result = format_unifrac_sample_mapping(sample_ids, otu_ids, a)\r\n self.assertEqual(\r\n result,\r\n ['OTUa\\tSa\\t1',\r\n 'OTUb\\tSb\\t2',\r\n 'OTUb\\tSc\\t4',\r\n 'OTUc\\tSa\\t7',\r\n 'OTUc\\tSc\\t9.0'])",
"def _format_data(self):\n formatted_data = []\n\n for row in self._data_agg_by_mean_value.iterrows():\n \n car_make = row[0]\n mean_car_value = round(row[1][0], 2)\n formatted_data.append({'car_make': car_make, 'mean_car_value': mean_car_value})\n\n return formatted_data",
"def FlattenTable(ht):\n\n keyCol = list(ht.key)\n doneFlag = False\n while not doneFlag:\n ht = ht.flatten().expand_types().flatten()\n doneFlag = True\n for k, t in ht.row.items():\n if str(t.dtype).startswith('array'):\n Log(f'Flattening {k} array.')\n try:\n maxLen = ht.aggregate(hl.agg.max(hl.len(ht[k])))\n minLen = ht.aggregate(hl.agg.min(hl.len(ht[k])))\n except:\n LogException(f'Cannot aggregate min or max lenght of the the array {k}.')\n\n if minLen == maxLen:\n Log(f'{maxLen} new column to be created out of {k} array.')\n expr = dict()\n for i in range(1, maxLen+1):\n expr[f'{k}_{i}'] = ht[k][i-1]\n try:\n ht = ht.annotate(**expr)\n except:\n LogException(f'Cannot perform annotation with expression {expr}.')\n try:\n ht = ht.drop(k)\n except:\n LogException(f'Cannot drop {k} from table.')\n doneFlag = False\n else:\n Log(f'{k} of type {t} can not be flattend beacuase its length is variable min:{minLen} max:{maxLen}.', level='WARNING')\n Log(f'Variable length array {k} is converted to string with \" ~#^#~ \" as a seperator', level='WARNING')\n expr = dict()\n expr[k] = hl.str(' ~#^#~ ').join(ht[k])\n try:\n ht = ht.annotate(**expr)\n except:\n LogException(f'Cannot perform annotation with expression {expr}.')\n\n ht = ht.key_by(*keyCol)\n Count(ht)\n return ht"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reformat count or FPKM tables into a flattened table of sample_names/values for rapid alignment of the attr table with tracking_id | def reformat_countOrFPKMTable(
self,
countOrFPKMTable_I=None,
analysis_id_I=None,
sna2experimentID_I=None,
sna2sns_I=None,
count_or_FPKM = 'count'):
#format into a dictionary of rows for quick aligning with the tracking_id
countOrFPKMTable_flat = [];
for row in countOrFPKMTable_I:
for k,v in row.items():
if k=='tracking_id':continue;
tmp = {};
tmp['analysis_id'] = analysis_id_I;
tmp['tracking_id'] = row['tracking_id'];
sample_name_lst = k.split('_');
sample_name_base = '_'.join(sample_name_lst[:-1]);
sample_name_rep = eval(sample_name_lst[-1]);
if sna2experimentID_I:
experiment_id = sna2experimentID_I[sample_name_base];
else:
experiment_id=None;
tmp['experiment_id'] = experiment_id;
if sna2sns_I:
sample_name = sna2sns_I[sample_name_base][sample_name_rep];
else:
sample_name=k;
tmp['sample_name'] = sample_name;
tmp['value'] = v;
tmp['value_units'] = count_or_FPKM;
tmp['used_'] = True;
tmp['comment_'] = None;
countOrFPKMTable_flat.append(tmp);
return countOrFPKMTable_flat; | [
"def make_taxon_table(result_together, samples):\n ##get a named list\n ##result = dict(zip(taxon,SB_100)) #continue from here\n pathogens = pd.Series()\n for sample in samples:\n pathogens = pathogens.append(result_together[sample]['species']['species'])\n\n # Get the unique genera \n pathogens = pathogens.unique()\n d = {'pathogens': pathogens}\n taxon_table = pd.DataFrame(d)\n\n # Remove the non detected pathogens\n taxon_table = taxon_table[taxon_table['pathogens'] != 'non_detected']\n # Create a dataframe with the sample names with values set at zero\n zeros_dataframe = pd.DataFrame(data=0, index=np.arange(len(taxon_table.index)),\\\n columns= samples)\n # Set the index of the zeros dataframe\n zeros_dataframe.index = taxon_table.index\n # Create a frame list\n frame = [taxon_table,zeros_dataframe]\n # Concatenate the dataframes along the columns\n taxon_table = pd.concat(frame, axis=1)\n # Set the index of the dataframe to the names of the pathogens\n taxon_table = taxon_table.set_index('pathogens')\n\n # Loop through every sample while getting the frequency for each pathogen\n for sample in samples:\n #print(sample)\n # Get the detect/pathogens for each sample\n detect = result_together[sample]['species']['species']\n # Get the index in a list form\n index = detect.index.tolist()\n # Get all the frequencies for the dtected pathogens\n frequency = result_together[sample]['species']['freq']\n # Loop\n for pathogen in taxon_table.index.tolist():\n for i in index:\n if (pathogen == detect[i]):\n taxon_table.loc[pathogen,sample] = frequency[i]\n \n return(taxon_table)",
"def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;",
"def combine_counts(\n fns,\n define_sample_name=None,\n):\n counts = []\n for fn in fns:\n df = pd.read_table(fn, skiprows=1, index_col=0)\n counts.append(df[df.columns[-1]])\n combined_counts = pd.DataFrame(counts).T\n if define_sample_name:\n names = [define_sample_name(x) for x in fns]\n combined_counts.columns = names\n combined_counts.index.name = ''\n return combined_counts",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_keys = self.metadata.get_foreign_keys(parent_name, table_name)\n for foreign_key in foreign_keys:\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(\n table_name, parent_name, foreign_key, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n dtypes = self.metadata.get_dtypes(table_name, ids=True)\n for name, dtype in dtypes.items():\n table_rows[name] = table_rows[name].dropna().astype(dtype)\n\n final_data[table_name] = table_rows[list(dtypes.keys())]\n\n return final_data",
"def write_count_tables2( count_data, id_dict, file_name ):\n\ttaxa = count_data[0]\n\tcounts = count_data[1]\n\tappended_counts = count_data[2]\n\t\n\t#open our two output files. \n\traw = open( file_name + \"_raw_cnt.txt\", 'w' )\n\tappended = open( file_name + \"_full_cnt.txt\", 'w' )\n\t\n\t#write header lines. \n\theader = [\"taxa\", \"taxon_level\", \"count\"]\n\traw.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\tappended.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\t\n\t#iterate over numerically sorted taxon list. \n\t#we store the indexes of the sort so we can rapidly access taxa and count data types. \n\tfor tax_idx in np.argsort( taxa.astype( np.int ) ):\n\t\ttax = taxa[ tax_idx ] \n\t\ttax_dat = id_dict[tax][0]\n\t\tlevel = str(int( tax_dat[1] )) #extra shit to turn this into easy int for sorting/filteirng.\n\t\tname = tax_dat[2] + \";\" + tax\n\t\tprint_list = [ name, level ] \n\t\t\n\t\t#now send the results to outfile. \n\t\tcnt = str( counts[ tax_idx ] )\n\t\traw.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )\n\t\tcnt = str( appended_counts[ tax_idx ] )\n\t\tappended.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )",
"def reformat_fpkmTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.fpkmTable: fpkmTable = self.fpkmTable[:];\n else: fpkmTable = [];\n\n fpkmTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=fpkmTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'fpkm');\n return fpkmTable_flat;",
"def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data",
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def write_count_tables( count_data, id_dict, file_name ):\n\ttaxa = count_data[0]\n\tcounts = count_data[1]\n\tappended_counts = count_data[2]\n\n\t#open our two output files. \n\traw = open( file_name + \"_raw_cnt.txt\", 'w' )\n\tappended = open( file_name + \"_full_cnt.txt\", 'w' )\n\n\t#write header lines. \n\theader = [\"taxa\", \"taxon_level\", \"count\"]\n\traw.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\tappended.write(\"%s\\n\" %( \"\\t\".join( header ) ) )\n\n\t#iterate over numerically sorted taxon list. \n\tfor tax in sorted(taxa, key=lambda item: int(item)):\n\t\ttax_dat = id_dict[tax][0]\n\t\tlevel = tax_dat[1] #extra shit to turn this into easy int for sorting/filteirng.\n\t\tname = tax_dat[2]\n\t\tprint_list = [ name, level ] \n\t\n\t\t#now send the results to outfile. \n\t\ttax_bool = taxa == tax #bool array for where tax id resided in data fields. \n\t\tcnt = str( counts[ tax_bool ][0] )\n\t\traw.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )\n\t\tcnt = str( appended_counts[ tax_bool ][0] )\n\t\tappended.write(\"%s\\n\" %( \"\\t\".join( print_list + [cnt] ) ) )",
"def produce_variant_stats_table(infiles, table_file):\n\n var_types=['splicing','UTR3','UTR5','intronic','intergenic','exonic']\n \n # split the input files per task\n sample_no = len(infiles)/3\n raw_variant_files = infiles[0:sample_no]\n kg1_filtered_variant_files = infiles[sample_no:sample_no*2]\n inhouse_filtered_variant_files = infiles[sample_no*2:sample_no*3]\n\n out = open(table_file,'w')\n \n import itertools\n #header = ['sample'] + ['raw_'+t for t in var_types] + ['rare_'+t for t in var_types] + ['raw_synonymous','rare_synonymous']\n filtering_stages = ['raw_','kg1_','inhouse_']\n header = ['sample'] + \\\n [f+t for (f,t) in itertools.product(filtering_stages, var_types)] + \\\n [s+'_synonymous' for s in filtering_stages]\n out.write(('\\t'.join(header))+'\\n')\n for i in range(0,sample_no):\n out.write(os.path.basename(raw_variant_files[i][0]).split('.')[0])\n for fname in [raw_variant_files[i][0], kg1_filtered_variant_files[i][0], \\\n inhouse_filtered_variant_files[i][0]]: # exonic variant stats of raw variants and rare variants\n counts = dict.fromkeys(var_types,0)\n f=open(fname) \n for l in f.xreadlines():\n for var_type in var_types:\n if l.find(' '+var_type)>0:\n counts[var_type] += int(l.split()[0])\n break\n out.write('\\t'+'\\t'.join([str(counts[t]) for t in var_types]))\n f.close()\n\n for fname in [raw_variant_files[i][1], kg1_filtered_variant_files[i][1], \\\n inhouse_filtered_variant_files[i][1]]: # synonymous variants stats of raw and rare variants\n f=open(fname)\n found=False\n for l in f.xreadlines():\n if l.find(\" synonymous\")>0:\n found=True\n out.write('\\t'+l.split()[0])\n break \n if not found: out.write('\\t0') \n f.close()\n out.write('\\n')\n out.close()",
"def init_sample_table(vocab_counts):\r\n count = [ele for ele in vocab_counts]\r\n pow_frequency = np.array(count)**0.75\r\n power = sum(pow_frequency)\r\n ratio = pow_frequency / power\r\n table_size = 1e8\r\n count = np.round(ratio*table_size)\r\n sample_table = []\r\n for idx, x in enumerate(count):\r\n sample_table += [idx]*int(x)\r\n return np.array(sample_table)",
"def get_formatted_data(self, df):\n \n df = df[['Customer ID','new_label','sales_label']]\n \n\n\n mp = dict()\n new_label = df['new_label'].unique()\n\n mp2 = dict()\n sales_label = df['sales_label'].unique()\n\n for i in new_label:\n mp[i] = [ set(), 0]\n\n for i in sales_label:\n mp2[i] = [ set(), 0]\n\n\n for i in range(len(df)):\n mp[df['new_label'][i]][0].add(df['Customer ID'][i])\n mp2[df['sales_label'][i]][0].add(df['Customer ID'][i])\n\n for i in mp:\n mp[i][0] = list(mp[i][0])\n mp[i][1] = len(mp[i][0])\n \n for i in mp2:\n mp2[i][0] = list(mp2[i][0])\n mp2[i][1] = len(mp2[i][0])\n\n # return mp,mp2,self.cluster2_data,self.cluster1_data\n return mp",
"def make_all_count_tables(folderName):\r\n\tallSamples = get_all_files(folderName)\r\n\t\r\n\tallCountDicts = []\r\n\tfor fileName in allSamples:\r\n\t\tsampleDict = makeCountDict(fileName, folderName)\r\n\t\tif sampleDict == None:\r\n\t\t\tprint (\"Warning: \"+fileName+\" doesn't have read counts. Ignored.\")\r\n\t\telse:\r\n\t\t\tallCountDicts.append(sampleDict)\r\n\r\n\tconditions = get_conditions(allSamples)\r\n\t#combos = get_file_combinations(conditions)\r\n\r\n\tallCountDicts = check_count_dicts(allCountDicts)\r\n\r\n\tmake_count_table(allCountDicts, folderName+\"All_Counts.tsv\")\r\n\r\n\tfor s in allCountDicts:\r\n\t\tmake_count_table([s], folderName+s[\"name\"])\r\n\r\n\t#for c in combos:\r\n\t#\tmake_count_table(get_dicts_for_comparison(c,allCountDicts), folderName+make_output_name(c))\r",
"def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df",
"def map(self):\n self.df_primary_col = self.df.columns\n #print(len(self.df.columns))\n array1 = self.meta.index# sample id of metadata\n array2 = self.df.index # sample id of feature-table\n mapped_dict ={'metadata':[],'feature_table':[]}\n for i in range(len(array1)):\n for j in range(len(array2)):\n if array2[j] == array1[i]:\n mapped_dict['metadata'].append(i)\n mapped_dict['feature_table'].append(j)\n break\n\n temp_table = self.df.iloc[mapped_dict['feature_table'],:]\n temp_table.index = list(range(temp_table.shape[0]))\n temp_meta = self.meta.iloc[mapped_dict['metadata'],:]\n temp_meta.index = list(range(temp_meta.shape[0]))\n assert temp_meta.shape[0] == temp_table.shape[0]\n self.df = pd.concat([temp_table,temp_meta],axis=1)\n new_index = []\n for ele in mapped_dict['metadata']:\n new_index.append(array1[ele])\n self.df.index=new_index",
"def _reorganize_data(self) -> None:\n\n metadata = self.metadata\n\n self.features = []\n self.labels = []\n\n for i in range(len(metadata)):\n try:\n if isinstance(metadata[i][\"hpt_res\"], str):\n hpt = ast.literal_eval(metadata[i][\"hpt_res\"])\n else:\n hpt = metadata[i][\"hpt_res\"]\n\n if isinstance(metadata[i][\"features\"], str):\n feature = ast.literal_eval(metadata[i][\"features\"])\n else:\n feature = metadata[i][\"features\"]\n\n self.features.append(feature)\n self.labels.append(hpt[metadata[i][\"best_model\"]][1])\n except Exception as e:\n logging.exception(e)\n self.labels = (np.array(self.labels) > self.threshold).astype(int)\n self.features = pd.DataFrame(self.features, copy=False)\n self.features.fillna(0, inplace=True)\n self.features_mean = np.average(self.features.values, axis=0)\n\n self.features_std = np.std(self.features.values, axis=0)\n\n self.features_std[self.features_std == 0] = 1.0\n\n return",
"def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table",
"def build_contingency_table(\n sample_data: pd.Series, batch_data: pd.Series\n) -> pd.DataFrame:\n categorical_values = pd.concat([sample_data, batch_data])\n data_origins = np.array([\"sample\"] * len(sample_data) + [\"batch\"] * len(batch_data))\n\n return pd.crosstab(index=categorical_values, columns=data_origins)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering hours as json. | def json_hours(request):
current_site = Site.find_for_request(request)
if request.method == 'GET':
if request.GET.get('fallback'):
fallback = request.GET['fallback']
return JsonResponse(
{
'llid': get_default_unit().location.libcal_library_id,
}
)
else:
libcalid = request.GET['libcalid']
all_building_hours = json.dumps(get_building_hours_and_lid(current_site))
return JsonResponse(
{
'all_building_hours': all_building_hours,
'current_hours': get_json_hours_by_id(int(libcalid), all_building_hours),
'llid': libcalid,
'llid_fallback': get_default_unit().location.libcal_library_id,
}
) | [
"def hours(self) -> pli.Series:",
"def _draw_hours(self):\n tmp_str_list = []\n for i in range(0, self._g_width, self._min_grid):\n if i % self._hour_grid == 0:\n tmp_str_list.append('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n tmp_str_list.append('<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>' % (\n i + 20, 20, (i / self._hour_grid + self._offset) % 24))\n else:\n tmp_str_list.append('<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n return \"\".join(tmp_str_list)",
"def timetables():\n timetable = get_timetable()\n\n return json.dumps(timetable)",
"def hours(self, venue_id):\n response = self._request(V2_ENDPOINTS['HOURS'] + venue_id)\n return response",
"def hours(self):\n return self.config['hours']",
"def format_hours(self, data):\n return unicode('%f' % data).rstrip('0').rstrip('.')",
"def hourly(self):\n return c.Hourly(self)",
"def open_hours_detail(self):\n return self._open_hours_detail",
"def pickup_rush_hours_2018():\n query = ''' SELECT * FROM temp_pickup_rush_hours_2018; '''\n result = execute_query(query) \n return jsonify([{'time': a[0], 'pickups': a[1]} for a in result])",
"def get_opening_hours_display(self):\n hours = self.opening_hours.all()\n\n results = []\n\n for hour in hours:\n if hour.is_closed is True:\n results.append((\n hour.get_weekday_display(),\n _(\"closed\")\n )) \n else:\n results.append((\n hour.get_weekday_display(),\n hour.from_hour,\n hour.to_hour()\n ))\n return results",
"def display_hour_range(value):\n\n if value is None or value == 'Day':\n return {'display': 'none'}\n else:\n return {'display': 'unset'}",
"def pickup_rush_hours():\n query = ''' WITH t1 AS (\n SELECT STRFTIME('%H:00', tpep_pickup_datetime) AS pickup_time\n FROM trips)\n SELECT pickup_time, COUNT(pickup_time) AS pickup_counts\n FROM t1\n GROUP BY pickup_time\n ORDER BY pickup_time; \n '''\n result = execute_query(query) \n return jsonify([{'time': a[0], 'pickups': a[1]} for a in result])",
"def polling_hours(self, hours):\n return hours",
"def hourly_data(self):\n return self._hourly_data",
"def get_hourly_weather(self) -> str:\n raw_dicts = self.raw_hourly[:24]\n hourly_dicts = [self._generate_hourly_report(d) for d in raw_dicts]\n reports = self._format_hourly_reports(hourly_dicts)\n report_string = (\n f\"\\nNext 24 hours in {self.loc_name}:\\n\"\n )\n for report in [self._generate_hourly_report_string(report) for report in reports]:\n report_string += report + \"\\n\"\n return report_string",
"def stats_format_hours(app_id, hours, hours_anon, hours_auth,\r\n max_hours, max_hours_anon, max_hours_auth):\r\n hourNewStats = dict(label=\"Anon + Auth\", disabled=\"True\", values=[], max=0)\r\n hourNewAnonStats = dict(label=\"Anonymous\", values=[], max=0)\r\n hourNewAuthStats = dict(label=\"Authenticated\", values=[], max=0)\r\n\r\n hourNewStats['max'] = max_hours\r\n hourNewAnonStats['max'] = max_hours_anon\r\n hourNewAuthStats['max'] = max_hours_auth\r\n\r\n for h in sorted(hours.keys()):\r\n # New answers per hour\r\n #hourNewStats['values'].append(dict(x=int(h), y=hours[h], size=hours[h]*10))\r\n if (hours[h] != 0):\r\n hourNewStats['values'].append([int(h), hours[h],\r\n (hours[h] * 5) / max_hours])\r\n else:\r\n hourNewStats['values'].append([int(h), hours[h], 0])\r\n\r\n # New Anonymous answers per hour\r\n if h in hours_anon.keys():\r\n #hourNewAnonStats['values'].append(dict(x=int(h), y=hours[h], size=hours_anon[h]*10))\r\n if (hours_anon[h] != 0):\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h],\r\n (hours_anon[h] * 5) / max_hours])\r\n else:\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h], 0])\r\n\r\n # New Authenticated answers per hour\r\n if h in hours_auth.keys():\r\n #hourNewAuthStats['values'].append(dict(x=int(h), y=hours[h], size=hours_auth[h]*10))\r\n if (hours_auth[h] != 0):\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h],\r\n (hours_auth[h] * 5) / max_hours])\r\n else:\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h], 0])\r\n return hourNewStats, hourNewAnonStats, hourNewAuthStats",
"def display_hours():\n\twith open('tiger_den_hours.txt', 'r') as hours, open('hours.txt', 'w+') as o:\n\t\tfor line in hours: \n\t\t\to.write(line + '\\n')\n\t\t\tprint(line.strip())",
"def get_time_dict(self):\n return {\n \"activity\" : self.activity_name,\n \"start_time\": self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"end_time\": self.end_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"days\": self.days,\n \"hours\": self.hours,\n \"minutes\": self.minutes,\n \"seconds\": self.seconds,\n }",
"def hours(hours):\n return Duration(hours=hours)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering events feed data as json. | def json_events(request):
if request.method == 'GET':
ttrss_url = request.GET['feed']
# need xml for this.
university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'
n = datetime.datetime.now()
return JsonResponse(
{
'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))
}
) | [
"def view_events():\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]",
"def get_events(request):\n events = Event.objects.all()\n return JsonResponse({\n 'events': [event.to_dict() for event in events],\n })",
"def api_get_events():\n events = []\n return jsonify(events)",
"def get_all_events():\n # TODO(funkysayu): Implement the user visibility limit.\n events = Event.query.all()\n return jsonify(events=[e.to_dict() for e in events])",
"def output_timeline(self):\n data = {}\n events = []\n for event in self.events:\n one_event = dict()\n one_event['name'] = event.name\n one_event['date'] = event.date.strftime('%Y-%m-%d')\n if event.amount > 0:\n one_event['type'] = 'income'\n if event.spendable > 0:\n one_event['spendable'] = str(Decimal(event.spendable).quantize(\n Decimal('.01'), rounding=ROUND_HALF_UP))\n one_event['allocations'] = event.sources\n else:\n one_event['type'] = 'expense'\n one_event['sources'] = event.sources\n events.append(one_event)\n data['events'] = events\n pprint.pprint(data)",
"def to_json(self):\n self._load_all_events()\n return json.dumps(\n [x.to_dict() for events in self._events.values() for x in events], indent=2\n )",
"def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}",
"def events_view(request):\n try:\n\n if 'start' and 'end' in request.params:\n datetime_start = datetime.fromtimestamp(int(request.params['start']))\n datetime_end = datetime.fromtimestamp(int(request.params['end']))\n\n events = DBSession.query(Events.id, Events.date_start, Events.date_end, Events.title).filter(\n Events.date_start.between(datetime_start, datetime_end)).all()\n\n return [dict(id=item.id, title=item.title, start=str(item.date_start), end=str(item.date_end), allDay=False)\n for item in events]\n\n except DBAPIError, ex:\n print ex\n return Response(conn_err_msg, content_type='text/plain', status_int=500)\n\n except Exception, ex:\n print ex\n return Response(ex, content_type='text/plain', status_int=500)",
"def __json__(self, request=None):\n act_json = {\n \"id\": self.id,\n \"title\": self.title,\n \"notes\": self.notes,\n \"act_type\": self.ACT_TYPE[self.act_type],\n \"year\": self.year,\n \"events\": [] if not hasattr(self, 'events')\n else [event.__json__() for event in self.events],\n }\n return act_json",
"def get_event_data_ajax(request):\n if request.method == \"POST\":\n event_id = request.POST.get('id')\n event = Event.get_event_by_id(event_id)\n if event:\n response_data = {\n 'title': event.title,\n 'description': event.description,\n 'date': datetime.combine(event.date, event.time).strftime(\"%I:%M%p on %B %d, %Y\"),\n 'map_coordinates': event.place,\n 'result': '100'\n }\n else:\n response_data = {'result': '114'}\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return HttpResponseRedirect('/')",
"def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()",
"def format(self):\n if isinstance(self._obj, list):\n # list of objects\n event_obj = []\n for o in self._obj:\n item = {}\n if 'to_json' in dir(o):\n item = o.to_json()\n elif isinstance(o, dict):\n item = o\n else:\n for oa in [x for x in o.__dict__ if not x.startswith('_')]:\n if type(getattr(o, oa)) is datetime.datetime:\n item[oa] = getattr(o, oa).strftime(\n '%m/%d/%Y %H:%M:%S')\n elif type(getattr(o, oa)) is UUID:\n item[oa] = str(getattr(o, oa))\n else:\n item[oa] = getattr(o, oa)\n event_obj.append(item)\n\n else:\n event_obj = {}\n for x in [attr for attr in self.__dict__ if not attr.startswith('_')]:\n event_obj[x] = getattr(self, x)\n data = {}\n data['time'] = self._timestamp\n data['sourcetype'] = self._key\n data['event'] = {\n 'request': self.format_request(),\n 'auth': self._auth,\n 'user': self._user,\n 'eventData': event_obj,\n 'event': self._name,\n }\n return data",
"def json_encode_list(self, eventlist):\n jsonlist = []\n if eventlist:\n for event in eventlist:\n jsonevent = {}\n start, end = event.convert_to_timestamp()\n jsonevent['id'] = event.id\n jsonevent['title'] = event.title\n jsonevent['url'] = reverse('Event',\n current_app=self.request.resolver_match.namespace,\n args=[event.id])\n jsonevent['class'] = event.type\n jsonevent['start'] = int(start * 1000)\n jsonevent['end'] = int(end * 1000)\n jsonlist.append(jsonevent)\n return {\"result\": jsonlist, \"success\": \"1\"}",
"def process_events_json(events):\n trace=process_events(events)\n return json.dumps(trace, separators=(',',':'), indent=2)",
"def json_news(request):\n if request.method == 'GET':\n feed = request.GET['feed']\n return JsonResponse(\n {\n 'news': get_news(feed),\n }\n )",
"def events(request):\n events = []\n\n # Get all upcoming events.\n upcoming = Event.objects.filter(start_date__gte=datetime.date.today)\n upcoming = upcoming.order_by('start_date')\n\n for e in upcoming:\n events.append(e)\n\n context = collect_events(events)\n return render(request, 'events.html', context)",
"def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)",
"def get_events_json(self, query_string, **kwargs):\n\n response = self._search_events(query_string, output_format=\"json\", **kwargs)\n\n return response.text",
"def event_activity_json(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(event_id, limit, q))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for rendering news feed data as json. | def json_news(request):
if request.method == 'GET':
feed = request.GET['feed']
return JsonResponse(
{
'news': get_news(feed),
}
) | [
"def news():\n response.generic_patterns = ['.rss']\n nodes = db().select(db.node.ALL, orderby=db.node.title)\n return dict(\n \ttitle = 'node rss feed',\n\t link = 'http://127.0.0.1:8000/thinker/default/index', description = 'idea news',\n\t created_on = request.now,\n\t items = [\n\t dict(title = row.title,\n\t link = URL('show', args=row.id),\n\t description = MARKMIN(row.body).xml(),\n\t created_on = row.created_on\n\t ) for row in nodes])",
"def serialize_news(self):\n return {\n 'category': self.category,\n 'datetime': self.datetime,\n 'headline': self.headline,\n 'image': self.image,\n 'related': self.related,\n 'source': self.source,\n 'summary': self.summary,\n 'url': self.url,\n }",
"def top_news():\n data = get_top_news()\n return jsonify(data)",
"def json_events(request):\n if request.method == 'GET':\n ttrss_url = request.GET['feed']\n\n # need xml for this. \n university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'\n\n n = datetime.datetime.now()\n return JsonResponse(\n {\n 'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))\n }\n )",
"def GET(self, *args):\n all_news= self.get_all_news()\n all_news.sort( key=lambda n : n['date'], reverse=True)\n if len(args):\n n_last=int(args[0])\n all_news = all_news[:n_last]\n\n return json.dumps(all_news)",
"def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)",
"def news(news_post, request):\n\n return {'post': news_post}",
"def json(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'content': self.content,\n 'author_id': self.author_id\n }",
"def newsitems_geojson(request):\r\n # Note: can't use @cache_page here because that ignores all requests\r\n # with query parameters (in FetchFromCacheMiddleware.process_request).\r\n # So, we'll use the low-level cache API.\r\n\r\n # Copy-pasted code from ajax_place_newsitems. Refactoring target:\r\n # Seems like there are a number of similar code blocks in\r\n # ebpub.db.views?\r\n\r\n pid = request.GET.get('pid', '')\r\n schema = request.GET.get('schema', None)\r\n if schema is not None:\r\n schema = get_object_or_404(Schema, slug=schema)\r\n\r\n nid = request.GET.get('newsitem', '')\r\n\r\n newsitem_qs = NewsItem.objects.by_request(request)\r\n if nid:\r\n newsitem_qs = newsitem_qs.filter(id=nid)\r\n else:\r\n filters = FilterChain(request=request, queryset=newsitem_qs, schema=schema)\r\n if pid:\r\n filters.add_by_place_id(pid)\r\n else:\r\n # Whole city!\r\n pass\r\n\r\n # More copy/paste from ebpub.db.views...\r\n # As an optimization, limit the NewsItems to those published in the\r\n # last few days.\r\n filter_sf_dict = _get_filter_schemafields(schema)\r\n filters.update_from_request(filter_sf_dict)\r\n if not filters.has_key('date'):\r\n end_date = today()\r\n start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)\r\n filters.add('date', start_date, end_date)\r\n newsitem_qs = filters.apply()\r\n newsitem_qs = newsitem_qs.by_request(request)\r\n\r\n # Put a hard limit on the number of newsitems, and throw away\r\n # older items.\r\n newsitem_qs = newsitem_qs.select_related().order_by('-item_date', '-pub_date', '-id')\r\n newsitem_qs = newsitem_qs[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]\r\n\r\n # Done preparing the query; cache based on the raw SQL\r\n # to be sure we capture everything that matters.\r\n cache_seconds = 60 * 5\r\n cache_key = 'newsitem_geojson:' + _make_cache_key_from_queryset(newsitem_qs)\r\n output = cache.get(cache_key, None)\r\n if output is None:\r\n newsitem_list = list(newsitem_qs)\r\n output = api_items_geojson(newsitem_list)\r\n cache.set(cache_key, output, cache_seconds)\r\n\r\n response = HttpResponse(output, mimetype=\"application/javascript\")\r\n patch_response_headers(response, cache_timeout=60 * 5)\r\n return response",
"def lastNews(self, request):\n data = []\n per_page = 20\n start_indx = 0\n fin_indx = per_page\n if \"source_id\" not in request.POST:\n return {\n \"error\": 1,\n \"message\": \"Source ID doesn't provide\"\n }\n if \"page\" in request.POST:\n start_indx = int(request.POST[\"page\"])*per_page\n fin_indx = (int(request.POST[\"page\"])+1)*per_page\n source_id = request.POST[\"source_id\"]\n source = NewsSource.objects.get(pk=source_id)\n if source:\n last_news = NewsTonal.objects.filter(\n news_item__source=source\n ).order_by(\n \"-news_item__date\"\n )[start_indx:fin_indx]\n for item in last_news:\n item_date = item.news_item.date.replace(\n tzinfo=pytz.utc\n ).astimezone(pytz.timezone(settings.TIME_ZONE))\n data.append({\n \"title\": item.news_item.title,\n \"date\": item_date.strftime(\"%d/%m/%Y %H:%M:%S\"),\n \"link\": item.news_item.link,\n \"tonality_index\": item.tonality_index\n })\n return data",
"def get_news_list():\n # 获取参数\n args_dict = request.args\n page = args_dict.get('p', 1)\n per_page = args_dict.get('per_page', constants.HOME_PAGE_MAX_NEWS)\n category_id = args_dict.get('cid', 1)\n\n # 校验参数\n try:\n page = int(page)\n per_page = int(per_page)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.PARAMERR, errmsg='参数错误')\n\n # 3. 查询数据并分页\n filters = []\n # 如果分类id不为1,那么添加分类id的过滤\n if category_id != \"1\":\n filters.append(News.category_id == category_id)\n\n try:\n paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page, per_page, False)\n # 获取查询出来的数据\n items = paginate.items\n # 获取总页数\n total_page = paginate.pages\n # print(total_page)\n current_page = paginate.page\n # print(current_page)\n except Exception as e:\n print(\"*\"*100)\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg='数据查询失败')\n\n news_li = []\n for news in items:\n news_li.append(news.to_basic_dict())\n\n # 4. 返回数据\n return jsonify(errno=RET.OK, errmsg=\"OK\", totalPage=total_page, currentPage=current_page, newsList=news_li, cid=category_id)",
"def stories(request):\n req = urllib.request.Request('http://exp-api:8000/api/v1/stories/')\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n json_data = json.loads(resp_json)\n\n return render(\n request,\n 'stories_homepage.html',\n context={'data': json_data['results']}\n )",
"def render():\n # sites = check_cc_sites_status()\n queries = News.get_all_queries_by_request(request)\n articles = News.query \\\n .filter(*queries) \\\n .order_by(News.id.desc())\\\n .limit(30)\\\n .all()\n articles = [i.serialize for i in articles]\n\n return render_template(\"index.html\", news=articles, cc_sites=None)",
"def news_list():\n\n # 1.获取参数\n cid = request.args.get('cid',\"1\")\n page = request.args.get('page',\"1\")\n per_page = request.args.get('per_page',\"10\")\n\n # 2,参数类型转换\n try:\n page = int(page)\n per_page = int(per_page)\n except Exception as e:\n page = 1\n per_page = 10\n\n # 3.分页查询\n try:\n # 判断分类编号是否,不等于1,最新分类是按照时间倒序排列的\n filters = []\n if cid != \"1\":\n filters.append(News.category_id == cid)\n paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page,per_page,False)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"分页获取失败\")\n\n # 4.获取分页对象属性,总页数,当前页,当前页对象\n totalPage = paginate.pages\n currentPage = paginate.page\n items = paginate.items\n\n # 5.将当前页对象列表,转成字典列表\n newsList = []\n for item in items:\n newsList.append(item.to_dict())\n\n # 6.响应,返回json数据\n return jsonify(errno=RET.OK,errmsg=\"获取成功\",totalPage = totalPage,currentPage = currentPage,newsList = newsList)",
"def news(self, news: list) -> list:\n news_formatted = []\n\n for new in news:\n post_header = new.find(\"div\", {\"class\": \"post-header\"})\n post_inner = new.find(\"div\", {\"class\": \"post-inner\"})\n data_header = self.__format_header(post_header)\n data_inner = self.__format_inner(post_inner)\n\n news_formatted.append({\n **data_header,\n **data_inner\n })\n\n return news_formatted",
"def get(self):\n date_query_param = request.args.get('date', '')\n date_query_param = self.to_date(date_query_param) if date_query_param else ''\n title_query_param = request.args.get('title', '')\n\n # find article by date and title\n if date_query_param and title_query_param:\n news_article = NewsArticleModel.find_by_date_and_title(date_query_param, title_query_param)\n return news_article.json()\n # find article(s) by date only\n elif date_query_param and not title_query_param:\n news_articles = NewsArticleModel.find_by_date(date_query_param)\n return {\n 'news_articles': [news_article.json() for news_article in news_articles]\n }\n # find articles by title only\n elif title_query_param and not date_query_param:\n news_article = NewsArticleModel.find_by_title(title_query_param)\n return news_article.json()\n else:\n return {'message': 'Article not found'}, 404",
"def _jsonify_feed(feed, root=None):\n\tjsonified_feeds = [fd.to_dict() for fd in feed.sub_feeds]\n\tjsonified_feed = {'title': feed.title, 'subfeeds': jsonified_feeds}\n\tjsonified_feed['root_url'] = feed.root_url\n\tjsonified_feed['root_dir'] = feed.root_dir\n\tjsonified_feed['items_url'] = feed.items_url\n\tif root:\n\t\tjsonified_feed['items_url'] = root+'static/'+jsonified_feed['items_url']\n\treturn jsonified_feed",
"def _get_news_item(self, url, title, date, news):\n return {\"url\": url, \"title\": title, \"datee\": date, \"news\": news}",
"def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View for retrieving the chat status for Ask a Librarian pages. Returns json. | def chat_status(request):
if request.method == 'GET':
ask_name = request.GET['name']
status = get_chat_status_and_css(ask_name)
return JsonResponse(
{
'chat_status': status[0],
'chat_css': status[1],
}
) | [
"def chat_status(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'GET' and request.is_ajax():\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n status = team.num_waiting_messages\n return HttpResponse(json.dumps({\"num_messages\": status}))\n else:\n return HttpResponseNotFound()",
"def chat_list(request):\n if request.method == 'GET':\n chats = Chat.objects.all()\n \n chats_serializer = ChatSerializer(chats, many=True)\n return JsonResponse(chats_serializer.data, safe=False)",
"def get_chat_statuses():\n return {\n 'uofc-ask': get_chat_status_css('uofc-ask'),\n 'crerar': get_chat_status_css('crerar'),\n 'eckhart': get_chat_status_css('crerar'),\n 'law': get_chat_status_css('law'),\n 'ssa': get_chat_status_css('ssa'),\n 'dissertation-office': get_chat_status_css('dissertation-office')\n }",
"def load_messages(request):\n thread = models.MessageThread.objects.get(hash_id=request.GET['id'])\n \n # check if user is a part of this chat\n if not request.user in thread.clients.all():\n return HttpResponse(status=403)\n\n # query for messages filter\n q = [Q(thread=thread)]\n if 'before' in request.GET:\n q.append(Q(date__lt=int(request.GET['before'])))\n\n # query messages matching filter\n messages = models.Message.objects.filter(*q).order_by('-id')\n messages_data = serializers.MessageListSerializer(messages[:30]).data\n\n # mark any unread messages in chat as read\n thread.mark_read(request.user)\n return JsonResponse({\"messages\":messages_data,\"end\":messages.count() <= 30})",
"def index(request):\n\n messages = []\n last_state = 0\n try:\n history = MessageHistory.objects.all()\n\n messages = []\n last_state = history.latest('id').id\n for entry in history:\n if not entry.is_addition or entry.message.is_deleted:\n continue\n\n messages.append({\n 'id': entry.message.id,\n 'username': entry.message.user.username,\n 'message_text': entry.message.message_text,\n 'pub_date': entry.message.pub_date.isoformat(),\n 'mine': entry.message.user == request.user,\n })\n except ObjectDoesNotExist:\n pass\n\n return render(request, 'app/index.html', {'messages_json': json.dumps(messages), 'last_state': last_state})",
"def p_status(request):\n \"\"\"\n 功能说明: 金豆商城活动, 用户是否参加活动\n -----------------------------------------------\n 修改人 修改时间\n -----------------------------------------------\n 张嘉麒 2017-11-07\n \"\"\"\n data = {}\n args = request.loads() or request.QUERY.casts(active_id=int)\n active_id = int(args.active_id or 0)\n if not active_id:\n data['status'] = 0\n return jsonp_fail(request, message=u'参数不正确')\n user_id = request.user_id\n status = common.active_status(user_id, active_id)\n data['status'] = status\n active_count_num = common.active_count(active_id)\n data['active_num'] = active_count_num\n\n return jsonp_ok(request, data)",
"def status_view(self):\n return self.post(action=\"status_trn\")",
"def get_chat_status(name):\n try:\n libid = LIBCHAT_IDS[name]\n response = requests.get(LIBCHAT_STATUS_URL + libid, timeout=12)\n data = json.loads(response.content)\n except(requests.exceptions.Timeout, json.decoder.JSONDecodeError):\n data = json.loads('{\"online\":false,\"who\":{}}')\n\n return data['online']",
"def load_inbox(request):\n threads = models.MessageThread.objects.filter(clients=request.user).annotate(\n unread_count=Count('receipts',filter=Q(receipts__recipient=request.user))\n )\n thread_data = serializers.MessageThreadListSerializer(threads).data\n #user = userauth_models.User.objects.filter(username=request.user.username)\n #print(user.username)\n #print(get_channel_layer())\n #print(request.session['channel_name'])\n return JsonResponse({'threads':thread_data})",
"def chatlist(request):\n\n chats = get_chat_list()\n chat_list = pagination(request, chats, CHATS_PER_PAGE)\n\n dic = {'chatlist': chat_list}\n return render_to_response('whatsapp/chatlist.html', dic, context_instance=RequestContext(request))",
"def ajax_status(request):\r\n if not request.user.is_authenticated():\r\n raise PermissionDenied\r\n\r\n\r\n qs = UserPreference.objects.filter(\r\n user=request.user,\r\n key=NOTIFICATION_PREF_KEY\r\n )\r\n\r\n return HttpResponse(json.dumps({\"status\":len(qs)}), content_type=\"application/json\")",
"def get_all_unread_messages(request):\n if request.method == 'POST':\n # get the user name from the request \n user_name = request.POST.get('user_name')\n\n try:\n # get the user id by given the user_name from the User object.\n user_obj = User.objects.filter(username=user_name).first()\n # get all the unreaded messages that sent to the requsted user\n return HttpResponse(json.dumps(list(Message.objects.values().filter(receiver=user_obj.username).filter(read=False))),content_type='application/json')\n except:\n return HttpResponseNotFound(\"user name does not exsit\")",
"def chirping(request):\n chirps = Chirp.objects.all().select_related()\n rendered = render_to_string(\"message.html\", {\n 'chirps': chirps,\n 'user': request.user,\n 'is_admin': request.user.is_superuser})\n\n return HttpResponse(json.dumps([{'chirps': rendered}]),\n mimetype=\"application/json\")",
"def test_get_project_chat_messages_passes(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"chat\": [], \"pagination\": None})\n # to do: add comments and test again",
"def list(self, request, *args, **kwargs):\n return super(PublicChatViewSet, self).list(request, *args, **kwargs)",
"def chat(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'POST':\n # There is data in the post request, but we don't need anything but\n # the message because normal users can't send as staff or other teams\n m = Message.objects.create(time=timezone.now(), text=request.POST.get('message'),\n is_response=False, team=team)\n team.num_waiting_messages = 0\n messages = [m]\n else:\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n if(team.hunt.is_locked and not team.is_playtester_team):\n return render(request, 'access_error.html', {'reason': \"hunt\"})\n if request.is_ajax():\n messages = Message.objects.filter(pk__gt=request.GET.get(\"last_pk\"))\n else:\n messages = Message.objects\n messages = messages.filter(team=team).order_by('time')\n\n # The whole message_dict format is for ajax/template uniformity\n rendered_messages = render_to_string('chat_messages.html',\n {'messages': messages, 'team_name': team.team_name})\n message_dict = {team.team_name: {'pk': team.pk, 'messages': rendered_messages}}\n try:\n last_pk = Message.objects.latest('id').id\n except Message.DoesNotExist:\n last_pk = 0\n team.num_waiting_messages = 0\n\n team.save() # Save last_*_message vars\n context = {'message_dict': message_dict, 'last_pk': last_pk}\n if request.is_ajax() or request.method == 'POST':\n return HttpResponse(json.dumps(context))\n else:\n context['team'] = team\n return render(request, 'chat.html', context)",
"def json_chat(self):\r\n return {\"idChat\": self.id_chat, \"idRequest\": self.id_request,\r\n \"idService\": self.id_service, \"idMemberATE\": self.id_member_ate}",
"def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200",
"def get(self, id):\n if id is None:\n chat = get_all_rows('chat')\n return jsonify(chat)\n else:\n chat = query_by_id('chat', id)\n\n if chat is not None:\n response = jsonify(chat)\n else:\n raise RequestError(404, 'chat not found')\n\n return response"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For a disk index k, the family of lists of indices, and the index of disk k in the family L, returns the list / set of disk indices whose speed will be modified by k | def influence(k,L,n):
try:
        to_check = L[n-1] #set of indices
contact_direct=C(k,0)
return list(to_check.intersection(contact_direct))
except:
return [] | [
"def indices(self):",
"def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks",
"def ind(k):\n return k-1",
"def list_indices(self):",
"def occurk(self,couleur,k):\n l = []\n for i in self.liste:\n l.append(i.couleur)\n # verification que k ieme occurence de la boule de couleur existe\n if couleur == \"R\":\n if l.count(0) < k:\n raise Exception(\"Il n'existe pas assez de \",couleur)\n else:\n return l.index(0)+k-1\n if couleur == \"V\":\n if l.count(1) < k:\n raise Exception(\"Il n'existe pas assez de \",couleur)\n else:\n return l.index(1)+k-1\n if couleur == \"B\":\n if l.count(2) < k:\n raise Exception(\"Il n'existe pas assez de \",couleur)\n else:\n return l.index(2)+k-1",
"def k_rank_approximate(doc_matrix, k):\n return []",
"def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists",
"def indices(i,j,k):\n\n start = time.time()\n count = 0\n indices = []\n for (k31,l31) in upper_remove[i]:\n for (k32,l32) in upper_remove[j]:\n for (k3,l3) in upper:\n newupper = upperremove([(k3,l3)])\n for (k4,l4) in newupper:\n newupper1 = upperremove([(k31,l31)])\n for (k41,l41) in newupper1:\n index1 = indexremove((i,k31,l31))\n for r in range(2):\n l11 = index1[r]\n l21 = index1[1-r]\n index2 = indexremove((j,k32,l32))\n for s in range(2):\n l12 = index2[s]\n l22 = index2[1-s]\n index3 = indexremove((k3,l3))\n for perm in permutation(index3):\n k11, l1, l2 = perm\n index4 = indexremove((k41,l41))\n for perm1 in permutation(index4):\n k12, k21, l42 = perm1\n index5 = indexremove((k4,l4))\n for perm2 in permutation(index5):\n k2, k22, k42 = perm2\n entry0 = ((k,l1),(k2,l2),(k3,l3),(k4,l4))\n entry1 = ((k11,l11),(k21,l21),(k31,l31),(k41,l41))\n entry2 = ((k12,l12),(k22,l22),(k32,l32),(k42,l42))\n if valid_entries(entry0) and valid_entries(entry1) and valid_entries(entry2):\n count += 1\n indices.append((entry0, entry1, entry2))\n print(entry0,entry1,entry2) \n print('done:',count,'terms')\n end = time.time()\n print('Indices time:', (end - start)/60)\n return indices",
"def kfold_indices(self, n,k):\n \n all_indices = np.arange(n,dtype=int)\n np.random.shuffle(all_indices)\n idx = [int(i) for i in np.floor(np.linspace(0,n,k + 1))]\n train_folds = []\n valid_folds = []\n\n for fold in range(k):\n valid_indices = all_indices[idx[fold]:idx[fold + 1]]\n valid_folds.append(valid_indices)\n train_folds.append(np.setdiff1d(all_indices, valid_indices))\n\n return train_folds, valid_folds",
"def k_plus_proches_voisins(modeles_avec_distances, k):\n # tri de la liste portant sur les premiers élements de chaque Tuple\n plus_proches_voisins = sorted(modeles_avec_distances,\n key=lambda x: x['distance']) \n \n return plus_proches_voisins[ : k]",
"def degreeDiscountIC(DG, k):\n key_list = []\n d_list = []\n dd_list = []\n t_list = []\n for each in DG.nodes:\n d_list.append(DG.degree(each))\n dd_list.append(DG.degree(each))\n t_list.append(0)\n for i in range(0, k):\n remain_nodes = []\n dd_list_new = copy.deepcopy(dd_list)\n for inode in DG.nodes:\n if inode not in key_list:\n remain_nodes.append(inode)\n else:\n dd_list_new[inode] = 0\n u = dd_list_new.index(max(dd_list_new))\n key_list.append(u)\n remain_nodes.remove(u)\n for adj in list(DG.neighbors(u)):\n if adj in remain_nodes:\n t_list[adj] = t_list[adj] + 1\n val = DG.get_edge_data(u, adj)['weight']\n dd_list[adj] = d_list[adj] - (2 * t_list[adj]) - ((d_list[adj] - t_list[adj]) * t_list[adj] * val)\n print('t_list', t_list)\n print('d_list', d_list)\n print('dd_list', dd_list)\n return key_list",
"def k_vizinhos(self, all_distancias, k):\n \n self.all_distancias = all_distancias\n \n \n lista_enumerada = (list(enumerate(self.all_distancias)))\n lista_nova = []\n \n \n for i in range(len(lista_enumerada)):\n lista_nova.append(list(lista_enumerada[i]))\n\n for i in range(len(lista_enumerada)):\n lista_temp = lista_nova[i]\n lista_temp.reverse()\n lista_nova[i] = lista_temp\n\n lista_nova.sort()\n vizinho = []\n\n for i in range(self.k):\n vizinho.append(min(lista_nova))\n lista_nova.pop(0)\n return vizinho",
"def k_nearest_neighbors(d_matrix, k):\n k_matrix = d_matrix.copy()\n rows = len(d_matrix)\n for i in range(rows):\n sorted_indexes = np.argsort(d_matrix[i])\n for index in sorted_indexes[k + 1:]:\n k_matrix[i][index] = 0\n # print(index)\n return k_matrix",
"def indicesIter(self):\n \n pass",
"def indices_hkl(self, H, K, L):\n from cctbx import miller\n _symm_equiv = miller.sym_equiv_indices(self.sg, (H, K, L))\n _indices = sorted([i.h() for i in _symm_equiv.indices()],\n reverse=True)\n if len(_indices) < _symm_equiv.multiplicity(False):\n _indices = _indices + [(-hh, -kk, -ll)\n for (hh, kk, ll) in _indices]\n return _indices",
"def k_fold_split_indexes(N, k):\n\n indexes = np.random.RandomState(RANDOM_SEED).permutation(N)\n splitted = np.array_split(indexes, k)\n return splitted",
"def print_idxlist_to_textlists(self, idx_list, worst=True, k=None, devData=None, y_pred=None, \\\n print_window=True, dataClass=None, return_indices=False): \n print (\"indices counts =\", idx_list.shape[0])\n boo = \"worst\" if worst else \"best\"\n print (\"ranked by {} cross-entropy loss\".format(boo))\n \n idx_list = [idx for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n ce_list = [ce for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n if k is not None:\n print (\"top {} results\".format(k))\n idx_list = idx_list[:k]\n ce_list = ce_list[:k] \n \n devData = (self.devX, self.devX_pos, self.devX_capitals, self.devY) if (devData is None) else devData\n y_pred = self.y_pred if (y_pred is None) else y_pred\n dataClass = self.dataClass if (dataClass is None) else dataClass\n \n word_windows = list(map(dataClass.vocab.ids_to_words, devData[0][idx_list]))\n pos_windows = list(map(dataClass.posTags.ids_to_words, devData[1][idx_list]))\n capital_windows = list(map(dataClass.capitalTags.ids_to_words, devData[2][idx_list])) \n gold_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in devData[3][idx_list]]\n pred_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in y_pred[idx_list]] \n\n if word_windows:\n cen = len(word_windows[0])//2 \n for i in range(len(word_windows)):\n print (\"\\nID {}\".format(idx_list[i]))\n print (\"KL divergence {}\".format(ce_list[i]))\n print (\"FEATURES: \\\"{}\\\", {}, {}\".format(word_windows[i][cen], pos_windows[i][cen], \\\n capital_windows[i][cen]))\n print (\"Gold NER {}\".format(gold_ner_class[i]))\n print (\"Pred NER {}\".format(pred_ner_class[i]))\n if print_window:\n print (\"Text window {}\".format(word_windows[i]))\n print (\"PoS window {}\".format(pos_windows[i]))\n print (\"Caps window {}\".format(capital_windows[i]))\n else:\n print (\"empty -- no predictions were made\")\n\n if return_indices:\n return idx_list",
"def CombinationsFromList(original_list, k):\r\n dim = len(original_list)\r\n pos = 0\r\n sol = [0] * k\r\n solutions = []\r\n \r\n def Init():\r\n if pos == 0:\r\n sol[0] = 0\r\n else:\r\n sol[pos] = sol[pos - 1]\r\n\r\n def Succesor():\r\n if sol[pos] < dim:\r\n sol[pos] += 1\r\n return True\r\n return False\r\n \r\n def Valid():\r\n for i in range(pos):\r\n if sol[i] == sol[pos]:\r\n return False\r\n return True \r\n\r\n while pos > - 1:\r\n succ = Succesor()\r\n while succ and not Valid():\r\n succ = Succesor()\r\n if succ:\r\n if k - 1 == pos:\r\n # print(sol[:k])\r\n # aici trebuie sa printam\r\n # print(\"[ \", end=\"\")\r\n # for i in range(k):\r\n # print(original_list[sol[i] - 1], end=\" \")\r\n # print(\"]\") \r\n \r\n new_sol = []\r\n for i in range(k): \r\n new_sol.append(original_list[sol[i] - 1]) \r\n solutions.append(new_sol)\r\n else:\r\n pos += 1\r\n Init()\r\n else:\r\n pos -= 1\r\n return solutions",
"def findKDistantIndices(self, nums: List[int], key: int, k: int) -> List[int]:\r\n n = len(nums)\r\n diff = [0] * (n + 10)\r\n for i, num in enumerate(nums):\r\n if num != key:\r\n continue\r\n left, right = max(0, i - k), min(n - 1, i + k)\r\n diff[left] += 1\r\n diff[right + 1] -= 1\r\n for i in range(1, len(diff)):\r\n diff[i] += diff[i - 1]\r\n\r\n return [i for i, num in enumerate(diff) if num]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new hotel to the system | async def add_hotel_endpoint(request):
hotel_name = request.args["hotel_name"][0]
hotel_id = model.add_hotel(hotel_name)
return json({"hotel_id": hotel_id}) | [
"def create_hotel(game, player, hotel):\n first_action = ensure_action(game, 'create_hotel', player)\n if 'creation_tile' not in first_action:\n raise GamePlayNotAllowedError('cannot create tile without playing a '\n 'creation tile')\n if hotel not in hotels_off_board(game):\n raise GamePlayNotAllowedError('must create hotel that is off the board')\n hotel['tiles'] = [first_action['creation_tile']]\n any_added = True\n while any_added:\n any_added = False\n for tile in tiles_adjacent_to_hotel(hotel):\n if tile in game['lonely_tiles']:\n hotel['tiles'].append(tile)\n game['lonely_tiles'].remove(tile)\n any_added = True\n if bank_shares(game, hotel):\n player['shares'][hotel['name']] += 1\n game['action_queue'].pop(0)\n advance_turn(game, player)",
"def add_office(self, *params):\n for off in Amity.offices:\n office_object = self.session.query(AmityOffices).filter(AmityOffices.Room_name == off).first()\n if office_object is None: # check if the office exists\n new_office = AmityOffices(Room_name=off)\n self.session.add(new_office)\n self.session.commit()\n else:\n office_object.Room_name = off\n self.session.commit()\n return \"The offices were added\"",
"def addHotspot( self, hotspot ):\n self._hotspots.append(hotspot)",
"async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})",
"async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})",
"def process_item(self, item, spider):\n session = self.Session()\n hotel = Hotel()\n hotel.hotel_name = item[\"hotel_name\"]\n hotel.address = item[\"address\"],\n hotel.link = item[\"link\"]\n hotel.quality_star = item[\"quality_star\"]\n hotel.rating = item[\"rating\"]\n hotel.number_people_rating = item[\"number_people_rating\"]\n hotel.description = item[\"description\"]\n hotel.distance = item[\"distance\"]\n hotel.image = item[\"image\"]\n hotel.price = item[\"price\"]\n hotel.city_id = item[\"city_id\"]\n hotel.hotel_id = item[\"hotel_id\"]\n try:\n session.add(hotel)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item",
"def add_offer(self):\r\n self.add_company_row()\r\n job_title = self.form.job_title.data\r\n pay_offer = self.form.pay_offer.data\r\n contact_email = self.form.recruiter_email.data\r\n job_description = self.form.job_description.data\r\n timestamp = datetime.now()\r\n offer = Offer(\r\n job_title=job_title,\r\n pay_offer=pay_offer,\r\n contact_email=contact_email,\r\n job_description=job_description,\r\n date=timestamp.strftime(\"%a %b %y\"),\r\n timestamp=timestamp,\r\n employer=self.company\r\n )\r\n db.session.add(offer)\r\n db.session.commit()",
"def add_food(self, _food):\n self.food.append(_food)",
"def new_booking():\n booking_data = request.form.to_dict()\n booking_data_with_booking_id = data_manager.booking_code_generator(booking_data)\n if \"city\" in booking_data:\n data_manager.add_to_company(booking_data_with_booking_id)\n else:\n data_manager.add_to_individuals(booking_data_with_booking_id)\n send_bookig_code(booking_data_with_booking_id)\n return \"Done\"",
"def newrestaurant():\n if request.method == 'POST':\n newRestaurant = Restaurant(name=request.form[\"name\"],\n user_id=login_session['user_id'])\n session.add(newRestaurant)\n session.commit()\n flash(\"%s restaurant succesfully created\" % request.form[\"name\"])\n return redirect(url_for('showrestaurants'))\n else:\n return render_template('newRestaurant.html')",
"def new_adopter():\n form = AdopterForm(request.form)\n if request.method == \"POST\":\n new_adopter = Adopter()\n form.populate_obj(new_adopter)\n session.add(new_adopter)\n session.commit()\n flash( \"New adopter '\" + new_adopter.name + \"' added!\")\n return redirect(url_for(\"show_adopters\"))\n\n else:\n output = render_template(\n 'page_head.html',\n title = \"Add a New Adopter! XD\",\n form = form )\n output += render_template('new_adopter.html', form = form )\n return output",
"def add_station(self, station_id=None, time=None, location=None):",
"def add_incident(app):\n\n with app.app_context():\n incident1 = Incident(external_id='1',\n links=[],\n tags=[],\n date=\"2020-05-29\",\n date_text=\"May 29th\",\n name=\"Police arrest a man for speaking at them from a distance\",\n description=\"Officer line moves protesters back and as protesters comply they attempt to arrest a man that was not visibly violent.\",\n city=\"Las Vegas\",\n state=\"Nevada\",\n location=Location(city='Las Vegas', state='Nevada'))\n\n db.session.add(incident1)\n db.session.commit()",
"def add_new_vacation():\n \n username = session[\"logged_in_username\"]\n user = crud.get_user_by_username(username=username)\n\n state = request.args.get(\"state\").title()\n city = request.args.get(\"city\").title()\n departure_date = request.args.get(\"departure-date\")\n arrival_date = request.args.get(\"arrival-date\")\n\n check_city, check_state = crud.check_if_city_state_in_db_create_if_not(city=city, state=state)\n \n new_vacation_label = crud.create_vacation_label(departure_date=departure_date, arrival_date=arrival_date, \n state_id=check_state.state_id)\n new_vacation = crud.create_vacation(vacation_label_id=new_vacation_label.vacation_label_id, user_id=user.user_id)\n\n return redirect(f\"/profile_{user.username}\")",
"def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)",
"def add_location(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.add'\n return self.call(self.options)",
"def set_up_hotels(game):\n game['hotels'] = map(lambda h: {'name': h, 'tiles': []}, hotel_names)",
"def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()",
"def create_hotels_list(self):\n\n hotels = load.loader.get_all_hotels()\n\n self.clear_widgets()\n\n for hotel in hotels:\n btn = HotelButton(text=hotel.name)\n self.add_widget(btn)\n btn.bind(on_release=lambda bt: self.select(bt.text))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add inventory to a given hotel | async def add_inventory_endpoint(request):
hotel_id = request.args["hotel_id"][0]
room_type = request.args["room_type"][0]
room_inventory = request.args["room_inventory"][0]
model.add_inventory(hotel_id, room_type, room_inventory)
return json({"success": True}) | [
"def add_inventory(self, current_inventory):\n for item in self.inventory:\n current_inventory.append(item)\n # remove supplies from the tile\n self.inventory = []",
"def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0",
"def inventoryAdd(obj):\n \n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()",
"def inventoryAdd(obj):\n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()",
"def add_item(self, item):\n self._inventory.append(item)",
"def add_fruit(inventory, fruit, quantity=0):\n if fruit in inventory:\n inventory[fruit] += quantity\n else:\n inventory[fruit] = quantity",
"def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")",
"def add_to_inventory(item, location, quantity, user=None):\n\n try:\n inventory = Inventory.objects.get(item=item, location=location)\n inventory.quantity += quantity\n inventory.save()\n except ObjectDoesNotExist:\n inventory = Inventory.objects.create(item=item, location=location, quantity=quantity)\n\n transaction = InventoryTransaction.objects.create(inventory=inventory, quantity=quantity, user=user)\n\n return transaction",
"def add_inventory(cd_instance, lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory",
"def add_inventories(self, *inventories):\n for inventory in inventories:\n if inventory.is_active is False:\n L.warning('Trying to add pre-occupied inventory')\n raise errors.InventoryError(*messages.ERR_INACTIVE_INVENTORY)\n\n self.inventories.add(*inventories)\n self.inventories.update(is_active=False)",
"def add_to_inventory(inventory, item_name, count, weight, item_type, gathered_items=[]):\n\n item_dict = collectable_item(item_name, count, weight, item_type)\n if inventory:\n for index in range(len(inventory)):\n for key in inventory[index]:\n if item_name == key:\n inventory[index].get(item_name)[0] += 1\n inventory[index].get(key)[1] = inventory[index].get(key)[0] * weight\n elif item_name not in gathered_items:\n inventory.append(item_dict)\n gathered_items.append(item_name)\n else:\n inventory.append(item_dict)\n gathered_items.append(item_name)\n\n return inventory",
"def addEquipmenttoRecipe(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n recipe_instruction_id = instruction_helpers.getRecipeInstructionID(recipe)\r\n recipeEquipmentInsertQuery = \"\"\"INSERT into recipe_equipment (recipe_instruction_id, equipment_id) VALUES (%s, %s)\"\"\"\r\n try:\r\n for ind, instr in enumerate(recipe.instructions):\r\n for equip in instr.equipment:\r\n cursor.execute(recipeEquipmentInsertQuery, (recipe_instruction_id[ind], equip.equipment_id))\r\n db.commit()\r\n except Exception:\r\n print('Error: OOPs something went wrong while adding Equipment to a Recipe!')\r\n finally:\r\n cursor.close()\r\n db.close()",
"def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')",
"def add_newInventory(id, title, artist, table):\r\n dicRow = {'ID': id, 'Title': title, 'Artist': artist}\r\n table.append(dicRow)",
"def buyshotel(self):\n # Check if user has enough gold and has not already purchased item\n if self.gold >= 5000 and wp.shotel not in self.weaponinventory:\n # Pops item from list to a single variable\n item = self.shopinventory.pop(3)\n # Adds item to player inventory\n self.weaponinventory.append(item)\n # readds the item back into the shop\n self.shopinventory.insert(0, wp.shotel)\n # Takes gold from player\n self.gold -= 5000\n # Brings them to purchased page\n cpw.i = 8\n # If the item is already in the inventory\n elif wp.shotel in self.weaponinventory:\n # Brings them to already purchased screen\n cpw.i = 7\n # If player does not have enough gold and item\n else:\n # Brings them to insufficient funds page\n cpw.i = 6",
"def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1",
"def _push(self, entry):\n self.inventory.append(entry)\n self._balance += entry.quantity",
"def put_inventory(self, InstanceId: str, Items: List) -> Dict:\n pass",
"def add_ingredient_to_shop_list (self, ingredient) :\n found = False\n qty_available = self.quantity_in_fridge (ingredient)\n for ing in self.shop_list :\n if ing.equals(ingredient) :\n qty_needed = ingredient.quantity - qty_available\n ing.add_quantity (qty_needed)\n found = True\n if found == False :\n ingredient.set_quantity(ingredient.quantity - qty_available)\n self.shop_list.append(ingredient)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cancel an existing reservation | async def cancel_reservation_endpoint(request):
reservation_id = request.args["reservation_id"][0]
model.cancel_reservation(reservation_id)
return json({"success": True}) | [
"def cancel_reservation(client, reservation_id, dry_run):\n print('Canceling reservation {}'.format(reservation_id))\n response = client.cancel_capacity_reservation(\n CapacityReservationId=reservation_id,\n DryRun=dry_run\n )\n print(response)",
"def cancel_reservation(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessable\n if len(selection) >= 2:\n abort(422)\n # check if access user_id matches reservation user_id\n reservation = selection[0]\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n # reservation user_id\n reservation_user = reservation.user\n if role == 'user' and access_user.id != reservation_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes\n clothes = reservation.clothes\n\n # set error status\n error = False\n # cancel that reservation\n try:\n clothes.status = \"\"\n reservation.delete()\n formatted_clothes = clothes.format()\n formatted_user = reservation_user.format()\n except Exception:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n reservation.close_session()\n clothes.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })",
"def do_cancel_reservation (self, user):\n try:\n if not user:\n ret = self.trex.cancel_reservation()\n else:\n ret = self.trex.cancel_reservation(user.split(' ')[0])\n print termstyle.green(\"*** TRex reservation canceled successfully ***\")\n except TRexException as inst:\n print termstyle.red(inst)",
"def cancel(self):\n \n #I need to work out the encrypting of the email of the person or the booking reference \n #(I should generate a booking unique key with the email and timeslot)\n# salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:5]\n information = \"%s\"%(self.email)\n# useremail = self.email\n act_key = hashlib.sha256(self.key.encode() + information.encode()).hexdigest()\n \n print(act_key)\n# if isinstance(useremail, unicode):\n# useremail = useremail.encode('utf8')\n# act_key= \"23jsjsnb652jss394h5h595n0\"#hashlib.sha1(salt+useremail).hexdigest()\n title = \"manage your booking\"\n link = u\"http://127.0.0.1:8000/activate/%s\"%(act_key)\n send_encrypted_link(self.email, information, title, act_key, link)",
"def cancel_booking(self, booking_id):\n ch = input(\"Want to cancel booking (y/n): \")\n if ch == 'y':\n try:\n c = self.conn.cursor()\n c.execute(\"select r.TIMING from bookings as b inner join routes r on b.ROUTE_ID = r.id where b.id={}\".format(booking_id))\n temp = c.fetchone()\n if temp:\n\n mydate = datetime.strptime(datetime.now().strftime(\"%Y/%m/%d\"), \"%Y/%m/%d\") + temp[0]\n mydatetime = mydate - timedelta(hours=0, minutes=30)\n if datetime.now() > mydatetime:\n print(\"can't cancel current ride.\")\n return False\n\n c.execute(\"UPDATE bookings SET status=0 WHERE id={}\".format(booking_id))\n c.execute(\"SELECT route_id,occupancy FROM bookings WHERE id={}\".format(booking_id))\n data = c.fetchone()\n c.execute(\"UPDATE routes SET seats_available=seats_available+{} \\\n WHERE id={}\"\n .format(data[1], data[0]))\n self.conn.commit()\n print(\"\\nBooking canceled.\")\n return True\n except Exception as e:\n print(type(e).__name__, \": \", e)\n else:\n print(\"\\nAction aborted!\")\n\n return False",
"def cancel_room():\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})",
"def reservation_expire(self, context):",
"def cancel(self, order_id: int):",
"def pending_mission_cancel(request, pk):\n\n\t# Retrieve the object\n\tpending_mission = get_object_or_404(PendingMission, pk=pk, kingdom=request.user.kingdom)\n\n\t# Cancel\n\tpending_mission.delete()",
"def cancel_seat(self, passenger, flight):\n if flight.remove_passenger(passenger):\n return OperationStatus(\"Passenger was removed\", True)\n return OperationStatus(\"Passenger was not booked on flight\", False)",
"def cancel_booking(self,\n booking_id,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/bookings/{booking_id}/cancel')\n .http_method(HttpMethodEnum.POST)\n .template_param(Parameter()\n .key('booking_id')\n .value(booking_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"def cancel():\n context = {'user': toolkit.g.get('user') or toolkit.g.get('author')}\n organization_id = toolkit.request.args.get('organization_id', None)\n try:\n toolkit.get_action('member_request_cancel')(\n context, {\"organization_id\": organization_id})\n id = 'cancel'\n return toolkit.redirect_to('member_request.mylist', id=id)\n except logic.NotAuthorized:\n toolkit.abort(401, not_auth_message)\n except logic.NotFound:\n toolkit.abort(404, request_not_found_message)",
"def cancelPedido(pedido: Pedido):\n if not pedido.executed:\n raise DuplicateExecution(\n 'O pedido não pode ser cancelado pois não está sendo executado!'\n )\n\n for reserva in pedido.reservas.all():\n reserva.delete()\n\n pedido.executed = False\n pedido.save()",
"def cancelar_interesse():\r\n\r\n # Converte o arquivo JSON em um dicionário\r\n info = request.json\r\n id_req = info['id']\r\n\r\n # Indica se a carona foi cancelada ou não\r\n carona_cancelada = False\r\n\r\n # Remove a carona com o id recebido da lista de caronas\r\n for carona in caronas:\r\n if id_req == carona['id']:\r\n caronas.remove(carona)\r\n carona_cancelada = True\r\n break;\r\n\r\n # Envia uma mensagem para o servidor indicando se a requisição foi ou não cancelada\r\n if(carona_cancelada == True):\r\n texto = \"Carona com id {0} foi cancelada!\".format(id_req)\r\n else:\r\n texto = \"Nenhuma carona com id {0} foi encontrada\".format(id_req)\r\n \r\n resposta = make_response(texto, 200)\r\n resposta.mimetype = 'text/plain'\r\n\r\n return resposta",
"def cancel(self):\n self.session.rollback()",
"def cancel_a_parcel(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('canceled' , id)\n db.insert(query, tuple)",
"def cancel(self):\n\t\tself.is_active = False\n\t\tself.save()",
"def test_cancelBooking(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n\n booking = db.session.query(Booking).filter( Booking.user_id == user_id,\n Booking.car_id == car_id,\n Booking.begin_time == begin_datetime).first()\n \n # Delete row from the database\n db.session.delete(booking)\n\n # Update car's availability \n car = Car.query.get(car_id)\n car.booked = False\n\n # Commit changes\n db.session.commit()\n self.assertFalse(self.bookingExists(user_id, car_id))",
"def hook_cancel_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"CANCEL req:%s\", request_id)\n self.send_message(assignee_chat_id, c.MSG_REQUEST_CANCELED)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update(\n {\"current_request\": None, \"reviewed_request\": None, \"state\": c.State.AVAILABLE}\n )\n del self.updater.dispatcher.bot_data[request_id]\n self.updater.dispatcher.update_persistence()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new reservation | async def add_reservation_endpoint(request):
hotel_id = request.args["hotel_id"][0]
room_type = request.args["room_type"][0]
arrival_date = request.args["arrival_date"][0]
departure_date = request.args["departure_date"][0]
status = request.args["status"][0]
reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)
if reservation_id == model.OPERATION_ERROR_RETURN_CODE:
return json({"success": False})
return json({"success": True, "reservation_id": reservation_id}) | [
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201",
"def reservations_add(request):\n try:\n\n if 'date_start' and 'date_end' and 'title' and 'email' and 'phone' in request.json_body:\n date_start = request.json_body['date_start']\n date_end = request.json_body['date_end']\n title = request.json_body['title']\n email = request.json_body['email']\n phone = request.json_body['phone']\n\n item = Reservations(date_start=date_start, date_end=date_end, title=title, user=email, phone=phone)\n\n DBSession.add(item)\n\n transaction.commit()\n\n return dict(status=True)\n\n except DBAPIError, ex:\n print ex\n return Response(conn_err_msg, content_type='text/plain', status_int=500)\n\n except Exception, ex:\n print ex\n return Response(ex, content_type='text/plain', status_int=500)",
"def add(self):\n if(self.check_overlap() == True):\n flag = True\n print \"Reservations overlap: cannot schedule at this time\"\n # TODO: why is there a while loop?\n while flag:\n self.start_time = str(self.time_string(self.start_time) + datetime.timedelta(minutes=30))\n self.end_time = str(self.time_string(self.end_time)+ datetime.timedelta(minutes=30))\n if(self.check_overlap()==False):\n flag = False\n return \"Reservations can be scheduled at :\", str(self.start_time)\n else: \n Reservation.save(self)\n print \"Reservation added successfully.\"",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def reservation(self, reservation):\n\n self._reservation = reservation",
"def make_reservation(root, flight_start_time, flight_end_time, flight_number):\n # validate if the slot is getting booked for 30 minutes only\n if not root.valid_interval(flight_start_time, flight_end_time):\n print(\"Please provide a half an hour interval, in 24 Hour clock military format.\")\n\n # check if no other flight is scheduled over the runway during that time\n elif root.busy_runway(flight_start_time, flight_end_time) is None:\n print(\"Runway is booked during: {} to {}\".format(flight_start_time, flight_end_time))\n \n # schedule the flight\n else:\n root.insert(flight_start_time, flight_end_time, flight_number)\n print(\"Runway reservation made for flight number {} from {} to {}\".format(flight_number, flight_start_time, flight_end_time))",
"def reserve_time(reservation_id):\n user_id = session['user_id']\n restaurant_to_reserve = db.session.query(Reservation).filter_by(id=reservation_id).update({'reservation_status':\"Reserved\",'user_id': user_id})\n print reservation_id\n db.session.commit()\n\n print \"added reservation\"\n return redirect('/')",
"def test_reservation_creation_succeeds(self, add_reservations):\n reservation = add_reservations[0]\n\n assert reservation.flight is not None\n assert reservation.seat_number is not None\n assert reservation.booked is False\n assert reservation.type is not None\n assert reservation.made_by is not None\n assert reservation.date_made is not None",
"def new_reservation(restaurant_id):\n print restaurant_id\n return render_template(\"new_reservation_form.html\", restaurant_id=restaurant_id)",
"def agregar_reserva(self, horario):\n\n from reservas.models import Reserva\n\n reserva, new = Reserva.objects.get_or_create(\n cliente=self,\n completada=False\n )\n reserva.agregar_horario(horario)\n return reserva",
"async def add_reserve(op: dict, sign_up_name: str, reserve_role: str, main: Optional[bool] = False) -> dict:\n op[\"Sign-ups\"][\"Reserves\"] += [{\"name\": sign_up_name, \"role\": reserve_role, \"move-main\": main}]\n return op",
"def reservation_id(self, reservation_id):\n self._reservation_id = reservation_id",
"def reservation() -> Response:\n # log incoming request\n log(app.current_request.to_dict(), app.current_request.json_body)\n\n # perform routing based off request\n if app.current_request.method == \"GET\":\n # GET reservation; if 'id' query param is available, use to get a single res. if no params then list all res.\n if not app.current_request.query_params:\n return rs.list_reservations(\n table_name=RES_TABLE\n )\n elif app.current_request.query_params.get(\"guid\"):\n return rs.get_reservation(\n table_name=RES_TABLE,\n reservation_guid=app.current_request.query_params[\"guid\"]\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"Query params were not empty but, did not have an 'guid' attribute. Please read the OpenAPI\"\n \" document on how to use this endpoint.\"\n }\n )\n elif app.current_request.method == \"POST\":\n # POST reservation; the reservation to create needs to be in the requests body\n if app.current_request.json_body:\n return rs.create_reservation(\n table_name=RES_TABLE,\n reservation=app.current_request.json_body\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"The request's body was empty. \"\n \"Please read the OpenAPI document on how to use this endpoint\"\n }\n )\n elif app.current_request.method == \"PUT\":\n # PUT reservation; the reservation to update needs to be in the requests body\n # POST reservation; the reservation to create needs to be in the requests body\n if app.current_request.json_body:\n return rs.update_reservation(\n table_name=RES_TABLE,\n reservation=app.current_request.json_body\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"The request's body was empty. \"\n \"Please read the OpenAPI document on how to use this endpoint\"\n }\n )\n else:\n # DELETE reservation; the 'id' query param indicates what reservation to delete\n if app.current_request.query_params.get(\"guid\"):\n return rs.delete_reservation(\n table_name=RES_TABLE,\n reservation_guid=app.current_request.query_params[\"guid\"]\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"Query params did not have an 'id' attribute. Please read the OpenAPI\"\n \" document on how to use this endpoint.\"\n }\n )",
"def reservations_edit(request):\n try:\n\n if 'time_start' and 'time_end' and 'title' and 'email' and 'phone' and 'id' and 'date' in request.json_body:\n\n time_start = request.json_body['time_start']\n time_end = request.json_body['time_end']\n title = request.json_body['title']\n email = request.json_body['email']\n phone = request.json_body['phone']\n id = request.json_body['id']\n date = request.json_body['date']\n\n item = DBSession.query(Reservations).get(id)\n\n item.title = title\n item.date_start = '%s %s' % (date, time_start)\n item.date_end = '%s %s' % (date, time_end)\n item.user = email\n item.phone = phone\n\n DBSession.add(item)\n\n transaction.commit()\n\n return dict(status=True)\n\n except DBAPIError, ex:\n print ex\n return Response(conn_err_msg, content_type='text/plain', status_int=500)\n\n except Exception, ex:\n print ex\n return Response(ex, content_type='text/plain', status_int=500)",
"def validate_and_save(self, reservation, form):\n if not reservation.validate():\n context_data = self.get_context_data(reservation=reservation)\n context_data[\"error\"] = self.get_error_message(form, reservation)\n return render(self.request, self.template_name, context_data)\n\n reservation.save()\n return redirect(calendar_url_reservation(reservation))",
"def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = Booking()\r\n booking.restaurant_id = rest_id\r\n booking.user_id = user_id\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n booking.datetime = datetime.datetime.now()\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None",
"def create_reservation(parent_arg, reservation_name, reservation_slots=100):\n\n res_config = bigquery_reservation_v1.Reservation(slot_capacity=reservation_slots, ignore_idle_slots=False)\n res = res_api.create_reservation(parent=parent_arg,\n reservation_id=reservation_name,\n reservation=res_config)\n return res.name",
"def create_reservations(payload, user_id):\n error = False\n # get posted data from json request\n body = request.get_json()\n keys = body.keys()\n # if request does not have json body, abort 400\n if body is None:\n abort(400)\n # if json does not have key 'auth0_id', abort 400\n if 'auth0_id' not in keys:\n abort(400)\n # if json does not have key 'reservation', abort 400\n if 'reservations' not in keys:\n abort(400)\n # if auth0_id in body does not match auth0_id in payload, abort 401\n if body['auth0_id'] != payload['sub']:\n abort(401)\n\n # query who is accessing\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n # check if user_id in URL matches the access user id\n if user_id != access_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes and store them in variable \"clothes\"\n if not isinstance(body['reservations'], list):\n abort(400)\n for value in body['reservations']:\n if not isinstance(value, int):\n abort(400)\n # check if all clothes indeed exist\n clothes = []\n for clothes_id in body['reservations']:\n # query clothes\n selection = Clothes.query.get(clothes_id)\n if selection is None:\n abort(404)\n # if that clothes has been already reserved, abort 422\n if selection.status == \"reserved\":\n abort(422)\n clothes.append(selection)\n\n # query user\n user = User.query.get(user_id)\n formatted_user = user.format()\n\n # make reservations\n try:\n reservations = []\n formatted_clothes = []\n for item in clothes:\n new_reservation = Reserve()\n new_reservation.user = user\n new_reservation.clothes = item\n item.status = \"reserved\"\n reservations.append(new_reservation)\n # commit these reservations\n for reservation in reservations:\n reservation.insert()\n formatted_clothes.append(reservation.clothes.format())\n except Exception:\n # rollback all sessions\n for reservation in reservations:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n # close all sessions\n for reservation in reservations:\n reservation.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })",
"def reserve(self, context, reservation, resource, usage, delta):\n\n pass # Pragma: nocover"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing reservation | async def get_reservation_endpoint(request):
reservation_id = request.args["reservation_id"][0]
reservation_dict = model.get_reservation(reservation_id)
return json(reservation_dict) | [
"def reservation() -> Response:\n # log incoming request\n log(app.current_request.to_dict(), app.current_request.json_body)\n\n # perform routing based off request\n if app.current_request.method == \"GET\":\n # GET reservation; if 'id' query param is available, use to get a single res. if no params then list all res.\n if not app.current_request.query_params:\n return rs.list_reservations(\n table_name=RES_TABLE\n )\n elif app.current_request.query_params.get(\"guid\"):\n return rs.get_reservation(\n table_name=RES_TABLE,\n reservation_guid=app.current_request.query_params[\"guid\"]\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"Query params were not empty but, did not have an 'guid' attribute. Please read the OpenAPI\"\n \" document on how to use this endpoint.\"\n }\n )\n elif app.current_request.method == \"POST\":\n # POST reservation; the reservation to create needs to be in the requests body\n if app.current_request.json_body:\n return rs.create_reservation(\n table_name=RES_TABLE,\n reservation=app.current_request.json_body\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"The request's body was empty. \"\n \"Please read the OpenAPI document on how to use this endpoint\"\n }\n )\n elif app.current_request.method == \"PUT\":\n # PUT reservation; the reservation to update needs to be in the requests body\n # POST reservation; the reservation to create needs to be in the requests body\n if app.current_request.json_body:\n return rs.update_reservation(\n table_name=RES_TABLE,\n reservation=app.current_request.json_body\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"The request's body was empty. \"\n \"Please read the OpenAPI document on how to use this endpoint\"\n }\n )\n else:\n # DELETE reservation; the 'id' query param indicates what reservation to delete\n if app.current_request.query_params.get(\"guid\"):\n return rs.delete_reservation(\n table_name=RES_TABLE,\n reservation_guid=app.current_request.query_params[\"guid\"]\n )\n else:\n return Response(\n status_code=400,\n body={\n \"error\": \"Query params did not have an 'id' attribute. Please read the OpenAPI\"\n \" document on how to use this endpoint.\"\n }\n )",
"def reservation(self):\n return self._reservation",
"def select_reservation(self, ctx: dataclasses.dataclass) -> ResultE[dataclasses.dataclass]:\n pk = cf.get_int_or_none(ctx.pk) or 0\n if pk <= 0:\n return self._error('Missed Reservation ID', ctx, ReservationErrors.missed_reservation)\n try:\n data = self._reservations_repo.get(pk)\n except Exception as err:\n return self._error(\n f\"Error select Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.error, exc=err\n )\n if data == Nothing:\n return self._error(\n f\"Unknown Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.missed_reservation\n )\n if hasattr(ctx, 'source'):\n ctx.source = data.unwrap()\n else:\n ctx.reservation = data.unwrap()\n return Success(ctx)",
"def get_res_by_id(res_id):\n # look up ID, if non-exist return error message\n res = session.query(Reservation).filter_by(id=res_id).first()\n if not res:\n return jsonify({'error': 'no reservation with id {} found'.format(res_id)}), 400\n return jsonify({'reservation': res.serialize()})",
"def get(self):\n return all_reservations()",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReservationArgs.__new__(ReservationArgs)\n\n __props__.__dict__[\"concurrency\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"ignore_idle_slots\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"multi_region_auxiliary\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"reservation_id\"] = None\n __props__.__dict__[\"slot_capacity\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Reservation(resource_name, opts=opts, __props__=__props__)",
"def reservation_id(self) -> str:\n return pulumi.get(self, \"reservation_id\")",
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201",
"def reservation_id(self):\n return self._reservation_id",
"def reservation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reservation_id\")",
"def get_reserved(reserved_id, **kwargs):\n try:\n reserved_row = get_row(Reserved, reserved_id)\n except:\n raise\n return reserved_row",
"def snapshot_reservation_detail(self) -> 'outputs.SnapshotReservationDetailResponse':\n return pulumi.get(self, \"snapshot_reservation_detail\")",
"def validate_travel_reservation(self, travel_reservation):\n user_reservation = Reservation.objects.filter(member=self.context['request'].user)\n if travel_reservation in user_reservation:\n return travel_reservation\n else:\n raise ReservationDoesNotExists",
"def reservations(request):\n return dict(status=True)",
"def reservation_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"reservation_id\")",
"def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"def reservations(self) -> Sequence['outputs.ReservationResponse']:\n return pulumi.get(self, \"reservations\")",
"def get_reservation_rest(restaurant_id, from_date, to_date):\n\n url = \"{}/list/{}\".format(BOOKING_MICROSERVICE_URL, restaurant_id)\n if from_date:\n url = HttpUtils.append_query(url, \"fromDate\", from_date)\n if to_date:\n url = HttpUtils.append_query(url, \"toDate\", to_date)\n\n response = HttpUtils.make_get_request(url)\n return response",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the inventory of a hotel in a specific date range | async def list_inventory_endpoint(request):
hotel_id = request.args["hotel_id"][0]
start_date = request.args["start_date"][0]
end_date = request.args["end_date"][0]
inventory = model.list_inventory(hotel_id, start_date, end_date)
if inventory == model.OPERATION_ERROR_RETURN_CODE:
return json({"success": False})
return json({"success": True, "inventory": inventory}) | [
"def getFreeBookablesByIntervalDate(self,resourceId,startDate,endDate):\n url = self.urlBookables+'{0}/{1}/{2}/'.format(resourceId,startDate,endDate)\n return ExecuteQuery().Query(url, 'GET')",
"def inventory(request, concierge, template=\"concierges/inventory_check.html\"):\n inventory = []\n for x in xrange(0, 2):\n day = date.today() + timedelta(days=x)\n dow = DayOfWeek.objects.get(isoweekday=day.isoweekday())\n day_info = {'day': day, 'times': []}\n schedules = dow.tourschedule_set.filter(active=True, tour_type__active=True,\n tour_type__default_site_skin__is_concierge_cta=True).order_by('tour_type__order')\n for sched in schedules:\n product = sched.tour_type.get_product(day, schedule=sched).product\n tour_info = {\n 'day': day,\n 'time': sched.pretty_time,\n 'tour_type': sched.tour_type,\n 'seats_available': Decimal(product.items_in_stock) - Decimal(product.total_sold)\n }\n day_info['times'].append(tour_info)\n\n inventory.append(day_info)\n\n ctx = RequestContext(request, {\n 'concierge': concierge,\n 'inventory': inventory\n })\n\n return render_to_response(template, context_instance=ctx)",
"def stock_prices_in_range(prices, start_date, end_date):\n return list(price for price in prices if start_date <= price.date <= end_date)",
"def ready_items_today(self, thisdate):\n items = InventoryItem.objects.filter(product=self,\n inventory_date__lte=thisdate,\n expiration_date__gt=thisdate)\n items = items.filter(received=Decimal(\"0\"), planned__gt=0)\n return items",
"def lookup_entries_by_date_range(self):\n self.clear_console()\n print(self.format_header('Lookup by Date Range'))\n print('Search for entries from...')\n from_date = ConsoleUI.get_a_date('enter From Date')\n self.clear_console()\n print(self.format_header('Lookup by Date Range'))\n print('Search for entries from {} to...'.format(from_date.strftime('%m-%d-%Y')))\n # get the end date\n while True:\n to_date = ConsoleUI.get_a_date('enter To date')\n if to_date.strftime('%m-%d-%Y') < from_date.strftime('%m-%d-%Y'):\n print('Please enter a date AFTER the From Date')\n else:\n break\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(Entry.created_timestamp.between(\n from_date,\n to_date + datetime.timedelta(days=1) - datetime.timedelta(seconds=1)\n ))\n self.display_one_at_a_time(entries)",
"def get_date_range():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n animals = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n animals.append(animal)\n\n return jsonify(animals)",
"def getTicketList(fromDates=[date.today()-timedelta(days=30)], untilDates=[date.today()]):\n global ticketList\n ticketList = []\n for x in range(len(fromDates)):\n fromDate = fromDates[x]\n untilDate = untilDates[x]\n y = 0\n while True:\n # Add 1 to y each loop for page increment\n y = y + 1\n toAppend = backend.fetchTicketList(y, fromDate, untilDate)\n incrementRequestsSentCount()\n if len(toAppend) == 0:\n break\n for item in toAppend:\n ticketList.append(item)\n filterView()",
"def get_availabilities(date):\n day_of_week = dateutil.parser.parse(date).weekday()\n availabilities = []\n available_probability = 0.3\n if day_of_week == 0:\n start_hour = 10\n while start_hour <= 16:\n if random.random() < available_probability:\n # Add an availability window for the given hour, with duration determined by another random number.\n vaccine_type = get_random_int(1, 4)\n if vaccine_type == 1:\n availabilities.append(\"{}:00\".format(start_hour))\n elif vaccine_type == 2:\n availabilities.append(\"{}:30\".format(start_hour))\n else:\n availabilities.append(\"{}:00\".format(start_hour))\n availabilities.append(\"{}:30\".format(start_hour))\n start_hour += 1\n\n if day_of_week == 2 or day_of_week == 4:\n availabilities.append(\"10:00\")\n availabilities.append(\"16:00\")\n availabilities.append(\"16:30\")\n\n return availabilities",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def test_get_dealer_historical_inventory(self):\n pass",
"def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200",
"def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list",
"def archive_date_range(request, inst):\n\n # Ensure the instrument is correctly capitalized\n inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]\n\n template = 'archive_date_range.html'\n sort_type = request.session.get('image_sort', 'Recent')\n group_type = request.session.get('image_group', 'Exposure')\n context = {'inst': inst,\n 'base_url': get_base_url(),\n 'sort': sort_type,\n 'group': group_type}\n\n return render(request, template, context)",
"def getPurchaseDates(self):\n\t\treturn self.dateList",
"def get_availabilities(date):\n day_of_week = dateutil.parser.parse(date).weekday()\n availabilities = []\n available_probability = 0.3\n if day_of_week == 0:\n start_hour = 10\n while start_hour <= 16:\n if random.random() < available_probability:\n # Add an availability window for the given hour, with duration determined by another random number.\n appointment_type = get_random_int(1, 4)\n if appointment_type == 1:\n availabilities.append('{}:00'.format(start_hour))\n elif appointment_type == 2:\n availabilities.append('{}:30'.format(start_hour))\n else:\n availabilities.append('{}:00'.format(start_hour))\n availabilities.append('{}:30'.format(start_hour))\n start_hour += 1\n\n if day_of_week == 2 or day_of_week == 4:\n availabilities.append('10:00')\n availabilities.append('16:00')\n availabilities.append('16:30')\n\n return availabilities",
"def fetchTicketList(page, fromDate, untilDate):\n return desk.tickets.filter_tickets(page=page, query=f\"created_at:>'{fromDate}'%20AND%20created_at:<'{untilDate}'\")",
"def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n min_date = common.dtime(year_from, month_from, day_from)\n max_date = common.dtime(year_to, month_to, day_to)\n\n return [[line[ID], line[TITLE], int(line[PRICE]), int(line[MONTH]), int(line[DAY]), int(line[YEAR])]\n for line in table if min_date < common.dtime(line[YEAR], line[MONTH], line[DAY]) < max_date]",
"def dru_list(self, from_date, to_date):\n return self.au.list_dru(fday=from_date, tday=to_date)",
"def search_by_date_range():\n\n entries = retrieve_all_entries()\n dates = []\n idx = 0\n for entry in entries:\n if entry.task_date in dates: continue\n idx += 1\n print(\"{}: {}\".format(idx, entry.task_date))\n dates.append(entry.task_date)\n #date_range = input(\"Provide a range of dates from the above list, in mm/dd/yyyy format (e.g. 12/01/2016,12/08/2016) to lookup entries between those dates: \")\n start_date = get_task_date(\"start\")\n end_date = get_task_date(\"end\")\n print(\"\")\n entries = entries.where((Entry.task_date >= start_date) & (Entry.task_date <= end_date))\n if entries.count() > 0:\n print_entries(entries)\n return entries\n else:\n input(\"No entries were found!!! Press enter to continue..\")\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Best Path Heuristic (consistent) (seems to be a very good heuristic) Gives the roomba the ability to pass through walls and ignore additional cost on carpet 1. Find which dirty tile is best to start from For each dirty tile in state.dirty_locations 1.1 Set it as the start node 1.2 Use Total Manhattan Distance(third heuristic) to find route of least cost to visit every other dirty tile 1.3 Compare with previous start tile, and keep the better start (tiebreak with roomba proximity to start tile) 2. Find roomba proximity to the best start tile 3. Add the results of steps 1 and 2 The heuristic is the sum of the distance to the best start tile and the cost from said tile | def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:
# TODO a nontrivial consistent heuristic
if not state.dirty_locations:
return 0
best_start = 0 # best dirty tile to start from
best_cost = INF # cost of the path from the above start tile
for i in range(len(state.dirty_locations)):
estimate_cost = 0
lowest_cost = INF
closest_dirty = 0
dirty_locations = list(state.dirty_locations)
current_pos = dirty_locations.pop(i)
# find the shortest cost solution path from this starting tile
while dirty_locations:
for j in range(len(dirty_locations)):
manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)
if manhattan < lowest_cost:
lowest_cost = manhattan
closest_dirty = j
estimate_cost += lowest_cost
current_pos = dirty_locations.pop(closest_dirty)
lowest_cost = INF
# if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start
if estimate_cost < best_cost:
best_cost = estimate_cost
best_start = i
# if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile
if estimate_cost == best_cost:
current_pos = state.position
dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)
if dist_to_i < dist_to_prev_best:
best_start = i
current_pos = state.position
# Calculate distance to the best start tile
dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
# Returned heuristic is the sum of distance to the start tile and estimated cost from said tile
return dist_to_start + best_cost | [
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n\n \"*** YOUR CODE HERE ***\"\n # Se detallan los intentos sucesivos que tuvimos en la búsqueda de una heurística adecuada.\n # En todas las situaciones partimos del problema relajado al no considerar que haya paredes en el laberinto.\n # Se agregan para cada caso las estadísticas dadas para tinyCorners, mediumCorners y bigCorners.\n \n \"\"\"\n # PRIMER INTENTO\n # Función heurística: Mínima distancia Manhattan a una esquina no visitada.\n # La función es admisible, pero no es muy buena. Se terminan expandiendo\n # demasiados nodos.\n \n # Costo Nodos_expandidos\n # Tiny: 28 226\n # Medium: 106 1491\n # Big: 162 5862\n\n remainingCorners = state[1]\n if(remainingCorners == ()):\n return 0\n \n currentPos = state[0] \n \n return min(manhattanDistance(currentPos, corner) for corner in remainingCorners)\n \"\"\"\n \n \"\"\"\n # SEGUNDO INTENTO\n # Función heurística: Suma de distancias Euclídeas a todas las esquinas no visitadas.\n # Si bien presenta una mejora en la cantidad de nodos expandidos\n # sin ser tan compleja computacionalmente, vimos que la heurística no es admisible\n # pues, por ejemplo, sobreestima el costo de ruta si quedan dos esquinas para visitar\n # y nos encontramos en una tercer esquina.\n # En este caso, se forma un triángulo rectángulo entre nuestra posición y los objetivos,\n # y la heurística devuelve un costo de ruta mayor (un cateto + hipotenusa) que la ruta óptima\n # (un cateto + el otro cateto).\n \n # Costo Nodos_expandidos\n # Tiny: 28 182\n # Medium: 106 722\n # Big: 166 2681 (Vemos que devuelve un costo de ruta subóptimo)\n \n remainingCorners = state[1]\n if(remainingCorners == ()):\n return 0\n \n currentPos = state[0] \n \n return sum((euclideanDistance(currentPos, corner)) for corner in remainingCorners)\n \"\"\"\n \n \"\"\"\n # TERCER INTENTO\n # Función heurística: Suma de distancias Manhattan recorriendo las esquinas restantes de una forma greedy.\n # Agregando un poco de procesamiento, podemos llegar a una función que mejora el rendimiento de una manera \n # notable. Sin embargo, encontramos que en un caso muy peculiar la función también sobreestimaría el costo de ruta,\n # haciéndola no admisible en casos particulares.\n \n # Costo Nodos_expandidos\n # Tiny: 28 155\n # Medium: 106 692\n # Big: 162 1740\n \n remainingCorners = state[1]\n currentPos = state[0]\n currentSum = 0\n while(remainingCorners != ()):\n nextCornerAndDist = min([(x, manhattanDistance(x, currentPos)) for x in remainingCorners], key = lambda x: x[1])\n remainingCorners = tuple(x for x in remainingCorners if x != nextCornerAndDist[0])\n currentSum = currentSum + nextCornerAndDist[1]\n currentPos = nextCornerAndDist[0]\n \n return currentSum\n \"\"\"\n \n # CUARTO INTENTO\n # Función heurística: Cantidad de desplazamientos necesarios para recorrer las esquinas restantes de manera óptima.\n # Aprovechando que el problema requiere siempre recorrer a lo sumo cuatro puntos, creemos que podemos demandar\n # un poco más de procesamiento y llegar a una heurística con las bondades del caso anterior, pero que se mantenga admisible.\n # Vemos que expande más nodos en los casos pequeños, pero la misma cantidad en el más grande. \n \n # Costo Nodos_expandidos\n # Tiny: 28 159\n # Medium: 106 741\n # Big: 162 1740\n \n return MinimumPathLength(state[0], state[1])",
"def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n # Función heurística: Suma de los pesos de las aristas del Árbol Recubridor Minimal\n # (Minimum Spanning Tree) formado por los puntos faltantes y la posición actual,\n # tomando como pesos de las aristas la distancia en el laberinto entre los puntos involucrados.\n # Sabemos que la función es admisible: El objetivo es encontrar un camino Hamiltoniano\n # de suma de pesos de arista mínimos con los vértices antes descriptos. Un camino Hamiltoniano\n # es un árbol recubridor del grafo, con lo cual el árbol recubridor minimal siempre tendrá\n # una suma de pesos de aristas menor o igual a la cantidad de movimientos que necesitaríamos\n # para recorrer el camino óptimo.\n # Para calcular el valor de la heurística, construimos el grafo antes descripto calculando las \n # distancias entre todos los vértices. Luego, generamos un árbol recubridor minimal \n # usando el algoritmo de Kruskal y retornamos la suma de los pesos de las aristas elegidas.\n \n # Generamos una lista de todos los puntos con comida.\n foodList = GridToList(foodGrid)\n \n # Precalculamos las distancias en el laberinto entre cada par de puntos con comida.\n if(len(problem.heuristicInfo) == 0):\n problem.heuristicInfo = fillMazeDistances(foodList, problem)\n \n # Calculamos el peso del árbol recubridor minimal, pasando como vertices\n # las coordenadas de la comida restante junto con la posición actual,\n # además de las distancias entre comidas en el laberinto.\n return minSpanTreeWeight(foodList + [position], problem.heuristicInfo)",
"def cornersFastInconsistentHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \n \"*** YOUR CODE HERE ***\"\n \"\"\"\n This heuristic checks the distance to the closest unvisited corner,\n then checks the distance from this unvisited corner to the \n next closest remaining unvisited corner, for all corners.\n The heuristic returns the sum of these distances.\n \"\"\"\n \n def notVisited(corner):\n for i in state[2]:\n if corner == i:\n return False\n return True\n \n def closestDistance(xy1, points):\n minDistance = 999\n visitedPoint = None\n for xy2 in points:\n #distance = round(( (xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2 ) ** 0.5)\n distance = abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n if distance < minDistance:\n minDistance = distance\n visitedPoint = xy2\n if visitedPoint != None:\n points.remove(visitedPoint)\n return minDistance, points, visitedPoint\n \n def cornersNotVisited(corners):\n unVisited = []\n for corner in corners:\n if notVisited(corner):\n unVisited.append(corner)\n return unVisited\n \n xy1 = (state[0], state[1])\n unVisited = cornersNotVisited(corners)\n heuristic = 0\n while len(unVisited) > 0:\n nextClosestDistance,unVisited,xy1 = closestDistance(xy1, unVisited)\n heuristic += nextClosestDistance\n return heuristic\n return 0",
"def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi heurística consiste en hacer simplemente el máximo de las distancias reales del state a cada nodo con comida\n He provado diferentes heurísticas y esta es la que me expande menos nodos, aunque no es la más óptima temporalmente\n Tardé mucho tiempo en darme cuenta de que había una función que calculaba la distancia real entre dos nodos\n NOTA: NO EJECUTAR CON LABERINTOS MÁS GRANDES QUE EL tinySearch. El algoritmo requiere muchísimo tiempo\n \"\"\"\n max = 0 # Inicializo el máximo en 0\n for food in foodGrid.asList(): # Esto me da cada food como un nodo (x,y), pero sólo los nodos que tengan comida\n distance = mazeDistance(position, food, problem.startingGameState) # Distancia real del state a una comida\n if max < distance: # Cálculo del máximo\n max = distance\n return max\n\n # La siguiente heurística también servía, y de hecho tardaba mucho menos, pero el autograder me daba 2/4\n # ya que se expandían más de 12.000 nodos.\n # return len(foodGrid.asList())",
"def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance",
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \n \"*** YOUR CODE HERE ***\"\n \"\"\"\n This heuristic checks the distance to the closest unvisited corner,\n then checks the distance from this unvisited corner to the \n next closest remaining unvisited corner, for all corners.\n The heuristic returns the sum of these distances.\n \"\"\"\n \n def notVisited(corner):\n for i in state[2]:\n if corner == i:\n return False\n return True\n \n def closestDistance(xy1, points):\n minDistance = 999\n visitedPoint = None\n for xy2 in points:\n distance = abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n if distance < minDistance:\n minDistance = distance\n visitedPoint = xy2\n if visitedPoint != None:\n points.remove(visitedPoint)\n return minDistance, points, visitedPoint\n \n def cornersNotVisited(corners):\n unVisited = []\n for corner in corners:\n if notVisited(corner):\n unVisited.append(corner)\n return unVisited\n \n xy1 = (state[0], state[1])\n unVisited = cornersNotVisited(corners)\n heuristic = 0\n nextClosestDistance = 0\n nextClosestDistance,unVisited,xy1 = closestDistance(xy1, unVisited)\n while len(unVisited) > 0:\n nextClosestDistance,unVisited,xy1 = closestDistance(xy1, unVisited)\n heuristic += nextClosestDistance\n return heuristic\n return 0",
"def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n \n def accurateDistance(point1, point2, gameState):\n x1, y1 = point1\n x2, y2 = point2\n walls = gameState.getWalls()\n assert not walls[x1][y1], 'point1 is a wall: ' + point1\n assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)\n prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)\n return len(search.bfs(prob))\n \n def generateEdges(vertices):\n edges = set([])\n while len(vertices) > 0:\n xy1 = vertices.pop()\n for i in vertices:\n xy2 = i\n #weight = abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n weight = accurateDistance(xy1, xy2, problem.startingGameState)\n edges.add((xy1, xy2, weight))\n return edges\n \n def minEdge():\n e = None\n u = None\n v = None\n w = 9999\n for i in edges:\n if i[2] < w:\n if i[0] in visited and i[1] in unVisited:\n e = i\n u = i[0]\n v = i[1]\n w = i[2]\n elif i[1] in visited and i[0] in unVisited:\n e = i\n u = i[0]\n v = i[0]\n w = i[2]\n return e, v, w\n\n food = set([])\n countX = -1\n countY = -1\n for i in foodGrid:\n countX+=1\n countY = -1\n for j in i:\n countY+=1\n if j == True:\n food.add((countX, countY))\n tempFoodSet = food\n food = frozenset(food)\n unVisited = set([])\n for i in food:\n unVisited.add(i)\n if 'edges' not in problem.heuristicInfo.keys():\n problem.heuristicInfo['edges'] = generateEdges(tempFoodSet)\n edges = problem.heuristicInfo['edges']\n \n heuristic = 0\n visited = set([])\n newEdges = set([])\n if len(unVisited) != 0:\n visited.add(unVisited.pop())\n while visited != food:\n e,v,w = minEdge()\n unVisited.remove(v)\n visited.add(v)\n newEdges.add(e)\n heuristic+=w\n return heuristic",
"def cornersHeuristic(state, problem):\n\n # Useful information.\n # corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # *** Your Code Here ***\n corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # Get unvisited corners\n successor = [False, False, False, False]\n currentPosition = state[0]\n currentStatus = state[1]\n\n # Take the manhattan distance of the nodes\n # current position and all corners tuple location\n # Iterate through all corners\n for corner in range(len(corners)):\n successor[corner] = distance.manhattan(currentPosition,\n corners[corner]) * (not currentStatus[corner]) # Ignore corners already visited\n return max(successor) # Return the max value from all calculated manhattan values of all corner",
"def findRoute(self,x1,y1,x2,y2):\r\n\t\tstart = (x1,y1)\r\n\t\tgoal = (x2,y2)\r\n\r\n\t\tclosedSet = set()\r\n\t\topenSet = {start}\r\n\t\tcameFrom = {}\r\n\r\n\t\ttuple_key = []\r\n\t\tfor col in range(self.maze_width):\r\n\t\t\tfor row in range(self.maze_heihg):\r\n\t\t\t\ttuple_key.append((col, row))\r\n\r\n\t\t# Create a dictionary with all the inicial weights\r\n\r\n\t\tgScore = dict(zip(tuple_key, [1000000000] * len(tuple_key)))\r\n\t\tfScore = dict(zip(tuple_key, [1000000000] * len(tuple_key)))\r\n\r\n\t\tgScore[start] = 0\r\n\t\tfScore[start] = self.manh_dist(start, goal)\r\n\r\n\t\twhile openSet:\r\n\t\t\t'''Get the node in open set with the minimum fScore'''\r\n\t\t\tmin_value = 2000000000\r\n\t\t\tfor cell in openSet:\r\n\t\t\t\tif fScore[cell] < min_value:\r\n\t\t\t\t\tmin_value = fScore[cell]\r\n\t\t\t\t\tcurrent = cell\r\n\t\t\tif current == goal: # if current node is the goal, we are done\r\n\t\t\t\treturn self.redo_path(cameFrom, current) # return all the path\r\n\r\n\t\t\topenSet.pop() # take out the current node from the options to evaluate\r\n\t\t\tclosedSet.add(current) # and add it to the visited ones\r\n\t\t\tadjacent_nodes = self.adjacent_cell(current[0], current[1]) # check for all the possible next steps (adjacent and open spots)\r\n\t\t\tfor neighbor in adjacent_nodes:\r\n\t\t\t\tif neighbor in closedSet: # if the point has been visited, continue to the next one\r\n\t\t\t\t\tcontinue\r\n\t\t\t\ttentative_gScore = gScore[current] + 1\r\n\r\n\t\t\t\tif neighbor not in openSet:\r\n\t\t\t\t\topenSet.add(neighbor) # possible next step\r\n\t\t\t\telif tentative_gScore >= gScore[neighbor]:\r\n\t\t\t\t\tcontinue # Not a better path\r\n\r\n\t\t\t\tcameFrom[neighbor] = current #store the path\r\n\t\t\t\tgScore[neighbor] = tentative_gScore\r\n\t\t\t\tfScore[neighbor] = gScore[neighbor] + self.manh_dist(neighbor, goal)\r\n\t\treturn []",
"def heuristic(self, state):\n\t\t\n\t\t# Redefine this if you are doing best first, A* search, etc.\n\t\treturn 0",
"def astar(maze, heuristic_func=manhatten_dist):\n\n \"\"\"\n [문제 02] 제시된 stage1의 맵 세가지를 A* Algorithm을 통해 최단경로를 return하시오.(20점)\n (Heuristic Function은 위에서 정의한 manhatten_dist function을 사용할 것.)\n \"\"\"\n\n # start_point=maze.startPoint()\n # end_point=maze.circlePoints()[0]\n start_point=maze.__start\n end_point=maze.__objective[0]\n path=[]\n\n ####################### Write Your Code Here ################################\n iteration = 0\n max_iteration = len(maze.mazeRaw)**3\n start_node = Node(None, start_point)\n start_node.g = 0 ##\n end_node = Node(None, end_point)\n end_node.g = end_node.h = 0 ##\n start_node.h = heuristic_func(start_node.location, end_node.location)\n\n open_list = []\n closed_list = []\n\n open_list.append(start_node)\n \n while len(open_list) > 0:\n # print(\"Open list:\", end=\"\")\n # for node in open_list:\n # print(node.location, end=\"\")\n # print()\n iteration += 1\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item < current_node:\n current_node = item\n current_index = index\n if iteration > max_iteration: \n raise Exception(\"failure\")\n\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Goal test\n if current_node == end_node: \n current = current_node\n while current is not None:\n path.append(current.location)\n current = current.parent\n break\n\n children = []\n x, y = current_node.location\n for (row, col) in maze.neighborPoints(x, y):\n node_location = (row, col)\n new_node = Node(current_node, node_location)\n children.append(new_node)\n\n for child in children:\n if child in closed_list:\n continue\n if child in open_list:\n continue\n child.g = current_node.g + 1\n # if len([i for i in open_list if child == i and child.g > i.g]) > 0:\n # continue\n\n child.h = heuristic_func(child.location, end_node.location)\n open_list.append(child)\n\n path.reverse()\n return path\n ############################################################################",
"def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:45:1, 0:42:1]\n pos = np.empty(x.shape + (2,))\n pos[:, :, 0] = x;\n pos[:, :, 1] = y\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = _get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return []",
"def better_heuristic(state, problem):\n # Enter your code here and remove the pass statement below\n sammy, medal = state\n if len(medal):\n return manhattan_distance_cost(sammy, medal[0], problem)\n else:\n return 0",
"def gen_heuristic(state, problem):\n # Enter your code here and remove the pass statement below\n sammy, medal = state\n if medal:\n return max(manhattan_distance_cost(sammy, medl, problem) for medl in medal)\n else:\n return 0",
"def a_star(grid, h, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n\n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n # Get the new vertexes connected to the current vertex\n for a in valid_actions(grid, current_node):\n next_node = (current_node[0] + a.delta[0], current_node[1] + a.delta[1])\n new_cost = current_cost + a.cost + h(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node, a)\n\n if found:\n # retrace steps\n n = goal\n path_cost = branch[n][0]\n path.append(goal)\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print('**********************')\n print('Failed to find a path!')\n print('**********************')\n return path[::-1], path_cost",
"def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if 
compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions",
"def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:LENGTH:1, 0:WIDTH:1]\n pos = np.empty(x.shape + (2,))\n # x.shape = (LENGTH,WIDTH)\n # x.shape + (2,) = (LENGTH,WIDTH,2)\n pos[:, :, 0] = x\n pos[:, :, 1] = y\n # pos.shape = (1890, 2)\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n # If axis is an integer, it specifies the axis of x along which to compute the vector norms\n # axis = 1: h.shape = 1890\n # axis = 0: h.shape = 2\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). 
map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n # print(\"Path\", closedSet)\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return [], closedSet",
"def a_star(grid, heuristic_func, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n\n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n # Get the new vertexes connected to the current vertex\n for a in valid_actions(grid, current_node):\n next_node = (current_node[0] + a.delta[0], current_node[1] + a.delta[1])\n new_cost = current_cost + a.cost + heuristic_func(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node, a)\n\n if found:\n # retrace steps\n n = goal\n path_cost = branch[n][0]\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n\n return path[::-1], path_cost",
"def calc_heuristic(self):\n row = len(self.grid)\n col = len(self.grid[0])\n\n self.heuristic = [[0 for x in range(col)] for y in range(row)]\n for i in range(row):\n for j in range(col):\n row_diff = abs(i - self.goal_node[0])\n col_diff = abs(j - self.goal_node[1])\n self.heuristic[i][j] = int(abs(row_diff - col_diff) + min(row_diff, col_diff) * 2)\n\n print \"Heuristic:\"\n for i in range(len(self.heuristic)):\n print self.heuristic[i]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the header string for this description If the description is empty, return an empty string. Otherwise, the raw data is joined together and returned with no '' components. | def to_header(self):
if not self.filled:
return ''
return "\n".join(self.data) | [
"def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'",
"def get_generic_header():\n headerstr = 'surface_area,' \\\n 'volume,' \\\n 'sav_ratio,' \\\n 'sphericity,' \\\n 'anisotropy,' \\\n 'centroid_x,' \\\n 'centroid_y,' \\\n 'centroid_z,' \\\n 'crofton_perimeter,' \\\n 'elongation,' \\\n 'euler_number_3d,' \\\n 'equivalent_diameter,' \\\n 'feret_shape_3d,' \\\n 'flatness,' \\\n 'shape_va3d,' \\\n 'orientation_phi,' \\\n 'orientation_theta,'\n return headerstr",
"def make_header_string(self, header_dict=None, mode='composite',\n hdr_key='#'):\n if (header_dict is None) and (self.hdr_dict is None):\n self.make_header_dict(mode)\n if header_dict is None:\n header_dict = self.hdr_dict\n\n hdr_list = []\n for k, v in header_dict.items():\n if (v.__class__ is str) and (len(v) is 0):\n hdr_list.append(k)\n else:\n hdr_list.append('{}: {}'.format(k, v))\n return '\\n'.join(['{} {}'.format(hdr_key, i) for i in hdr_list])",
"def header_text(self):\n return os.linesep.join(map(str, self.headers))",
"def get_header(self):\n\n fields_dict = {\n 'index' : self.index\n }\n\n # Make sure the source is defined\n if self.source_name is None:\n fields_dict['source'] = 'undefined'\n else:\n fields_dict['source'] = self.source_name\n\n # Add the host if it is defined\n if self.host is not None:\n fields_dict['host'] = self.host\n\n return \"***SPLUNK*** \" + self.make_fields_list(fields_dict) + '\"\\r\\n'",
"def createHeader(self, title):\n template = u\"<h1>{0}</h1>\"\n \n if self.outVars.hasHeader == True:\n return template.format(title)\n else:\n return \"\"",
"def table_header(self):\n title = 'HYPERPARAMETER FINE-TUNING RESULTS'\n title_len = len(title)\n extra_spaces = self.max_length - title_len\n left_spaces = extra_spaces // 2\n right_spaces = extra_spaces - left_spaces - 1\n\n return '| ' + (left_spaces * ' ') + title + (right_spaces * ' ') + ' |\\n'",
"def make_nonstandard_header():\n\toutput = \"\"\n\tnumber_of_headers = random.randint(0, 4)\n\tfor i in range(number_of_headers):\n\t\toutput += 'x-'\n\t\toutput += ''.join(random.choice(chars) for x in range(random.randint(1, 10)))\n\t\toutput += ': '\n\t\toutput += ''.join(random.choice(chars) for x in range(random.randint(1, 10)))\n\t\toutput += \"\\r\\n\"\n\treturn output",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def _hdr(self) -> str:\n\n if self.__hdr is None and self.is_valid:\n self.__hdr = _pkt_hdr(self)\n return self.__hdr",
"def get_headers_string(self):\n response = \"\"\n for row in self.headers:\n response += f\"{row}: {self.headers[row]}\\r\\n\"\n return response",
"def to_header(self):\n if self.star_tag:\n return '*'\n return ', '.join(['\"%s\"' % item for item in self.as_set(True)])",
"def _info_header(self):\n # First line\n unit = self._source\n if unit is None:\n source = ''\n else:\n source = f' from {type(unit).__name__}-{unit}'\n unit = self._sink\n if unit is None:\n sink = ''\n else:\n sink = f' to {type(unit).__name__}-{unit}'\n if self.ID:\n return f\"{type(self).__name__}: {self.ID}{source}{sink}\"\n else:\n return f\"{type(self).__name__}{source}{sink}\"",
"def header_content(self):\n\n if not self.has_header():\n pass\n\n return binascii.unhexlify(binascii.hexlify(bytes(bytearray(self.records[0].data)))).decode('ascii')",
"def design_report_header(self):\n rstr = nl() + \" \" + nl() + t('table border-collapse= \"collapse\" border=\"1px solid black\" width=100%') + nl()\n rstr += t('tr') + nl()\n row = [0, '<object type= \"image/PNG\" data= \"cmpylogoSeatAngle.png\" height=60 ></object>',\n '<font face=\"Helvetica, Arial, Sans Serif\" size=\"3\">Created with</font>' \" \" \" \" \" \" \" \" \" \" '<object type= \"image/PNG\" data= \"Osdag_header.png\" height=60 '' \" \" \" \" \" \" \"></object>']\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Company Name\", \"detail\", text_two=self.company_name, is_row=False)\n rstr += design_summary_row(0, \"Project Title\", \"detail\", text_two=self.project_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Group/Team Name\", \"detail\", text_two=self.group_team_name, is_row=False)\n rstr += design_summary_row(0, \"Subtitle\", \"detail\", text_two=self.sub_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Designer\", \"detail\", text_two=self.designer, is_row=False)\n rstr += design_summary_row(0, \"Job Number\", \"detail\", text_two=self.job_number, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Date\", \"detail\", text_two=time.strftime(\"%d /%m /%Y\"), is_row=False)\n rstr += design_summary_row(0, \"Client\", \"detail\", text_two=self.client, is_row=False)\n rstr += t('/tr')\n rstr += t('/table') + nl() + \" \" + nl()\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n return rstr",
"def _cdata_header(self, colsep=\"|\"):\n fields = self.fields\n header_lines = []\n line = \"\"\n for fieldName in self.field_names:\n width = int(fields[fieldName]['attr']['width'])\n line += self._entry(fieldName, width, colsep)\n header_lines.append(line)\n\n line = \"\"\n for fieldName in self.field_names:\n width = int(fields[fieldName]['attr']['width'])\n line += self._entry(fields[fieldName]['attr']['format'], width=width, colsep=colsep)\n header_lines.append(line)\n\n line = \"\"\n for fieldName in self.field_names:\n width = int(fields[fieldName]['attr']['width'])\n (l, m) = divmod(width, 10)\n guide = \"\"\n for i in range(l):\n guide += \"\".join(map(str, list(range(10))))\n guide += \"\".join(map(str, list(range(m))))\n line += self._entry(guide, width=width, colsep=colsep)\n header_lines.append(line)\n\n line = \"\"\n for fieldName in self.field_names:\n width = int(fields[fieldName]['attr']['width'])\n guide = \"-\" * width\n line += self._entry(guide, width=width, colsep=colsep)\n header_lines.append(line)\n\n return header_lines",
"def _write_header(self, head_msg=None):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n header = \"\\n%s\\nDateTime: %s \\nMessage: %s \\n\" % (\"*\" * 100, now, head_msg)\n\n return header",
"def get_header():\n # This simple_test_header cloud and the simple_test_point should be in sync. Some tests depend on it.\n header = \"\"\"ply\nformat ascii 1.0\nelement vertex 3\nproperty float x\nproperty float y\nproperty float z\n\"\"\"\n return header",
"def generate_output_header(self, query_type='RDAP'):\n\n output = '\\n{0}{1}{2} query for {3}:{4}\\n\\n'.format(\n ANSI['ul'],\n ANSI['b'],\n query_type,\n self.obj.address_str,\n ANSI['end']\n )\n\n return output"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
d1, d2 are flat (single-level) dicts whose values are numeric; add together the values stored under the same keys; d1 will be changed | def dicttvalsplus(d1, d2):
middle_dict = {}
for key in d2.keys():
middle_dict[key] = d1.get(key, 0) + d2.get(key, 0)
d1.update(middle_dict)
return d1 | [
"def update_dict_by_adding_another(dict1, dict2):\n for key in dict2.keys():\n if key not in dict1:\n if hasattr(dict2[key], 'copy'):\n dict1[key] = dict2[key].copy()\n else:\n dict1[key] = dict2[key]\n else:\n if (isinstance(dict1[key], DataArray) and isinstance(dict2[key], DataArray)):\n if 'units' not in dict1[key].attrs or 'units' not in dict2[key].attrs:\n raise InvalidStateError(\n 'DataArray objects must have units property defined')\n try:\n dict1[key] += dict2[key].to_units(dict1[key].attrs['units'])\n except ValueError: # dict1[key] is missing a dimension present in dict2[key]\n dict1[key] = dict1[key] + dict2[key].to_units(dict1[key].attrs['units'])\n else:\n dict1[key] += dict2[key] # += is in-place addition operator\n return # not returning anything emphasizes that this is in-place",
"def update(d1, d2):\n BASIC_TYPES = (int, float, str, bool, complex)\n if isinstance(d1, dict) and isinstance(d2, dict):\n for key, value in d2.items():\n # print(key, value)\n if key in d1:\n # key exists\n if isinstance(d1[key], BASIC_TYPES):\n d1[key] = value\n else:\n update(d1[key], value)\n\n else:\n d1[key] = deepcopy(value)\n else:\n # if it is any kind of object\n d1 = deepcopy(d2)",
"def update(self, other):\n if isinstance(other, self.__class__):\n for x, n in six.iteritems(other):\n self[x] += n\n else:\n for x in other:\n self[x] += 1",
"def __add__(self, other):\n retval = FloatDict()\n if isinstance(other, dict):\n for k in list(self.keys()):\n if k in list(other.keys()):\n retval[k] = self[k] + other[k]\n else:\n retval[k] = self[k]\n for k in list(other.keys()):\n if k not in list(self.keys()):\n retval[k] = other[k]\n if isinstance(other, float):\n for k in list(self.keys()):\n retval[k] = self[k] + other\n return retval",
"def add_vectors(v1,v2):\n \n #iterates through second dictionnary\n for key in v2:\n #if key is in v1 and v2 then we would add the values\n if key in v1:\n v1[key] = v1[key] +v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]\n #if the key is not in v1 then we create a new key with the same value in v2\n elif key not in v1:\n v1[key] = v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]",
"def merge_int_dicts(d1, d2):\n merged_dict = collections.defaultdict(int)\n for d in (d1, d2):\n for k in d:\n merged_dict[k] += d[k]\n return merged_dict",
"def merge_2_dictionnaries(dict1, dict2):\n result = dict1\n for k, v in dict2.items():\n result[k] = (result.get(k) or 0) + v\n return result",
"def incrementSparseVector(v1, scale, v2):\n # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)\n allKeys = list(set(v1.keys()) | set(v2.keys()))\n for key in allKeys:\n v1[key] += scale * v2[key]\n # END_YOUR_CODE",
"def merge_into(a, b):\n\tfor k, v in b.items():\n\t\ta[k] = a.get(k, 0) + v",
"def __updateDict(self,dictionary,other):\n for key in other:\n if key in dictionary:\n dictionary[key] = other[key]\n else:\n binKey = toBytes(key)\n if binKey in dictionary:\n dictionary[binKey] = other[key]\n else:\n dictionary[key] = other[key]",
"def fuse(self, other):\n d = other.record\n for key in d:\n if key in self.record:\n if type(self.record[key]) is Union:\n self.record[key].add(d[key])\n else:\n self.record[key] = Union([self.record[key], d[key]])\n else:\n self.record[key] = d[key]",
"def addCountDictionaries(firstCountDictionary, secondCountDictionary):\n for transitionId in secondCountDictionary.keys():\n if transitionId in firstCountDictionary.keys():\n firstCountDictionary[transitionId] = firstCountDictionary[transitionId] + secondCountDictionary[transitionId]\n else:\n firstCountDictionary[transitionId] = secondCountDictionary[transitionId]",
"def update_dictionary_key(d1, d2):\n d3 = {}\n skip = False\n for k1, v1 in d1.items():\n for k2, v2 in d2.items():\n if (len(set(v1) ^ set(v2)) == 1 and k1 in v2) or len(set(v1) ^ set(v2)) == 0:\n d3.update({k2: v1})\n skip = True\n break\n if not skip:\n d3.update({k1: v1})\n skip = False\n return d3",
"def incrementSparseVector(v1, scale, v2):\n for key in set(v1.keys()).union(set(v2.keys())):\n v1[key] += scale * v2[key]",
"def soft_dict_update(d1, d2):\n for key, value in list(d2.items()):\n if key not in d1:\n d1[key] = value",
"def incrementSparseVector(v1, scale, v2):\n # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)\n for k in v2.keys():\n v1[k] += scale*v2[k]\n\n return v1\n # END_YOUR_CODE",
"def __iadd__(self, other):\n if not isinstance(other, dict):\n msg = 'Can not concatenate Dict and {}'.format(type(other))\n raise TypeError(msg)\n for key, val in other.items():\n if key in self:\n self._append_key(key, val)\n else:\n self[key] = val\n return self",
"def add_key_values(d1):\n for key, values in d1.items():\n if key not in values:\n values.append(key)\n return d1",
"def combine_ngrams_dicts(dict1, dict2):\n for ngram, cnt in dict2.iteritems():\n if ngram in dict1:\n dict1[ngram] += cnt\n else:\n dict1[ngram] = cnt\n\n return dict1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
x_max = max(seq) x_min = min(seq) epilson = 1e6 new_seq = [10000 * (epilson + x - x_min )/(epilson + x_max - x_min) for x in seq] | def normalization(seq):
new_seq = [6.3578286171 * x for x in seq]
return new_seq | [
"def geo_seq(val, ratio, length):\n return [val * pow(ratio, i) for i in range(length)]",
"def sequence(start, end, factor):\n\n values = []\n v = start\n while v < end:\n values.append(v)\n v *= factor\n return values",
"def generate_values_in_range():\n\treturn [x * 0.5 for x in range(4,12)]",
"def linmap(x, max_value, min_value):\n a = min(x)\n b = max(x)\n ans = [(max_value - min_value) * (el - a) / (b - a) + min_value for el in x]\n return ans",
"def yield_spectral_range(self):\n return [min(self.x), max(self.x), len(self.x)]",
"def gen_seq():\n v = 1\n while 1:\n yield 1.0 / v\n v *= 2",
"def inverse_hyperbolic_sine(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 5\n for x in range(start, amount):\n y = abs(round(ratio * math.asinh(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def normalize_gene_list(self, genes):\n\n while len(genes) < self.num_of_genes:\n vec = Vector.random_2D()\n vec.limit(5000, 3000)\n genes.append(vec)\n\n while len(genes) > self.num_of_genes:\n genes.pop()\n\n return genes",
"def abser(x):\n it = iter(x)\n while True:\n yield np.abs(it.next())",
"def math_map_list(values, toMin=0, toMax=1):\n minValue = min(values)\n maxValue = max(values)\n delta = maxValue - minValue\n deltaTarget = toMax - toMin\n newValues = [toMin +(value-minValue)*deltaTarget/delta for value in values]\n return newValues",
"def arange(a, b, dx, logx=False):\n\tif a > b:\n\t\treturn []\n\telif logx:\n\t\treturn [math.exp(math.log(a) + dx*i) for i in range(int((math.log(b)-math.log(a))/dx))] + [b]\n\telse:\n\t\treturn [a + dx*i for i in range(int((b-a)/dx))] + [b]",
"def _events_amplitude(x, idx_sup_thr, idx_start, idx_stop, sf):\n amp_range = np.array([])\n distance_ms = np.array([])\n # Loop on each event\n for i, j in zip(idx_start, idx_stop):\n idx_event = np.arange(idx_sup_thr[i], idx_sup_thr[j])\n if idx_event.size > 0:\n amp_range = np.append(amp_range, np.ptp(x[idx_event]))\n distance = np.abs(np.argmax(x[idx_event]) - np.argmin(\n x[idx_event]))\n distance_ms = np.append(distance_ms, distance / sf * 1000)\n else:\n amp_range = 0.\n distance_ms = 0.\n\n return amp_range, distance_ms",
"def calc_x(DET, dets: list) -> list:\n li_x = []\n\n for det in dets:\n x_value = calc_truncate(det/DET)\n li_x.append(x_value)\n\n return li_x",
"def sines(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.sin(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def running_mean(sequence):\n sum_x = c = 0\n for x in sequence:\n c += 1\n sum_x += x\n yield round(float(sum_x)/c,2)",
"def normalize_values(values, new_min=0.0, new_max=1.0):\n old_max = max(values)\n old_min = min(values)\n old_range = (old_max - old_min)\n new_range = (new_max - new_min)\n return [(((value - old_min) * new_range) / old_range) + new_min for value in values]",
"def energy_to_lambda(energy_ev=[]):\n energy_mev = energy_ev * 1000\n lambda_array = np.sqrt(81.787 / energy_mev)\n return lambda_array",
"def normalize(x):\n sumx = sum(x)\n y = []\n for xi in x:\n xi = xi*(1./sumx)\n y.append(xi)\n return y",
"def hypotes(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 10\n for x in range(start, amount):\n y = abs(round(ratio * math.hypot(x, start)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write array to a file as text or binary (default). | def quick_save_array(data, file_name, delimiter=',', ):
data.tofile(file_name, sep=delimiter) | [
"def writeAsText(fileName, array):\n ioWriter = open(fileName, 'w')\n for item in array:\n print item\n ioWriter.write(item + \"\\n\")\n ioWriter.close()\n print \"Tersimpan dalam \" + fileName",
"def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is floating then format the values in scientific notation.\n if np.issubdtype(array.dtype, np.floating):\n array = array.astype(np.float32)\n formatter = lambda x: f'{x:.12E}'\n elif np.issubdtype(array.dtype, np.integer):\n array = array.astype(np.int32)\n formatter = lambda x: str(x)\n else:\n raise TypeError(f'Type of the data could not be serialised - {array.dtype}')\n\n lines = [' '.join(formatter(val) for val in row) + '\\n' for row in array]\n with open(file_path, 'w') as f:\n f.writelines(lines)",
"def write_array(name, arr):\n np.save(name, arr)",
"def write_array(path, arr):\n path = Path(path)\n file_ext = path.suffix\n if file_ext == '.npy':\n return np.save(str(path), arr)\n raise NotImplementedError(\"The file extension `{}` is not currently supported.\" % file_ext)",
"def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'",
"def print_to_file(arr, fid, sep=\"\", format=\"%s\"):\n\n f = array_create.array(arr, bohrium=False)\n return f.tofile(fid, sep=sep, format=format)",
"def writeIEEEbin(filename, arr):\n if arr.ndim == 2:\n arr = arr.T\n else:\n arr = arr.swapaxes(0, 2)\n arr.astype('>f8').tofile(filename)\n print('Écriture de {}'.format(filename))",
"def save_array(array, filename):\n np.save(filename, array)",
"def WriteArrayToFile(data: np.ndarray, filename: str, sampleRate: int = 44100) -> None:\n soundfile.write(filename, data.T, sampleRate, \"PCM_24\")",
"def write_general_arr(X, data_folder, fname, txt=True, compress=False):\n if txt:\n assert not compress\n fpath = data_folder + os.sep + fname + '.txt'\n np.savetxt(fpath, X, delimiter=',')\n else:\n if compress:\n fpath = data_folder + os.sep + fname + '.npy'\n np.save(fpath, X)\n else:\n fpath = data_folder + os.sep + fname + '.npz'\n np.savez(fpath, a=X)\n return fpath",
"def save_data(filename, arr):\n\n # Open a file handle for binary writing\n with open(filename, \"wb\") as outfile:\n\n # Get the arrays shape as unsigned integers and write to file\n sh = np.asarray(arr.shape).astype(np.uint32)\n sh.tofile(outfile)\n\n # Convert the data to float32 and write it to file\n arr = arr.astype(np.float32)\n arr.tofile(outfile)",
"def save_txt(data, file):\n with open(file, 'w', encoding='utf-8') as f:\n for line in data:\n f.write(line + '\\n')\n # f.write(str(data))\n f.close()",
"def writerow(filehandle, array):\n for elem in array:\n filehandle.write(elem)\n filehandle.write(\"\\t\")\n filehandle.write(\"\\n\")",
"def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))",
"def dump_raw_data(filename, data):\r\n # TODO: THIS\r\n rawfile = open(filename,'wb')\r\n a = array.array('f')\r\n for o in data:\r\n a.fromlist(list(o))\r\n #if is_little_endian():\r\n # a.byteswap()\r\n a.tofile(rawfile)\r\n rawfile.close()",
"def output_byte_array(name, doc, array):\n header(name=name, doc=doc)\n for i, b in enumerate(array):\n data(name=name, idx=i, size=1, value=b)\n declare(name=name, length=len(array))",
"def export_array(array, output_path, metadata):\n # Write numpy array to GeoTiff\n try:\n with rio.open(output_path, \"w\", **metadata) as dst:\n dst.write(array, 1)\n except Exception as error:\n output_message = print(f\"ERROR: {error}\")\n else:\n output_message = print(f\"Exported: {os.path.split(output_path)[-1]}\")\n\n return output_message",
"def writeCSV(filename, array, verbose):\n numpy.savetxt(filename, array, delimiter=',', fmt='%d')\n return",
"def write_numpy(filename, data):\n return np.save(filename, data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the datetimes from the excel file | def get_datetimes(file_name):
csv_file = open(file_name, 'rb')
file_content = csv.reader(csv_file)
# ignore header
file_content.next()
datetimes = []
for row in file_content:
datetimes.append(row[0])
csv_file.close()
return datetimes | [
"def get_dates(folder=os.getcwd()):\n \n \n res = []\n files = os.listdir(folder)\n for i in files:\n i = re.sub(\".xlsx\", \"\", i)\n i = datetime.strptime(i, \"%y%m%d\")\n res.append(i)\n return res",
"def getFileDates(self, file_id):\n sq = self.getEntry('File', file_id)\n start_time = sq.utc_start_time.date()\n stop_time = sq.utc_stop_time.date()\n return [start_time, stop_time]",
"def get_events(path, sheet_index=0, key=\"SF\"):\r\n\r\n wb = xlrd.open_workbook(path)\r\n sheet = wb.sheet_by_index(sheet_index)\r\n events = []\r\n\r\n # Scan the excel file for all cells that contanin the key (\"SF\") and return them\r\n for i in range(sheet.nrows):\r\n for j in range(sheet.ncols):\r\n if (sheet.cell_value(i, j) == 'Date'):\r\n date_row = i\r\n if (sheet.cell_value(i, j) == key):\r\n events.append([sheet.cell_value(i, 0), str(parser.parse(sheet.cell_value(date_row, j)).date())])\r\n\r\n return events",
"def get_raw_datetimes():\n raw_datetimes = []\n with open(RAW_DATETIMES_PATH, 'r') as f:\n for x in f.read().splitlines():\n try:\n raw_datetimes.append(datetime.datetime(year=int(x[1:5]), month=int(x[6:8]), day=int(x[9:11])))\n except ValueError:\n raw_datetimes.append('NA')\n return raw_datetimes",
"def read_schedule_file():\n\twb = open_workbook(filename=get_current_path() + '\\\\paydown_schedule.xlsx')\n\toutput = {}\n\tfor st_name in wb.sheet_names():\n\t\tws = wb.sheet_by_name(st_name)\n\t\toutput[st_name] = []\n\t\trow = 1\n\n\t\twhile row < ws.nrows:\n\t\t\tif is_blank_line(ws, row):\n\t\t\t\tbreak\n\n\t\t\toutput[st_name].append(read_line(ws, row, ['Date', 'Factor', 'Coupon']))\n\t\t\trow = row + 1\n\t\t# end of while loop\n\n\t\treturn output",
"def obs_dates(msfile, config, logger):\n logger.info('Starting archive file summary.')\n logger.info('To find the exact files imported here search the VLA archive (https://archive.nrao.edu/archive/advquery.jsp) for:')\n tb.open(msfile+'/OBSERVATION')\n times = tb.getcol('TIME_RANGE')\n tb.close()\n for i in range(len(times[0])):\n start_time = qa.time({'value':times[0][i],'unit':'s'},form='fits')[0]\n end_time = qa.time({'value':times[1][i],'unit':'s'},form='fits')[0]\n start_time = start_time.replace('T',' ')\n end_time = end_time.replace('T',' ')\n logger.info('Project: {0}\\tStart Time: {1}\\tEnd Time: {2}'.format(config['global']['project_name'],start_time,end_time))\n logger.info('Completed archive file summary.')",
"def test_read_date(self):\n filename = get_current_path() + '\\\\samples\\\\expense_sample.xls'\n wb = open_workbook(filename=filename)\n ws = wb.sheet_by_name('Expense Report')\n\n d = read_date(ws, 5, 1)\n self.assertEqual(d, datetime.datetime(2015,12,10))",
"def get_date_time_strings(self) -> List[str]:\n return_list: List[str] = []\n for occ in self.occurrences:\n s: str = Event.occurrence_to_string(occ)\n return_list.append(s)\n return return_list",
"def get_dates(path, files):\n photos = []\n\n for file in files:\n # Open file and get date\n with open(path + \"/\" + file, 'rb') as f:\n tags = exifread.process_file(f)\n date = tags[\"EXIF DateTimeOriginal\"]\n\n photos.append({\n \"name\": file,\n \"date\": str(date)\n })\n\n os.remove(path + \"/\" + file)\n\n return photos",
"def list_dates(product):\n\n if product == 'analysis_assim':\n files = _list_files(product)\n dates = []\n for f in files:\n date = _date_from_filename(f)\n dates.append(date)\n dates = list(set(dates)) # Get unique dates\n else:\n template = (HS_DATA_EXPLORER_URI + 'files_explorer/get-folder-contents'\n '/?selection_path=%2Fprojects%2Fwater%2Fnwm%2Fdata%2F{0}'\n '%3Ffolder&query_type=filesystem')\n if 'long_range' in product:\n product = 'long_range'\n uri = template.format(product)\n response = urlopen(uri).read()\n dates = re.findall(r'\\>([0-9]+)\\<', response)\n return sorted(dates)",
"def dates_in_storage():\n pattern = os.path.join(path_to_module(), _folder_store, 'data_????-??-??.h5')\n files = glob.glob(pattern)\n\n dates = []\n for f in files:\n b, e = os.path.splitext(os.path.basename(f))\n c = b.split('data_')[1]\n\n year, month, day = c.split('-')\n date = arrow.Arrow(int(year), int(month), int(day))\n dates.append(date)\n\n return sorted(dates)",
"def dates_from_file(self, filename=\"\"):\n\n dates = []\n\n try:\n with open(filename, \"r\", newline=\"\") as txt_file:\n reader = csv.reader(txt_file, delimiter=\",\")\n\n for line in reader:\n if len(line) > 1:\n start = datetime.datetime.strptime(\n line[0],\n self.date_format\n )\n end = datetime.datetime.strptime(\n line[1],\n self.date_format\n )\n delta = end - start\n\n for day in range(delta.days + 1):\n dates.append(start + datetime.timedelta(days=day))\n\n elif len(line) > 0:\n dates.append(\n datetime.datetime.strptime(\n line[0],\n self.date_format\n )\n )\n\n except Exception as e:\n logging.error(\"Failed to import dates file %s: %s\", filename, e)\n\n return dates",
"def load_calendar(index_path):\n return pd.read_csv(\n os.path.join(index_path, 'SH000300.csv'),\n dtype={'date':str}\n ).iloc[:, 0].tolist()",
"def get_relax_timestamps_from_file(self):\n\n global client_id\n reset_global()\n\n directory = self.path+self.ident+'/'\n mSF = re.compile(r\"(\\w+)-primary.txt\")\n\n times = []\n for filename in os.listdir(directory):\n if mSF.search(filename):\n reset_user()\n reset_task()\n client_id = mSF.search(filename).group(1)\n f = open(os.path.join(directory, filename), \"r\", encoding=\"utf8\")\n for line in f:\n # print line\n time = parse_relax_times(line)\n if time!=None:\n times.append(time)\n continue\n else:\n continue\n\n # also save to object var\n # self.extracted_cog_res = results\n return times",
"def convert_date_icmbio(self,xls_data,columns_name_index,xls_file_instance):\n index_date = columns_name_index['dt_auto']\n date_obj = None\n \n for index in range(len(xls_data)):\n if xls_data[index][index_date] != ''\\\n and xls_data[index][index_date] != None: \n date_obj = xlrd.xldate_as_datetime(\n xls_data[index][index_date],xls_file_instance.datemode\n )\n xls_data[index][index_date] = date_obj.date()\n \n else: \n xls_data[index][index_date] = 'null' \n\n return xls_data",
"def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 8,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 9,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # ,pd.DataFrame(list1)\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] # col_name_1, \n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))",
"def data(self):\n\n try:\n sheet = load_workbook(self.arquivo, read_only=True)\n act_sheet = sheet.active\n lines = act_sheet.rows\n if self.l1 != 0:\n lines = islice(lines, self.l1, None)\n data = []\n for line in lines:\n if isinstance(self.usecols, tuple):\n content = [line[value].value for value in self.usecols]\n else:\n content = [line[self.usecols].value]\n\n if content[0] is not None:\n data.append(content)\n\n except InvalidFileException:\n book = xlrd.open_workbook(self.arquivo)\n sheet = book.sheet_by_index(0)\n data = []\n for line in range(self.l1, sheet.nrows, 1):\n conteudo = [sheet.row(line)[value].value if isinstance(sheet.row(line)[value].value, float)\n else 0.0 for value in self.usecols]\n data.append(conteudo)\n\n return data",
"def read_spectrograms_excel(filename):\n\n xl = pd.ExcelFile(filename)\n key = xl.sheet_names[0]\n df = pd.read_excel(xl, index_col=0)\n\n if df.index.dtype == pd.Timestamp:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n\n # Replace _ with \" \"\n key = \" \".join(key.split(\"_\"))\n\n return key, df",
"def extract_times_from_file(filename):\n\ttimes = set()\n\twith open(filename, 'r') as file:\n\t\tfor time in file:\n\t\t\ttimes.add(str(time))\n\treturn times"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |