Columns:
  query      string (length 9 to 9.05k)
  document   string (length 10 to 222k)
  negatives  list (19 to 20 items)
  metadata   dict
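The rows below follow this schema: each record pairs a natural-language query (typically a docstring) with the code document it describes, a list of hard-negative code snippets, and a metadata dict describing the training objective. As a rough illustration of how such a dump is usually consumed, here is a minimal sketch using the Hugging Face datasets library; the dataset path "user/code-search-triplets" and the split name are placeholders, not the real identifiers.

from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path.
ds = load_dataset("user/code-search-triplets", split="train")

row = ds[0]
print(row["query"])            # natural-language docstring
print(row["document"][:200])   # the matching code snippet (truncated for display)
print(len(row["negatives"]))   # 19-20 non-matching code snippets
print(row["metadata"])         # objective description, e.g. the triplet structure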
Metainformation for RDF output
def rdfMeta(self):
    return {
        'label'        : {'uri' : [ RdfURI('skos:prefLabel'), RdfURI('dcel:title') ] },
        'description'  : {'uri' : [ RdfURI('v:description'), RdfURI('dcel:description'),
                                    RdfURI('rev:text'), RdfURI('bibtex:abstract') ],
                          'property' : 'get_description' },
        'context'      : {'uri' : 'skos:inScheme', 'condition' : ('is_root', False) },
        'top_concept'  : {'uri' : 'skos:topConceptOf', 'condition' : ('is_root', True), 'property' : 'context' },
        'type_tag'     : {'uri' : 'ov:wordType', 'uri_pattern': 'ov:I%s' },
        'word_senses'  : {'uri' : 'wn20schema:containsWordSense', 'condition': ('in_synset', None)},
        'in_synset'    : {'uri' : 'wn20schema:inSynset'},
        'parent'       : {'uri' : [ RdfURI('skos:broader') ]},
        'childOf'      : {'uri' : [ RdfURI('skos:narrower') ]},
        'list_related' : {'uri' : 'skos:related'},
        #'is_root'      : {'uri' : 'dcel:language' },
        #'relations'    : {'uri' : 'skos:inScheme' },
        #'meanings'     : {'uri' : 'skos:inScheme' },
        #'frame'        : {'uri' : 'skos:inScheme' },
        #'lexical_form' : {'uri' : [ RdfURI('skos:prefLabel'), RdfURI('dcel:title') ] },
        #'in_synset'    : {'uri' : 'skos:inScheme' },
        #'tag_count'    : {'uri' : 'skos:inScheme' },
        #'words'        : {'uri' : 'skos:inScheme' },
    }
[ "def get_metadata(self, g, item, type='Dataset'):\n DCAT = Namespace('http://www.w3.org/ns/dcat#')\n SMA = Namespace('http://schema.org/')\n meta = dict()\n #default sparql\n #meta = self.get_default_metadata(g)\n self.logger.info('FsF-F2-01M : Trying to get some core domain agnostic (DCAT, DC, schema.org) metadata from RDF graph')\n if not meta.get('object_identifier'):\n meta['object_identifier'] = []\n for identifier in (list(g.objects(item, DC.identifier)) + list(g.objects(item, DCTERMS.identifier)) +\n list(g.objects(item, SDO.identifier)) + list(g.objects(item, SMA.identifier)) +\n list(g.objects(item, SDO.sameAs))+ list(g.objects(item, SMA.sameAs))):\n meta['object_identifier'].append(str(identifier))\n\n '''\n meta['object_identifier'] = (g.value(item, DC.identifier) or\n g.value(item, DCTERMS.identifier) or\n g.value(item, SDO.identifier) or\n g.value(item, SMA.identifier) or\n g.value(item, SMA.sameAs))\n '''\n '''\n if self.source_name != self.getEnumSourceNames().RDFA.value:\n meta['object_identifier'] = str(item)\n meta['object_content_identifier'] = [{'url': str(item), 'type': 'application/rdf+xml'}]\n '''\n if not meta.get('language'):\n meta['language'] = str(g.value(item, DC.language) or g.value(item, DCTERMS.language) or\n g.value(item, SDO.inLanguage) or g.value(item, SMA.inLanguage))\n if not meta.get('title'):\n meta['title'] = str(g.value(item, DC.title) or g.value(item, DCTERMS.title) or g.value(item, SMA.name) or g.value(item, SDO.name) or g.value(item, SMA.headline) or g.value(item, SDO.headline))\n if not meta.get('summary'):\n meta['summary'] = str(g.value(item, DC.description) or g.value(item, DCTERMS.description) or g.value(item, DCTERMS.abstract) or\n g.value(item, SMA.description) or g.value(item, SDO.description)\n or g.value(item, SMA.abstract) or g.value(item, SDO.abstract))\n if not meta.get('publication_date'):\n meta['publication_date'] = str(g.value(item, DC.date) or g.value(item, DCTERMS.date) or\n g.value(item, DCTERMS.issued)\n or g.value(item, SMA.datePublished) or g.value(item, SMA.dateCreated)\n or g.value(item, SDO.datePublished) or g.value(item, SDO.dateCreated)\n )\n if not meta.get('publisher'):\n meta['publisher']=[]\n for publisher in (list(g.objects(item, DC.publisher)) or list(g.objects(item, DCTERMS.publisher)) or\n list(g.objects(item, SMA.publisher)) or list(g.objects(item, SDO.publisher)) or\n list(g.objects(item, SMA.provider)) or list(g.objects(item, SDO.provider))):\n publishername = (g.value(publisher,FOAF.name) or (g.value(publisher,SMA.name))or (g.value(publisher,SDO.name)))\n if publishername:\n meta['publisher'].append(str(publishername))\n else:\n meta['publisher'].append(str(publisher))\n #meta['publisher'] = str(g.value(item, DC.publisher) or g.value(item, DCTERMS.publisher) or\n # g.value(item, SMA.publisher) or g.value(item, SDO.publisher) or g.value(item, SMA.provider) or g.value(item, SDO.provider))\n if not meta.get('keywords'):\n meta['keywords'] = []\n for keyword in (list(g.objects(item, DCAT.keyword)) + list(g.objects(item, DCTERMS.subject)) +\n list(g.objects(item, DC.subject))\n or list(g.objects(item, SMA.keywords)) or list(g.objects(item, SDO.keywords))):\n meta['keywords'].append(str(keyword))\n #TODO creators, contributors\n if not meta.get('creator'):\n meta['creator'] = []\n for creator in (list(g.objects(item, DCTERMS.creator)) or list(g.objects(item, DC.creator)) or list(g.objects(item, SMA.author))):\n if g.value(creator,FOAF.name):\n meta['creator'].append(str(g.value(creator,FOAF.name)))\n else:\n 
meta['creator'].append(str(creator))\n\n if not meta.get('contributor'):\n meta['contributor'] = []\n for contributor in (list(g.objects(item, DCTERMS.contributor)) or list(g.objects(item, DC.contributor)) or list(g.objects(item, SMA.contributor))):\n meta['contributor'].append(str(contributor))\n if not meta.get('license'):\n meta['license'] = str(g.value(item, DCTERMS.license) or g.value(item, SDO.license) or g.value(item, SMA.license))\n if not meta.get('access_level'):\n meta['access_level'] = str(g.value(item, DCTERMS.accessRights) or g.value(item, DCTERMS.rights) or\n g.value(item, DC.rights)\n or g.value(item, SDO.conditionsOfAccess) or g.value(item, SMA.conditionsOfAccess) )\n if not meta.get('related_resources'):\n meta['related_resources'] = []\n for dctrelationtype in [\n DCTERMS.references, DCTERMS.source, DCTERMS.isVersionOf, DCTERMS.isReferencedBy, DCTERMS.isPartOf,\n DCTERMS.hasVersion, DCTERMS.replaces, DCTERMS.hasPart, DCTERMS.isReplacedBy, DCTERMS.requires,\n DCTERMS.isRequiredBy\n ]:\n dctrelation = g.value(item, dctrelationtype)\n if dctrelation:\n meta['related_resources'].append({\n 'related_resource': str(dctrelation),\n 'relation_type': str(dctrelationtype)\n })\n for schemarelationtype in [\n SMA.isPartOf, SMA.includedInDataCatalog, SMA.subjectOf, SMA.isBasedOn, SMA.sameAs,\n SDO.isPartOf, SDO.includedInDataCatalog, SDO.subjectOf, SDO.isBasedOn, SDO.sameAs\n ]:\n schemarelation = g.value(item, schemarelationtype)\n if schemarelation:\n meta['related_resources'].append({\n 'related_resource': str(schemarelation),\n 'relation_type': str(schemarelationtype)\n })\n\n if meta:\n meta['object_type'] = type\n meta = {k: v for k, v in meta.items() if v not in [None, 'None',[]]}\n self.logger.info(\n 'FsF-F2-01M : Found some core domain agnostic (DCAT, DC, schema.org) metadata from RDF graph -: '+str(meta))\n return meta", "def test_meta_output(self):\n jsonld_path = os.path.join(testscriptstempdir, 'metajson.jsonld')\n rdf_path = os.path.join(testscriptstempdir, 'metardf.ttl')\n meta_context_path = os.path.join(testscriptstempdir, 'metacontext.jsonld')\n\n # Generate an image of the metamodel\n gen = ContextGenerator(source_yaml_path, importmap=BIOLINK_IMPORT_MAP)\n base = gen.schema.id\n if base[-1] not in '/#':\n base += '/'\n base += gen.schema.name\n with open(meta_context_path, 'w') as tfile:\n tfile.write(gen.serialize())\n with open(jsonld_path, 'w') as tfile:\n tfile.write(JSONLDGenerator(source_yaml_path, fmt=JSONLDGenerator.valid_formats[0],\n importmap=BIOLINK_IMPORT_MAP).serialize(context=meta_context_path))\n g = Graph()\n g.load(jsonld_path, format=\"json-ld\")\n g.serialize(rdf_path, format=\"ttl\")\n g.bind('meta', METAMODEL_NAMESPACE)\n new_ttl = g.serialize(format=\"turtle\").decode()\n new_g = Graph()\n new_g.parse(data=new_ttl, format=\"turtle\")\n self.check_size(g, new_g, URIRef(base), 11, 91, 13, \"meta\")", "def parse_metadata(self):\n #self.source_name = self.getEnumSourceNames().LINKED_DATA.value\n #self.logger.info('FsF-F2-01M : Trying to request RDF metadata from -: {}'.format(self.source_name))\n rdf_metadata = dict()\n rdf_response_graph = None\n\n #if self.rdf_graph is None:\n if not self.json_ld_content and self.target_url:\n if not self.accept_type:\n self.accept_type = AcceptTypes.rdf\n requestHelper: RequestHelper = RequestHelper(self.target_url, self.logger)\n requestHelper.setAcceptType(self.accept_type)\n requestHelper.setAuthToken(self.auth_token,self.auth_token_type)\n neg_source, rdf_response = 
requestHelper.content_negotiate('FsF-F2-01M')\n if requestHelper.checked_content_hash:\n if requestHelper.checked_content.get(requestHelper.checked_content_hash).get('checked') and 'xml' in requestHelper.content_type:\n requestHelper.response_content = None\n self.logger.info('FsF-F2-01M : Ignoring RDF since content already has been parsed as XML')\n if requestHelper.response_content is not None:\n self.content_type = requestHelper.content_type\n else:\n self.content_type = 'application/ld+json'\n rdf_response = self.json_ld_content\n if self.content_type is not None:\n self.content_type = self.content_type.split(';', 1)[0]\n #handle JSON-LD\n if self.content_type in ['application/ld+json','application/json','application/vnd.schemaorg.ld+json']:\n if self.target_url:\n jsonld_source_url = self.target_url\n else:\n jsonld_source_url = 'landing page'\n if self.json_ld_content:\n self.source_name = MetadataSources.SCHEMAORG_EMBEDDED\n elif self.source_name != MetadataSources.RDF_TYPED_LINKS and self.source_name != MetadataSources.RDF_SIGNPOSTING_LINKS:\n self.source_name = MetadataSources.SCHEMAORG_NEGOTIATED\n self.logger.info('FsF-F2-01M : Try to parse RDF (JSON-LD) from -: %s' % (jsonld_source_url))\n if isinstance(rdf_response, bytes):\n try:\n rdf_response = rdf_response.decode(\"utf-8\")\n except:\n pass\n if isinstance(rdf_response, dict) or isinstance(rdf_response, list):\n self.logger.info('FsF-F2-01M : Try to parse JSON-LD using JMESPath retrieved as dict from -: %s' % (jsonld_source_url))\n # in case two or more JSON-LD strings are embedded\n if isinstance(rdf_response, list):\n json_dict = None\n if len(rdf_response) > 1:\n self.logger.info(\n 'FsF-F2-01M : Found more than one JSON-LD embedded in landing page try to identify Dataset or CreativeWork type')\n for meta_rec in rdf_response:\n meta_rec_type = str(meta_rec.get('@type')).lower().lstrip('schema:')\n if meta_rec_type in ['dataset']:\n json_dict = meta_rec\n break\n if meta_rec_type in self.SCHEMA_ORG_CREATIVEWORKS:\n json_dict = meta_rec\n if not json_dict:\n rdf_response = rdf_response[0]\n else:\n rdf_response = json_dict\n try:\n rdf_metadata = self.get_schemorg_metadata_from_dict(rdf_response)\n if rdf_metadata:\n self.setLinkedNamespaces(str(rdf_response))\n else:\n self.logger.info('FsF-F2-01M : Could not identify schema.org JSON-LD metadata using JMESPath, continuing with RDF graph processing')\n # quick fix for https://github.com/RDFLib/rdflib/issues/1484\n # needs to be done before dict is converted to string\n #print(rdf_response)\n if rdf_response.get('@context'):\n if rdf_response.get('@graph'):\n try:\n #drop duplicate context in graph\n if isinstance(rdf_response.get('@graph'), list):\n for grph in rdf_response.get('@graph'):\n if grph.get('@context'):\n del grph['@context']\n else:\n if rdf_response.get('@graph').get('@context'):\n del rdf_response['@graph']['@context']\n except Exception as e:\n print('Failed drop duplicate JSON-LD context in graph')\n pass\n #Fixing Dereferencing issues: https://github.com/json-ld/json-ld.org/issues/747\n if isinstance(rdf_response.get('@context'), list):\n for ctxi, ctxt in enumerate(rdf_response.get('@context')):\n if 'schema.org' in ctxt:\n rdf_response['@context'][ctxi] = 'https://schema.org/docs/jsonldcontext.json'\n if isinstance(rdf_response.get('@context'), str):\n if 'schema.org' in rdf_response.get('@context'):\n rdf_response['@context'] = 'https://schema.org/docs/jsonldcontext.json'\n rdf_response = jsonld.expand(rdf_response)\n rdf_response = 
json.dumps(rdf_response)\n except Exception as e:\n print('RDF Collector Error: ',e)\n pass\n #t ry to make graph from JSON-LD string\n if isinstance(rdf_response, str):\n try:\n rdf_response = str(rdf_response).encode('utf-8')\n except:\n self.logger.info('FsF-F2-01M : UTF-8 string conversion of JSON-LD failed')\n pass\n self.logger.info('FsF-F2-01M : Try to parse JSON-LD using RDFLib retrieved as string from -: %s' % (jsonld_source_url))\n try:\n jsonldgraph = rdflib.ConjunctiveGraph()\n rdf_response_graph = jsonldgraph.parse(data=rdf_response, format='json-ld')\n #rdf_response_graph = jsonldgraph\n self.setLinkedNamespaces(self.getAllURIS(jsonldgraph))\n except Exception as e:\n print('JSON-LD parsing error', e, rdf_response[:100])\n self.logger.info('FsF-F2-01M : Parsing error (RDFLib), failed to extract JSON-LD -: {}'.format(e))\n\n elif self.accept_type == AcceptTypes.rdf:\n #print('ACCEPT: ',self.accept_type)\n # parse all other RDF formats (non JSON-LD schema.org)\n # parseformat = re.search(r'[\\/+]([a-z0-9]+)$', str(requestHelper.content_type))\n format_dict = {'text/ttl':'turtle',\n 'application/xhtml+xml':'rdfa',\n 'application/n-triples':'nt',\n 'application/n-quads':'nquads'\n }\n if self.content_type in format_dict:\n parseformat = (None, format_dict[self.content_type])\n else:\n parseformat = re.search(r'[\\/+]([a-z0-9]+)$', str(self.content_type))\n if parseformat:\n parse_format = parseformat[1]\n if parse_format not in ['xml', 'n3','turtle', 'nt', 'pretty-xml','trix','trig','nquads', 'json-ld','hext']:\n parse_format = 'turtle'\n if 'html' not in str(parse_format) and 'zip' not in str(parse_format) :\n RDFparsed = False\n self.logger.info('FsF-F2-01M : Try to parse RDF from -: %s as %s' % (self.target_url,parse_format))\n badline = None\n while not RDFparsed:\n try:\n graph = rdflib.Graph(identifier = self.target_url)\n graph.parse(data=rdf_response, format=parse_format)\n rdf_response_graph = graph\n self.setLinkedNamespaces(self.getAllURIS(rdf_response_graph))\n RDFparsed = True\n except Exception as e:\n #<unknown>:74964:92: unclosed token\n errorlinematch = re.search(r'\\sline\\s([0-9]+)',str(e))\n if not errorlinematch:\n errorlinematch = re.search(r'<unknown>:([0-9]+)',str(e))\n if errorlinematch and parseformat[1] !='xml':\n if int(errorlinematch[1])+1 != badline:\n badline = int(errorlinematch[1])\n self.logger.warning(\n 'FsF-F2-01M : Failed to parse RDF, trying to fix RDF string and retry parsing everything before line -: %s ' % str(badline))\n splitRDF = rdf_response.splitlines()\n if len(splitRDF) >=1 and badline <= len(splitRDF) and badline > 1:\n rdf_response = b'\\n'.join(splitRDF[:badline-1])\n else:\n RDFparsed = True # end reached\n else:\n RDFparsed = True\n else:\n RDFparsed = True # give up\n if not RDFparsed:\n continue\n else:\n self.logger.warning(\n 'FsF-F2-01M : Failed to parse RDF -: %s %s' % (self.target_url, str(e)))\n else:\n self.logger.info('FsF-F2-01M : Seems to be HTML not RDF, therefore skipped parsing RDF from -: %s' % (self.target_url))\n else:\n self.logger.info('FsF-F2-01M : Could not determine RDF serialisation format for -: {}'.format(self.target_url))\n\n #else:\n # neg_source, rdf_response = 'html', self.rdf_graph\n if not rdf_metadata:\n rdf_metadata = self.get_metadata_from_graph(rdf_response_graph)\n return self.source_name, rdf_metadata", "def rdfize(json_entry):\n\n entry = json_entry\n\n try:\n\n ctx = {\n \"@context\": {\n \"@base\": \"https://bio.tools/\",\n \"biotools\": \"https://bio.tools/ontology/\",\n \"edam\": 
\"http://edamontology.org/\",\n \"pubmed\": \"https://www.ncbi.nlm.nih.gov/pubmed/\",\n \"pmc\": \"https://www.ncbi.nlm.nih.gov/pmc/\",\n \"doi\": \"https://doi.org/\",\n \"dc\": \"http://purl.org/dc/terms/\",\n \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\",\n\n # \"hasContact\": \"dc:publisher\",\n # \"hasPublication\": \"dc:references\",\n\n # \"id\": \"datacite:identifier\",\n \"id\": \"dc:identifier\",\n # \"name\": \"datacite:title\",\n \"name\": \"dc:title\",\n # \"description\": \"datacite:description\",\n \"description\": \"dc:description\",\n # \"license\": \"datacite:rights\",\n \"license\": \"dc:license\",\n \"hasContact\": \"datacite:contributor\",\n \"toolType\": \"datacite:resourceType\",\n \"additionDate\": \"datacite:date\",\n \"language\": \"datacite:format\",\n \"homepage\": \"datacite:alternateIdentifier\",\n \"hasPublication\": \"dc:references\",\n \"download\": \"datacite:alternateIdentifier\",\n\n \"hasOperation\": \"biotools:has_function\",\n \"hasInputData\": \"edam:has_input\",\n \"hasOutputData\": \"edam:has_output\",\n \"hasTopic\": \"edam:has_topic\"\n }\n }\n entry['@id'] = str(entry['biotoolsID'])\n entry['@type'] = {\"@id\": 'biotools:Resource'}\n entry.update(ctx)\n\n # for contact in entry['contact']:\n # if not \"hasContact\" in entry.keys():\n # entry['hasContact'] = [contact['name']]\n # else :\n # entry['hasContact'].append(contact['name'])\n\n # for download in entry['download']:\n # if download['url']:\n # if not \"download\" in entry.keys():\n # entry['download'] = [download['url']]\n # else :\n # entry['download'].append(download['url'])\n\n for publication in entry['publication']:\n if publication['pmid']:\n if not \"hasPublication\" in entry.keys():\n entry['hasPublication'] = [{\"@id\": 'pubmed:' + publication['pmid']}]\n else:\n entry['hasPublication'].append({\"@id\": 'pubmed:' + publication['pmid']})\n if publication['pmcid']:\n if not \"hasPublication\" in entry.keys():\n entry['hasPublication'] = [{\"@id\": 'pmc:' + publication['pmcid']}]\n else:\n entry['hasPublication'].append({\"@id\": 'pmc:' + publication['pmcid']})\n if publication['doi']:\n if not (\"<\" in publication['doi'] or \">\" in publication['doi']):\n if not \"hasPublication\" in entry.keys():\n entry['hasPublication'] = [{\"@id\": \"https://doi.org/\" + publication['doi']}]\n else:\n entry['hasPublication'].append({\"@id\": \"https://doi.org/\" + publication['doi']})\n\n for item in entry['function']:\n for op in item['operation']:\n if not \"hasOperation\" in entry.keys():\n entry['hasOperation'] = [{\"@id\": op['uri']}]\n else:\n entry['hasOperation'].append({\"@id\": op['uri']})\n\n for input in item['input']:\n if not \"hasInputData\" in entry.keys():\n entry['hasInputData'] = [{\"@id\": input['data']['uri']}]\n else:\n entry['hasInputData'].append({\"@id\": input['data']['uri']})\n\n for output in item['output']:\n if not \"hasOutputData\" in entry.keys():\n entry['hasOutputData'] = [{\"@id\": output['data']['uri']}]\n else:\n entry['hasOutputData'].append({\"@id\": output['data']['uri']})\n\n for item in entry['topic']:\n if not \"hasTopic\" in entry.keys():\n entry['hasTopic'] = [{\"@id\": item['uri']}]\n else:\n entry['hasTopic'].append({\"@id\": item['uri']})\n\n except KeyError as error:\n print(json.dumps(entry, indent=4, sort_keys=True))\n print()\n\n raw_jld = json.dumps(entry)\n return raw_jld", "def json(self,rdf_content):\n g = Graph()\n \n lb = Namespace('http://rdf.lightbase.cc/ontology/')\n dc = Namespace('http://purl.org/dc/elements/1.1/')\n \n 
#print(rdf_content)\n \n result = g.parse(data=rdf_content, format=\"application/rdf+xml\")\n \n self.rdf_collection = json_for_graph(result)\n self.rdf_identifier = g.objects(None,dc['identifier']).next().toPython()\n\n # Get base name here\n self.base_name = g.objects(None,lb['baseName']).next()\n \n # Test with SPARQL \n teste = result.query(\n \"\"\"\n PREFIX lb: <http://rdf.lightbase.cc/ontology/> \n PREFIX dc: <http://purl.org/dc/elements/1.1/>\n SELECT ?fieldName ?fieldData\n WHERE {\n ?x lb:fieldName ?fieldName .\n ?x dc:description ?fieldData . \n }\n \"\"\"\n )\n \n # I need one specific field\n arquivo = result.query(\"\"\"\n PREFIX lb: <http://rdf.lightbase.cc/ontology/>\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n SELECT ?arquivo\n WHERE {\n ?x rdf:type lb:registro .\n ?x lb:arquivo ?arquivo .\n } \n \"\"\")\n \n # Return metadata as dict\n self.metadata = dict(teste.result)\n self.metadata['url'] = arquivo.result[0]\n self.metadata['id'] = self.rdf_identifier", "def _construct_metadata(self):\n if self.properties:\n return self._step_type_to_output_format_map[self.type]()\n return None", "def parse(self,rdf_content):\n g = Graph()\n \n lb = Namespace('http://rdf.lightbase.cc/ontology/')\n dc = Namespace('http://purl.org/dc/elements/1.1/')\n \n result = g.parse(data=rdf_content, format=\"application/rdf+xml\")\n self.rdf_collection = result.serialize(format='turtle')\n self.rdf_identifier = g.objects(None,dc['identifier']).next().toPython()\n\n # Get base name here\n self.base_name = g.objects(None,lb['baseName']).next()\n \n # Test with SPARQL \n teste = result.query(\n \"\"\"\n PREFIX lb: <http://rdf.lightbase.cc/ontology/> \n PREFIX dc: <http://purl.org/dc/elements/1.1/>\n SELECT ?fieldName ?fieldData\n WHERE {\n ?x lb:fieldName ?fieldName .\n ?x dc:description ?fieldData .\n }\n \"\"\"\n )\n \n \n # I need one specific field\n arquivo = result.query(\"\"\"\n PREFIX lb: <http://rdf.lightbase.cc/ontology/>\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n SELECT ?arquivo\n WHERE {\n ?x rdf:type lb:registro .\n ?x lb:arquivo ?arquivo .\n } \n \"\"\")\n \n # Return metadata as tuple\n self.metadata = dict(teste.result)\n self.metadata['url'] = arquivo.result[0]\n self.metadata['id'] = self.rdf_identifier\n #print(self.metadata)\n \n #print(self.rdf_identifier)", "def metadata(self) -> dict[str, Any]:", "def to_rdfxml(self):\n return rdfviews.MyndighetsforeskriftDescription(self).to_rdfxml()", "def create_rdf_output(prefixes, prefixes_used, subjects):\n output = ''\n\n for prefix_used in sorted(prefixes_used):\n output += '@prefix %s: <%s/> .\\n' % (prefix_used, prefixes[prefix_used])\n\n output += '\\n'\n\n for s, po in subjects.items():\n n = 0\n limit = len(po)\n\n output += '<%s>\\n' % s\n\n for p, o in po:\n n += 1\n p = get_rdf_for_triple_part(p, prefixes)\n o = get_rdf_for_triple_part(o, prefixes)\n\n output += ' %s %s ' % (p, o)\n output += ';' if n < limit else '.'\n output += '\\n'\n\n return output", "def _gen_meta(self):\n meta = {\"encode_dict\" : self.encode_dict,\n \"word_length\" : self.word_len,\n \"data_length\" : self.data_length,\n \"magic_number\" : MAGIC_NUMBER}\n return meta", "def printMetadata(self):\n print (\"************COMMONDATA************\")\n print (\"Setname:\", self.setname, \"PROC:\", self.proc)\n print (\"NDAT:\", self.ndata,\"NSYS:\",self.nsys)", "def to_rdf(self: Concept, format: str = \"text/turtle\") -> str:\n return self._to_graph().serialize(format=format)", "def sparql(self):\n return f\"\"\"\n {{\n 
{self.__sparql__}\n BIND(\"{self.edge_label}\" AS ?edge_label) .\n BIND(\"{self.relation}\" AS ?relation) .\n }}\n \"\"\"", "def __repr__(self):\n return ('SQL Schema Document: <id={}, uri={}, mimetype={}, tags={}>'\n .format(self.id, self.uri, self.mimetype, self.tags))", "def Attributes(self) -> _n_5_t_17:", "def pretty_metadata(self):\n if self.meta:\n return ' | '.join(val for _, val in self.meta.items())\n return ''", "def __repr__(self):\n\n\t\tvalue = \"triples map id: {}\\n\".format(self.triples_map_name)\n\t\tvalue += \"\\tlogical source: {}\\n\".format(self.data_source)\n\t\tvalue += \"\\treference formulation: {}\\n\".format(self.reference_formulation)\n\t\tvalue += \"\\titerator: {}\\n\".format(self.iterator)\n\t\tvalue += \"\\tsubject map: {}\\n\".format(self.subject_map.value)\n\n\t\tfor predicate_object_map in self.predicate_object_maps_list:\n\t\t\tvalue += \"\\t\\tpredicate: {} - mapping type: {}\\n\".format(predicate_object_map.predicate_map.value, predicate_object_map.predicate_map.mapping_type)\n\t\t\tvalue += \"\\t\\tobject: {} - mapping type: {} - datatype: {}\\n\\n\".format(predicate_object_map.object_map.value, predicate_object_map.object_map.mapping_type, str(predicate_object_map.object_map.datatype))\n\t\t\tif predicate_object_map.object_map.mapping_type == \"parent triples map\":\n\t\t\t\tvalue += \"\\t\\t\\tjoin condition: - child: {} - parent: {} \\n\\n\\n\".format(predicate_object_map.object_map.child,predicate_object_map.object_map.parent)\n\n\t\treturn value + \"\\n\"", "def to_nquad(triple, graph_name=None):\n s = triple['subject']\n p = triple['predicate']\n o = triple['object']\n g = triple.get('name', {'value': graph_name})['value']\n\n quad = ''\n\n # subject is an IRI\n if s['type'] == 'IRI':\n quad += '<' + s['value'] + '>'\n else:\n quad += s['value']\n quad += ' '\n\n # property is an IRI\n if p['type'] == 'IRI':\n quad += '<' + p['value'] + '>'\n else:\n quad += p['value']\n quad += ' '\n\n # object is IRI, bnode, or literal\n if o['type'] == 'IRI':\n quad += '<' + o['value'] + '>'\n elif(o['type'] == 'blank node'):\n quad += o['value']\n else:\n escaped = (\n o['value']\n .replace('\\\\', '\\\\\\\\')\n .replace('\\t', '\\\\t')\n .replace('\\n', '\\\\n')\n .replace('\\r', '\\\\r')\n .replace('\\\"', '\\\\\"'))\n quad += '\"' + escaped + '\"'\n if o['datatype'] == RDF_LANGSTRING:\n if o['language']:\n quad += '@' + o['language']\n elif o['datatype'] != XSD_STRING:\n quad += '^^<' + o['datatype'] + '>'\n\n # graph\n if g is not None:\n if not g.startswith('_:'):\n quad += ' <' + g + '>'\n else:\n quad += ' ' + g\n\n quad += ' .\\n'\n return quad" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save an image as a collection of tiles. The image is split into a set of fixed-size tiles (with the exception of the rightmost and bottommost ones).
def save_tiled_image(img, root, level, tile_geom, img_type="jpeg"):
    assert(img.ndim == 2 or (img.ndim == 3 and img.shape[2] <= 3))

    n_channels = 1 if img.ndim == 2 else img.shape[2]
    dst_path = root + os.path.sep + 'level_{:d}'.format(level)

    tg = (min(tile_geom[0], img.shape[1]), min(tile_geom[1], img.shape[0]))
    nh = int(floor(img.shape[1] / tg[0])) + (1 if img.shape[1] % tg[0] != 0 else 0)
    nv = int(floor(img.shape[0] / tg[1])) + (1 if img.shape[0] % tg[1] != 0 else 0)

    tile_meta = dict({'level': level,
                      'level_image_width': img.shape[1],
                      'level_image_height': img.shape[0],
                      'level_image_nchannels': 1 if img.ndim == 2 else img.shape[2],
                      'n_tiles_horiz': nh,
                      'n_tiles_vert': nv,
                      'tile_width': tg[0],
                      'tile_height': tg[1]})

    if os.path.exists(dst_path):
        shutil.rmtree(dst_path)
    os.mkdir(dst_path)

    for i in range(nv):
        for j in range(nh):
            i0, j0 = i * tg[1], j * tg[0]
            i1, j1 = min((i + 1) * tg[1], img.shape[0]), min((j + 1) * tg[0], img.shape[1])
            if n_channels == 1:
                im_sub = img[i0:i1, j0:j1]
            else:
                im_sub = img[i0:i1, j0:j1, :]
            tile_meta['tile_' + str(i) + '_' + str(j)] = dict(
                {'name': dst_path + '/tile_' + str(i) + '_' + str(j) + '.' + img_type,
                 'i': i, 'j': j,
                 'x': j0, 'y': i0})
            imsave(dst_path + os.path.sep + 'tile_' + str(i) + '_' + str(j) + '.' + img_type, im_sub)

    with open(dst_path + os.path.sep + 'meta.json', 'w') as fp:
        json.dump(tile_meta, fp, separators=(',', ':'), indent=' ', sort_keys=True)

    return tile_meta
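A minimal usage sketch for save_tiled_image above. The names the function relies on (floor, imsave, os, shutil, json, np) are not imported in the snippet, so the imports below are assumptions; skimage.io.imsave is one plausible source of imsave.

import json
import os
import shutil
from math import floor

import numpy as np
from skimage.io import imsave  # assumed provider of imsave; imageio would work as well

os.makedirs("out_pyramid", exist_ok=True)                        # the root directory must exist
img = (np.random.rand(1000, 1500, 3) * 255).astype(np.uint8)     # fake RGB image
meta = save_tiled_image(img, "out_pyramid", level=0, tile_geom=(512, 512), img_type="jpeg")
print(meta["n_tiles_horiz"], meta["n_tiles_vert"])               # -> 3 2 for this image size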
[ "def make_tiles(self):\n num_tiles = self._puzzle_height * self._puzzle_width\n #subsurface is a ract(left, top, width, height\n \n for idx in xrange(num_tiles):\n self._tiles.append(self._tiles_sprite.subsurface(\n (idx * TILE_SIZE, 0, TILE_SIZE, TILE_SIZE)))", "def make_tiles(self, x_size, y_size, x_step, y_step, output_path, verbose=True):\n\n fig, ax = self.make_figure()\n x = self.doc.header['$EXTMIN'][0]\n y = self.doc.header['$EXTMIN'][1]\n\n # Slide until the bottom edge of the window is above the top of\n # the elements in the doc\n while y < self.doc.header['$EXTMAX'][1]:\n\n # Get window into document\n xlim = (x, x + x_size)\n ylim = (y, y + y_size)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # to check if image is empty\n # import cv2\n # im = cv2.imread('2.jpg')\n # if im is None:\n # Print(\"Image is empty\")\n\n # to get percentage of empty space in image\n # from PIL import Image\n # image = Image.open(\"pepper.png\")\n # bg = image.getpixel((0,0))\n # width, height = image.size\n # bg_count = next(n for n,c in image.getcolors(width*height) if c==bg)\n # img_count = width*height - bg_count\n # img_percent = img_count*100.0/width/height\n\n filename = \"%s_x_%s_%s_y_%s_%s.png\" % (\"tile_\", xlim[0], xlim[1], ylim[0], ylim[1])\n if verbose:\n print('Writing: %s' % filename)\n fig.savefig(os.path.join(output_path, filename), dpi=self.dpi)\n\n # Step\n x += x_step\n if x > self.doc.header['$EXTMAX'][0]:\n x = self.doc.header['$EXTMIN'][0]\n y += y_step", "def output_tiles_to_sheet(tiles, square_width, out_folder, group_name, file_index):\n out_filename = '{0}{1}{2}_{3}.png'.format(out_folder, os.sep, group_name, file_index)\n tile_png = open(out_filename, 'wb') # binary mode is important\n\n png_writer = png.Writer(square_width, square_width)\n\n # Get some information about the tiles we are injecting into the large sheet\n num_tiles = len(tiles)\n num_tile_rows = len(tiles[0])\n num_tiles_per_row = square_width / num_tile_rows\n\n # build rows\n output_rows = []\n for cur_row in range(0, square_width):\n row_out = []\n # row_debug = []\n\n for cur_tile_index in range(0, num_tiles_per_row):\n cur_tile_row = int(cur_row / num_tile_rows)\n tile_index = cur_tile_index + cur_tile_row * num_tiles_per_row\n if tile_index < num_tiles:\n tile_row_index = cur_row % num_tile_rows\n # row_debug.append((tile_index, tile_row_index))\n row_out.extend(tiles[tile_index][tile_row_index])\n else:\n # row_debug = list(itertools.repeat((99, 99), 8))\n # create a row of white\n row_out.extend(list(itertools.repeat(255, num_tile_rows * 3)))\n\n # print row_debug\n output_rows.append(row_out)\n\n png_writer.write(tile_png, output_rows)", "def stich_tiles(panoid, tiles, directory, final_directory):\n\n tile_width = 512\n tile_height = 512\n\n panorama = Image.new('RGB', (26*tile_width, 13*tile_height))\n\n for x, y, fname, url in tiles:\n\n fname = directory + \"/\" + fname\n tile = Image.open(fname)\n\n panorama.paste(im=tile, box=(x*tile_width, y*tile_height))\n\n del tile\n\n# print fname\n\n panorama.save(final_directory + (\"/%s.jpg\" % panoid))\n del panorama", "def _render_tiles(self, tiles, wslice, hslice):\n\n for row in tiles:\n for atile in row:\n basex = wslice*atile.x\n basey = hslice*atile.y\n if atile.visited is True:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=atile.bg)\n else:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=\"black\")", "def mbtiles_image_tiles(self):\n tile_count = self.mbtiles.execute('select 
count(zoom_level) from tiles;').fetchone()[0]\n\n # Progress bar\n widgets = ['- Uploading %s image tiles: ' % (tile_count), progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]\n progress = progressbar.ProgressBar(widgets = widgets, maxval = tile_count).start()\n completed = 0\n\n # Create eventlet pile\n pile = eventlet.GreenPile(self.args.concurrency)\n\n # Get tiles\n tiles = self.mbtiles.execute('select zoom_level, tile_column, tile_row, tile_data from tiles;')\n t = tiles.fetchone()\n while t:\n key = '%s/%s/%s/%s.png' % (self.tileset, t[0], t[1], t[2])\n pile.spawn(self.send_file, key, t[3])\n\n # Get next and update\n t = tiles.fetchone()\n completed = completed + 1\n progress.update(completed)\n\n # Wait for pile and stop progress bar\n list(pile)\n progress.finish()", "def generate_tiles(region, delete_used_dir = True):\n directory_structure_for_region(region)\n for png in tqdm(listdir(TILE_PICTURE_LOCATIONS + region + ORIGINAL)):\n #change to include negative numbers\n match = search(r'\\d+', png)\n year = match.group()\n mask_images(region, year + \".png\") \n make_transparent_png(region, year + \".png\")\n geotiff_create(region, year + \".png\")\n create_raster_tiles(region, year + \".tif\", year)\n if delete_used_dir:\n delete_directory_contents(region, MASKED)\n delete_directory_contents(region, TRANSPARENT_PNG)\n delete_directory_contents(region, GEOTIFF)\n delete_directory_contents(region, TRANSPARENT_PNG)\n delete_directory_contents(region, INTERTIFF)\n delete_directory_contents(region, TRANSLATED_PNG)", "def test_write_tiled_pages():\n data = random_data('uint8', (5, 219, 301, 3))\n with TempFileName('tiled_pages') as fname:\n imwrite(fname, data, tile=(96, 64))\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 5\n page = tif.pages[0]\n assert page.is_tiled\n assert not page.is_contiguous\n assert page.planarconfig == CONTIG\n assert page.photometric == RGB\n assert not page.is_sgi\n assert page.imagewidth == 301\n assert page.imagelength == 219\n assert page.tilewidth == 64\n assert page.tilelength == 96\n assert page.samplesperpixel == 3\n image = tif.asarray()\n assert_array_equal(data, image)\n assert__str__(tif)", "def stitchTiles(rows, cols, waldoTiles):\n print(\"Stitching tiles...\")\n newImage = Image.new('RGB', (64 * cols, 64 * rows))\n ims = []\n\n # Iterates through the cropped images and adds them to a list.\n for tile in os.listdir('./temp/'):\n im = Image.open(f'./temp/{tile}')\n if tile not in waldoTiles:\n im = im.convert('1')\n ims.append(im)\n\n # \"Pastes\" the cropped tiles into newImage.\n i, x, y = 0, 0, 0\n for _ in range(rows):\n for _ in range(cols):\n newImage.paste(ims[i], (x, y))\n i += 1\n x += 64\n y += 64\n x = 0\n\n newImage.save(\"./foundwaldo.jpg\")\n print(\"Done\")\n\n print(\"\\nDeleting tiles...\")\n # Removes the temp directory containing the cropped images.\n shutil.rmtree('./temp')\n print(\"Done\")", "def make_tiles(raster_file, label_file, dir_tiles):\n print(\"MAKE TILES\")\n # Get sub-folder names\n dir_imgtiles, dir_labeltiles = tiles.get_tiles_directories(dir_tiles)\n\n # Create sub-folders\n dir_imgtiles.mkdir(parents=True, exist_ok=True)\n dir_labeltiles.mkdir(parents=True, exist_ok=True)\n\n # Create image and label tiles\n tiles.create_tiles(raster_file, dir_imgtiles)\n print(f\"The image tiles are created in the folder {dir_imgtiles}.\")\n tiles.create_tiles(label_file, dir_labeltiles)\n print(f\"The label tiles are created in the folder {dir_labeltiles}.\")", "def 
make_image(tilestates: List[TileState]) -> List[str]:\n data = []\n\n width = int(len(tilestates) ** 0.5)\n tile_width = len(tilestates[0].data[0])\n\n for tile_row in range(0, width):\n tile_data = [\n t.data for t in tilestates[tile_row * width : tile_row * width + width]\n ]\n\n for i in range(1, tile_width - 1):\n row = \"\"\n for td in tile_data:\n row += td[i][1:-1]\n data.append(row)\n\n return data", "def save_spritemaps(layer, imgs, dir):\n for i in range(len(imgs)):\n try:\n os.makedirs(dir, 0o777)\n except:\n pass\n file = layer + '_' + str(i) + '.png'\n vis = imgs[i][0]\n imageio.imwrite(dir+file, vis)", "def buildTiles(self, items, attributes):\n matrix = {}\n\n def addItem(tx, ty, px, py, **itemparams):\n if '{}|{}'.format(tx, ty) not in matrix:\n matrix['{}|{}'.format(tx, ty)] = []\n matrix['{}|{}'.format(tx, ty)].append([px, py, itemparams])\n\n params = {}\n\n for zoom in self.ZOOMLEVELS:\n\n if not os.path.exists('{}/{}'.format(self.DESTPATH, zoom)): # create directory\n os.makedirs('{}/{}'.format(self.DESTPATH, zoom))\n\n for item in items:\n _last = None\n for node in item.parameters['nodes']:\n coord = deg2num(float(node['lat']), float(node['lon']), zoom)\n\n if _last is not None:\n\n if _last[0] <= coord[0]: # eval tiles in x direction\n dx = range(_last[0], coord[0] + 1)\n else:\n dx = range(_last[0], coord[0] - 1, -1)\n\n if _last[1] <= coord[1]: # eval tiles in y direction\n dy = range(_last[1], coord[1] + 1)\n else:\n dy = range(_last[1], coord[1] - 1, -1)\n\n for x in dx: # loop through tiles\n for y in dy:\n lstart = (_last[2] + (_last[0] - x) * 256, _last[3] + (_last[1] - y) * 256) # start point\n lend = (coord[2] + (coord[0] - x) * 256, coord[3] + (coord[1] - y) * 256) # end point\n\n if os.path.exists('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y)):\n img = Image.open('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y))\n else:\n img = Image.new('RGBA', (256, 256))\n draw = ImageDraw.Draw(img)\n\n draw.line([lstart, lend], fill=self.LINECOLOR, width=(zoom - 15) * 2) # draw line\n img.save('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y))\n\n _last = coord", "def _create_tiles(self):\r\n for column in range(self.columns):\r\n for row in range(self.rows):\r\n tile_name = str(column) + ',' + str(row)\r\n self.tiles[tile_name] = Tile(column=column, row=row)", "def generate_tiles(self, state):\n rows = state.map.split()[::-1] # Y-axis is positive, so start at the bottom\n height = len(rows)\n width = len(rows[0])\n self.tiles = [[None for _ in range(height)] for _ in range(width)]\n for y, row in enumerate(rows):\n for x, char in enumerate(row):\n self.tiles[x][y] = Tile(char, x, y)", "def download_tile_tms(tile, imagery, folder, zoom, supertile):\n\n image_format = get_image_format(imagery['url'])\n r = requests.get(url(tile.split('-'), imagery['url']))\n tile_img = op.join(folder, '{}{}'.format(tile, image_format))\n tile = tile.split('-')\n\n #super-tile special case\n if supertile:\n new_zoom = zoom + 1 #get zoom from ml-enabler database\n # get children\n child_tiles = children(int(tile[0]), int(tile[1]), int(tile[2]), zoom=new_zoom)\n child_tiles.sort()\n\n new_dim = 256 * (2 * (new_zoom - zoom))\n\n w_lst = []\n for i in range (2 * (new_zoom - zoom)):\n for j in range(2 * (new_zoom - zoom)):\n window = Window(i * 256, j * 256, 256, 256)\n w_lst.append(window)\n\n # request children\n with rasterio.open(tile_img, 'w', driver='jpeg', height=new_dim,\n width=new_dim, count=3, dtype=rasterio.uint8) as w:\n for num, t in enumerate(child_tiles):\n t = 
[str(t[0]), str(t[1]), str(t[2])]\n r = requests.get(url(t, imagery['url']))\n img = np.array(Image.open(io.BytesIO(r.content)), dtype=np.uint8)\n try:\n img = img.reshape((256, 256, 3)) # 4 channels returned from some endpoints, but not all\n except ValueError:\n img = img.reshape((256, 256, 4))\n img = img[:, :, :3]\n img = np.rollaxis(img, 2, 0)\n w.write(img, window=w_lst[num])\n else:\n r = requests.get(url(tile, imagery['url']))\n with open(tile_img, 'wb')as w:\n w.write(r.content)\n return tile_img", "def write_images(self):\n while self.cache:\n # pop the first and write it out\n fn, image = self.cache.pop(0)\n tifffile.imwrite(fn, image)", "def create_tiff(tile_dir, cleanup=False):\n # sort the tile images into a dict with keys being row and column location\n tile_paths = {}\n for j in os.listdir(tile_dir):\n if j.endswith('.npy'):\n m = PATTERN.search(j).groupdict()\n row_ind, col_ind = int(m['row']), int(m['col'])\n if row_ind not in tile_paths:\n tile_paths[row_ind] = {}\n\n tile_paths[row_ind][col_ind] = os.path.join(tile_dir, j)\n\n # this makes a 8-bit, mono image (initializes as 1x1x3 matrix)\n im = pyvips.Image.black(1, 1, bands=3)\n\n # build image by rows\n for r in range(len(tile_paths)):\n row_im = pyvips.Image.black(1, 1, bands=3)\n\n for c in range(len(tile_paths[0])):\n tilepath = tile_paths[r][c]\n\n tile = numpy2vips(np.load(tilepath))\n # tile = pyvips.Image.new_from_file(tilepath, access=\"sequential\")\n row_im = row_im.insert(tile, row_im.width, 0, expand=True)\n\n # insert row\n im = im.insert(row_im, 0, im.height, expand=True)\n\n # save the pyramidal tiff image\n filename, left, top = m['filename'], int(m['left']), int(m['top'])\n save_path = join(tile_dir, filename + '.tiff')\n im.tiffsave(save_path, tile=True, tile_width=240, tile_height=240, pyramid=True, compression='lzw')\n\n if cleanup:\n # remove the npy files\n for r in range(len(tile_paths)):\n for c in range(len(tile_paths[0])):\n tilepath = tile_paths[r][c]\n remove(tilepath)\n return save_path, filename, left, top", "def stitch_tiles(grid: Tuple[Tuple[Tile, ...], ...]) -> Image:\n trimmed_grid = remove_borders(grid)\n image_size = len(trimmed_grid[0][0].pixels) * len(trimmed_grid[0])\n\n image_pixels = []\n for grid_row in trimmed_grid:\n first_tile = grid_row[0]\n # Stitch together a complete row from each tile in that row\n for i, pixel_row in enumerate(first_tile.pixels):\n image_row: List[str] = []\n image_row.extend(pixel_row)\n for remaining_tile in grid_row[1:]:\n image_row.extend(remaining_tile.pixels[i])\n image_pixels.append(image_row)\n\n return Image(image_size, tuple(tuple(row) for row in image_pixels))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a tiled image. All the information about the tile geometry and tile paths is taken from img_meta.
def load_tiled_image(img_meta):
    img_w, img_h = int(img_meta['level_image_width']), int(img_meta['level_image_height'])
    nh, nv = int(img_meta['n_tiles_horiz']), int(img_meta['n_tiles_vert'])

    img = np.zeros((img_h, img_w, 3), dtype=np.uint8)

    for i in range(nv):
        for j in range(nh):
            tile_id = 'tile_' + str(i) + '_' + str(j)
            tile = imread(img_meta[tile_id]['name']).astype(np.uint8)
            # the tile might not have the regular default shape, so it's better to use the
            # tile's shape than 'tile_width' and 'tile_height'
            # 'x' is the column offset and 'y' the row offset (see save_tiled_image),
            # and tiles are NumPy arrays, so index rows/columns from tile.shape
            x, y = int(img_meta[tile_id]['x']), int(img_meta[tile_id]['y'])
            img[y:y + tile.shape[0], x:x + tile.shape[1], :] = tile

    return img
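A minimal round-trip sketch for load_tiled_image above, assuming the level directory and meta.json written by save_tiled_image earlier in this dump, and that imread comes from skimage.io (the original import is not shown).

import json

import numpy as np
from skimage.io import imread  # assumed provider of imread

with open("out_pyramid/level_0/meta.json") as fp:
    img_meta = json.load(fp)

reassembled = load_tiled_image(img_meta)
print(reassembled.shape)   # (level_image_height, level_image_width, 3)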
[ "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def load_images_pygame(tmxdata, mapping, *args, **kwargs):\n from itertools import product\n from pygame import Surface\n import pygame, os\n\n\n def handle_transformation(tile, flags):\n if flags:\n fx = flags & TRANS_FLIPX == TRANS_FLIPX\n fy = flags & TRANS_FLIPY == TRANS_FLIPY\n r = flags & TRANS_ROT == TRANS_ROT\n\n if r:\n # not sure why the flip is required...but it is.\n newtile = pygame.transform.rotate(tile, 270)\n newtile = pygame.transform.flip(newtile, 1, 0)\n\n if fx or fy:\n newtile = pygame.transform.flip(newtile, fx, fy)\n\n elif fx or fy:\n newtile = pygame.transform.flip(tile, fx, fy)\n\n # preserve any flags that may have been lost after the transformation\n return newtile.convert(tile)\n\n else:\n return tile\n\n\n pixelalpha = kwargs.get(\"pixelalpha\", False)\n force_colorkey = kwargs.get(\"force_colorkey\", False)\n force_bitdepth = kwargs.get(\"depth\", False)\n\n if force_colorkey:\n try:\n force_colorkey = pygame.Color(*force_colorkey)\n except:\n msg = \"Cannot understand color: {0}\"\n raise Exception, msg.format(force_colorkey)\n\n tmxdata.images = [0] * tmxdata.maxgid\n\n for firstgid, t in sorted((t.firstgid, t) for t in tmxdata.tilesets):\n path = os.path.join(os.path.dirname(tmxdata.filename), t.source)\n\n image = pygame.image.load(path)\n\n w, h = image.get_size()\n tile_size = (t.tilewidth, t.tileheight)\n real_gid = t.firstgid - 1\n\n colorkey = None\n if t.trans:\n colorkey = pygame.Color(\"#{0}\".format(t.trans))\n\n # i dont agree with margins and spacing, but i'll support it anyway\n # such is life. okay.jpg\n tilewidth = t.tilewidth + t.spacing\n tileheight = t.tileheight + t.spacing\n\n # some tileset images may be slightly larger than the tile area\n # ie: may include a banner, copyright, ect. 
this compensates for that\n width = ((int((w-t.margin*2) + t.spacing) / tilewidth) * tilewidth) - t.spacing\n height = ((int((h-t.margin*2) + t.spacing) / tileheight) * tileheight) - t.spacing\n\n # using product avoids the overhead of nested loops\n p = product(xrange(t.margin, height+t.margin, tileheight),\n xrange(t.margin, width+t.margin, tilewidth))\n\n for (y, x) in p:\n real_gid += 1\n gids = tmxdata.mapGID(real_gid)\n if gids == []: continue\n\n original = image.subsurface(((x,y), tile_size))\n\n for gid, flags in gids:\n tile = handle_transformation(original, flags)\n tile = pygame_convert(tile, colorkey, force_colorkey, pixelalpha)\n tmxdata.images[gid] = tile", "def load_tile(self, tile: str):\n if self.tile is None or self.tile != tile:\n self.tile = tile\n self.tile_x = np.load(get_tile_x_path(self.tiles_dir, tile))\n self.tile_y = np.load(get_tile_y_path(self.tiles_dir, tile))", "def save_tiled_image(img, root, level, tile_geom, img_type=\"jpeg\"):\n assert(img.ndim == 2 or (img.ndim == 3 and img.shape[2] <= 3))\n\n n_channels = 1 if img.ndim == 2 else img.shape[2]\n dst_path = root + os.path.sep + 'level_{:d}'.format(level)\n\n tg = (min(tile_geom[0], img.shape[1]), min(tile_geom[1], img.shape[0]))\n nh = int(floor(img.shape[1] / tg[0])) + (1 if img.shape[1] % tg[0] != 0 else 0)\n nv = int(floor(img.shape[0] / tg[1])) + (1 if img.shape[0] % tg[1] != 0 else 0)\n\n tile_meta = dict({'level': level,\n 'level_image_width': img.shape[1],\n 'level_image_height': img.shape[0],\n 'level_image_nchannels': 1 if img.ndim == 2 else img.shape[2],\n 'n_tiles_horiz': nh,\n 'n_tiles_vert': nv,\n 'tile_width': tg[0],\n 'tile_height': tg[1]})\n\n if os.path.exists(dst_path):\n shutil.rmtree(dst_path)\n os.mkdir(dst_path)\n\n for i in range(nv):\n for j in range(nh):\n i0, j0 = i * tg[1], j * tg[0]\n i1, j1 = min((i + 1) * tg[1], img.shape[0]), min((j + 1) * tg[0], img.shape[1])\n if n_channels == 1:\n im_sub = img[i0:i1, j0:j1]\n else:\n im_sub = img[i0:i1, j0:j1, :]\n tile_meta['tile_' + str(i) + '_' + str(j)] = dict(\n {'name': dst_path + '/tile_' + str(i) + '_' + str(j) + '.' + img_type,\n 'i': i, 'j': j,\n 'x': j0, 'y': i0})\n imsave(dst_path + os.path.sep + 'tile_' + str(i) + '_' + str(j) + '.' 
+ img_type, im_sub)\n\n with open(dst_path + os.path.sep + 'meta.json', 'w') as fp:\n json.dump(tile_meta, fp, separators=(',', ':'), indent=' ', sort_keys=True)\n\n return tile_meta", "def tile(self):\n return self.my_source.load_tile(self)", "def load_tile_table(self, filename, width, height):\n\n image = pygame.image.load(filename).convert()\n image_width, image_height = image.get_size()\n tile_table = []\n for tile_y in range(0, int(image_height/height)):\n line = []\n for tile_x in range(0, int(image_width/width)):\n rect = (tile_x*width, tile_y*height, width, height)\n tile_table.append(image.subsurface(rect))\n return tile_table", "def loadTiles(self):\n self.tile = pygame.image.load(\"./hextile.png\").convert()\n self.tile.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n\n self.cursor = pygame.image.load(\"./hexcursor.png\").convert()\n self.cursor.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n self.cursorPos = self.cursor.get_rect()", "def _get_tile(self):\r\n\r\n tile_url = \"https://mts1.google.com/vt/\"\r\n # tile_url = \"http://mt1.google.com/vt/\"\r\n params = {\r\n 'lyrs': 'y',\r\n 'x': self.x,\r\n 'y': self.y,\r\n 'z': self.zoom,\r\n 'src': 'app'}\r\n self.img = get_pic(requests.get(tile_url, params=params))\r\n return self.img", "def load_image(self, image):\n img_path = image['path']\n # Load the image\n pimg = Image.open(img_path).convert(\"RGB\")\n img = pimg\n # Transforms the image\n if self.img_transforms:\n img = self.img_transforms(img)\n # Should be a Tensor after this\n timg = img\n return timg", "def load_map(self, map_path):\n\n # Read file\n map_str = []\n with open(map_path, 'r') as file:\n for row in file.readlines():\n map_str.append(row.strip().split(\" \"))\n\n # Create Tiles with map position as key\n for i, row in enumerate(map_str):\n for j, tile_str in enumerate(row):\n pos = (i, j)\n tile = make_tile(tile_str, pos)\n tile.exploded_signal.connect(self.change_tile)\n self.tiles[pos] = tile\n self.everything[tile.id] = tile", "def _load_heat_map(self):\n heat_map_path = \"media/heat_map_{}.png\".format(self.top1_label)\n heat_map = cv2.imread(heat_map_path)\n self.heat_map = cv2.resize(heat_map, (NET_WEIGHT, NET_HEIGHT))\n debug(\"Heat map loaded.\")", "def download_tile_tms(tile, imagery, folder, zoom, supertile):\n\n image_format = get_image_format(imagery['url'])\n r = requests.get(url(tile.split('-'), imagery['url']))\n tile_img = op.join(folder, '{}{}'.format(tile, image_format))\n tile = tile.split('-')\n\n #super-tile special case\n if supertile:\n new_zoom = zoom + 1 #get zoom from ml-enabler database\n # get children\n child_tiles = children(int(tile[0]), int(tile[1]), int(tile[2]), zoom=new_zoom)\n child_tiles.sort()\n\n new_dim = 256 * (2 * (new_zoom - zoom))\n\n w_lst = []\n for i in range (2 * (new_zoom - zoom)):\n for j in range(2 * (new_zoom - zoom)):\n window = Window(i * 256, j * 256, 256, 256)\n w_lst.append(window)\n\n # request children\n with rasterio.open(tile_img, 'w', driver='jpeg', height=new_dim,\n width=new_dim, count=3, dtype=rasterio.uint8) as w:\n for num, t in enumerate(child_tiles):\n t = [str(t[0]), str(t[1]), str(t[2])]\n r = requests.get(url(t, imagery['url']))\n img = np.array(Image.open(io.BytesIO(r.content)), dtype=np.uint8)\n try:\n img = img.reshape((256, 256, 3)) # 4 channels returned from some endpoints, but not all\n except ValueError:\n img = img.reshape((256, 256, 4))\n img = img[:, :, :3]\n img = np.rollaxis(img, 2, 0)\n w.write(img, window=w_lst[num])\n else:\n r = requests.get(url(tile, imagery['url']))\n with 
open(tile_img, 'wb')as w:\n w.write(r.content)\n return tile_img", "def InitImage(self):\r\n if Projectile.IMAGE is None:\r\n Projectile.IMAGE = character.LoadImage(self.IMAGE_FILE, scaled=True, \r\n colorkey=game_constants.SPRITE_COLORKEY)\r\n self.image = self.IMAGE", "def load_map(self, node=0):\n # Set current node to new loaded map.\n self.current_node = node\n self.map._map_matrix = self.map.load_mapfile(node)\n \n for y in range(len(self.map._map_matrix)):\n for x in range(len(self.map._map_matrix[y])):\n location = (x*TILE_DIMENSION+10, y*TILE_DIMENSION+10)\n tile_key = self.map._map_matrix[y][x]\n tile_area = self.map.TILES[tile_key].area\n screen.blit(self.map.parent_image,location,tile_area)\n \n pygame.display.flip()\n return", "def load_image_grid(cls, filename, rows, columns, **kwargs):\n image = cls.load_image(filename)\n return pyglet.image.ImageGrid(image, rows, columns, **kwargs)", "def generateImage(self, **kwargs):\n\n start_x = kwargs.get('start_x', None)\n start_y = kwargs.get('start_y', None)\n tile_width = kwargs.get('tile_width', 5)\n tile_height = kwargs.get('tile_height', 5)\n\n # Check that we have x and y tile coordinates\n if start_x == None or start_y == None :\n start_x, start_y = self.getXY()\n\n # Determine the size of the image\n width, height = 256 * tile_width, 256 * tile_height\n\n #Create a new image of the size require\n map_img = Image.new('RGB', (width,height))\n sat_img = Image.new('RGB', (width,height))\n\n for x in range(0, tile_width):\n for y in range(0, tile_height) :\n if True:\n if args.label:\n # Store the image with labels\n url = 'https://mt0.google.com/vt/lyrs=y&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt/lyrs=s&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n sat_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n\n if True:\n if args.label:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom) # work needs to be done\n if args.debug: print(url)\n\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n map_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n return map_img, sat_img", "def render_image(self, name, pos):\n\n if \":\" in name:\n # If tileset not loaded, load each image of the tileset into the cache\n if name not in self.image_cache:\n base_name = name[:name.index(\":\")]\n tileset = pygame.image.load(\"res/gfx/\" + base_name + \".png\")\n tileset_rect = tileset.get_rect()\n tileset_width = int(tileset_rect.w / 64)\n tileset_height = int(tileset_rect.h / 64)\n for x in range(0, tileset_width):\n for y in range(0, tileset_height):\n index = x + (y * tileset_width)\n if index in self.level.map.alphas:\n self.image_cache[base_name + \":\" + str(index)] = tileset.subsurface(pygame.Rect(x * 64, y * 64, 64, 64))\n else:\n self.image_cache[base_name + \":\" + str(index)] = tileset.subsurface(pygame.Rect(x * 64, y * 64, 64, 64)).convert()\n\n # If the image object for the passed string isn't in the cache, add it to the cache\n if name not in self.image_cache:\n self.image_cache[name] = 
pygame.image.load(\"res/gfx/\" + name + \".png\")\n\n # Reset the timeout for these variables since we've just used them\n if self.enable_cache_timeout:\n self.image_timeout[name] = 0\n\n draw_x = 0\n draw_y = 0\n\n if pos[0] == \"CENTERED\":\n draw_x = (self.SCREEN_WIDTH / 2) - (self.image_cache[name].get_rect().w / 2)\n else:\n draw_x = pos[0]\n if pos[1] == \"CENTERED\":\n draw_y = (self.SCREEN_HEIGHT / 2) - (self.image_cache[name].get_rect().h / 2)\n else:\n draw_y = pos[1]\n\n self.screen.blit(self.image_cache[name], (draw_x, draw_y))", "def load_image(self, image, get_meta=False):\n reader = LoadImageCzi()\n image = reader.load_image(image, get_meta_data=True)\n log.info(\n \"Loaded file using aicsimage. File path: {}.\".format(\n image.get_meta(\"aics_filePath\")\n )\n )\n return image", "def test_read_incomplete_tile_separate():\n fname = public_file('GDAL/separate_tiled.tif')\n with TiffFile(fname) as tif:\n assert tif.byteorder == '>'\n assert len(tif.pages) == 1\n assert len(tif.series) == 1\n # assert page properties\n page = tif.pages[0]\n assert page.photometric == RGB\n assert page.planarconfig == SEPARATE\n assert page.compression == PACKBITS\n assert page.imagewidth == 35\n assert page.imagelength == 37\n assert page.bitspersample == 8\n assert page.samplesperpixel == 3\n # assert series properties\n series = tif.series[0]\n assert series.shape == (3, 37, 35)\n assert series.dtype.name == 'uint8'\n assert series.axes == 'SYX'\n # assert data\n data = page.asarray()\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (3, 37, 35)\n assert data.dtype.name == 'uint8'\n assert tuple(data[:, 19, 31]) == (50, 50, 50)\n assert tuple(data[:, 36, 34]) == (70, 70, 70)\n\n assert_decode_function(page)\n assert__str__(tif)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the sound for the given animal, and prints it with animation.
async def speak(animal, session):
    response = await session.get(
        'https://ericappelt.com/animals/{0}'.format(animal)
    )
    sound = await response.text()
    radprint('The {0} says "{1}".'.format(animal, sound))
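A minimal driver sketch for the coroutine above. The session.get(...) / await response.text() calls match aiohttp's ClientSession API, so aiohttp is assumed here; radprint is presumably a project-local animated-print helper, replaced below by a plain stand-in.

import asyncio

import aiohttp  # assumed HTTP client; its API matches the calls in speak()

def radprint(text):
    # hypothetical stand-in for the original animated printer
    print(text)

async def main():
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*(speak(animal, session) for animal in ("cow", "duck", "pig")))

asyncio.run(main())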
[ "def make_a_sound():\n print('quack')", "def random_animal_sound():\n\n sounds = ['moo', 'quack', 'bark', 'roar', 'meow']\n return random.choice(sounds)", "def goes(self):\n\n animal_name = self.__class__.__name__.lower()\n print('The %s goes \"%s!\"' % (animal_name, self.sound))", "def playback_word(self):\n playsound(self.sound_file)", "def get_audio(self, word: str) -> str:\n try:\n link = self.__get_audio_link(word)\n\n r = requests.get(link)\n\n if not r.ok:\n return ''\n except Exception:\n return ''\n\n file_path = os.path.join(self.path, f'{word}.ogg')\n with open(file_path, 'wb') as f:\n f.write(r.content)\n\n if self.normalize:\n effects.normalize(AudioSegment.from_ogg(file_path)).export(file_path)\n\n return f'[sound:{word}.ogg]'", "def Sound(sound_file_name):\n return pygame.mixer.Sound(get_file(sound_file_name))", "def play_sound():\n os.system(\"play -nq -t alsa synth {} sine {}\".format(0.1, 440))", "def getSound(sample):\n if not isinstance(sample,Sample):\n repTypeError(\"getSound(sample): Input is not a Sample\")\n return sample.getSound()", "def BACKGROUND_MUSIC(self): \n musicSound = Sound(source = 'ninja.wav')\n musicSound.play()", "def hit_sound(self):\n self.alien_explosion_sound.play()", "def emit_sound(self, sound):\n sound_manager.emit_sound(sound, self.index)", "def play_sound() -> None:\n # Please note that I do not like to put import statements here because\n # it is categorized as a code smell. However, I need this to get rid of\n # the message in the beginning that is forced upon every developer who\n # needs Pygame. On a side note, I am looking to replace Pygame with\n # PySide2 in the future.\n from os import environ\n environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"True\"\n\n import pygame.mixer\n pygame.mixer.init()\n pygame.mixer.music.load(\"../../media/beep.wav\")\n pygame.mixer.music.play()", "def play(sound):\n if not isinstance(sound,Sound):\n #print \"play(sound): Input is not a sound\"\n #raise ValueError\n repTypeError(\"play(sound): Input is not a sound\")\n sound.play()", "def get_sound(name:str):\r\n if not isinstance(name, str):\r\n raise TypeError\r\n if not os.path.exists('DATA/'+name):\r\n raise ValueError\r\n\r\n fullname = os.path.join(\"DATA\",name)\r\n sound = pygame.mixer.Sound(fullname)\r\n return sound", "def animal_print_stats(animal=None):\n if debug: print 'animal_print_stats: animal=', animal\n result = ''\n if animal:\n (emoji, item) = animal_match(animal)\n if debug: print 'animal_print_stats: emoji=', emoji\n if debug: print 'animal_print_stats: item=', item\n if emoji:\n (saved, killed) = (item[1], item[2])\n animal = animal_name(emoji).upper()\n result = \"\\n:%s: %d *%s* saved and %d killed\\n\" % (\n emoji, saved, animal, killed)\n if debug: print 'animal_print_stats: item[3]=', item[3]\n # index 3 is the method of kill\n for method in sorted(item[3]):\n result += '%s=%d, ' % (method, item[3][method])\n result += '\\n'\n else:\n for emoji in sorted(animals):\n sound = animals[emoji][0]\n saved = animals[emoji][1]\n killed = animals[emoji][2]\n animal = animal_name(emoji).upper()\n result += \"\\n:%s: %d *%s* saved and %d killed\" % (\n emoji, saved, animal, killed)\n return result", "def beep(speaker_id):\n # TODO: this.", "def tell_joke(self):\n tts = gTTS(text=random.choice(self.jokes), lang='en')\n tts.save('jokes/joke.mp3')\n playsound('jokes/joke.mp3')", "def play_effect(self, effect: str) -> None:\n self._sound_effects[effect].play()", "def TestSound():\n SoundsPath = os.path.join(AudioFilesPath, MySet.Sound + 
\".mp3\")\n Parent.PlaySound(SoundsPath, MySet.Volume*0.01)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Context manager to patch joblib to report into the tqdm progress bar given as argument
def tqdm_joblib(tqdm_object):
    def tqdm_print_progress(self):
        if self.n_completed_tasks > tqdm_object.n:
            n_completed = self.n_completed_tasks - tqdm_object.n
            tqdm_object.update(n=n_completed)

    original_print_progress = joblib.parallel.Parallel.print_progress
    joblib.parallel.Parallel.print_progress = tqdm_print_progress

    try:
        yield tqdm_object
    finally:
        joblib.parallel.Parallel.print_progress = original_print_progress
        tqdm_object.close()
[ "def tqdm(self, iterable, **kwargs):\n if self.verbose:\n if \"file\" not in kwargs:\n kwargs[\"file\"] = sys.stdout\n return tqdm(iterable, **kwargs)\n return iterable", "def test_set_progress(self):\n pass", "def get_progress_bar():\n if isnotebook():\n from tqdm import tqdm_notebook as progressbar\n else:\n from tqdm import tqdm as progressbar\n\n return progressbar", "def tqdm_notebook(*args, **kwargs): # pragma: no cover\n from warnings import warn\n\n from .notebook import tqdm as _tqdm_notebook\n warn(\"This function will be removed in tqdm==5.0.0\\n\"\n \"Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\",\n TqdmDeprecationWarning, stacklevel=2)\n return _tqdm_notebook(*args, **kwargs)", "def get_tqdm(**kwargs):\n return tqdm(**get_tqdm_kwargs(**kwargs))", "def progress_monitor():\n return ProgressMonitorStep()", "def progress_monitor():\n return ProgressMonitorStep()", "def make_tqdm_progress_bar_fn(description='', leave=True):\n def tqdm_progress_bar_fn(num_steps):\n try:\n import tqdm # pylint: disable=g-import-not-at-top\n except ImportError:\n raise ImportError('Please install tqdm via pip install tqdm')\n return iter(tqdm.tqdm(range(num_steps), desc=description, leave=leave))\n return tqdm_progress_bar_fn", "def on_progress(self, *args) -> None:", "def runParallelTqdm(func, arglist, workers=1):\n if not isinstance(arglist, list):\n arglist = [arglist]\n workers = min(max(workers, 1), os.cpu_count())\n\n slotManager = Manager()\n opened = slotManager.list(range(workers - 1, -1, -1))\n filled = slotManager.dict()\n\n pb = tqdm(total=len(arglist), desc=\"Overall\", leave=True,\n position=workers, ascii=(os.name == \"nt\"),\n unit=\"task\", mininterval=0.2)\n\n executor = ProcessPoolExecutor(max_workers=workers)\n tasks = [executor.submit(_worker, func, args, opened, filled)\n for args in arglist]\n\n for _ in as_completed(tasks):\n # Adjust Overall progress bar position\n if len(executor._pending_work_items) < workers:\n pb.clear()\n pb.pos = (-max(filled.values()) - 1) if filled else 0\n pb.refresh()\n pb.update(1)\n\n executor.shutdown(wait=True)\n pb.close()\n return [task.result() for task in tasks]", "def update_progress(\n self, progress: float, msg: str = \"Progress\", **tqdm_args\n ) -> None:\n # show the alert\n self.show()\n\n # cast the progress to float and perform sanity checks\n progress = float(progress)\n if self.progress_output not in self.children:\n total = tqdm_args.get(\"total\", 1)\n else:\n total = self.progress_bar.total\n if not (0 <= progress <= total):\n raise ValueError(f\"progress should be in [0, {total}], {progress} given\")\n\n # Prevent adding multiple times\n if self.progress_output not in self.children:\n\n self.children = [self.progress_output]\n\n tqdm_args.setdefault(\"bar_format\", \"{l_bar}{bar}{n_fmt}/{total_fmt}\")\n tqdm_args.setdefault(\"dynamic_ncols\", False)\n tqdm_args.setdefault(\"total\", 1)\n tqdm_args.setdefault(\"desc\", msg)\n tqdm_args.setdefault(\"colour\", getattr(color, self.type))\n\n with self.progress_output:\n self.progress_output.clear_output()\n self.progress_bar = tqdm(**tqdm_args)\n self.progress_bar.container.children[0].add_class(f\"{self.type}--text\")\n self.progress_bar.container.children[2].add_class(f\"{self.type}--text\")\n\n # Initialize bar\n self.progress_bar.update(0)\n\n self.progress_bar.update(progress - self.progress_bar.n)\n\n if progress == total:\n self.progress_bar.close()\n\n return", "def test_increment_progress(self):\n pass", "def rli_progressbar():\n wrap_erase()\n 
center_text(2, \"Wait...\")\n wrap_refresh()\n return api.progress_bar(1.0, 100.0, rli_progress)", "def finish_progress_bar():\n global _progress_obj\n\n # print_nl = True\n if _progress_obj != None:\n # if isinstance(_progress_obj, Counter):\n # print_nl = False\n _progress_obj.finish()\n # if print_nl:\n print_new_line()\n _progress_obj = None\n return None", "def progress_bar(iterations, width=None, scale=1, units='', title=None):\n\n # Instance progress bar\n\n if has_widgets and is_inside_notebook():\n\n try:\n\n if width is None:\n\n bar_width = 50\n\n else:\n\n bar_width = int(width)\n\n # Default is the HTML bar, which only works within a notebook\n\n this_progress_bar = ProgressBarHTML(iterations, bar_width, scale=scale, units=units, title=title)\n\n except:\n\n # Fall back to Ascii progress bar\n\n if width is None:\n\n bar_width = 30\n\n else:\n\n bar_width = int(width)\n\n # Running in a terminal. Fall back to the ascii bar\n\n this_progress_bar = ProgressBarAscii(iterations, bar_width, scale=scale, units=units, title=title)\n\n else:\n\n if width is None:\n\n bar_width = 30\n\n else:\n\n bar_width = int(width)\n\n # No widgets available, fall back to ascii bar\n\n this_progress_bar = ProgressBarAscii(iterations, bar_width, scale=scale, units=units, title=title)\n\n yield this_progress_bar # type: ProgressBarBase\n\n this_progress_bar.finish()", "def test_progressbar_formatter():\n mock_stage_info = pyspark.status.SparkStageInfo(\n stageId=4, currentAttemptId=1, name=\"test\", numTasks=100, numActiveTasks=10,\n numCompletedTasks=20, numFailedTasks=5)\n duration = datetime.timedelta(days=1, hours=1, minutes=1, seconds=1)\n\n a = sparkprog._format_stage_info(bar_width=10, stage_info=mock_stage_info, duration=duration)\n\n assert a == '[Stage 4:==> (20 + 10 / 100 Dur: 1d01h01m:01s]'", "def get_tqdm():\n ipy_str = \"\"\n try:\n from IPython import get_ipython\n ipy_str = str(type(get_ipython()))\n except ImportError:\n pass\n\n if 'zmqshell' in ipy_str:\n from tqdm import tqdm_notebook as tqdm\n return tqdm\n if 'terminal' in ipy_str:\n from tqdm import tqdm\n return tqdm\n\n if sys.stderr is not None and sys.stderr.isatty():\n from tqdm import tqdm\n return tqdm\n\n from tqdm import tqdm\n def hidden_tqdm(*args, **kwargs):\n if \"disable\" in kwargs:\n return tqdm(*args, **kwargs)\n kwargs[\"disable\"] = True\n return tqdm(*args, **kwargs)\n\n return hidden_tqdm", "def tqdm(iterable, **kwargs):\n for k, v in get_tqdm_defaults().items():\n kwargs.setdefault(k, v)\n\n if type(iterable) is int:\n iterable, total = range(iterable), iterable\n else:\n try:\n total = len(iterable)\n except TypeError:\n total = None\n\n if 'total' not in kwargs and total is not None:\n kwargs['total'] = total\n\n return _tqdm(iterable, **kwargs)", "def get_progress_reporter(send_back):\n def progress_reporter_callback(step, downloaded, content_len):\n send_back(\n Progress(\n step=step,\n downloaded=downloaded,\n content_length=content_len\n )\n )\n\n return progress_reporter_callback", "def test_progress_bar_print_disabled(tqdm_write, mock_print, tmpdir):\n model = PrintModel()\n bar = ProgressBar()\n trainer = Trainer(\n default_root_dir=tmpdir,\n num_sanity_val_steps=0,\n limit_train_batches=1,\n limit_val_batches=1,\n limit_test_batches=1,\n limit_predict_batches=1,\n max_steps=1,\n callbacks=[bar],\n )\n bar.disable()\n trainer.fit(model)\n trainer.test(model, verbose=False)\n trainer.predict(model)\n\n mock_print.assert_has_calls(\n [call(\"training_step\", end=\"\"), call(\"validation_step\", 
file=ANY), call(\"test_step\"), call(\"predict_step\")]\n )\n tqdm_write.assert_not_called()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns portal-wide groups.
def getPortalGroups(self):
    for principal in principalRegistry.getPrincipals(''):
        if IGroupAwarePrincipal.providedBy(principal):
            continue
        yield principal.id, principal.title
[ "def Groups(self) -> GroupCollection:", "def get_public_groups():\n public_groups = MrMapGroup.objects.filter(\n is_public_group=True\n )\n return public_groups", "def category_groups(self):\n\n return self._GET('category_groups')", "def get_all_template_groups(self) -> dict:\n return self._get(\"/template/templateGroups\")", "def groups(self):\n\n return list(self.grpimg.keys())", "def query_all_groups():\n grp = MetalGroup.query.order_by(MetalGroup.level).all()\n return grp", "def test_get_groups_list(self):\n pass", "def get_groups(self):\n return sorted([k for k, v in self.TOKENIZED.groupindex.items()])", "def _get_groups(self):\n if self._groups is None:\n self._groups = PlatoUserGroups(self)\n\n return self._groups", "def get_groups():\n \n # Retrieve the admin object\n admin = get_user(get_jwt_identity())\n groups_data = admin.groups\n\n return jsonify(groups_schema.dump(groups_data))", "def get_groups():\n try:\n cat_groups = list(\n mongo.db.category_groups.find().sort(\"group_name\", 1)\n )\n except Exception as e:\n flash(\n \"Something went wrong when accessing the database to get\"\n + \"category groups\" + e\n )\n return []\n else:\n category_groups = []\n colours = get_colours()\n length = len(colours)\n\n index = 0\n for group in cat_groups:\n category_groups.append(\n {\n \"group_name\": group['group_name'],\n \"colour\": colours[index % length]\n }\n )\n index += 1\n\n return category_groups", "def test_get_all_ldap_groups(self):\n pass", "def __getSiteGroup( self, stagingSites ):\n tier1 = ''\n groupName = ''\n for site in stagingSites:\n result = getSiteTier( site )\n if not result['OK']:\n self.log.error( result['Message'] )\n continue\n tier = result['Value']\n if tier in [0, 1]:\n tier1 = site\n if tier == 0:\n break\n\n if tier1:\n grid, sname, ccode = tier1.split( '.' 
)\n groupName = '.'.join( ['Group', sname, ccode] )\n\n return S_OK( groupName )", "def _get_resource_groups(self):\n print('Getting resource groups...')\n\n return self._run_az(['group', 'list'])", "def _create_groups(self, groups):\n\n acls = self.mumblectl.getACL(self.settings['mumble_server_id'], 0)\n glist = []\n for mgroup in acls[1]:\n glist.append(mgroup.name)\n\n newgroups = False\n for agroup in groups:\n if not str(agroup.name.replace(' ', '').lower()) in glist:\n group = self.mur.Group()\n group.name = str(agroup.name.replace(' ', '').lower())\n group.members = []\n group.add = []\n group.remove = []\n group.inheritable = True\n group.inherit = True\n group.inherited = False\n acls[1].append(group)\n newgroups = True \n\n if newgroups:\n self.mumblectl.setACL(self.settings['mumble_server_id'], 0, acls[0], acls[1], acls[2])\n\n return acls", "def groups( self , pattern = None ):\n return EclSum.cNamespace().create_group_list( self , pattern )", "def get_groups(self, skip_revoked_deprecated=True, stix_format=True):\n all_groups = self.COMPOSITE_DS.query(Filter(\"type\", \"=\", \"intrusion-set\"))\n \n if skip_revoked_deprecated:\n all_groups = self.remove_revoked_deprecated(all_groups)\n \n if not stix_format:\n all_groups = self.translate_stix_objects(all_groups)\n return all_groups", "def get_all(self):\n hostgroups = [g for g\n in self.request.mongo_connection.\n shinken.hostgroups.find(\n {\"register\": {\"$ne\": \"0\"}},\n {'_id': 0}\n )]\n hostgroups = [hostgroup.HostGroup(**g) for g in hostgroups]\n return hostgroups", "def get_peer_groups(client: Client, *_):\n groups = client.get_peergroups_request()\n contents = []\n for group in groups:\n contents.append({\n 'Name': group\n })\n\n context = {\n 'Exabeam.PeerGroup(val.Name && val.Name === obj.Name)': contents\n }\n\n human_readable = tableToMarkdown('Exabeam Peer Groups', contents)\n return human_readable, context, groups" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a User Credential for an S3 Storage Gateway
def s3(
    login_manager: LoginManager,
    *,
    endpoint_id: uuid.UUID,
    storage_gateway: uuid.UUID,
    globus_identity: str,
    local_username: str,
    s3_key_id: str,
    s3_secret_key: str,
    display_name: str | None,
) -> None:
    gcs_client = login_manager.get_gcs_client(endpoint_id=endpoint_id)
    auth_client = login_manager.get_auth_client()

    # TODO: replace with SDK class once available
    policies = dict(
        DATA_TYPE="s3_user_credential_policies#1.0.0",
        s3_key_id=s3_key_id,
        s3_secret_key=s3_secret_key,
    )

    data = UserCredentialDocument(
        storage_gateway_id=storage_gateway,
        identity_id=auth_client.maybe_lookup_identity_id(globus_identity),
        username=local_username,
        policies=policies,
        display_name=display_name,
    )

    res = gcs_client.create_user_credential(data)
    display(res, simple_text=res.full_data.get("message"))
[ "def _get_s3_creds(client):\n access_key = client.config.plugin_get_value('access-key')\n secret_key = client.config.plugin_get_value('secret-key')\n\n if access_key is None:\n # this means there are no stored s3 creds for this user - set them up\n\n # before we do anything, can they do object storage?\n status, resp = client.call_operation('account', 'view')\n\n if status != 200:\n # something went wrong - give up\n print('Key generation failed!')\n sys.exit(4)\n\n if 'Object Storage' not in resp['capabilities']:\n # this account isn't in the EAP :( help them out\n print('You are not yet enrolled in the Object Storage Early Adopters Program.')\n result = input_helper('Would you like to request enrollment now? [Y/n]')\n\n if result in ('','y','Y'):\n status, resp = client.call_operation('tickets', 'create', [\n '--summary', 'Looking to join Object Storage Early Adopters Program',\n '--description', 'Please grant me access to the Object Storage Early '\n 'Adopters Program. This ticket generated by the Linode CLI.'\n ])\n \n if status != 200:\n print('Ticket submission failed! Please open a ticket requesting '\n 'access with `linode-cli tickets create`')\n sys.exit(5)\n\n print('Ticket \"Looking to join Object Storage Early Adopters Program\" opened!')\n print(\"Please keep an eye on that ticket for updates, and try again once you're enrolled.\")\n exit(0)\n\n # label caps at 50 characters - trim some stuff maybe\n # static characters in label account for 13 total\n # timestamp is 10 more\n # allow 13 characters both for username and hostname\n timestamp_part = str(time.time()).split('.')[0]\n truncated_user = getpass.getuser()[:13]\n truncated_hostname = socket.gethostname()[:13]\n\n creds_label = 'linode-cli-{}@{}-{}'.format(\n truncated_user,\n truncated_hostname,\n timestamp_part)\n\n if len(creds_label) > 50:\n # if this is somehow still too long, trim from the front\n creds_label = creds_label[50-len(creds_label):]\n\n status, resp = client.call_operation('object-storage', 'keys-create',\n ['--label', \"{}\".format(creds_label)])\n\n if status != 200:\n # something went wrong - give up\n print('Key generation failed!')\n sys.exit(3)\n\n access_key = resp['access_key']\n secret_key = resp['secret_key']\n\n client.config.plugin_set_value('access-key', access_key)\n client.config.plugin_set_value('secret-key', secret_key)\n client.config.write_config(silent=True)\n\n return access_key, secret_key", "def _get_credentials(self):\n print \" -- Getting credentials for Amazon S3 staging bucket\"\n\n uri = URITemplate(self.baseuri + \"/{username}/credentials\").expand(\n username=self.username)\n resp = self.session.get(uri)\n self.handle_http_error(\n resp,\n custom_messages={\n 401: \"Token is not authorized\",\n 404: \"Token does not have upload scope\"})\n return resp", "def _create_credentials(self, user, **options):\n\n raise CoreNotImplementedError()", "def setup_s3():\n s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n logging.info('Successfully initialized S3 client')\n return s3", "def test_upload__credentials(self):\n credentials = {\n \"aws_access_key_id\": \"foo\",\n \"aws_secret_access_key\": \"bar\",\n \"aws_session_token\": \"baz\",\n }\n self._upload_test(credentials=credentials, show_progress=False)", "def save(self, s3_bucket, iam_group, user_store):\n if self.exists():\n raise Exception(\"AWS user already exists\")\n # create iam user\n create_user(self.name)\n # create keypair\n self.access_key_id, self.secret_access_key = create_access_key(self.name)\n 
# add user to data requests IAM group\n add_user_to_group(self.name, iam_group)\n # create user folder (labeled by username) in s3 bucket\n create_s3_folder(s3_bucket, self.name)\n # updates spreadsheet/db with user info and credentials\n user_store.add_user(self)", "def test_create_bucket(self):\n username = self.new_user.username\n bucket = s3buckets.create_bucket(username)\n\n self.assertTrue(isinstance(bucket, Bucket))", "def _setup_s3(settings, path, prompt=True):\r\n ret = {'default_s3_buckets': {}, 's3_credentials': settings.get('s3_credentials', {})}\r\n\r\n if prompt:\r\n use = raw_input(\"\\nWould you like to set up Amazon S3? [Y/n] \")\r\n if use.lower() != \"y\" and use != \"\":\r\n puts(\"\\n- Not configuring Amazon S3.\")\r\n return ret\r\n\r\n existing_access_key = settings.get('default_s3_access_key_id', None) or \\\r\n os.environ.get('AWS_ACCESS_KEY_ID', None)\r\n existing_secret_key = settings.get('default_s3_secret_access_key', None) or \\\r\n os.environ.get('AWS_SECRET_ACCESS_KEY', None)\r\n\r\n #import ipdb; ipdb.set_trace();\r\n\r\n access_key_prompt = \"\\nPlease enter your default Amazon Access Key ID:\"\r\n if existing_access_key:\r\n access_key_prompt += ' [%s] ' % existing_access_key\r\n else:\r\n access_key_prompt += ' (leave blank to skip) '\r\n default_aws_access_key_id = raw_input(access_key_prompt)\r\n\r\n if default_aws_access_key_id == '' and existing_access_key:\r\n default_aws_access_key_id = existing_access_key\r\n\r\n\r\n if default_aws_access_key_id:\r\n secret_key_prompt = \"\\nPlease enter your default Amazon Secret Access Key:\"\r\n if existing_secret_key:\r\n secret_key_prompt += ' [%s] ' % existing_secret_key\r\n else:\r\n secret_key_prompt += ' (leave blank to skip) '\r\n default_aws_secret_access_key = raw_input(secret_key_prompt)\r\n\r\n if default_aws_secret_access_key == '' and existing_secret_key:\r\n default_aws_secret_access_key = existing_secret_key\r\n\r\n ret.update({\r\n 'default_s3_access_key_id': default_aws_access_key_id,\r\n 'default_s3_secret_access_key': default_aws_secret_access_key,\r\n })\r\n\r\n # If we're all set with AWS creds, we can setup our default\r\n # staging and production buckets\r\n if default_aws_access_key_id and default_aws_secret_access_key:\r\n existing_staging_bucket = None\r\n existing_production_bucket = None\r\n if settings.get('default_s3_buckets'):\r\n existing_staging_bucket = settings['default_s3_buckets'].get('staging', None)\r\n existing_production_bucket = settings['default_s3_buckets'].get('production', None)\r\n\r\n staging_prompt = \"\\nWhat is your default staging bucket?\"\r\n if existing_staging_bucket:\r\n staging_prompt += ' [%s] ' % existing_staging_bucket\r\n else:\r\n staging_prompt += ' (e.g. apps.beta.myorg.com, leave blank to skip) '\r\n staging = raw_input(staging_prompt)\r\n\r\n if staging == '' and existing_staging_bucket:\r\n staging = existing_staging_bucket\r\n if staging != \"\":\r\n ret['default_s3_buckets'].update({\r\n 'staging': staging,\r\n })\r\n\r\n production_prompt = \"\\nWhat is your default production bucket?\"\r\n if existing_production_bucket:\r\n production_prompt += ' [%s] ' % existing_production_bucket\r\n else:\r\n production_prompt += ' (e.g. 
apps.myorg.com, leave blank to skip) '\r\n production = raw_input(production_prompt)\r\n\r\n if production == '' and existing_production_bucket:\r\n production = existing_production_bucket\r\n if production != \"\":\r\n ret['default_s3_buckets'].update({\r\n 'production': production,\r\n })\r\n\r\n\r\n more_prompt = \"\\nWould you like to add bucket credentials? [y/N] \"\r\n while raw_input(more_prompt).lower() == 'y':\r\n ## Ask for a uri\r\n additional_s3_bucket = raw_input(\r\n \"\\nPlease specify an additional bucket (e.g. \"\r\n \"additional.bucket.myorg.com/, leave blank to skip adding bucket) \")\r\n if additional_s3_bucket == \"\":\r\n continue\r\n\r\n ## Ask for an access key, if it differs from the default\r\n additional_access_key_prompt = \"\\nPlease specify an AWS Access Key ID for this bucket:\"\r\n\r\n if default_aws_access_key_id:\r\n additional_access_key_prompt += ' [%s] ' % default_aws_access_key_id\r\n else:\r\n additional_access_key_prompt += ' (leave blank to skip adding bucket) '\r\n\r\n additional_aws_access_key_id = raw_input(additional_access_key_prompt)\r\n\r\n if additional_aws_access_key_id == \"\" and default_aws_access_key_id:\r\n additional_aws_access_key_id = default_aws_access_key_id\r\n elif additional_aws_access_key_id == \"\":\r\n continue\r\n\r\n # Ask for a secret key, if it differs from default\r\n additional_secret_key_prompt = \"\\nPlease specify an AWS Secret Access Key for this bucket:\"\r\n\r\n if default_aws_secret_access_key:\r\n additional_secret_key_prompt += ' [%s] ' % default_aws_secret_access_key\r\n else:\r\n additional_secret_key_prompt += ' (leave blank to skip adding bucket) '\r\n\r\n additional_aws_secret_access_key = raw_input(\r\n additional_secret_key_prompt)\r\n\r\n if additional_aws_secret_access_key == \"\" and default_aws_secret_access_key:\r\n additional_aws_secret_access_key = default_aws_secret_access_key\r\n elif additional_aws_secret_access_key == \"\":\r\n continue\r\n\r\n ret['s3_credentials'][additional_s3_bucket] = {\r\n 'access_key_id': additional_aws_access_key_id,\r\n 'secret_access_key': additional_aws_secret_access_key,\r\n }\r\n\r\n puts(\"\\n- Done configuring Amazon S3.\")\r\n return ret", "def create_s3(self, name, bucket, access_key, secret_access_key, endpoint=None, region=None,\n signature_version=None):\n\n config = {\n 'bucket': bucket,\n 'accessKey': access_key,\n 'secretAccessKey': secret_access_key,\n }\n if endpoint:\n config['endpoint'] = endpoint\n if region:\n config['region'] = region\n if signature_version:\n config['signatureVersion'] = signature_version\n\n storage_provider = models.StorageProvider(\n type='s3',\n name=name,\n config=config,\n )\n\n repository = self.build_repository(repositories.CreateStorageProvider)\n return repository.create(storage_provider)", "def Create(iam,username: str,tag='/'):\n\t\t\t\treturn iam.resource.User(username).create(Path=AWS.preptag(tag))", "def test_asset_handler_creds(\n access_handler,\n mock_s3_credentials,\n profile_name,\n example_creds_path\n):\n assert access_handler._fetch_creds(\n profile_name, example_creds_path\n ) == mock_s3_credentials", "def create_bucket():\n\n s3 = session.resource('s3')\n\n try:\n s3.create_bucket(Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\", ACL='private')\n print('Created S3 bucket!')\n\n except Exception as e:\n print(f\"Error creating S3 bucket. 
Exception: {e}.\")", "def create_connection(self, access_key: str, secret_key: str):\n try:\n print(\"Trying to establish a connection with the AWS s3\")\n s3_client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)\n print(\"Connection successful!!\")\n return s3_client\n except ConnectionError as ex:\n print(\"Error connecting to the s3\")\n raise ex", "def get_s3_resource():\n\n s3_creds = get_s3_credentials(\"conf/local/credentials.yaml\")\n\n session = boto3.Session(\n aws_access_key_id=s3_creds['aws_access_key_id'],\n aws_secret_access_key=s3_creds['aws_secret_access_key']\n )\n\n s3 = session.client('s3')\n\n return s3", "def create(self, user, name, public_key):\n body = {'keypair': {\n 'name': name,\n 'public_key': public_key\n }}\n return self._create('/gd-userinfo/%s/keypairs' % base.getid(user),\n body, 'keypair')", "def install_creds(arguments):\n\n global credentials\n if arguments.verbose:\n print \"Installing credentials...\"\n credentials = storage.get()", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"test_access_key\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"test_secret_access_key\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"test_security_token\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"test_session_token\"", "def _generate_credential() -> dict:\n\n return {\n \"accounts\": {}\n }", "def __init__(self, user_name, user_email, user_password):\n self.user_name = user_name\n self.user_email = user_email\n self.user_password = user_password\n self.bucket_lists = {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the AuthUser.profile object, creating the record if it doesn't exist.
def get_profile(self, request=None):
    if not request:
        request = get_current_request()

    auth_profile = request.registry.settings.get('apex.auth_profile')
    if auth_profile:
        resolver = DottedNameResolver(auth_profile.split('.')[0])
        profile_cls = resolver.resolve(auth_profile)
        return get_or_create(DBSession, profile_cls, user_id=self.id)
[ "def create_user_profile(self, user: User) -> Person:\n # The user already has a profile.\n if hasattr(user, \"profile\"):\n user.profile.login_count += 1\n user.profile.save()\n\n return user.profile\n\n # The user doesn't have a profile, so let's try and find a matching one.\n get_queries = [\n # First see if we can match on the legacy SSO ID.\n Q(legacy_sso_user_id=user.legacy_sso_user_id),\n # Next see if we can match on the email.\n Q(email=user.email),\n # Finally try and match on the first and last name.\n Q(first_name=user.first_name, last_name=user.last_name),\n ]\n\n for query in get_queries:\n try:\n person = Person.objects.get(Q(user__isnull=True) & query)\n except (Person.DoesNotExist, Person.MultipleObjectsReturned):\n person = None\n else:\n break\n\n # If we found a matching profile, update and return it.\n if person:\n person.user = user\n person.login_count += 1\n person.save()\n\n return person\n\n # We couldn't find a matching one so let's create one for them.\n person = Person.objects.create(\n user=user,\n legacy_sso_user_id=user.legacy_sso_user_id,\n first_name=user.first_name,\n last_name=user.last_name,\n email=user.email,\n login_count=1,\n )\n\n self.profile_created(person, user)\n\n return person", "def _getProfileFromUser(self):\r\n\r\n # Getting and Verifying current user\r\n user = getUser()\r\n \r\n # get the user_id (email) \r\n user_id = getUserId(user)\r\n\r\n # Creating a profile key. \r\n p_key = ndb.Key(Profile, user_id)\r\n \r\n # Using the profile key to get a profile Object\r\n profile = p_key.get()\r\n\r\n # create new Profile if not there\r\n if not profile:\r\n \r\n profile=Profile(\r\n key=p_key,\r\n displayName=user.nickname(),\r\n mainEmail=user.email(),\r\n teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),)\r\n \r\n profile.put()\r\n \r\n return profile", "def _getProfileFromUser(self):\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n if not profile:\n profile = Profile(\n key=p_key,\n displayName=user.nickname(),\n mainEmail=user.email(),\n teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n\n return profile # return Profile", "def _getProfileFromUser(self):\n ## TODO 2\n ## step 1: make sure user is authed\n ## uncomment the following lines:\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n user_id = getUserId(user)\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n ## step 2: create a new Profile from logged in user data\n ## you can use user.nickname() to get displayName\n ## and user.email() to get mainEmail\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(), \n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n\n return profile # return Profile", "def new_profile() -> Profile:\n return Profile(\n last_name='',\n email='',\n picture='')", "def get_current_user_profile(self):\n user_id = self.get_current_user_id()\n if 'user_id' in g:\n user = self.user_store.query.get(g.user_id)\n g.user = user\n profile = self.db.session.query(Profile).filter_by(user_id=user_id).first()\n return profile\n\n return None", "def test_profile_is_created_automatically(self):\n \n user = User.objects.create_user('duffman', 'duffman@test.com', 'pass')\n user.save()\n self.assertTrue(user.get_profile())", "def 
create_user_profile(sender, instance, created, **kwargs):\n if created:\n UserProfile.objects.create(user=instance)", "def get_user_profile(self):\n try:\n return Profile.objects.get(owner=self.request.user)\n except Profile.DoesNotExist:\n raise Http404", "def _populate_and_save_user_profile(self):\n try:\n profile = models.UserProfile.objects.get(user=self._user)\n self._populate_profile_fields(profile)\n\n if len(ldap_settings.AUTH_LDAP_USER_ATTR_MAP) > 0:\n profile = self._populate_profile_fields(profile)\n profile.save()\n except (SiteProfileNotAvailable, ObjectDoesNotExist), e:\n profile = models.UserProfile(user=self._user,\n role=models.UserProfile.ROLE_USER,\n ldap_user=True)\n \n\n profile = self._populate_profile_fields(profile)\n\n profile.save()", "def retrieve_profile(cls):\n user_id = current_user.get_id()\n profile = Profile.query.filter_by(user_id=user_id).first()\n return user_id, profile", "def save(self, profile_callback=None):\r\n \r\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['Username'],\r\n first_name=self.cleaned_data['first_name'],\r\n last_name=self.cleaned_data['last_name'],\r\n password=self.cleaned_data['password1'],\r\n email=self.cleaned_data['Email'],\r\n profile_callback=profile_callback)\r\n return new_user", "def get_profile(self):\n _LOGGER.info(\"Fetching user profile.\")\n resp = yield from self._get(const.USER_PROFILE_URL.format(self.id))\n return UserProfile(resp)", "def create_django_user(self):\n # Reusing the id will make our life easier, because we can use the\n # OneToOneField as pk for Profile linked back to the auth.user\n # in the future.\n self.user = DjangoUser(id=self.pk)\n self.user.first_name = self.firstname\n self.user.last_name = self.lastname\n self.user.username = self.email\n self.user.email = self.email\n self.user.password = self.password\n self.user.date_joined = self.created\n\n if self.group_set.filter(rules='*:*').count():\n self.user.is_superuser = self.user.is_staff = True\n\n self.user.save()\n self.save()\n return self.user", "def create_profile(self, user):\n salt = hashlib.sha1(six.text_type(random.random()).encode('ascii')).hexdigest()[:5]\n salt = salt.encode('ascii')\n email = user.email\n if isinstance(email, six.text_type):\n username = email.encode('utf-8')\n activation_key = hashlib.sha1(salt + email).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(account=instance)\n new_profile_created.send(sender=instance.__class__, account=instance)", "def create_user(self):\n try:\n user, created = self.user_cls.objects.get_or_create(defaults=self.defaults, **self.query)\n if created:\n return user\n # if found user by `query` try to save firebase uid to db\n elif user is not None:\n setattr(user, self.uid_field, self.user.uid)\n # set additional information in user model\n if api_settings.FIREBASE_ADDITIONAL_FIELDS:\n for key, value in api_settings.FIREBASE_ADDITIONAL_FIELDS.items():\n setattr(user, key, value)\n user.save()\n return user\n except Exception as e:\n logger.error(e)\n msg = _('Error on user account creating. 
Please, write to support')\n raise exceptions.AuthenticationFailed(msg)\n return None", "def create(self, validated_data):\n\n user = models.CustomerProfile(email=validated_data['email'], first_name=validated_data['first_name'], last_name=validated_data['last_name'], gender=validated_data['gender'], is_customer=True)\n user.set_password(validated_data['password'])\n user.save()\n\n return user", "def _create_or_update_googleplus_user(profile, access_token, expires_in):\n user_is_created = False\n try:\n googleplus_user = GooglePlusUser.objects.get(googleplus_id=profile['id'])\n except GooglePlusUser.DoesNotExist:\n first_name, last_name = _get_first_and_last_name(profile['displayName'])\n user = User.objects.create( \\\n first_name=first_name,\n last_name=last_name,\n username='googleplus_' + profile['id']\n )\n user_is_created = True\n \n if user_is_created:\n googleplus_user = GooglePlusUser()\n googleplus_user.googleplus_id = profile['id']\n googleplus_user.user = user\n else:\n first_name, last_name = _get_first_and_last_name(profile['displayName'])\n googleplus_user.user.first_name = first_name\n googleplus_user.last_name = last_name\n \n googleplus_user.googleplus_display_name = profile['displayName']\n googleplus_user.access_token = access_token\n googleplus_user.expiry_at = datetime.datetime.now() + \\\n datetime.timedelta(seconds=int(expires_in)) \n googleplus_user.save()\n \n return googleplus_user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flatten comments into a list of Comment objects using a tree traversal.
def flatten_comments(root_comments):
    all_comments = []
    nodes = root_comments[:]
    while nodes:
        node = nodes.pop()
        data = node['data']
        if 'body' not in data:
            #
            # weird child node
            #
            continue
        comment = Comment(data['body'], int(data['ups']), int(data['downs']))
        all_comments.append(comment)
        if data['replies']:
            for reply in data['replies']['data']['children']:
                nodes.append(reply)
    return all_comments
[ "def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()", "def flatten(self, comment=None):\n\t\tprint 'flattening'\n\t\tif comment is None:\n\t\t\tprint 'comment is none'\n\t\t\tcomment = self.commentlist[0]\n\t\twhile isinstance(comment, praw.models.Comment):\n\t\t\tprint comment.body_html\n\t\t\tyield comment\n\t\t\tcomment = comment.replies[0]", "def get_comments(comments):\n from utils import quick_encrypt\n if comments is None:\n return []\n elif isinstance(comments, praw.models.reddit.more.MoreComments):\n return []\n elif isinstance(comments, praw.models.reddit.comment.Comment):\n author = None\n if comments.author:\n author = quick_encrypt(comments.author.name)\n return [(comments.body, author)]\n elif isinstance(comments, praw.models.comment_forest.CommentForest):\n combined = []\n for comment in (comments.list()):\n combined = combined + get_comments(comment)\n return combined\n elif isinstance(comments, list):\n return []\n else:\n print(type(comments))\n print(comments)", "def filter_comments_by_max_depth(self, max_depth, comments=None):\n\t\tif comments is None: \n\t\t\treturn\n\t\tfor i, c in reverse_enumerate(comments):\n\t\t\t# If the comment has no children at a sufficient depth, delete it altogether,\n\t\t\t# Else apply the same algorithm to its children\n\t\t\tprint i, \" -> \", self.max_comment_depth(c), \" v \", (max_depth-1)\n\t\t\tif self.max_comment_depth(c) < (max_depth-1):\n\t\t\t\tprint \" ignoring\", i\n\t\t\telif isinstance(c, praw.models.Comment):\n\t\t\t\tself.commentlist.append(c)\n\t\t\t\tprint \" saving and recursing\", i\n\t\t\t\tself.filter_comments_by_max_depth(max_depth=max_depth-1, comments=c.replies)", "async def format_nested_comments(\n db: AsyncSession,\n *,\n comments: List[Comment],\n permalink: str,\n user: Optional[User] = None,\n ) -> CommentListOut:\n\n users, comment_actions = await crud.comment.fetch_comments_data(\n db, comments=comments, user=user\n )\n\n l1_index = {}\n l1_comments = []\n for comment in comments:\n if permalink is None:\n content_link = ContentInBase().generate_permalink(\n comment.content.permalink, comment.content.id\n )\n else:\n content_link = permalink\n\n if comment.l1_id:\n (l1_index, l1_comments,) = await crud.comment.format_single_comment(\n comment,\n level=1,\n index=l1_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l1_comments,\n schema=CommentL1Out,\n )\n l1_index_obj = l1_index[comment.l1_id]\n\n if comment.l2_id:\n l2_index = l1_index_obj[\"child_index\"]\n l2_comments = l1_comments[l1_index_obj[\"list_id\"]].comments\n\n (l2_index, l2_comments,) = await crud.comment.format_single_comment(\n comment,\n level=2,\n index=l2_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l2_comments,\n schema=CommentL2Out,\n )\n l2_index_obj = l2_index[comment.l2_id]\n\n if comment.l3_id:\n l3_index = l2_index_obj[\"child_index\"]\n l3_comments = l2_comments[l2_index_obj[\"list_id\"]].comments\n\n await crud.comment.format_single_comment(\n comment,\n level=3,\n index=l3_index,\n permalink=content_link,\n users=users,\n comment_actions=comment_actions,\n comments_out=l3_comments,\n schema=CommentL3Out,\n )\n\n l1_total = comments[0].l1_total if comments else 0\n master_comments_out = CommentListOut(\n comments=l1_comments, comments_total=l1_total\n )\n return master_comments_out", "def new_child_comments():\n c.execute('''SELECT * FROM comments WHERE is_root=0 AND posted=0''')\n for comment in 
c.fetchall():\n yield comment", "def flatten(container, lvl=1, accessor=lambda x: x):\n for i in container:\n if not isinstance(i, (list,tuple)):\n yield (i, lvl) # yield current comment\n if isinstance(accessor(i), (list,tuple)) and accessor(i):\n for j in flatten(accessor(i), lvl+1, accessor):\n yield j # yield flattened out children", "def new_root_comments():\n c.execute('''SELECT * FROM comments WHERE is_root=1 AND posted=0''')\n for comment in c.fetchall():\n yield comment", "def __get_comments(self, root):\n comments_root = self.__expand_shadow_element_by_tag_name(root, 'mr-comment-list')\n\n list_of_comments = comments_root.find_elements_by_tag_name('mr-comment')\n print ('[*] %d comments' %len(list_of_comments))\n comments = []\n for c in list_of_comments:\n comment_root = self.__expand_shadow_element(c)\n comment_header = comment_root.find_element_by_css_selector('div>div').text.replace('\\n', ' ')\n \n m = re.match(self.comment_pattern, comment_header)\n blank_comment = { 'comment_id':'', 'comment_datetime':'', \n 'comment_author':'', 'comment_message': ' '} \n if m:\n comment_id = m.group(1).strip('\\n\\r ')\n if not 'Deleted' in comment_header:\n message_root = self.__expand_shadow_element_by_css_selector(comment_root, '.comment-body>mr-comment-content')\n lines = message_root.find_elements_by_css_selector('.line')\n\n comments.append({\n 'comment_id': comment_id,\n 'comment_datetime': m.group(4).strip('\\n\\r '),\n 'comment_author' : m.group(3).strip('\\n\\r '),\n 'comment_message': ' '.join([l.text.strip('\\n\\r ') for l in lines]) \n })\n else:\n blank_comment['comment_id'] = comment_id\n comments.append(blank_comment) \n else:\n comments.append(blank_comment) \n return comments", "def GetComments(self):\n\t\tcomments = []\n\t\tfor submission in self.submissions:\n\t\t\tif self.expanded:\n\t\t\t\tsubmission.replace_more_comments()\n\t\t\t\tcommentobjs = praw.helpers.flatten_tree(submission.comments)\n\t\t\t\tcomments.extend([comment.body for comment in commmentobjs])\n\t\t\telse:\n\t\t\t\tsubmission.replace_more_comments(limit=0)\n\t\t\t\tcomments.extend([comment.body for comment in submission.comments if comment.is_root])\n\t\tself.comments = comments\n\t\tself.commentcount = len(comments)", "def find_comments(self, comment: str) -> List[DualCommentNode]:\n\n return self._find_helper(DualCommentNode, \"find_comments\", comment)", "def rebuild(cls, link, comments):\n with cls._mutation_context(link) as lock:\n # not reading, but we should block other read-modify-write\n # operations to avoid being clobbered by their write\n tree = {}\n for comment in comments:\n tree.setdefault(comment.parent_id, []).append(comment._id)\n\n cls._write_tree(link, tree, lock)", "def comments(self):\n comments = self.get_edges() \\\n .get(API_EDGE_TYPE.HAS_COMMENT_FROM, {}) \\\n .values()\n comments.sort(key=lambda x: x.created_ts)\n return comments", "def get_top_level_comments(self) -> Generator[Tuple[str, str], None, None]:\n for comment in self.comments:\n yield (comment.id, comment.body)", "def flatten(tree):\n for node in tree:\n if isinstance(node, list):\n for subnode in flatten(node):\n yield subnode\n else:\n yield node", "def expand_comments(thread):\n\tcomments = list(thread.comments)\n\toldLen = 0\n\tnewLen = len(comments)\n\twhile newLen != oldLen:\n\t\toldLen = newLen\n\t\tthread.replace_more_comments()\n\t\tcomments = list(thread.comments)\n\t\tnewLen = len(comments)\n\tcomments.sort(key=lambda x: int(x.created_utc), reverse=True)\n\treturn comments", "def 
parse_comments(media_json):\n comments_attributes = media_json['edge_media_to_parent_comment']\n\n # iterate over comments\n comments = []\n for edge in comments_attributes['edges']:\n comments.append(edge['node']['text'])\n\n return comments", "def comments(self):\r\n from .._impl.comments import Comment\r\n cs = []\r\n start = 1\r\n num = 100\r\n nextStart = 0\r\n url = \"%s/sharing/rest/content/items/%s/comments\" % (self._portal.url, self.id)\r\n while nextStart != -1:\r\n params = {\r\n \"f\" : \"json\",\r\n \"start\" : start,\r\n \"num\" : num\r\n }\r\n res = self._portal.con.post(url, params)\r\n for c in res['comments']:\r\n cs.append(Comment(url=\"%s/%s\" % (url, c['id']),\r\n item=self, initialize=True))\r\n start += num\r\n nextStart = res['nextStart']\r\n return cs", "def get_comments(self):\n comments = self.data().get('comments', {}).get('data', [])\n migration_key = FacebookPost.migration.get_value_for_datastore(self)\n return (FacebookComment(key_name_parts=(cmt['id'], migration_key.name()),\n json_data=json.dumps(cmt))\n for cmt in comments)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of files that will be processed.
def file_count(self):
    return len(self.file_names)
[ "def number_of_files(self) -> int:\n return pulumi.get(self, \"number_of_files\")", "def file_count(self):\n\t\tif self.mode != PF_READ:\n\t\t\treturn -1\n\t\treturn len(self.files)", "def file_count(self):\n return sum([len(fls) for fls in self.file_list.values()])", "def count():\n\n return len(directory)", "def file_count():\n\n corpus = Corpus.from_env()\n click.echo(corpus.file_count)", "def get_file_counts(self):\n return [len(fs) for fs in self.file_sets.values()]", "def number_of_sub_file_entries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory is None:\n return 0\n\n # We cannot use len(self._directory.entries) since entries is a generator.\n return sum(1 for path_spec in self._directory.entries)", "def calculate_number_of_discovered_files(discovered_files: List[DiscoveredFileInfo]) -> int:\n return len(discovered_files)", "def count_number_files(dir):\n total = 0\n for root, dirs, files in os.walk(dir):\n total += len(files)\n return total", "def get_num_files(torrent):\n\n return torrent.get_status([\"num_files\"])[\"num_files\"]", "def getFileCount(self):\n\t\tfilecount = 0\n\t\tos.listdir()\n\t\ttry:\n\t\t\tfor file in os.listdir(os.path.join(self.conf[\"winch\"][\"log-path\"], \"log\")):\n\t\t\t\tif fnmatch.fnmatch(file, '*.csv'):\n\t\t\t\t\tfilecount += 1\n\t\texcept FileNotFoundError:\n\t\t\treturn 0\n\n\t\tself.ui.spinBox.setMaximum(filecount)\n\t\tself.ui.btn_load.setEnabled((filecount > 0))\n\t\treturn filecount", "def getNumberOfThreadsFile():\n import FEV_KEGG.settings\n if isMainProcess():\n return FEV_KEGG.settings.fileThreads\n else:\n return FEV_KEGG.settings.fileThreadsPerProcess", "def count(path):\r\n\ttry:\r\n\t\treturn len(os.listdir(path))\r\n\texcept Exception,e:\r\n\t\t# We most probably hit a permission denied here\r\n\t\treturn -1", "def number_of_images(self, force: bool = False):\n if not hasattr(self, '_page_count'):\n res, stdoutdata = _call_cmd(['djvused', '-e', 'n', self.file])\n if not res:\n return False\n self._page_count = int(stdoutdata)\n return self._page_count", "def get_source_file_count(self):\n return self.source_file_count", "def total_files_counter(inp_path, ext):\n filecounter = 0\n for dirpath, dirs, files in os.walk(inp_path):\n for filename in files:\n if filename.endswith(ext):\n filecounter += 1\n return filecounter", "def get_log_count(self):\n with open(self.FILE_PATH) as log_file:\n return sum(1 for __ in log_file)", "def number_of_downloads(self):\n return self._number_of_downloads", "def GetNbFieldFiles(self):\n\n\n filenames = os.listdir(self.path)\n f_filenames = re.findall(r'fields-\\d+\\.dat', strm.join(filenames))\n return len(f_filenames)", "def get_nb_processes():\n result = multiprocessing.cpu_count()\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement this function in a subclass to handle DB commits per file processed.
def _commit(self):
    pass
[ "def abstract_attempt_commit():", "def c_commit(self, args):\n log.info('forcing commit')\n self.db.commit()", "def commit(self):\n if self._dblog:\n self._feedlgr.commit()", "def process_post_commit(self, svn_commit):\n\n raise NotImplementedError()", "def commit(self):\n\t\tself.dbConnection.commit()", "def commit(self):\n if self.dbh:\n self.dbh.commit()\n self._feedlgr.commit()", "def commit(self):\n \"\"\" Default to commit after every transaction\n Will check instance variable to decide if a commit is needed\n \"\"\"\n try:\n self.cursor.execute(\"COMMIT\")\n self.cursor.close()\n self.cursor = None\n except AttributeError:\n logging.error(\"No Open Cursor to do Commit\")\n except Exception as e:\n logging.error(e)", "def commit_db(self):\n\t\tself.conn.commit()\n\t\tself.conn.close()", "def _do_commit(self):\n self.backend.commit()", "def committing(obj):\n\tyield obj\n\tobj.commit()", "def _Commit(self):\n if self.changed:\n self.changed = False\n # pylint: disable=protected-access\n if self.deleted:\n self.deleted = False\n self._cache._metadata.DeleteRows([(self.name,)])\n del self._cache._tables[self.name]\n else:\n self._cache._metadata.AddRows(\n [metadata_table.Metadata.Row(\n name=self.name,\n columns=self.columns,\n keys=self.keys,\n timeout=self.timeout,\n modified=self.modified,\n restricted=self.restricted,\n version=self._cache.version)])", "def synthesize_cvs_commit_ids(self):\n\n rows = self.db.query(self.db.rewrite_sql(\"SELECT count(*) FROM checkins WHERE commitid IS NULL\"), []);\n count = rows[0][0]\n if (count == 0):\n return\n\n print(\"Updating \" + str(count) + \" legacy CVS entries\")\n select = self.db.rewrite_sql(\"SELECT id, ci_when, whoid, repositoryid, branchid, descid FROM checkins WHERE commitid IS NULL ORDER BY repositoryid, branchid, whoid, ci_when LIMIT 100000\")\n rows = self.db.query(select, [])\n\n i = 0\n commitid = 0\n last_row = [0, 0, 0, 0, 0, 0]\n while len(rows) > 0:\n cursor = self.db.conn.cursor()\n for row in rows:\n if not self.are_rows_in_same_commit(row, last_row):\n cursor.execute(\"INSERT INTO commitids (hash, co_when, authorid, committerid) VALUES (%s, %s, %s, %s)\", [\"s\" + str(time.time()) + str(i), row[1], row[2], row[2]])\n commitid = cursor.lastrowid\n cursor.execute(self.db.rewrite_sql(\"UPDATE checkins SET commitid=%s WHERE id=%s\"), [commitid, row[0]])\n i = i + 1\n last_row = row\n\n cursor.close()\n self.db.conn.commit()\n self.db.conn.begin()\n print(\" Updated \" + str(i) + \" / \" + str(count))\n rows = self.db.query(select, []);\n cursor.close()\n self.db.conn.commit()\n print(\"OK: Converted CVS legacy entries\")", "def commitInOuts(self):\n \n substTemp = \"\"\"INSERT INTO cmdFileRelation\n (taskrow, linenum, output, logicalname, concretename)\n VALUES (?,?,?,?,?)\"\"\"\n cur = self.connection.cursor()\n cur.execute(\"BEGIN IMMEDIATE;\")\n #print \"inout commit has %d inout and %d state to commit\" % (\n # len(self.inOutList), len(self.stateList))\n if(len(self.inOutList) > 0):\n cur.executemany(substTemp, self.inOutList)\n self.inOutList = []\n if(len(self.stateList) > 0):\n cur.executemany(\"\"\"INSERT INTO filestate (taskId, concretename, state)\n values (?,?,?)\"\"\", self.stateList)\n self.stateList = []\n cur.execute(\"COMMIT;\")\n cur.close()\n pass", "def process_primary_commit(self, svn_commit):\n\n raise NotImplementedError()", "def process_branch_commit(self, svn_commit):\n\n raise NotImplementedError()", "def commit(self):\n\t\tdel self.transaction_log[:] \n\t\tself.transaction_mode = False", "def 
commit_database():\n global _conn\n _conn.commit()", "def timer_librarian_commit(self):\n self.queue_data.put('COMMIT')\n self.logger.debug(\"TRIGGERING COMMIT!\")\n\n # Restart timer\n self.timer_db = threading.Timer(self.db_commit_timeout, self.timer_librarian_commit)\n self.timer_db.start()", "def commit(self):\n\t\tif self._status != self._IN_PROGRESS:\n\t\t\traise ValueError(\"Batch must be in progress to commit()\")\n\n\t\ttry:\n\t\t\tfrom viur.xeno.databases import dbinterface\n\t\t\tdbinterface.transaction_commit()\n\t\tfinally:\n\t\t\tself._status = self._FINISHED" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call a JSON data processor function given its base name.
def call_process_func(self, name, id, json_data):
    process_function = '_process_' + name
    try:
        function = getattr(self, process_function, None)
        if function is not None:
            function(id, json_data)
        else:
            self.root_logger.warning("No handler %s from %s %s", process_function, id, self.__class__.__name__)
    except Exception as e:
        self.root_logger.error("Exception in %s from %s %s: %s", process_function, id, self.__class__.__name__, e)
[ "def main(str_json):\n data_dict = convert_json_to_dictionary(str_json)\n\n if data_dict['request_type'] == 'load_data':\n load_data(data_dict)\n\n elif data_dict['request_type'] == 'modify_data':\n modify_data(data_dict)\n\n elif data_dict['request_type'] == 'export_data':\n export_data(data_dict)\n \n elif data_dict['request_type'] == 'finalise_data':\n finalise_data(data_dict)", "def invoke(self, json):\n raise NotImplementedError(\"Must be over-ridden in a subclass\")", "def call_function(self, name, data):\n\n function = 'projects/{}/locations/{}/functions/{}'.format(\n self.project, self.zone, name)\n\n request = self.functions.call(name=function, body={'data': data})\n\n return self.__execute_request(request)", "def standard_data_func(filename):\n func_name = 'stdrd_' + ''.join(l if l.isalnum() else '_' for l in filename[:-4])\n return globals().get(func_name, lambda x: x)", "def _post_process_json(self):\n pass", "def run_case(path):\n if hasattr(path, 'read'):\n case = json.load(path)\n else:\n with open(path) as fp:\n case = json.load(fp)\n\n func = getattr(ndd, case['f'])\n args = case['args']\n kwargs = case['kwargs']\n result = func.original_func(*args, **kwargs)\n return (case, result)", "def main():\n file_reader = Text_Processor()\n publishers = file_reader.read_files()\n json_exporter(publishers)\n run()", "def jsonHook(encoded):\n if '__Fgi__' in encoded:\n return Fgi._fromJSON(encoded['__Fgi__'])\n else:\n return encoded", "def filename(stylename, basepath=''):\n return os.path.join(basepath, stylename + os.path.extsep + 'json')", "def process_python_function(self):\r\n exec(self.python_text)\r\n self.func = locals()[self.function_name]\r\n return self.func", "def dynamicLoad():\n pass", "def mapping_api_function(name):\n mapping = {\n 'DiscoverAppliancesRequest': async_api_discovery,\n 'TurnOnRequest': async_api_turn_on,\n 'TurnOffRequest': async_api_turn_off,\n 'SetPercentageRequest': async_api_set_percentage,\n }\n return mapping.get(name, None)", "def process_name():", "def register_custom_loader(self, format_name, loader_func):\n if not callable(loader_func):\n raise ValueError(\"loader_func must be callable\")\n self._loader_map[format_name] = loader_func", "def resolve_key(self, match):\n args = match.group(1).split('|')\n key = args[0]\n processor_funcs = args[1:]\n\n value = self.args.get(key, '')\n for func_name in processor_funcs:\n # get renderer func or use to string func\n value = ALIASES.get(func_name, str)(value)\n\n return value", "def parse(name):\n\n pass", "def json_processor(entity):\n if not entity.headers.get(ntou(\"Content-Length\"), ntou(\"\")):\n raise cherrypy.HTTPError(411)\n\n body = entity.fp.read()\n try:\n cherrypy.serving.request.params.update(json_decode(body.decode('utf-8')))\n except ValueError:\n raise cherrypy.HTTPError(400, 'Invalid JSON document')", "def __call__(self, parsed_json):\n if isinstance(parsed_json, list):\n return self._data_chunk_from_dicts_list(parsed_json)\n elif isinstance(parsed_json, dict):\n\n if GROUPING_FNAMES not in parsed_json:\n raise ValueError(\"The json file is invalid as it has no '%s'\"\n \" meta-data.\" % GROUPING_FNAMES)\n grouping_fnames = parsed_json[GROUPING_FNAMES]\n del parsed_json[GROUPING_FNAMES]\n\n return self._data_chunk_from_dicts_tree(parsed_json,\n grouping_fnames)", "def main():\n\n # The functions which process different types of VPP Python API methods.\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=__doc__)\n 
parser.add_argument(\"-d\", \"--data\",\n required=True,\n help=\"Data is a JSON string (list) containing API name(s)\"\n \"and its/their input argument(s).\")\n\n args = parser.parse_args()\n\n vpp = VppApi()\n return VppApi.process_json_request(vpp, args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Where the magic happens. Finds a threshold that will limit the number of params in the network to the tracked_size, and resets those params to the initial value to emulate how DropBack would work in real hardware. Chainer will calculate all grads, and this updater inserts itself before the next forward pass can occur to set the parameters back to what they should be. Only the params with the largest difference between current and initial value will not be reset to initial. This emulates the accumulated gradient updates of the actual algorithm.
def update(self):
    if self.first_iter:
        self.first_iter = False
        self.params = [i for i in self.opt.target.params()]
        for i, p in enumerate(self.params):
            self.init_params.append(xp.copy(p.data))
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        xp.savez(os.path.join(self.output_dir, 'init_params_{0}'.format(self.time_stamp)), self.init_params)
        if self.tracked_size:
            self.frozen_masks = [None] * len(self.params)
    super(DropBack, self).update()
    if self.decay_init and not self.first_iter:
        for i, _ in enumerate(self.init_params):
            self.init_params[i] = self.init_params[i]*.90
    if self.tracked_size:
        if not self.freeze:
            abs_values = []
            for i, param in enumerate(self.params):
                if param.name == 'b':
                    values = (xp.abs(param.data).flatten()).copy()
                else:
                    values = (xp.abs(param.data - self.init_params[i]).flatten()).copy()
                abs_values.append(values)
            abs_vals = xp.concatenate(abs_values)
            thresh = xp.partition(abs_vals, self.tracked_size)[-self.tracked_size]
        for i, param in enumerate(self.params):
            if param.name == 'b':
                if self.freeze:
                    mask = self.frozen_masks[i]
                else:
                    mask = xp.abs(param.data) > thresh
                param.data = mask*param.data
            else:
                if self.freeze:
                    mask = self.frozen_masks[i]
                else:
                    mask = xp.abs(param.data - self.init_params[i]) > thresh
                param.data = mask*param.data + self.init_params[i]*~mask
            self.frozen_masks[i] = mask
        if self.iteration == 3465:
            print("Checking inv...")
            total_sum = sum([xp.count_nonzero(p.data != self.init_params[i]) for i, p in enumerate(self.params)])
            print("********\n\n Total non zero is: {}\n\n1*********".format(total_sum))
            assert total_sum <= self.tracked_size * 1.1
    if self.track:
        if (self.iteration-1) % 100 == 0:
            flat_now = xp.concatenate([i.array.ravel() for i in self.params])
            flat_0 = xp.concatenate([i.ravel() for i in self.init_params])
            xp.savez(os.path.join(self.output_dir, f'l2_{self.iteration-1}'), xp.linalg.norm(flat_now - flat_0))
            xp.savez(os.path.join(self.output_dir, f'param_hist_{self.iteration-1}'),
                     xp.concatenate([i.array.ravel() for i in self.params if i.name == 'b' or i.name == 'W']))
[ "def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise pooling weights.\n nn.init.normal_(self.pooling_weights, mean=0.0, std=0.02)", "def reset_grads(self):\n for dparam in self.dparams:\n dparam.set_value(0.0 * dparam.get_value())", "def freeze_adaptive_regularizer_param(self):\n if self.local_weights_hook is None:\n print(\"the local adaptive smoother weight is locked\")\n self.local_weights_hook = self.local_weights.register_hook(lambda grad: grad * 0)\n self.local_weights_hook_flag = True", "def InitialThreshold(self) -> int:", "def optimize(self):\n if self.replay_buffer.length() < self.min_replay_size:\n return\n\n batch = getBatch(self.replay_buffer, self.batch_size)\n loss = self._getLoss(batch)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.soft_update:\n self.softUpdate()\n elif self.steps % self.target_update_steps == 0:\n self.updateTargetModel()", "def fine_tune_h(self):\n for p in self.encoder.parameters():\n p.requires_grad = False\n \n # If fine-tuning, only fine-tune convolutional blocks 2 through 4\n for c in list(self.encoder.children())[5:]:\n for p in c.parameters():\n p.requires_grad = self.fine_tune", "def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise lookup and target weights.\n nn.init.normal_(self.lookup_weights, mean=0.0, std=0.02)\n if self.target_weights is not None:\n nn.init.normal_(self.target_weights, mean=0.0, std=0.02)", "def _gradient_update(self):\n # sample minibatch\n captions, image_features, urls = sample_coco_minibatch(self.data, self.batch_size, split='train')\n # compute loss and gradient\n loss, gradients = self.model.loss(image_features, captions)\n self.loss_history.append(loss)\n # parameter update\n for para_name, param in self.model.params.items():\n dparam = gradients[para_name]\n next_param, params = self.update_method(param, dparam, self.update_params_all[para_name])\n self.model.params[para_name] = next_param\n self.update_params_all[para_name] = params", "def _update_agent_state_threshold_watts(self, update):\n if self.has_predecessor():\n if self.state == 1:\n logging.info('skipping _update_agent_state_threshold_watts, '\n 'because self.state is 1')\n return(self)\n else:\n logging.info(\"total number of flips: {}\".\n format(self.num_flipped))\n total_1 = 0\n total_p = 0\n for idx, predecessor in enumerate(self.predecessors):\n if predecessor.state == 1:\n total_1 += 1\n total_p += 1\n logging.info('Final: total_1 / total_p: {} / {}'.\n format(total_1, total_p))\n if total_1 / float(total_p) >= self.threshold:\n logging.info('New state is 1')\n self.step_update_status = 1\n new_state = 1\n else:\n logging.info('New state is remains 0')\n new_state = 0\n if update == 'simultaneous':\n logging.info('SIMULTANEOUS: Assign temp_new_state to {}'.\n format(new_state))\n self.temp_new_state = new_state\n elif update == 'sequential':\n logging.info('SEQUENTIAL: Assing state to {}'.\n format(new_state))\n self.state = new_state\n return(self)\n else:\n return(self)", "def update_threshold(self):\n self.threshold = self.rescale_tau * ((1 - self.eta) * self.norm_average + self.eta * self.norm)\n\n if self.den_mean == 1:\n self.norm_average = self.norm\n else:\n self.norm_average = (self.norm + self.den_mean * self.norm_average) / (self.den_mean + 1)\n return", "def apply_from_grad(self):\n with torch.no_grad():\n ra = 
self.running_avg_step\n bias_correction = 1 - (self.big_gamma ** self.step_count)\n eps = self.epsilon\n # Calculate gap from grad\n for pg in self.optimizer.param_groups:\n max_lr = pg[GapAwareBase.MAX_LR_NAME]\n if max_lr <= 0:\n continue\n weight_decay = pg['weight_decay']\n for p in pg['params']:\n # calculate C coefficient per-element\n avg_steps_needed = max_lr * \\\n (((ra[id(p)] / bias_correction) ** 0.5) + eps)\n\n # calculate the gap per-element\n penalty = 1 + (pg['lr'] * p.grad.abs() / avg_steps_needed)\n\n # Apply penalty to gradient\n p.grad /= penalty\n # Apply penalty to weight decay (as it will be part of the gradient)\n # HACK: we know that sgd does\n # d_p += p*wd\n # and we want:\n # d_p += p*wd/penalty\n # so we solve:\n # x + z + p*wd = x + (p*wd / penalty)\n # giving:\n # z = p*wd ((1/penalty) - 1) = ((1 - penalty) / penalty)\n # so we do\n # d_p += z\n # z = p * weight_decay * ((1 - penalty) / penalty)\n p.grad += p.mul(weight_decay * ((1 - penalty) / penalty))", "def test_post_parameter_update(self):\n # do one optimization step\n opt = optim.SGD(params=self.instance.parameters(), lr=1.0)\n batch = self.factory.mapped_triples[: self.batch_size, :].to(self.instance.device)\n scores = self.instance.score_hrt(hrt_batch=batch, mode=self.mode)\n fake_loss = scores.mean()\n fake_loss.backward()\n opt.step()\n\n # call post_parameter_update\n self.instance.post_parameter_update()\n\n # check model constraints\n self._check_constraints()", "def init_step_size(parameters,parameter_index,bounds,likelihood_function,likelihood_args,d_par_init=0.1,d_likelihood=0.1,max_step=3,alpha=0.95):\n\n likelihood = likelihood_function(parameters, *likelihood_args)\n df = parameters.shape[0] #number of parameters = number of degrees of freedom\n chi2_threshold = scipy.stats.chi2.ppf(alpha,df) #likelihood-threshold of the confidence interval\n\n #initial guess for the step\n param_tmp = np.copy(parameters)\n d_par=d_par_init\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n\n #now we correct the initial guess if it is out of bonds.\n lower_bound , upper_bound = bounds\n if lower_bound==None:\n lower_bound=-np.inf\n if upper_bound==None:\n upper_bound=np.inf\n while param_tmp[parameter_index] > upper_bound or param_tmp[parameter_index] < lower_bound: #if the current step jumps out of the parameter's bounds, then we reduce it\n print(\"Boundary reached\")\n d_par /= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n print('New value: %.4g'%param_tmp[parameter_index])\n \n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n\n step_evaluations = 0 #number of evaluations of the step size\n #if the step is too big we reduce it\n if d_chi2 > chi2_threshold*d_likelihood:\n while d_chi2 > chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:\n d_par /= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n step_evaluations += 1\n\n #otherwise we increase it\n else:\n while d_chi2 < chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:\n d_par *= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n step_evaluations += 1\n d_par /= 2 #this is in Raue's algorithm but I don't 
really get it. Apparently the last doubling step is too much.\n\n return(d_par)", "def prepare_gradient_for_optim(self):\n\n def cast_grad_to_param_dtype_if_needed(flat_param):\n # TODO (rohan-varma): test for full precision with keep_low_precision_grads\n if not self._force_full_precision and self._keep_low_precision_grads:\n _p_assert(flat_param.grad is not None, \"Unexpected None grad!\")\n if flat_param.grad.dtype != self._fwd_bwd_param_dtype:\n flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype)\n if self._use_orig_params:\n self._use_sharded_grad_views()\n\n flat_param = self.flat_param\n # TODO (awgu): We should replace these conditional checks to encode\n # the logical intention more directly.\n if hasattr(flat_param, \"_cpu_grad\"):\n # NOTE: This branch includes `NO_SHARD`.\n self._check_sharded(flat_param)\n self._check_on_cpu(flat_param)\n flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined]\n cast_grad_to_param_dtype_if_needed(flat_param)\n elif hasattr(flat_param, \"_saved_grad_shard\"):\n self._check_sharded(flat_param)\n self._check_on_compute_device(flat_param)\n if flat_param._saved_grad_shard is not None:\n self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined]\n # If no sharded gradient was computed this iteration, then there is\n # no need to forward `_saved_grad_shard` to `grad`\n if flat_param._post_backward_called: # type: ignore[attr-defined]\n flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined]\n if flat_param.grad is not None:\n cast_grad_to_param_dtype_if_needed(flat_param)\n else:\n _p_assert(\n not self.uses_sharded_strategy\n or not flat_param._post_backward_called, # type: ignore[attr-defined]\n \"All sharded parameters that received a gradient in the \"\n \"post-backward should use `_saved_grad_shard`\",\n )\n # Delete `_saved_grad_shard` since its existence indicates a previous\n # gradient to accumulate with in the post-backward hook\n if hasattr(flat_param, \"_saved_grad_shard\"):\n delattr(flat_param, \"_saved_grad_shard\")", "def update_loss_scale(self, finite_grads):\n \n max_scale = float(2 ** 14)\n\n def update_if_finite_grads():\n \"\"\"Branch function when grads are all finite.\"\"\"\n\n def incr_loss_scale():\n new_loss_scale = tf.cond(\n tf.is_finite(self._loss_scale * self._incr_ratio),\n lambda: self._loss_scale * self._incr_ratio,\n lambda: self._loss_scale)\n # new_loss_scale = tf.minimum(new_loss_scale, max_scale)\n print(\"no max limit\"*100)\n update_op = tf.assign(self._loss_scale, new_loss_scale)\n # When loss_scale is updated, both good and bad steps are reset.\n return tf.group(update_op, self._reset_stats())\n\n return tf.cond(\n self._num_good_steps + 1 >= self._incr_every_n_steps,\n incr_loss_scale,\n lambda: tf.assign_add(self._num_good_steps, 1).op)\n\n def update_if_not_finite_grads():\n \"\"\"Branch function when any grad is not finite.\"\"\"\n\n def decr_loss_scale():\n update_op = tf.assign(\n self._loss_scale,\n tf.maximum(1., self._loss_scale * self._decr_ratio))\n # When loss_scale is updated, both good and bad steps are reset.\n return tf.group(update_op, self._reset_stats())\n\n def just_update_steps():\n # When bad_steps is incremented, good_step is reset.\n return tf.group(\n tf.assign_add(self._num_bad_steps, 1),\n tf.assign(self._num_good_steps, 0))\n\n return tf.cond(\n self._num_bad_steps + 1 >= self._decr_every_n_nan_or_inf,\n decr_loss_scale, just_update_steps)\n\n return tf.cond(finite_grads, update_if_finite_grads,\n 
update_if_not_finite_grads)", "def updateParameterGradient(self):\n\n self.gradient += gpu.sum(self.to_port.getDelta(), 0)", "def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)", "def accept(self):\n\n if self.feature_patch_grid_size_y_new != self.configuration.feature_patch_grid_size_y:\n self.configuration.feature_patch_grid_size_y = self.feature_patch_grid_size_y_new\n self.configuration_changed = True\n\n if self.feature_patch_grid_size_x_new != self.configuration.feature_patch_grid_size_x:\n self.configuration.feature_patch_grid_size_x = self.feature_patch_grid_size_x_new\n self.configuration_changed = True\n\n if self.max_features_new != self.configuration.max_features:\n self.configuration.max_features = self.max_features_new\n self.configuration_changed = True\n\n if self.good_match_fraction_new != self.configuration.good_match_fraction:\n self.configuration.good_match_fraction = self.good_match_fraction_new\n self.configuration_changed = True\n\n if self.match_weighting_new != self.configuration.match_weighting:\n self.configuration.match_weighting = self.match_weighting_new\n self.configuration_changed = True\n\n if self.pyramid_scale_new != self.configuration.pyramid_scale:\n self.configuration.pyramid_scale = self.pyramid_scale_new\n self.configuration_changed = True\n\n if self.levels_new != self.configuration.levels:\n self.configuration.levels = self.levels_new\n self.configuration_changed = True\n\n if self.winsize_new != self.configuration.winsize:\n self.configuration.winsize = self.winsize_new\n self.configuration_changed = True\n\n if self.iterations_new != self.configuration.iterations:\n self.configuration.iterations = self.iterations_new\n self.configuration_changed = True\n\n if self.poly_n_new != self.configuration.poly_n:\n self.configuration.poly_n = self.poly_n_new\n self.configuration_changed = True\n\n if self.poly_sigma_new != self.configuration.poly_sigma:\n self.configuration.poly_sigma = self.poly_sigma_new\n self.configuration_changed = True\n\n if self.use_gaussian_filter_new != self.configuration.use_gaussian_filter:\n self.configuration.use_gaussian_filter = self.use_gaussian_filter_new\n self.configuration_changed = True\n\n if self.skip_rigid_transformation_new != self.configuration.skip_rigid_transformation:\n self.configuration.skip_rigid_transformation = self.skip_rigid_transformation_new\n self.configuration_changed = True\n\n if self.skip_optical_flow_new != self.configuration.skip_optical_flow:\n self.configuration.skip_optical_flow = self.skip_optical_flow_new\n self.configuration_changed = True\n\n self.close()", "def reset_optim(self):\r\n\t\tself.optimizer.state = defaultdict(dict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a file and create a list of dictionaries of URL parameters, if the key 'pageName' is present
def create_parsed_dicts(file, list_of_var=None):
    req = []
    firstlines = []
    parsed_urls = []
    with_pageName_urls = []
    lower_list_of_keys = [i.lower() for i in list_of_var]
    specified_key_list_of_dicts = []
    with open(file) as json_file:
        data = json.load(json_file)
        for p in data:
            req.append(p['request'])
    for k in req:
        firstlines.append(k['header']['firstLine'])
    for l in firstlines:
        parsed_urls.append(prs.parse_qs(l))
    for m in parsed_urls:
        for k, v in m.items():
            m[k] = "".join(v)
    for p in parsed_urls:
        p = {k.lower(): v for k, v in p.items()}
        specified = {}
        index = [ky for ky, va in p.items() if ky.startswith('get ')]
        if len(index) > 0:
            for k in lower_list_of_keys:
                specified.update({k: p.get(k, p.get(k, "Not Present"))})
            specified_key_list_of_dicts.append({"call": index[0], "p": specified})
    return specified_key_list_of_dicts
[ "def parse_page(fpath):\n f = open(fpath, 'r')\n content_begin = False\n d = {}\n f_link = []\n for line in f:\n if line == '---------------------\\n':\n content_begin = True\n continue\n if content_begin == False:\n page_id = int( line.strip().replace('page', '') )\n if page_id not in f_link:\n f_link.append(page_id)\n else:\n d['content'] = line.strip()\n d['f_link'] = f_link\n d['r_link'] = []\n f.close()\n return d", "def parseUrlToDictionary(self, path):\n params = {}\n parsed_path = urlparse(path)\n sb_output('parsed_path=%s' % str(parsed_path) )\n\n try:\n params = dict([p.split('=') for p in parsed_path[4].split('&')])\n except:\n pass\n\n for key in params:\n parsedkey = urllib.parse.unquote_plus(params[key])\n sb_output(parsedkey,2)\n params[key] = parsedkey\n\n return params", "def get_page_properties(page,raw_file,plugs):\n #>>> page=dict()\n #>>> raw_file=['---\\n','prop: prop value\\n','sort_info: 2\\n','---\\n','\\n','text']\n #>>> get_page_properties(page,raw_file)\n #>>> print page\n imax=1\n while not ('---\\n' == raw_file[imax]):\n imax+=1\n for i in range(imax-1):\n lst=raw_file[i+1].split(': ')\n if len(lst)>1:\n page[lst[0]]=lst[1][:-1]\n else:\n lst=raw_file[i+1].split(':')\n print(('Warning in page {page}:\\n\\t Property {prop} is not defined properly \"name: value\" \\n\\t For empty property use \"name: \"'.format(prop=lst[0],page=page['srcname'])))\n page['raw_text']=''.join(raw_file[imax+1:])\n for prop in lst_prop_convert:\n page[prop[0]]=prop[1](page[prop[0]])\n for mod in plugs:\n try:\n for prop in mod.lst_prop_convert:\n page[prop[0]]=prop[1](page[prop[0]])\n except AttributeError:\n pass", "def parse(input_file):\n # TODO: is json or xml more suitable for the input file format?\n parameters = dict()\n\n try:\n # open and parse the file\n pass\n except FileNotFoundError:\n print(\"Input file '%s' not found\" % input_file)\n sys.exit(-1)\n # add other exceptions\n except:\n print(\"Unexpected error!\")\n traceback.print_exc()\n sys.exit(-1)\n finally:\n # close the file and maybe clean up\n pass\n\n return parameters", "def parse_page_details(content, filename=None):\n\n match = re.match('\\s*\\+\\+\\+(.*?)\\+\\+\\+', content, re.M | re.S)\n if not match:\n return content, {}\n\n content = content[match.end():]\n options = load_page_details(match.group(1), filename)\n return content, options", "def parse_file(file, dict={}):\n try:\n f = open(file)\n except IOError:\n return dict\n else:\n lines = f.readlines()\n vlines =[]\n for line in lines:\n if not re.match(r\"^\\s*$\",line) and not re.match(r\"^#.*$\",line):\n vlines.append(line.strip('\\n'))\n lines = []\n while len(vlines) >0:\n i = vlines.pop(0)\n i =re.sub(r\"\\s*#.*$\",\"\",i)\n while i.endswith('\\\\'):\n try:\n o = vlines.pop(0)\n except IndexError:\n o = \"\"\n i = i.rstrip('\\\\') + o.strip()\n lines.append(i)\n\n for opt in lines:\n [name,val] = opt.split(\"=\",1)\n dict[name] = val.strip('\"')\n \n return dict\n\n #for file in file_list:\n # default_dict=_parse_file(file,default_dict)\n #parser = OptionParser(option_list=option_list)\n #parser.set_defaults(default_dict)\n #(options,args) = parser.parse_args(args)\n #return options", "def parse_list_of_urls(url_list_file):\r\n tmp = list()\r\n maxPages = 1 << 16\r\n\r\n if os.path.exists(url_list_file):\r\n with open(url_list_file, 'r') as file:\r\n for line in file:\r\n if line:\r\n items = line.strip().split()\r\n url = items[0] if len(items) > 0 else ''\r\n offset = items[1] if len(items) > 1 else -1\r\n try:\r\n offset = int(offset)\r\n 
if offset < 0:\r\n offset = maxPages\r\n except ValueError:\r\n offset = maxPages\r\n\r\n if url:\r\n tmp.append((url, offset))\r\n return tmp", "def _get_page_args(self, pages = {}):\n for arg in request.args:\n re_match = re.findall('page_(.*)', arg)\n if re_match:\n pages[re_match[0]] = int(request.args.get(arg))\n return pages", "def generate_pages(xml_file):\n context = ElementTree.iterparse(xml_file, events=(\"start\", \"end\"))\n context = iter(context)\n _, root = next(context)\n fields = {}\n depth = -1\n for event, element in context:\n ## Top-level logic\n if event == \"start\" and has_tag(element, \"page\"):\n fields = {}\n depth = 0\n elif event == \"end\" and has_tag(element, \"page\"):\n if validate_fields(fields):\n yield fields\n root.clear() # Prevents memory issues.\n elif event == \"start\":\n depth += 1\n elif event == \"end\":\n depth -= 1\n ## Fields\n if event == \"end\" and has_tag(element, \"title\"):\n fields[\"title\"] = element.text\n elif event == \"end\" and has_tag(element, \"text\"):\n fields[\"text\"] = element.text\n elif event == \"end\" and has_tag(element, \"redirect\"):\n fields[\"redirect\"] = element.attrib[\"title\"]\n elif event == \"end\" and has_tag(element, \"ns\"):\n fields[\"ns\"] = element.text\n # Using depth to ensure we get only the top-level page id, and not some\n # other id (like a revision id).\n elif event == \"end\" and has_tag(element, \"id\") and depth == 0:\n fields[\"id\"] = int(element.text)", "def get_params_from_url(logger, url, param_name_array):\n param_dict = {}\n parsed = urlparse.urlparse(url)\n params_dict = parse_qs(parsed.query)\n for param_name in param_name_array:\n param_value = params_dict.get(param_name, [''])[0]\n param_dict[param_name] = param_value\n return param_dict", "def parse_map_file(path):\n content = {}\n with open(path) as fp:\n for line in fp:\n if not line or line.startswith(\"#\"):\n continue\n name, value = line.split(\"=\", 1)\n content[name.strip()] = value.strip()\n return content", "def extract_params(self, url_path):\n return self.regex.fullmatch(url_path).groupdict()", "def parse(file):\n sections = _parse(file)\n pre_commands = sections.get('pre', [])\n post_commands = sections.get('post', [])\n params = Parameters(_parse_parameters(sections.get('params', [])))\n cmds = sections.get('jobs', [])\n fmt = _get_name_fmt(len(cmds))\n commands = collections.OrderedDict([(fmt.format(i), c.replace('${LINE}', fmt.format(i))) for i, c in enumerate(cmds)])\n return pre_commands, commands, post_commands, params", "def parse_file_with_kwargs(kwargs):\n\n return parse_file(**kwargs)", "def url_param_dict_to_list(url_items_dict):\n params_list = \"\"\n for key, value in url_items_dict:\n if key != \"page\":\n params_list += \"&%s=%s\" % (key, value)\n\n return params_list", "def parse_parameters_file(lines):\n param_dict = {}\n for line in lines:\n line = line.strip()\n if line:\n values = line.split('\\t')\n if len(values) > 2:\n raise ValueError(\"Unrecognized parameters file format\")\n (param, param_values) = values\n param_dict[param] = param_values.split(',')\n return param_dict", "def get_page_data(url):\n req = requests.get(url)\n soup = BeautifulSoup(req.content, 'html.parser')\n return {\n \"url\": url,\n \"title\": get_title(soup),\n \"h1\": get_h1(soup),\n \"h2\": get_h2(soup),\n }", "def GetInputs(file_path):\r\n ajson = open(file_path,'r')\r\n input_json = json.load(ajson)\r\n start_url = input_json['start']\r\n end_url = input_json['end']\r\n start_title = GetTitleOfLink(start_url)\r\n 
end_title = GetTitleOfLink(end_url)\r\n ajson.close()\r\n return start_title,end_title", "def extract_pages(file):\n\n parser = PDFParser(file)\n document = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n device = PDFPageAggregator(rsrcmgr, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n layout = device.get_result()\n yield layout" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a Pandas DataFrame to an Excel-readable format
def convert_to_excel(df, file_name):
    df_excel = df.to_excel(file_name)
    return df_excel
[ "def test_convert_df_to_excel_file():\n in_df = pd.DataFrame([[1, 2], [1, 2]])\n expected_df = pd.DataFrame([[1, 2], [1, 2]])\n\n out_excel_file = convert_df_to_excel_file(in_df, index=False)\n out_df = pd.read_excel(out_excel_file)\n\n assert_frame_equal(out_df, expected_df)", "def export_data_frame_to_excel(df, output_file):\n wb = Workbook()\n ws = wb.active\n for r in dataframe_to_rows(df, index=False, header=True):\n ws.append(r)\n # Apply color and border\n thin = Side(border_style=\"thin\", color=\"000000\")\n for col in ['A', 'B', 'C', 'D']:\n for index in range(1, len(df) + 2):\n ws[col + str(index)].fill = PatternFill(\"solid\", fgColor=\"D0CECE\")\n ws[col + str(index)].border = Border(top=thin, left=thin, right=thin, bottom=thin)\n for index in range(1, len(df) + 2):\n ws['E' + str(index)].fill = PatternFill(\"solid\", fgColor=\"C6E0B4\")\n ws['E' + str(index)].border = Border(top=thin, left=thin, right=thin, bottom=thin)\n ws['F' + str(index)].fill = PatternFill(\"solid\", fgColor=\"FDE9D9\")\n ws['F' + str(index)].border = Border(top=thin, left=thin, right=thin, bottom=thin)\n # Apply bold\n for col in ['A', 'B', 'C', 'D', 'E', 'F']:\n ws[col + '1'].font = Font(bold=True)\n for index in range(1, len(df) + 2):\n ws['A' + str(index)].font = Font(bold=True)\n # Apply column width\n ws.column_dimensions['A'].width = 21.71\n ws.column_dimensions['B'].width = 8.43\n ws.column_dimensions['C'].width = 14.57\n ws.column_dimensions['D'].width = 9\n ws.column_dimensions['E'].width = 26.57\n ws.column_dimensions['F'].width = 42.86\n wb.save(output_file)", "def save_data_frame(data_frame, save_dir, name='coordinates.xls'):\n\n save_dir = pathlib.PurePath(save_dir, name)\n data_frame.to_excel(save_dir, columns=['Coordinates'])", "def _export_to_xls(self):\n # cellstyle = xlwt.easyxf(\n # 'align: wrap on, vert top, horiz left;', num_format_str='general'\n # )\n\n # response = HttpResponse(mimetype=\"application/csv\")\n response = self._get_initial_response(mimetype=\"application/csv\")\n response['Content-Disposition'] = \\\n 'attachment; filename=db_store_export_data.xls'\n wb = xlwt.Workbook(encoding=\"UTF-8\")\n ws = wb.add_sheet('Data')\n\n algn1 = xlwt.Alignment()\n algn1.wrap = 1\n style1 = xlwt.XFStyle()\n style1.alignment = algn1\n\n row = 0\n\n data_headers = self._get_data_headers()\n data_keys = data_headers.keys()\n data_values = data_headers.values()\n\n for cell, value in enumerate(data_values):\n ws.write(row, cell, text_type(value), xlwt.easyxf('font: bold on'))\n ws.col(cell).width = 256 * 20 # about 20 chars wide\n cell += 1\n row += 1\n\n for obj in self.queryset:\n data = json.loads(obj.saved_data)\n for cell, key in enumerate(data_keys):\n ws.write(row, cell, text_type(data.get(key, '')))\n cell += 1\n\n row += 1\n\n wb.save(response)\n return response", "def serialize_dataframe(report, compressed=True):\n result = report.to_csv(index=False)\n if compressed:\n result = bz2.compress(result.encode())\n return result", "def add_sheet(self, df, sheet_name=\"Sheet1\", zoom=85, freeze_row=1, freeze_col=0, cols_to_print=None,\n depth_col_name='', cols_to_indent=None, highlight_depth=False, highlight_col_limit=0,\n group_rows=False, print_index=True, col_formats={}, col_style={}):\n\n # Create output DF with only cols to print and replace N/A with empty string\n if cols_to_print:\n output_df = df[cols_to_print] # .where((pd.notnull(df)), '')\n else:\n output_df = df # .where((pd.notnull(df)), '')\n\n # If index column exists, need offset to shift all other columns\n 
index_col_offset = 1 if print_index else 0\n\n # Write data to Excel\n worksheet = self.workbook.add_worksheet(sheet_name)\n\n # Set zoom and freeze panes location\n worksheet.set_zoom(zoom)\n worksheet.freeze_panes(freeze_row, freeze_col)\n\n # UGLY!! Add custom format\n if 'custom' in col_formats.values():\n custom_format={}\n for col_name, style in col_style.items():\n custom_format[col_name] = self.workbook.add_format(style)\n\n\n # Write the column headers with the defined format.\n if print_index:\n worksheet.write(0, 0, 'Index', self.header_format)\n for col_num, value in enumerate(output_df.columns.values):\n worksheet.write(0, col_num + index_col_offset, value, self.header_format)\n\n # Iterate through DF rows and write to Excel file\n for row_num in range(len(output_df)):\n\n # Get the row depth (if needed for highlight, indent or grouping)\n if highlight_depth or cols_to_indent or group_rows:\n depth = int(df[depth_col_name].iloc[row_num])\n else:\n depth = None\n\n format_option = 'highlight' if highlight_depth else None\n\n # Write optional index first using highlighted or plain index format\n print_format = self.cell_format[('index', depth, format_option)]\n if print_index:\n worksheet.write(row_num + 1, 0, output_df.index[row_num], print_format)\n\n # Write rest of the row\n for col_num in range(len(output_df.columns)):\n\n col_name = output_df.columns[col_num]\n\n # Check if column should be highlighted and/or indented\n indent_col = cols_to_indent is not None and col_name in cols_to_indent\n highlight_col = highlight_depth and \\\n (highlight_col_limit == 0 or col_num < highlight_col_limit - index_col_offset)\n\n # Choose the correct format option to use\n if indent_col and highlight_col:\n format_option = 'indent_highlight'\n elif indent_col:\n format_option = 'indent'\n elif highlight_col:\n format_option = 'highlight'\n else:\n format_option = None\n\n # Get value from DF\n df_value = output_df.iloc[row_num, col_num]\n\n # Set as empty string if null - values could be lists also, hence the .any()\n value = df_value if pd.notnull([df_value]).any() else ''\n value_type = output_df.dtypes[col_num] if pd.notnull([df_value]).any() else None\n\n # Write data as number or string\n if col_formats.get(col_name)=='custom':\n worksheet.write(row_num + 1, col_num + index_col_offset, value,\n custom_format[col_name])\n\n elif value_type in ['float64'] or col_formats.get(col_name)=='float':\n worksheet.write_number(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('float', depth, format_option)])\n\n elif value_type in ['int64', 'Int64'] or col_formats.get(col_name)=='int':\n worksheet.write_number(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('default', depth, format_option)])\n\n elif value_type in ['datetime64[ns]', '<M8[ns]'] or col_formats.get(col_name)=='date':\n worksheet.write_datetime(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('date', depth, format_option)])\n\n elif col_formats.get(col_name)=='string':\n worksheet.write_string(row_num + 1, col_num + index_col_offset, str(value),\n self.cell_format[('default', depth, format_option)])\n\n else:\n worksheet.write(row_num + 1, col_num + index_col_offset, str(value),\n self.cell_format[('default', depth, format_option)])\n\n # Set optional grouping of rows\n if group_rows:\n if depth > 0:\n worksheet.set_row(row_num + 1, None, None, {'level': depth})\n\n # Autofit column width\n for col_num, width in enumerate(self.__get_col_widths(output_df)):\n\n # After the 
index column, check type and override width if necessary\n if col_num > 0:\n if output_df.dtypes[col_num - 1] in ['float64']:\n width = 8\n elif output_df.dtypes[col_num - 1] in ['datetime64[ns]']:\n width = 8\n elif width>80:\n width = 8\n\n # If not printing index, skip to the first column and offset\n if not print_index:\n if col_num == 0: continue\n col_num -= 1\n\n worksheet.set_column(col_num, col_num, width + 2)", "def create_example_xl():\n if XL_FILE.exists(): # Don't need to recreate it\n return\n\n df = pd.DataFrame(\n {\n \"tracking\": [\"F12\", \"U23\", \"F34\", \"U45\"],\n \"invoice\": [\"I120\", \"I230\", \"I340\", \"I450\"],\n }\n )\n df.to_excel(XL_FILE, index=False)", "def __format(self, df):\n df = self.__numerics_to_strings(df)\n df = self.__roads_to_columns(df)\n return df", "def guardar(df_limpio):\n df_limpio.to_excel('nydb.xlsx',sheet_name='test1')\n df_limpio.to_csv('nydb.csv')", "def pretty(self, df):\n pretty_df = pd.DataFrame(columns=df.columns)\n logger.info(\"Converting values in anonymized dataframe to their pretty versions\")\n for col in df.columns:\n for index, value in tqdm(df[col].iteritems(), total=len(df), desc=col):\n pretty_df.at[index, col] = convert_to_pretty(value, self.__config.get_default_date_format())\n return pretty_df", "def pandas_df_to_markdown_table(df: pd.DataFrame) -> str:\n\n fmt = ['---' for i in range(len(df.columns))]\n df_fmt = pd.DataFrame([fmt], columns=df.columns)\n df_formatted = pd.concat([df_fmt, df])\n return Markdown(df_formatted.to_csv(sep=\"|\", index=False)).data", "def data_frame_to_html(data_frame: DataFrame) -> str:\n return data_frame.to_html(float_format=\"%.2f\", index=False,\n classes=[\"table table-striped table-sm\"])", "def rows_to_excel(self, rows, top=1, left=0):\n n_rows = len(rows)\n n_cells = len(rows[0])\n for i in range(n_rows):\n row = rows[i]\n for j in range(n_cells):\n self.sheet.write(top+i, left+j, row[j])\n return self.sheet", "def export(df, to_path, **kwargs):\n filebase, ext = os.path.splitext(to_path)\n ext = ext.lower()\n if ext is '.xlsx':\n df.to_excel(to_path, **kwargs)\n elif ext in ['.txt', '.csv']:\n df.to_csv(to_path, **kwargs)\n else:\n raise NotImplementedError(\"Not sure how to export '{}' files.\".format(ext))", "def to_xls(self, rowno, ws):\n for name, field in self._meta.fields.iteritems():\n if name[0] != 'c':\n continue\n colnum = col2num(name[1:])-1\n val = field.get_valor(self)\n try:\n ws.write(rowno, colnum, val)\n except:\n import logging\n logging.warning(\"No puedo XLSear %s\" % unicode(self))\n logging.exception(\"Error con linea:%s, column: %s, valor:%s tabla: %s\" % (\n rowno, name, val, self._meta.table_name))\n raise", "def write_dataframe(workbook, df, sheet_name, tab_color=None):\n\t# Create a new worksheet\n\twksh = workbook.book.add_worksheet(name=sheet_name)\n\n\t# Have to add worksheet to Pandas list of worksheets\n\t# (https://stackoverflow.com/questions/32957441/putting-many-python-pandas-dataframes-to-one-excel-worksheet)\n\tworkbook.sheets[sheet_name] = wksh\n\n\t# Only set tab_color if not None\n\tif tab_color:\n\t\twksh.set_tab_color(tab_color)\n\n\t# Write DataFrame to excel worksheet\n\tdf.to_excel(workbook, sheet_name=sheet_name)\n\treturn None", "def df2bytes(dataframe):\n return '\\n'.join(\n [','.join(dataframe), ] +\n [','.join(map(str, row)) for row in dataframe.values]\n ).encode()", "def convert_df_csv(self, data_frame):\n\n if data_frame is None: return\n\n return data_frame.to_csv(index=True, encoding='utf-8')", "def 
write_dataframe_to_worksheet(\n *,\n ws: Worksheet,\n df: pd.DataFrame,\n index: bool = False,\n header: bool = True\n) -> Worksheet:\n for row in dataframe_to_rows(\n df=df,\n index=index,\n header=header\n ):\n ws.append(row)\n return ws", "def dataframe_to_wiki(df, float_digits=5, title='Awesome table'):\n table = '^ %s ' % title + '^' * (len(df.columns) - 1) + '^\\n'\n table += '^ ' + ' ^ '.join(df.columns) + ' ^\\n'\n\n def do_round(x):\n if isinstance(x, float):\n return round(x, float_digits)\n return x\n\n for _, row in df.iterrows():\n table += \"| \" + ' | '.join([str(do_round(x)) for x in row.values.tolist()]) + ' |\\n'\n return table" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates writers for files
def create_writers(
    image_path: Path,
    files: list,
    output_folder: Path,
    tmp_folder: Path,
) -> list:
    writers = []

    # get info
    with WholeSlideImage(image_path) as wsi:
        shape = wsi.shapes[wsi.get_level_from_spacing(SPACING)]
        real_spacing = wsi.get_real_spacing(SPACING)

    for file in files:
        if (output_folder / file["name"]).exists():
            f"Skipping prediction for {file['name']}, already exists in output folder: {output_folder}"
            continue

        writers.append(
            _create_writer(
                file=file,
                output_folder=output_folder,
                tmp_folder=tmp_folder,
                real_spacing=real_spacing,
                shape=shape,
            )
        )

    return writers
[ "def createFiles() -> None:\n\n try:\n mkdir('C:/tmp/')\n except:\n pass\n try:\n mkdir(path)\n except:\n pass\n open(dirfile, 'w+')\n open(path + 'Bank.txt', 'w+')\n open(expenseDtbPath, 'w+')\n open(path + 'FirstTime.txt', 'w+')\n open(path + 'LastOpened.txt', 'w+')\n f = open(path + 'OldExpenses.db', 'w+')\n f.close()", "def build_writers(self):\n # Assume the default print/log frequency.\n return [\n # It may not always print what you want to see, since it prints \"common\" metrics only.\n CommonMetricPrinterWithComet(self.max_iter), # 重载实现\n JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, \"metrics.json\")),\n TensorboardXWriter(self.cfg.OUTPUT_DIR),\n ]", "def create_logfiles(self):\n\n try:\n os.mkdir(self.today)\n except:\n pass\n t = self.now\n head, rtail = os.path.split(self.resource_file)\n #self.lfname = self.today + '/' + self.lfname + '.' + t\n self.rlfname = self.today + '/' + self.rlfname + '.' + t\n self.rufname = self.today + '/' + 'rwfile' + '.' + t + '-' + rtail\n\n #self.logFile = open(self.lfname, 'w')\n self.rlogFile = open(self.rlfname, 'w')\n self.raw_usage = open(self.rufname, 'w')\n\n #pass", "def writeParAndInputFiles(self):\n pass", "def createTrainAndValFiles(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrainAndVal(a_patterns)", "def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]", "def createDataFiles(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrainAndTest(a_patterns)", "def generate_files():\n \n # Generate a default job_file if necessary\n genfilename = \"job_file\"\n if os.path.exists(genfilename):\n logging.info(\"'job_file' already exists.\")\n else:\n genfile = open(genfilename, \"w\")\n genfile.write( \\\n \"username = '<user>'\\n\" \\\n \"nodes = [1]\\n\" \\\n \"foldername = 'experiment'\\n\" \\\n \"numreps = 1\\n\" \\\n \"exename = '<exe>'\\n\" \\\n \"exeinput = '%s'\\n\" \\\n \"exeseeds = 'integer'\\n\" \\\n )\n genfile.close()\n \n # Generate the config directory if necessary\n gendirname = \"config\"\n if os.path.exists(gendirname):\n logging.info(\"'config' directory already exists\")\n else:\n os.makedirs(gendirname)", "def create_files(self) -> None:\n data = self.data()\n for file in sorted(self.files):\n logger.debug(\n \"node(%s) service(%s) template(%s)\", self.node.name, self.name, file\n )\n rendered = self._get_rendered_template(file, data)\n file_path = Path(file)\n self.node.create_file(file_path, rendered)", "def make_file_writer(cls, fname, translatedict={}):\n filehandle = open(fname, 'w')\n outputhandler = XMLWriter(filehandle)\n return cls(outputhandler, None, translatedict)", "def get_writers(self, dir_base):\n out_d = {}\n for x in self.starts:\n out_d[x] = Chem.SDWriter(os.path.join(dir_base, x))\n return out_d", "def write_files_into_two_directories_at_different_locations(self, counter, namef, path1, named1, named2, path2, sentences):\n counter = counter\n namef = namef\n src1 = path1\n src2 = path2\n named1 = named1\n named2 = named2\n sentences = sentences\n x = 0\n while x != counter:\n self.create_directory(named1, src1)\n self.create_directory(named2, src2)\n self.write_through_all_directory(counter, namef, src1, sentences)\n self.write_through_all_directory(counter, namef, src2, sentences)\n x += 1", "def multiple_runs_write_files(file):\r\n ## Run for Titles \r\n WriteFiles(file, tfidf = True, 
class_type = 'T')\r\n WriteFiles(file, tfidf = False, class_type = 'T')\r\n\r\n ## Run for Abstracts\r\n WriteFiles(file, tfidf = True, class_type = 'A')\r\n WriteFiles(file, tfidf = False, class_type = 'A')\r\n\r\n ## Run for Metadata\r\n WriteFiles(file, tfidf = True, class_type = 'M')\r\n WriteFiles(file, tfidf = False, class_type = 'M')\r\n\r\n ## Run for Titles and Meta\r\n WriteFiles(file, tfidf = True, class_type = 'TM')\r\n WriteFiles(file, tfidf = False, class_type = 'TM')", "def _create_filename(self, filename):", "def create_temp_files(temp_dir, files):\n\n for drive_type, drive_files in files.items():\n folder_path = os.path.join(temp_dir, drive_type + '/')\n os.mkdir(folder_path)\n for file_ in drive_files:\n # replace reserved characters in title to assure valid filename\n filename = KIOutils.strip_invalid_characters(file_['title'])\n filename = '{}.{}'.format(os.path.join(temp_dir, folder_path, filename), drive_type)\n with open(filename, 'w') as f:\n f.write(file_['id'])", "def writeTrades(trade_factory, filename):\n # TODO-implement\n pass", "def write(self, resources):\n pass", "def create_task_files(needqa_folder, **kwargs):\n for filename, output in kwargs.items():\n print(f'Task: {filename} created')\n write_to_file(needqa_folder.joinpath(filename), output)", "def _open_summary_writers(self):\n if self._output_dir is not None:\n _log(f'Metrics will be written in {self._output_dir}.', stdout=False)\n train_writers = [jaxboard.SummaryWriter(os.path.join(output_dir, 'train'))\n for output_dir in self._output_dir_per_train_task]\n eval_writers = [jaxboard.SummaryWriter(os.path.join(output_dir, 'eval'))\n for output_dir in self._output_dir_per_eval_task]\n try:\n yield (train_writers, eval_writers)\n finally:\n for writer in train_writers + eval_writers:\n writer.close()\n _log(f'Metrics were written in {self._output_dir}', stdout=False)\n else:\n yield ([None] * len(self._tasks), [None] * len(self._eval_tasks))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delay the search for Kinopoisk links in the references of a Wikipedia page
def search_link_signal(content_type_id, object_id, page, **_):
    # https://github.com/goldsmith/Wikipedia/issues/78
    try:
        links = page.references
    except KeyError:
        return
    else:
        search_link.delay(content_type_id, object_id, links, page.html())
[ "def wiki_thread(goal, article, queue, visited, sub_articles, keywords, sleeptime=0.01):\n title_exp = re.compile(\"<title>(.+) -.+</title>\")\n\n while True:\n time.sleep(sleeptime) # Slight delay to avoid denied responses\n l = sub_articles.next_article()\n # Stop when article list is empty\n if not l:\n break\n # Only search if link has not been visited already\n if not visited.contains(l):\n visited.append(l) # Add to visited list\n if l == goal:\n # The correct link was found!\n print(\"Done!\")\n sub_articles.clear()\n return Article(l, article).path\n elif \"en.w\" in l and \"/Main_Page\" not in l:\n # Only check english wiki links and do not go to the Main Page. No cheating!\n\n # Tries to get the html repeatedly until the request is accepted.\n # In case a request is denied.\n html = None\n while not html:\n try:\n html = get_html(l)\n except:\n pass\n\n content = html.lower()\n item = Article(l, article)\n # Check for keywords in the HTML, to grant the Article a score\n for i in range(len(keywords)):\n for k in keywords[i]:\n if k in content:\n # Grant points if keywords are in the HTML\n item.score += [1, 10, 50][i]\n if k.replace(\" \", \"_\") in l.lower():\n # Grant more points if keywords are in the URL\n item.score += [5, 30, 100][i]\n # Insert Article into search queue in accordance to its score\n inserted = queue.insert(item)", "def find_articles_for_wiki_race(url):\n html = get_html(url) # just to \"use function from 'previous' task\"\n link_list = find_urls(html)\n new_list = []\n for i in range(len(link_list)):\n text = link_list[i]\n if text[8:10] == 'en':\n text = re.sub(r\"http[s]://\", '', text) # removes http or https\n text = re.sub(r'^.*?\\.', '', text) # removes 'language\n if \"wikipedia.org\" == text[:13]:\n # Checking if this link is already in the list\n # However it makes running time slower\n if link_list[i] not in new_list:\n # May need to change regex later to take out links with : in it (we dont need them)\n # But not I will use slow method to do it\n if link_list[i].find(\":\", 7, -1) == -1: # we found link that does not have :\n new_list.append(link_list[i])\n\n\n return new_list", "def find_next_article_forward(article, target_article):\n global w\n text_init = article.links\n text_targ = get_link_freq(target_article.links)\n all_links = []\n \n for link in article.links:\n if link == target_article.title:\n return target_article\n \n for i in range(len(text_init)-1):\n print article.title\n all_links.append(get_link_freq(w.search(text_init[i]).links))\n print i, 'of', len(text_init) # Displays progress of hyperlink parsing\n \n for i in range(len(text_init)-2):\n avg1 = (links_analysis(text_targ, all_links[i]) + compare_wiki(text_targ, all_links[i])) / 2.0\n avg2 = (links_analysis(text_targ, all_links[i+1]) + compare_wiki(text_targ, all_links[i+1])) / 2.0\n if avg1 > avg2:\n article_name = text_init[i]\n \n return w.search(article_name)", "def wiki_search(Search):\n\n #Capitalize search string and generate wikipedia API query\n page = Search.title()\n query = \"https://en.wikipedia.org/w/api.php?action=query&format=json&titles=\"\n query += page\n query += \"&prop=extracts&explaintext\"\n\n #Query wikipedia API to obtain JSON\n response = requests.get(query).json()\n\n #Parse json to obtain extracted text\n page_data = next(iter(response['query']['pages'].values()))\n page_extract = \"\"\n\n #Attempt to get extracted data and return on failure\n try:\n page_extract = page_data['extract']\n except KeyError as e:\n return statement(\"Sorry, I 
haven't heard of that.\")\n\n #Apply TLDR algorithm to extracted data\n page_tldr = tldr(page_extract, threshold=2)\n\n print(\"TLDR: \" + page_tldr)\n\n return statement(page_tldr)", "def test_wikipedia_scraper():\n searches = ['tiger']\n ids = ['tiger123']\n lookups: dict = retrieve_wikipedia_pages(searches, ids)\n lookup = lookups[ids[0]]\n called = lookup._called\n for key, call_happened in called.items():\n if key in DOWNLOAD_ATTRIBUTES:\n assert call_happened, f\"Have not called {key}\"", "def search_links(self) -> None:\n # connect to LinkedIn\n self.connect()\n logging.info('Inspect job search results')\n # Scroll down the `infinit` page\n self.scroll_job_results()\n # Collects all the links toward job ad pages\n self.job_links = self.get_job_links()\n\n logging.info('All available jobs ads collected.')\n # teminates the bot\n self.driver.close()\n # self.save_job_links(self.job_links) # save the links", "def wikipedia_search(search_term):\r\n\r\n search_term = replace_spaces(search_term)\r\n\r\n #get page id\r\n request_id = urllib.request.Request(\"https://en.wikipedia.org/w/api.php?action=query&titles=%s&format=json\" %search_term, headers={\"User-Agent\":\"BossBot/v1.5\"})\r\n request_id = json.loads(urllib.request.urlopen(request_id).read())\r\n page_id = list(request_id[\"query\"][\"pages\"].keys())[0]\r\n\r\n #get actual url\r\n request_page = urllib.request.Request(\"https://en.wikipedia.org/w/api.php?action=query&prop=info&pageids=%s&inprop=url&format=json\" %page_id, headers={\"User-Agent\":\"BossBot/v1.5\"})\r\n request_page = json.loads(urllib.request.urlopen(request_page).read())\r\n return request_page[\"query\"][\"pages\"][str(page_id)][\"fullurl\"]", "def search_url(self):\n self.pdfs_links = []\n self.pdf_index = 0\n\n # Return if url response error\n for link in self.links:\n try:\n response = network.get_response_from_url(link)\n # Check if url is already a pdf, if not search for all pdfs in the url.\n if network.is_response_pdf_file(response):\n self.pdfs_links.append(link)\n else:\n self.pdfs_links.extend(network.extract_pdfs_links(response))\n except Exception:\n self.append_to_text_area(\"Não foi possível pesquisar neste link: \" + link + \"\\n\")\n self.append_to_text_area(\"Tente estas instruções:\\n\")\n self.append_to_text_area(\" - Verificar a conexão com a internet.\\n\")\n self.append_to_text_area(\" - Tentar um novo link.\\n\\n\")\n continue\n\n if len(self.pdfs_links) == 0:\n self.append_to_text_area(\"\\nNenhum PDF encontrado.\\n\")\n self.end_search()\n return\n\n self.append_to_text_area(\"Estas informações serão salvas na pasta: %s\\\\log\\\\\\n\\n\" % os.getcwd(), True)\n self.append_to_text_area(\"(%d) PDFs encontrados.\\n\" % len(self.pdfs_links), log=True)\n self.append_to_text_area(\"Começando busca pela expressão/expressões:\\n\", log=True)\n for expression in self.expressions:\n self.append_to_text_area(\" \" + expression + \"\\n\", tag='success', log=True)\n self.append_to_text_area(\"Nos links:\\n\", log=True)\n for link in self.links:\n self.append_to_text_area(\" \" + link + \"\\n\", tag='success', log=True)\n self.append_to_text_area(\"\\n\", log=True)\n\n self.queue = Queue.Queue()\n PdfStringSearcherTask(self.queue, self.pdf_index, self.pdfs_links, self.expressions).start()\n\n self.master.after(100, self.process_queue)", "def search_wikipedia(term: str) -> Future[HTTPResponse]:\n url = \"http://en.wikipedia.org/w/api.php\"\n\n params: Dict[str, str] = {\"action\": \"opensearch\", \"search\": term, \"format\": \"json\"}\n # Must set 
a user agent for non-browser requests to Wikipedia\n user_agent = (\n \"RxPY/3.0 (https://github.com/dbrattli/RxPY; dag@brattli.net) Tornado/4.0.1\"\n )\n\n url = url_concat(url, params)\n\n http_client = AsyncHTTPClient()\n return http_client.fetch(url, method=\"GET\", user_agent=user_agent)", "def wikipedia(self, irc, msg, args, optlist, optinput):\n\n # first, check if we have a url.\n if not self.registryValue('wikiUrl') or self.registryValue('wikiUrl') == \"Not set\":\n irc.reply(\"wikipedia URL not set. see 'config help supybot.plugins.Wikipedia.wikiUrl'\")\n return\n\n # handle getopts.\n args = {'link': self.registryValue('showLink')}\n for (key, value) in optlist:\n if key == 'link':\n args['link'] = True\n\n # do the search.\n results = self._opensearch(optinput, 1)\n if results[0] == 'error':\n irc.reply(\"ERROR :: {0}\".format(results[1]))\n return\n\n # main logic.\n results = self._wikiquery(results[1][0]['text'])\n if results[0] == 'error':\n irc.reply(\"ERROR :: {0}\".format(results[1]))\n return\n else:\n results = results[1]\n\n if self.registryValue('disableANSI'):\n irc.reply(\"{0} :: {1}\".format(results['text'], results['description']))\n else:\n irc.reply(\"{0} :: {1}\".format(self._red(results['text']), results['description']))\n\n if args['link']:\n irc.reply(\"{0}\".format(results['link']))", "def load_news_page(l):\n simulate_loading_news_page(l)\n l.interrupt()", "def jump_internal(dest, sleep_time=0):\n go(f\"https://www.l---e---.com/{dest}\")\n scroll_down()\n sleep(get_rand_time() if sleep_time == 0 else sleep_time)", "def main():\n links = getLinks(link)\n counter = 1\n while len(links):\n newArticle = random.choice(links).attrs['href']\n print(f'Step {counter}:\\n\\t{newArticle}')\n links = getLinks(newArticle)\n counter += 1", "def get_wiki_pages(countries, pause=3):\n base_wiki_url = \"https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_\"\n pause_count = 0\n for country in countries:\n if pause_count > 0:\n sleep(pause)\n url = base_wiki_url + country\n wiki_page = requests.get(url)\n wiki_page.raise_for_status()\n yield BeautifulSoup(wiki_page.text, \"html.parser\")\n pause_count += 1", "def get_blurb_with_delay(self, job, delay):\n sleep(delay)\n\n search = job['link']\n log_info(f'delay of {delay:.2f}s, getting glassdoor search: {search}')\n\n res = self.s.get(search).text\n return job, res", "def search_references(database, tweet_html, tweet_id):\n try:\n tweet = BeautifulSoup(tweet_html)\n except:\n # Couldn't fetch article.\n return [] \n \n checked_words = []\n checked_urls = []\n articles = []\n if tweet.a != None:\n for at in tweet.find_all('a', {'class': 'twython-mention'}):\n account = at.text\n source_url = \"twitter.com/\" + account[1:]\n b = database.add_source(Source(source_url))\n print \"Add source\"\n print b\n #===\n print \"Get Source\"\n f = database.get_by_id(Source, 1)\n print f\n #===\n check = database.get_sources(url=source_url)\n print \"find source\"\n print check.count()\n if check.count() != 0:\n s_id = check.first().id\n a = database.add_keyword(Keyword(s_id, account))\n print \"aaaaaa\"\n print a \n database.add_keyword(Keyword(s_id, \"#\"+account[1:]))\n # find all hyperlinks in tweet content\n for atag in tweet.find_all('a', {'class': 'twython-url'}):\n t_url = atag.get('href')\n # If the link hasn't been checked already, check if there's\n # an existing article in the database with that url. 
\n if t_url not in checked_urls:\n ref_article = database.get_articles(url=t_url).first()\n if ref_article is None:\n articles += article_to_db(database, t_url)\n ref_article = database.get_articles(url=t_url).first()\n if ref_article != None:\n # Make sure the article exists in the database. Create a\n # reference between this article and the referenced \n # one. \n a = database.add_reference(\n (Reference(child_id=tweet_id,\n parent_id=ref_article.id)))\n print \"reffff\"\n print a\n # Add the URL to the checked list, so it isn't checked\n # again.\n checked_urls.append(t_url) \n \n # Look for references to Sources by checking keywords in the database.\n words = unicode(tweet.getText()).split()\n # Get all the keywors in database, those are the possible reference keywods\n # i.e. all the TWITTER account need to be added to database as a source with\n # corresponding keywords.\n keywords = database.get_keywords().all()\n print \"keeeeey\"\n print keywords\n for word in words:\n #print word\n # Only check the word if it hasn't been checked before.\n if word not in checked_words:\n check = database.get_keywords(name=word)\n if check.count() != 0:\n ref_source = check.first().source_id\n # This word is one of the keyword existed in database, create a\n #reference between the source of the keyword, and the article.\n database.add_reference(\n Reference(child_id=tweet_id,\n source_id=ref_source))\n\n # Add the word to the checked words list, so it isn't\n # checked again.\n checked_words.append(word) \n \n # Return the list of articles added to the database. \n return articles", "def search_wiki(page_start, page_end):\n reset()\n global all_titles\n global queries\n global all_titles_reversed\n global queries_end\n\n # Build string replacements for links\n page_start = page_start.replace(' ', '_')\n check_one = get_titles_on_page(page_start)\n page_end_replaced = page_end.replace(' ', '_')\n check_two = get_titles_on_page(page_end_replaced)\n\n # error check to see if links are valide\n if len(check_one) == 0 or len(check_two) == 0:\n reset()\n return([\"error\", ERRORS[0]])\n all_titles.add(page_start)\n page_start_titles = check_one\n\n # check 1 degree of separation\n if page_end in page_start_titles:\n return make_return_object(page_start, page_end_replaced)\n\n # Begin build queries start search object (dict of dict)\n page_start_titles = remove_duplicate_links(page_start_titles, True)\n all_titles = all_titles.union(page_start_titles)\n queries[page_start] = dict.fromkeys(page_start_titles)\n\n # Begin build queries end search object (dict of dict)\n # all_titles_reversed.add(page_end)\n # all_titles_reversed = all_titles_reversed.union(page_end_links)\n page_end_links = get_titles_linked_to_page(page_end_replaced)\n queries_end[page_end_replaced] = dict.fromkeys(page_end_links)\n\n # Begin Search\n for title in queries[page_start]:\n temp_titles = get_titles_on_page(title)\n temp_titles = remove_duplicate_links(temp_titles, True)\n if page_end in temp_titles:\n return make_return_object(\n page_start, title.replace(' ', '_'), page_end_replaced)\n else:\n for page_end_link in queries_end[page_end_replaced]:\n if page_end_link in temp_titles:\n return make_return_object(\n page_start,\n title.replace(' ', '_'),\n page_end_link.replace(' ', '_'),\n page_end_replaced)\n # temp_end_titles = get_titles_linked_to_page(page_end_link)\n # temp_end_titles = remove_duplicate_links(temp_end_titles, False)\n # all_titles_reversed = all_titles_reversed.union(temp_end_titles)\n # 
queries_end[page_end_replaced][page_end_link] = dict.fromkeys(temp_end_titles)\n all_titles = all_titles.union(temp_titles)\n queries[page_start][title] = dict.fromkeys(temp_titles)\n for title in queries[page_start]:\n for second_title in queries[page_start][title]:\n temp_titles = get_titles_on_page(second_title)\n temp_titles = remove_duplicate_links(temp_titles, True)\n if page_end in temp_titles:\n return make_return_object(\n page_start, title.replace(' ', '_'),\n second_title.replace(' ', '_'), page_end_replaced)\n else:\n for page_end_link in queries_end[page_end_replaced]:\n if page_end_link in temp_titles:\n return make_return_object(\n page_start, title.replace(' ', '_'),\n second_title.replace(' ', '_'),\n page_end_link.replace(' ', '_'),\n page_end_replaced)\n reset()\n return([\"error\", ERRORS[0]])", "def run_scrapping():\n logging.info(\"Starting the scrapping process...\")\n try:\n # Create an empty list variable.\n search_history = []\n # Run the for to scrap 2000 articles from wikipedia.\n for i in range(2000):\n\n # Send the request to wikipedia with the random url and get the response.\n response = requests.get(base_url)\n\n # Check if the current url is already exist in search_history list or not.\n if str(response.url) not in search_history:\n # if not exist then add it to the list.\n search_history.append(response.url)\n\n # Create the file with write mode and encoding format utf-8.\n f = open(module_directory + \"/DataSet/\" + str(i) + \".txt\", \"w\", encoding=\"utf-8\")\n # And write the response of get_body_content function.\n f.write(get_body_content(response.text))\n\n # Sleep for 2 second for not messing up with wikipedia server.\n sleep(2)\n\n # Save the search_history list which contains all the called urls into the file.\n f_ = open(module_directory + \"/DataSet/url_list.txt\", \"w\")\n f_.write(\"\\n\".join(search_history))\n\n return True\n\n except Exception as e:\n # log the error.\n traceback.print_exc()\n logging.error(\"Error: %s\", e)\n print(\"Error: %s\", e)\n return False", "def crawler(url, host, links, count):\n resp = load_content(url, host, links)\n backlinks(resp, count)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the send/receive loop. On each cycle, receive a message from the server and respond to it. Break the loop when the final message (BYE) is received from the server, or when the message is empty.
def send_recv_loop(conn):
    while True:
        message = conn.recv(256).decode()
        if message == "":
            raise EmptyMessageException("Message from server empty. Something went wrong.")
        final = parse_message(conn, message)
        if final:
            break
[ "def server_loop(self):\n \n self.sock.listen(1)\n\n #Wait for connection from client\n while(True):\n\n self.logger.info(\"Waiting for client to connect...\")\n\n connection, client_address = self.sock.accept()\n data = \"\"\n\n self.logger.info(\"Waiting for client at %s port %s\" % client_address)\n try:\n ## The recv and sendall methods are dynamically bound\n ## to the socket object, so pylint complains about them\n ## not existing. E1101 is disabled for these lines\n length = int(connection.recv(5)) #pylint: disable=E1101\n self.logger.info(\"Receiving %d bytes\" % length)\n data = connection.recv(length) #pylint: disable=E1101\n returndata = self.handle_message(data)\n if (returndata is not None):\n\n self.logger.info(\"Sending %s\" % returndata)\n\n length = len(returndata)\n returndata = \"%5s%s\" % (length, returndata)\n\n connection.sendall(returndata) #pylint: disable=E1101\n finally:\n connection.close()", "def send_loop(self):\n while True:\n request = self.q.get()\n if request is CLIENT_STOP:\n break\n try:\n request.id_ = self._current_id\n self._current_id += 1\n\n # TODO: Move request store into new method?\n self.requests[request.id_] = request\n self.send_to_server(request)\n except Exception:\n self.log_exception(\n \"Exception sending request %s\", request.to_dict())", "def process_messages_loop_internal(self):\n while self.receiving_messages:\n # connect to AMQP server and listen for 1 message then disconnect\n self.work_request = None\n self.connection.receive_loop_with_callback(self.queue_name, self.save_work_request_and_close)\n if self.work_request:\n self.process_work_request()", "async def start_receiving(self):\n\n while self.is_connected:\n # print(\"Receiving?\")\n stream_iterator = PeerStreamIterator(self.reader, self.buffer)\n\n async for message in stream_iterator:\n # TODO: use logging instead\n # print(f\"Received {message} from {self}\")\n\n # consume message and change client and peer status\n if isinstance(message, KeepAlive):\n # TODO: make a timeout for inactive connections and break the timeout here\n pass\n elif isinstance(message, Choke):\n self.peer_choking = True\n elif isinstance(message, Unchoke):\n self.peer_choking = False\n elif isinstance(message, Interested):\n self.peer_interested = True\n elif isinstance(message, NotInterested):\n self.peer_interested = False\n elif isinstance(message, BitField):\n self.pieces_bitarray = message.bitfield[:len(self.pieces_bitarray)]\n elif isinstance(message, Have):\n self.pieces_bitarray[message.piece_index] = True\n elif isinstance(message, Request):\n # TODO\n pass\n elif isinstance(message, Piece):\n # save the block in the piece manager\n await self.client.piece_manager.download_block(self, message)\n\n # make the next block request\n await asyncio.sleep(Peer.REQUEST_DELAY_AFTER_BLOCK)\n request_message = self.client.piece_manager.get_next_request(self)\n if request_message is not None:\n await self.send(request_message)\n elif isinstance(message, Cancel):\n # TODO\n pass\n self.buffer = stream_iterator.buffer", "def recv(self, count=10000):\r\n while count>0:\r\n count = count - 1\r\n # recv message and convert to XML object\r\n txt = self.protocol.recv_msg()\r\n res = xml.dom.minidom.parseString(txt)\r\n # log messages {{{\r\n if self.debug:\r\n self.ui.windows['trace'].write( str(self.msgid) + ' : recv <===== {{{ ' + txt)\r\n self.ui.windows['trace'].write('}}}')\r\n # handle message\r\n self.handle_msg(res)\r\n # exit, if response's transaction id == last transaction id\r\n try:\r\n if 
int(res.firstChild.getAttribute('transaction_id')) == int(self.msgid):\r\n return\r\n except:\r\n pass", "def _receive_forever(self):\n\n while True:\n data, addr = self.socket.recvfrom(self.BUFFER_SIZE)\n try:\n body = json.loads(data)\n except json.JSONDecodeError:\n logging.debug('Received invalid JSON')\n self.send(Error.json(Error.BAD_REQ, 'invalid JSON'), addr)\n continue\n if body['type'] in self.handlers:\n handler_thread = threading.Thread(\n target=self.handlers[body['type']],\n args=(self, body, addr)\n )\n handler_thread.start()\n else:\n logging.debug('Invalid message type', body)\n self.send(Error.json(Error.BAD_REQ, 'invalid message type'), addr)", "def _run(self):\n while True:\n sockets = dict(self.poll.poll())\n\n # If the ROUTER socket has received anything\n if sockets.get(self.router) == zmq.POLLIN:\n (source, sink, msg) = self.router.recv_multipart()\n if sink in self._dealers:\n sock = self._dealers[sink]\n sock.send_multipart([source, msg])\n else:\n self.router.send_multipart([source, source, msg])\n\n # If any DEALER socket has received anything\n for dealer_id in self._dealers.keys():\n sock = self._dealers[dealer_id]\n if sockets.get(sock) == zmq.POLLIN:\n (dest, msg) = sock.recv_multipart()\n self.router.send_multipart([dest, dealer_id, msg])", "def receive_data(self):\n while 1:\n client, address = self.sock.accept()\n print('Client connection recieved from:', address[0])\n data = client.recv(self.buffer_size)\n if data:\n print(' Response recieved:', data.decode())\n client.send(data)\n client.close()", "def run(self):\n # Loop forever waiting for new connections from different peers\n while True:\n # Wait on accept and create new socket\n try:\n connection_socket, address = server_socket.accept()\n except error:\n print 'Shuts down the TCP Register Server welcoming socket...'\n exit()\n # Read peer's request data from socket\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data = message_chunk\n while len(message_chunk) == MAX_BUFFER_SIZE:\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data += message_chunk\n print '\\n', request_data.decode()\n try:\n assert PROTOCOL_EOP in request_data.decode(), \\\n 'Exception: Undefined App Layer Protocol..'\n # Obtain response message by extracting request protocol\n response_message = extract_data_protocol(request_data.decode())\n connection_socket.send(response_message.encode())\n except AssertionError, _e:\n print _e\n connection_socket.close()\n del connection_socket", "async def start_sending(self):\n\n while self.is_connected:\n await asyncio.sleep(Peer.REQUEST_DELAY_NO_BLOCK)\n # print(\"Sending?\")\n if not self.peer_choking:\n # make block requests\n request_message = self.client.piece_manager.get_next_request(self)\n if request_message is not None:\n await self.send(request_message)", "async def handle(self): # pylint: disable=too-complex\n reset_frame = False\n while self.running:\n try:\n units = self.server.context.slaves()\n # this is an asyncio.Queue await, it will never fail\n data = await self._recv_()\n if isinstance(data, tuple):\n # addr is populated when talking over UDP\n data, *addr = data\n else:\n addr = (None,) # empty tuple\n\n if not isinstance(units, (list, tuple)):\n units = [units]\n # if broadcast is enabled make sure to\n # process requests to address 0\n if self.server.broadcast_enable: # pragma: no cover\n if 0 not in units:\n units.append(0)\n\n Log.debug(\"Handling data: {}\", data, \":hex\")\n\n single = self.server.context.single\n 
self.framer.processIncomingPacket(\n data=data,\n callback=lambda x: self.execute(x, *addr),\n unit=units,\n single=single,\n )\n\n except asyncio.CancelledError:\n # catch and ignore cancellation errors\n if self.running:\n self._log_exception()\n self.running = False\n except Exception as exc: # pylint: disable=broad-except\n # force TCP socket termination as processIncomingPacket\n # should handle application layer errors\n # for UDP sockets, simply reset the frame\n if isinstance(self, ModbusConnectedRequestHandler):\n client_addr = self.client_address[:2]\n Log.error(\n 'Unknown exception \"{}\" on stream {} forcing disconnect',\n exc,\n client_addr,\n )\n self.transport.close()\n else:\n Log.error(\"Unknown error occurred {}\", exc)\n reset_frame = True # graceful recovery\n finally:\n if reset_frame:\n self.framer.resetFrame()\n reset_frame = False", "def waitForCommands(self):\n while True:\n try:\n message = self.commandSocket.recv_multipart()\n if self.handleCommand(message):\n continue\n self.commandSocket.send(codes.unknownCommand)\n except zmq.error.ContextTerminated:\n self.commandSocket.close()\n break", "def process(self):\n while self._process_rcv_data():\n pass", "def __receiveLoop(self):\n\t\theartbeat_count = 0\n\t\tlast_heartbeat_count = 0\n\t\twatchdog_time = 0.0\n\t\t\n\t\t#calculate the number of bytes to unpack using the format\n\t\tdata_length = struct.calcsize(self.recv_format)\n\t\twhile self.running:\n\t\t\ttime.sleep(0.005)\n\t\t\ttry:\n\t\t\t\t#read in data from socket\n\t\t\t\tdata, addr = self.recv_socket.recvfrom(1024) # buffer size is 1024 bytes\n\t\t\t\t\n\t\t\t\t#unpack the data in to tuple of ints/floats/etc definded by the format\n\t\t\t\tdataStruct = struct.unpack(self.recv_format,data[:data_length])\n\t\t\t\t\n\t\t\t\t#lock the receive data to this therad\n\t\t\t\twith self.recv_data_lock:\n\t\t\t\t\t\n\t\t\t\t\t#copy the contents of the tuple in to the dictionary of variables\n\t\t\t\t\tfor i in range(len(self.recv_data_name_list)):\n\t\t\t\t\t\tname = self.recv_data_name_list[i]\n\t\t\t\t\t\tself.recv_data_dict[name] = dataStruct[i]\n\t\t\t\t\t\t\n\t\t\t\t\t#take local copy of the heartbeat counter of checking\n\t\t\t\t\theartbeat_count = self.recv_data_dict[\"sys_heartbeat_counter\"]\n\t\t\t\t\t\n\t\t\t#silence time out exeption\t\t\n\t\t\texcept socket.timeout:\n\t\t\t\tpass\n\t\t\t#print any other exceptions but keep running\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\ttime.sleep(1)\n\t\t\t\n\t\t\t\n\t\t\tif last_heartbeat_count != heartbeat_count:\n\t\t\t\tlast_heartbeat_count = heartbeat_count\n\t\t\t\twatchdog_time = time.time()\n\t\t\t\t\n\t\t\tself.heartbeat_healthy = time.time() - watchdog_time < 0.5", "def _try_sending(self):\n if self._stop_on == 't':\n # Time based test will be stopped by the test class\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n elif self._stop_on == 'b':\n # Check the remianing block count\n if self._test._blocks_remaining:\n self._test._blocks_remaining -= 1\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n else:\n # No more blocks to send. 
Inform test and do not reschedule sending\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()\n\n elif self._stop_on == 's':\n # Check the remaining bytes count\n\n if self._test._bytes_remaining > self._block_size:\n # Send the whole block, reduce size as normal\n self._test._bytes_remaining -= self._block_size\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n elif self._test._bytes_remaining > 0:\n # Sent the whole block, reduce to 0\n self._test._bytes_remaining = 0\n self._send_block()\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()\n\n else:\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()", "def msg_loop(self):\n #self.log.debug(\"Chrome msg_loop is initiated.\")\n while True:\n try:\n #TODO: If an exception occurs, we should not stop the auditor\n # untill all messages have been parsed.\n m = self.chrome.pop_messages()\n self.messages.extend(m)\n while len(self.messages):\n m = self.messages.pop(0)\n shutdown = self.run_cycle(m)\n if shutdown:\n self.shutdown(\"shutdown\")\n break\n except KeyboardInterrupt:\n self.log.info(\"KeyboardInterrupt, shutting down.\")\n self.shutdown(\"shutdown\")\n break\n except websocket._exceptions.WebSocketConnectionClosedException:\n self.log.info(\"Websocket exception, shutting down.\")\n self.shutdown(\"shutdown\")\n break", "def receive_packets(stopper):\n while True:\n global acks_received\n global packets_to_send\n global timestamper\n\n if (stopper in acks_received):\n return\n\n if packets_to_send is None:\n return\n try:\n packet, server = sender_socket.recvfrom(4096)\n add_to_log(packet, \"rcv\")\n process_packet(packet)\n except:\n pass", "def messageLoop(self):\n while True:\n print('{} Checking for messages...'.format(getTime()))\n msg_ids = self.getMessages()\n if len(msg_ids) > 0:\n self.processMessages(msg_ids)\n sleep_time = 1800\n print('{} Sleeping for {} minutes'.format(getTime(), int(sleep_time/60)))\n sleep(1800)", "async def _process_messages(self):\n while True:\n processing = []\n\n # Read connections\n for connection in self._connections:\n try:\n ready = connection.has_messages()\n except EOFError:\n await self.remove_connection(connection)\n else:\n if ready:\n msg = connection.next_message()\n processing.append(\n self.handle_message(connection, msg))\n\n if connection.time_since_last_message > self.PING_INTERVAL and not connection.ping_timeout:\n self.ping(connection)\n\n # Wait for all messages to finish processing\n # TODO Wrap in try-except so that errors handling messages don't crash the server\n await asyncio.gather(*processing)\n\n # Write to connections\n for connection in self._connections:\n connection.flush_messages()\n\n # Wait 10 milliseconds before checking messages\n await asyncio.sleep(0.01)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the entry point for the program. To run the program, run the following in the command line.
./client [hostname] [NEU ID]
Run "./client h" for more info
def main(args):
    try:
        conn = make_connection(args.secure, args.hostname, args.port)
        conn.sendall("cs5700spring2015 HELLO {}\n".format(args.id).encode())
        send_recv_loop(conn)
    except Exception as e:
        print(e)
        sys.exit(0)
[ "def main():\n global sock\n global handlers\n\n load_configuration()\n log(\"%s, starting...\" % const.SERVER_INFO, \"info\")\n # create queue for threading\n q = queue.Queue()\n for i in range(const.THREADS):\n handler = ClientHandler(q)\n handler.setDaemon(True)\n handler.start()\n handlers.append(handler)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((const.LISTEN_IP_ADDRESS, const.SERVER_PORT))\n log(\"Listening on port %s\\n\" % str(const.SERVER_PORT), \"info\")\n except socket.error as e:\n errno, strerr = e.args\n log(\"Could not bind port %s: %s. Exiting...\" % (const.SERVER_PORT,\n strerr), \"error\")\n sys.exit(1)\n else:\n sock.listen(5)\n while True:\n client, address = sock.accept()\n log(\"accepted connection from %s\" % str(address), \"info\")\n q.put((client, address))\n finally:\n safe_exit()", "def main():\n\tif len(sys.argv) != 2:\n\t\tprint(\"Bad args\")\n\t\tsys.exit(1)\n\tif sys.argv[1] not in port_dict:\n\t\tprint(\"Bad server name\")\n\t\tsys.exit(1)\n\n\tglobal log_file\n\tlog_file = open(sys.argv[1] + \"_log.txt\", \"w+\")\n\n\tglobal loop\n\tloop = asyncio.get_event_loop()\n\tcoro = asyncio.start_server(handle_input, '127.0.0.1', port_dict[sys.argv[1]], loop=loop)\n\tserver = loop.run_until_complete(coro)\n\t# print(\"Initializing server {0} at port {1}\".format(sys.argv[1], port_dict[sys.argv[1]]))\n\n\ttry:\n\t\tloop.run_forever()\n\texcept KeyboardInterrupt:\n\t\tpass\n\n\tserver.close()\n\tloop.run_until_complete(server.wait_closed())\n\tloop.close()\n\t# The log won't update until the server gets ^C'ed, which is probably bad for a server log\n\tlog_file.close()", "def main():\n\n if len(sys.argv) < 4:\n usage()\n\n\n global server_address\n global server_port\n\n # Specify the server address and port\n server_address = sys.argv[1]\n server_port = int(sys.argv[2])\n # Specify the TCP port to listen on.\n incoming_connection_port = int(sys.argv[3])\n \n\n # Launch a TCP server to listen on from the client side.\n repyportability.waitforconn(repyportability.getmyip(), incoming_connection_port, tcp_file_server)", "def main():\n check_existence_db()\n logging.basicConfig(filename=\"server.log\", encoding='utf-8', format='%(asctime)s - %(levelname)s - %(message)s')\n\n port = option_reading()\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(('', port))\n\n print(\n f\"Server turned on with address: {host_address} and the port: {port}. 
STATUS: Ready to interact\")\n\n logging.warning('SERVER STARTED ON IP: ' + host_address + ', ' + 'IN THE PORT: ' + str(port))\n\n while True:\n server_socket.listen()\n\n client_socket, client_address = server_socket.accept()\n print(f'\\nGot a connection from: {client_address}')\n logging.warning('ACCEPTED CONNECTION OF THE IP ADDRESS: ' + client_address[0])\n\n multithreading = threading.Thread(target=protocol_tcp, args=(client_socket, client_address))\n multithreading.start()", "def main():\n signal.signal(signal.SIGINT, signal_handler)\n print ('\\n\\n\\n\\n\\n\\t***Welcome to the Chat Server***\\n\\n')\n print ('Press Ctrl-C to shut down server and exit.\\n\\n')\n port = get_arg(parse_cmdline(), 1)\n server_handle = input_handler(\"Input Handle: \")\n server_socket = open_socket(get_machine_info(), port)\n server_listen(server_socket, server_handle)", "def start_client(language_name):\n\n\n executeCmd(\"./bin/fteproxy --quiet --mode client\"\n + \" --upstream-format \" + language_name + \"-request\"\n + \" --downstream-format \" + language_name + \"-response\"\n + \" --client_ip \" + BIND_IP\n + \" --client_port \" + str(CLIENT_PORT)\n + \" --server_ip \" + BIND_IP + \" --server_port \" + str(SERVER_PORT) + \" &\")\n\n waitForListener(BIND_IP, CLIENT_PORT)", "def main():\n try:\n client.on_connect = on_connect\n client.on_message = on_message\n # Once everything has been set up, we can (finally) connect to the broker\n # 1883 is the listener port that the MQTT broker is using\n client.connect(mqtt_broker_ip, 1883)\n client.loop_forever()\n except (KeyboardInterrupt, SystemExit):\n print(\"\\nKilling Thread...\")\n client.disconnect()\n print(\"Done.\\nExiting.\")", "def main() -> None:\n\n start_server()", "def main():\n\n (options, port) = parse_args()\n\n EchoServerProtocol.MESSAGE_PREFIX = options.msg_prefix\n EchoServerProtocol.REPETITIONS = options.repetitions\n\n factory = EchoServerFactory()\n reactor.listenTCP(port, factory)\n\n reactor.run()", "def main(self, *args):\n if not self.ip:\n try:\n self.ip = socket.gethostbyname(self.host)\n except Exception:\n self._write_result('Host not found: %s' % self.host)\n return\n s = socket.socket()\n s.connect((socket.gethostbyname('v4.whois.cymru.com'),43))\n # query for AS\n self._write_result('SOURCE | v4.whois.cymru.com')\n s.send(' -v %s\\n' % self.ip)\n output = s.recv(4096)\n s.close()\n self._write_result(output.strip())\n # query for AS peers\n s = socket.socket()\n s.connect((socket.gethostbyname('v4-peer.whois.cymru.com'),43))\n self._write_result('SOURCE | v4-peer.whois.cymru.com')\n s.send(' -v %s\\n' % self.ip)\n output = s.recv(4096)\n s.close()\n self._write_result(output.strip())\n self._write_result('\\nUPSTREAM PEER(s) DETECTED: %s' % (len(output.strip().split('\\n'))-1))", "def start_server():\n\n tester = Tester()\n host = None\n while True:\n try:\n print(get_chat_line_separator())\n host = input(\"IP to host on (nothing for localhost): \").strip()\n if not host:\n host = \"localhost\"\n port = input(\"Port to host on (nothing for {}): \".format(PORT))\n if not port:\n port = PORT\n\n print(info_message(\n \"Starting Turing Test Server on {}:{}\".format(host, port)))\n print(info_message(\"Waiting for connection from subject...\"))\n tester.run(host=host, port=port, quiet=True)\n except socket.gaierror:\n print(info_message(\"Invalid host '{}'\".format(host)))", "def main():\n\n dashboard_user = \"root\"\n args = parse_arguments(dashboard_user)\n LOG.setLevel(args.logging_level)\n\n dashboard_node = 
Node(args.dashboard_addr,\n dashboard_user,\n args.dashboard_pass)\n dashboard_node.initialize()\n\n LOG.info(\"Configuring Ceph Storage Dashboard on {} ({})\".format(\n dashboard_node.address, dashboard_node.fqdn))\n\n ceph_nodes = get_ceph_nodes(username=\"heat-admin\")\n if args.satOrg is not None:\n prep_subscription_json_satellite(args.satOrg, args.satKey,\n args.physId, args.cephId)\n else:\n prep_subscription_json(args.subUser, args.subPass,\n args.physId, args.cephId)\n register_overcloud_nodes()\n prep_host_files(dashboard_node, ceph_nodes)\n prep_root_user(dashboard_node, ceph_nodes)\n prep_heat_admin_user(dashboard_node, ceph_nodes)\n prep_ansible_hosts(dashboard_node, ceph_nodes)\n prep_ceph_conf(dashboard_node, ceph_nodes)\n prep_cluster_for_collection(dashboard_node,\n ceph_nodes,\n args.dashboard_addr)\n add_iptables_ports(ceph_nodes)\n unregister_overcloud_nodes()\n restart_prometheus(dashboard_node, ceph_nodes)", "def main():\n\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n parser.add_argument('--host', default=get_default_value_from_env('WM_SERVICES_MQTT_HOSTNAME'), help=\"MQTT broker address\")\n parser.add_argument('--port',\n default=get_default_value_from_env('WM_SERVICES_MQTT_PORT', 8883),\n type=int,\n help='MQTT broker port')\n parser.add_argument('--username',\n default=get_default_value_from_env('WM_SERVICES_MQTT_USERNAME', 'mqttmasteruser'),\n help='MQTT broker username')\n parser.add_argument('--password',\n default=get_default_value_from_env('WM_SERVICES_MQTT_PASSWORD'),\n help='MQTT broker password')\n parser.add_argument('--config',\n default=get_default_value_from_env('WM_PROV_CONFIG',\n '/home/wirepas/wm-provisioning/vars/settings.yml'),\n type=str,\n help='The path to your .yml config file: \\\"examples/provisioning_config.yml\\\"')\n args = parser.parse_args()\n\n logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO)\n\n wni = WirepasNetworkInterface(args.host, args.port, args.username, args.password)\n\n srv = ProvisioningServer(interface=wni, settings=args.config)\n srv.loop()", "def main() -> None:\n\n args = get_args()\n basicConfig(level=INFO, format=LOG_FORMAT)\n\n if args.server:\n host, port, passwd = from_args(args)\n else:\n host = port = passwd = None\n\n with ErrorHandler(LOGGER):\n with CommandHistory(LOGGER):\n rconcmd(host, port, passwd, prompt=args.prompt)", "def main():\r\n sockip = (\"localhost\")\r\n sockport = (1503)\r\n sock_server = socket_server(sockip, sockport)\r\n sock_server.create_accounts()\r\n print(\"[Socket Server Started on Port \" + str(sockport) + \"]\")\r\n #loop allows server to contune running for multible connections from client\r\n while True:\r\n sock_server.sock.listen(1)\r\n t = Thread(sock_server.connect())\r\n t.start()", "def display_one_client(client):\r\n if client:\r\n print(client)\r\n else:\r\n print('No such client')", "def main():\n # the `GenericDriver` is a good place to start if your platform is not supported by a \"core\"\n # platform drivers\n conn = GenericDriver(**MY_DEVICE)\n conn.open()\n\n print(conn.channel.get_prompt())\n print(conn.send_command(\"show run | i hostname\").result)\n\n # IMPORTANT: paging is NOT disabled w/ GenericDriver driver!\n conn.send_command(\"terminal length 0\")\n print(conn.send_command(\"show run\").result)\n conn.close()\n\n # Context manager is a great way to use scrapli, it will auto open/close the connection for you:\n with GenericDriver(**MY_DEVICE) as conn:\n result = conn.send_command(\"show run | i 
hostname\")\n print(result.result)", "def main():\n query_ip = Metadata._QUERY_IP\n\n # If a mock server was requested for testing, start it here.\n options.parse_command_line()\n if options.options.mock:\n from tornado import testing\n port = testing.get_unused_port()\n class Handler(web.RequestHandler):\n def get(self, path):\n self.write(path.split(\"/\")[-1])\n application = web.Application([ (r\"/(.*)\", Handler), ])\n application.listen(port)\n query_ip = \"localhost:{0}\".format(port)\n\n def _MetadataCallback(metadata):\n print metadata\n ioloop.IOLoop.current().stop()\n\n Metadata(callback=_MetadataCallback, query_ip=query_ip)\n ioloop.IOLoop.current().start()\n return 0", "def main():\n CLI.from_command_line()\n exit(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get (completed) decommission_device workflows for device.
def get_decommission_device_wfs(self, device_id, state='COMPLETED'):
    qs = {
        'q': 'workflowType IN (%s) AND status IN (%s) AND input.device_id IN (%s)' % \
            ('decommission_device', state, device_id)
    }
    return self.__get_workflows(qs)
[ "def decommission_device():\n rsp = self.api_devauth_mgmt.with_auth(user.token).call(\n \"DELETE\", deviceauth.URL_DEVICE.format(id=dev.id),\n )\n assert rsp.status_code == 204", "def get_degradations(graph):\n return get_nodes(graph, is_degraded)", "def get_device_migration_list(self, filters_dict=None):\n try:\n device_mig = utils.DataMigrationApi(api_client=self.client)\n if filters_dict:\n obj = device_mig.get_device_migrations(**filters_dict)\n else:\n obj = device_mig.get_device_migrations()\n self.logmsg('Device migration job', obj)\n device_mig_details = utils.serialize_content(obj)\n return self.parse_data(device_mig_details)\n except (utils.ApiException, ValueError, TypeError) as err:\n err_msg = \"Could not get Device migration jobs due to error: {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)", "def get_instance(self, payload):\n return DeactivationsInstance(self._version, payload, )", "def get_deleted_datasets(node, delreqdays):\n print(\"Gets datasets deleted at %s in the last %d days\" % (node, int(delreqdays)))\n\n date = (datetime.datetime.now() - datetime.timedelta(days=int(delreqdays))).strftime(\"%Y-%m-%d\")\n delreq_datasets = datasvc_client('deleterequests', {'node': node,\n 'approval': 'approved',\n 'create_since': date})\n datasets = []\n for req in delreq_datasets['phedex']['request']:\n for block in req['data']['dbs']['block']:\n datasets.append(block['name'].split('#')[0])\n\n return list(set(datasets))", "def find_delta(self):\n import serial.tools.list_ports\n c = serial.tools.list_ports.comports() \n delta_device_list = []\n delta_found = {}\n for i in xrange(len(c)):\n try:\n #usb device detected and check if it is serial device\n if (c[i].vid>0 and c[i].pid>0):\n delta_found = {}\n dbg.prn(dbg.DLT, \"dt.find_delta trying...{}\".format(c[i].device))\n dev = serial.Serial()\n dev.port = c[i].device\n dev.baudrate = 9600 # baud rate\n dev.bytesize = serial.EIGHTBITS # bits per bytes\n dev.parity = serial.PARITY_NONE # parity check: no parity\n dev.stopbits = serial.STOPBITS_ONE # stop bits\n dev.timeout = 1 # non-block read\n dev.xonxoff = False # disable software flow control\n dev.rtscts = False # disable hardware (RTS/CTS) flow control\n dev.dsrdtr = False # disable hardware (DSR/DTR) flow control\n dev.writeTimeout = 2 # timeout for write\n # make sure serial port is not taken by anyone\n # try tty port\n# dev_tty = c[i].device.replace(\"cu.\", \"tty.\") \n# dev.port = dev_tty\n# if (not dev.isOpen()):\n# dev.open()\n# dev.close()\n # try cu port...again\n dev.port = c[i].device\n dev.open()\n if dev.isOpen():\n dev.flushInput() # flush input buffer, discarding all its contents\n dev.flushOutput() # flush output buffer, aborting current output and discard all that is in buffer\n # set default settings (GEC,GVB,Encoder setting)\n # GVR VERSION INFO Displays software and PLD versions and build dates / times\n # GDN DEVICE NAME Up to 64 Characters (*Device Name)\n # IDF SET DEFAULTS Sets NETWORK parameters to the default settings\n # GEC CONTROL ECHO *0 = Disable (No echo) 1 = Enable (Echo received characters)\n # GVB CONTROL VERBOSE *0 = Disable (Quiet) 1 = Enable (Verbose)\n # OCR STREAM BITRATE (KBPS) 64 to 20000 (*6000)\n # OIF INTERFACE *0 = ETHERNET TS 1 = ETHERNET RTP 2 = ETHERNET RTP/RTSP]\n # ORS REMOTE IP STREAM 0 = Disable *1 = Enable\n # IMA MAC ADDRESS\n # IMO MODE 0=FIXED 1=DHCP\n # IAA ASSIGNED ADDRESS (by DHCP)\n # IAM ASSIGNED SUBNET X.X.X.X; where X = 0 - 255 
(*255.0.0.0)\n # IAG ASSIGNED GATEWAY X.X.X.X; where X = 0 - 255 (*10.10.0.1)\n resp = self.sendcmd(dev,'IMA') # get MAC address\n if (resp!=''):\n mac=re.compile(r'([A-z0-9: -:\\r\\n\\t>]*)([=][ ]*)(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))', re.I)\n g=mac.match(resp)\n if ((g!=None) and (not self.mac)):\n mac_addr = g.group(3) \n delta_found['MAC'] = mac_addr\n delta_found[mac_addr] = proxydevice(c[i].device, dev) # should be MAC\n self.updatecmd(delta_found[mac_addr], \"MAC=\"+mac_addr)\n self.setRecommandedSettings(delta_found[mac_addr])\n resp = self.sendcmd(delta_found[mac_addr],'IAA') # get IP address\n dbg.prn(dbg.DLT, \"dt.find_delta IP:{} MAC:{} SVR_IP:{}\".format(resp.split(\"=\")[1], mac_addr, appg.mainip))\n self.updatecmd(delta_found[mac_addr], \"IP=\"+resp.split(\"=\")[1]) \n self.check_delta_stat(delta_found[mac_addr])\n if (pu.mdbg.checkscf(sockCmd, pu.SCF_SHOWBONJ)):\n dbg.prn(dbg.MN, \"-->DELTA:Network settings-->\\r\\n\", self.sendcmd(dev,\"IST\", True))\n dbg.prn(dbg.DLT, \"-->DELTA:serial device recs-->\", delta_found[mac_addr].dt_params)\n self.getParams(ip=delta_found[mac_addr].dt_params['IP'])\n if (not self.is_valid_ip(resp.split(\"=\")[1])):\n delta_found = False\n else:\n if (not self.manual_search):\n # For some reason, DHCP in Delta encoder sometimes not started immediately, so this will kick the process in.\n self.sendcmd(delta_found[mac_addr], 'IMO=0')\n time.sleep(2)\n self.sendcmd(delta_found[mac_addr], 'IMO=1')\n time.sleep(2)\n dbg.prn(dbg.DLT, \"-->DELTA: toggle IMO for starting...\")\n else: # IMA not respond\n dbg.prn(dbg.DLT, \"[-->] dt.find_delta: IMA not respond:\", c[i].device)\n dev.close()\n else:\n dbg.prn(dbg.DLT, \"[-->] dt.find_delta: cannot open...\", c[i].device)\n already_found = False\n for d in delta_device_list:\n if ('MAC' in d and d['MAC'] == delta_found['MAC']):\n already_found = True\n if (not already_found and len(delta_found)>0):\n delta_device_list.append(delta_found)\n except (serial.SerialException, Exception) as e:\n dbg.prn(dbg.DLT|dbg.ERR, \"[---]dt.find_delta error, check next one...:\", e, sys.exc_info()[-1].tb_lineno)\n self.closePort(dev)\n return delta_device_list", "def _downstream_list(self, task_id, session=None):\n qry = session.query(TaskDependency) \\\n .filter(TaskDependency.dependency_task_id == task_id).all()\n return [dep.task_id for dep in qry]", "def get_desayunos(self):\n return self.desayunos", "def didcomm_services(self) -> List[DIDCommService]:\n pass", "def deactivate_and_detach_export_domain(request, storage):\n def finalizer():\n testflow.teardown(\n \"Attaching and activating export domain %s to data center %s\",\n config.EXPORT_DOMAIN_NAME, config.DATA_CENTER_NAME\n )\n assert hl_sd.attach_and_activate_domain(\n config.DATA_CENTER_NAME, config.EXPORT_DOMAIN_NAME\n )\n request.addfinalizer(finalizer)\n testflow.setup(\n \"Deactivating and detaching export domain %s from data center %s\",\n config.EXPORT_DOMAIN_NAME, config.DATA_CENTER_NAME\n )\n test_utils.wait_for_tasks(config.ENGINE, config.DATA_CENTER_NAME)\n assert hl_sd.detach_and_deactivate_domain(\n config.DATA_CENTER_NAME, config.EXPORT_DOMAIN_NAME, config.ENGINE\n )", "def _remove_soft_deletes(self, payments):\n return [payment for payment in payments if not payment.is_removed]", "def start_demultiplexing(self, flow_cell: FlowCellDirectoryData):\n self.create_demultiplexing_started_file(flow_cell.demultiplexing_started_path)\n demux_dir: Path = self.flow_cell_out_dir_path(flow_cell=flow_cell)\n unaligned_dir: Path = 
self.get_flow_cell_unaligned_dir(flow_cell=flow_cell)\n LOG.info(f\"Demultiplexing to {unaligned_dir}\")\n if not self.dry_run:\n LOG.info(f\"Creating demux dir {unaligned_dir}\")\n unaligned_dir.mkdir(exist_ok=False, parents=True)\n\n log_path: Path = self.get_stderr_logfile(flow_cell=flow_cell)\n error_function: str = self.get_sbatch_error(\n flow_cell=flow_cell, email=self.mail, demux_dir=demux_dir\n )\n commands: str = self.get_sbatch_command(\n run_dir=flow_cell.path,\n unaligned_dir=unaligned_dir,\n sample_sheet=flow_cell.sample_sheet_path,\n demux_completed=self.demultiplexing_completed_path(flow_cell=flow_cell),\n flow_cell=flow_cell,\n environment=self.environment,\n )\n\n if flow_cell.bcl_converter == BclConverter.BCL2FASTQ:\n sbatch_parameters: Sbatch = Sbatch(\n account=self.slurm_account,\n commands=commands,\n email=self.mail,\n error=error_function,\n hours=36,\n job_name=self.get_run_name(flow_cell),\n log_dir=log_path.parent.as_posix(),\n memory=125,\n number_tasks=18,\n quality_of_service=self.slurm_quality_of_service,\n )\n if flow_cell.bcl_converter == BclConverter.DRAGEN:\n sbatch_parameters: SbatchDragen = SbatchDragen(\n account=self.slurm_account,\n commands=commands,\n email=self.mail,\n error=error_function,\n hours=36,\n job_name=self.get_run_name(flow_cell),\n log_dir=log_path.parent.as_posix(),\n quality_of_service=self.slurm_quality_of_service,\n )\n\n sbatch_content: str = self.slurm_api.generate_sbatch_content(\n sbatch_parameters=sbatch_parameters\n )\n sbatch_path: Path = self.demultiplex_sbatch_path(flow_cell=flow_cell)\n sbatch_number: int = self.slurm_api.submit_sbatch(\n sbatch_content=sbatch_content, sbatch_path=sbatch_path\n )\n LOG.info(f\"Demultiplexing running as job {sbatch_number}\")\n return sbatch_number", "def discover_services(device):\n device.loadDeviceDefinitions(\"http://fritz.box:49000/tr64desc.xml\")\n device.loadSCPD()\n scpd = device.deviceSCPD\n services = {}\n for service, actions in scpd.items():\n action_dump = defaultdict(list)\n for action, parameters in actions.items():\n if (service, action) in blacklist:\n continue\n elif 'inParameter' in parameters:\n logging.debug(f\"Dropping {action} because it has inparams\")\n continue\n elif \"outParameter\" not in parameters:\n logging.debug(f\"Dropping {action} because it has no outparams\")\n continue\n else:\n for param, desc in (parameters[\"outParameter\"].items()):\n if desc[\"dataType\"] in good_outparam_types:\n action_dump[action].append(param)\n logging.debug(f\"{action} looks great!\")\n services[service] = action_dump\n return services", "def _get_delete_components(self, build_context: CRUDBuildContext) -> List[Tuple[int, IGraphQLEndpointComponent]]:\r\n pass", "def get_debt_by_date(self, dt):\n dt = self._check_date(dt, '%Y/%m/%d')\n url = self.base_url + self.debt_endpoint + '{}?format=json'.format(dt)\n debt = self._process_request(url)\n return debt", "def get_drc_decks(self) -> List[hammer_tech.DRCDeck]:\n return self.technology.get_drc_decks_for_tool(self.name)", "def get_deferred(self, customer):\n deferred = customer.get_deferred()\n if deferred:\n deferred = deferred.display()\n return {\"deferred\": deferred}", "def sde_get_tasks(self):\n\n if not self.sde_plugin:\n raise AlmException('Requires initialization')\n\n try:\n if self.config['selected_tasks']:\n return self.sde_plugin.get_task_list()\n else:\n return self.sde_plugin.get_task_list(priority__gte=self.config['sde_min_priority'])\n except APIError, err:\n logger.error(err)\n raise AlmException('Unable to 
get tasks from SD Elements. Please ensure'\n ' the application and project are valid and that the user has'\n ' sufficient permission to access the project. Reason: %s' % (str(err)))", "def test_get_deposits(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get workflows according to a search query.
def __get_workflows(self, query):
    qs = {
        'q': query,
    }
    rsp = requests_retry().get(self.addr+self.API_WF_SEARCH, params=qs)
    rsp.raise_for_status()
    return rsp.json()
[ "def search(cls, email=\"\", name=\"\", tags=None, client=None):\n if client is None:\n client = get_global_grpc_client()\n\n stream = client.api[\"SearchWorkflows\"](\n workflow_pb2.SearchWorkflowsRequest(\n email=email, name_prefix=name, tags=tags\n ),\n timeout=client.STREAM_TIMEOUT,\n )\n\n for message in stream:\n yield cls._from_proto(message, client)", "def test_api_v3_workflows_get(self):\n pass", "def get_all(self):\n LOG.info(\"Fetch workflows.\")\n\n workflows_list = [Workflow.from_dict(db_model.to_dict())\n for db_model in db_api.get_workflow_definitions()]\n\n return Workflows(workflows=workflows_list)", "def workflow_list(request, category_slug=None): \n\n result = True\n error = \"\"\n category = None\n workflows = []\n\n\n if category_slug == None:\n # By default (and if no 2nd arguments is provided)\n # category will be None\n\n # Workflows will include all wfs as no filter was given\n workflows = Workflow.objects.all().order_by('id')\n\n else:\n # Otherwise, it takes the category object from the slug.\n match = Category.objects.filter(slug = category_slug)\n if match.exists():\n category = match[0]\n\n # And workflows are also filtered by this cat\n match = Workflow.objects.filter(category = category)\n if match.exists():\n workflows = list(match)\n else:\n # Query failed, we return the message below\n result = False\n error = \"{} category doesnt own any workflow\".format(category_slug)\n else:\n # Query failed, we return the message below\n result = False\n error = \"{} category does not exist!\".format(category_slug)\n\n\n\n categories = Category.objects.all()\n\n # Variables for the pagination of the worflows\n page = request.GET.get('page', 1)\n paginator = Paginator(workflows, 8)\n\n try:\n workflows = paginator.page(page)\n except PageNotAnInteger:\n workflows = paginator.page(1)\n except EmptyPage:\n workflows = paginator.page(paginator.num_pages)\n\n\n _dict = {'category': category, # category associated to category_slug\n 'categories': categories, # list with all categories\n # usefull to repaint the category\n # menu\n 'workflows': workflows, # subset of all workflows associated to category\n # category_slug\n 'result': result, # False if no workflow satisfices the query\n 'error': error, # message to display if results == False\n 'form': SearchForm()\n }\n\n return render(request, 'find/list.html', _dict)", "def get_workflows(self, return_json: bool = False) -> Union[List[\"Workflow\"], Dict]:\n url = f\"{self.auth._endpoint()}/projects/{self.project_id}/workflows\"\n response_json = self.auth._request(request_type=\"GET\", url=url)\n workflows_json = response_json[\"data\"]\n logger.info(\n \"Got %s workflows for project %s.\", len(workflows_json), self.project_id\n )\n\n if return_json:\n return workflows_json\n else:\n workflows = [\n Workflow(self.auth, project_id=self.project_id, workflow_id=work[\"id\"])\n for work in tqdm(workflows_json)\n ]\n return workflows", "def get_workflow_by_id(self, workflowid: str, query_params: Dict[str, object] = None) -> Workflow:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"workflowid\": workflowid,\n }\n\n path = Template(\"/catalog/v2alpha2/workflows/${workflowid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Workflow)", "def list_workflow_builds(self, workflowid: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, 
object] = None) -> List[WorkflowBuild]:\n if query_params is None:\n query_params = {}\n if count is not None:\n query_params['count'] = count\n if filter is not None:\n query_params['filter'] = filter\n if offset is not None:\n query_params['offset'] = offset\n if orderby is not None:\n query_params['orderby'] = orderby\n\n path_params = {\n \"workflowid\": workflowid,\n }\n\n path = Template(\"/catalog/v2alpha2/workflows/${workflowid}/builds\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, WorkflowBuild)", "def get_list_of_workflow_instances(base_url, digest_login, params):\n\n workflows = get_workflow_instances(base_url, digest_login, params)\n\n if \"workflow\" not in workflows:\n return []\n\n workflows = workflows[\"workflow\"]\n if not isinstance(workflows, list): # happens if there's only one\n workflows = [workflows]\n return workflows", "def get_workflow_runs_by_name(self, workflow_name):\n variables = {\n 'name': workflow_name\n }\n\n return self.query(\"\"\"\n query workflowRunsByNameQuery($name: String!) {\n workflowRunsByName(name: $name) {\n id\n name\n createdBy {\n id\n firstName\n lastName\n email\n }\n deleted\n deletedAt\n updatedAt\n createdAt\n }\n }\n \"\"\",\n variables=variables\n )", "def list_workflows_command():\n return Command().command(_list_workflows).require_migration().with_database(write=False)", "def list_workflow_runs(self, workflowid: str, workflowbuildid: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[WorkflowRun]:\n if query_params is None:\n query_params = {}\n if count is not None:\n query_params['count'] = count\n if filter is not None:\n query_params['filter'] = filter\n if offset is not None:\n query_params['offset'] = offset\n if orderby is not None:\n query_params['orderby'] = orderby\n\n path_params = {\n \"workflowid\": workflowid,\n \"workflowbuildid\": workflowbuildid,\n }\n\n path = Template(\"/catalog/v2alpha2/workflows/${workflowid}/builds/${workflowbuildid}/runs\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, WorkflowRun)", "def search_remote():\n page = request.args.get('page')\n search_text = request.args.get('textSearch', None)\n mapper_level = request.args.get('mapperLevel', None)\n mapping_types = request.args.get('mappingTypes', None)\n organisation_tag = request.args.get('organisationTag', None)\n campaign_tag = request.args.get('campaignTag', None)\n\n data = TaskingManagerProvider().search_project(\n page=page,\n search_text=search_text,\n mapper_level=mapper_level,\n mapping_types=mapping_types,\n organisation_tag=organisation_tag,\n campaign_tag=campaign_tag\n )\n return Response(data)", "def search_flow_executions(systemInstanceId=None, flowExecutionId=None, startTime=None, endTime=None, nextToken=None, maxResults=None):\n pass", "def search_notes_by_project_title(workflow, log, query):\n\n sql_query = NOTES_BY_PROJECT_TITLE.format(query)\n\n return run_query(workflow, log, sql_query)", "def search(self, query):\n if not query or len(query) < 3:\n return RenderResultListAction([\n ExtensionResultItem(icon='images/icon.png',\n name='Folder search',\n description='Keep typing to search ...',\n on_enter=HideWindowAction())\n ])\n\n try:\n folders = tracker.search(query)\n except Exception as error:\n 
logging.error(error)\n return RenderResultListAction([\n ExtensionResultItem(icon='images/icon.png',\n name='An error occurred',\n description=str(error),\n highlightable=False,\n on_enter=HideWindowAction())\n ])\n\n if len(folders) == 0:\n return RenderResultListAction([\n ExtensionResultItem(\n icon='images/icon.png',\n name='No folders found matching your criteria',\n description=\n 'If you were expecting results,please check your Tracker index settings',\n highlightable=False,\n on_enter=HideWindowAction())\n ])\n\n items = []\n home = str(Path.home())\n for folder in folders[:15]:\n name = folder.replace(home, '~').replace('&', 'And')\n name = urllib.parse.unquote_plus(name)\n path = urllib.parse.unquote_plus(folder)\n items.append(\n ExtensionSmallResultItem(icon='images/icon.png',\n name=name,\n on_enter=ExtensionCustomAction(\n {\n 'action': 'detail',\n 'path': path\n },\n keep_app_open=True)))\n\n return RenderResultListAction(items)", "def backlog_workflows():\n _workflow = aliased(all_models.Workflow, name=\"wf\")\n return db.session.query(_workflow.id,\n literal(\"Workflow\").label(\"type\"),\n _workflow.context_id)\\\n .filter(_workflow.kind == \"Backlog\")", "def search(self, query, model=None):\n raise NotImplementedError()", "def locate_workflows_in_directory(path=None):\n path = path if path is not None else core.config.paths.workflows_path\n if os.path.exists(path):\n return [workflow for workflow in os.listdir(path) if (os.path.isfile(os.path.join(path, workflow))\n and workflow.endswith('.workflow'))]\n else:\n logger.warning('Could not locate any workflows in directory {0}. Directory does not exist'.format(path))\n return []", "def searchProjects(request):\n search_query = ''\n\n if request.GET.get('search_query'):\n search_query = request.GET.get('search_query')\n \n\n tags = Tag.objects.filter(name__icontains=search_query)\n \n\n project_list = Project.objects.distinct().filter(\n Q(title__icontains=search_query) |\n Q(description__icontains=search_query) |\n Q(owner__name__icontains=search_query) |\n Q(tags__in=tags)\n )\n\n return project_list, search_query" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that defines the movement of bacteria in the XY direction
def plane_move(self):  # Move bacteria in xy plane
    # Generate random number from which xy movement will be decided
    randnum = random.random()
    # 5% chance of bacteria moving in -ve x direction
    if randnum <= self.prob_west:
        self.bomb_origin_x -= 1
    # 10% chance of bacteria moving in -ve y direction
    elif randnum <= (self.prob_west + self.prob_south):
        self.bomb_origin_y -= 1
    # 10% chance of bacteria moving in +ve y direction
    elif randnum <= (self.prob_west + self.prob_south + self.prob_north):
        self.bomb_origin_y += 1
    # 75% chance of bacteria moving in +ve x direction
    else:
        self.bomb_origin_x += 1
[ "def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[0], self.position[1]+1)\n\n elif self.direction == \"w\":\n self.position = (self.position[0], self.position[1]-1)", "def update_position(self):\n \t\t\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed", "def update_x_y(self):\n self.x = self.pos % 15\n self.y = self.pos // 15", "def move_y(self, amount=40):\n self.y_coor += amount\n self.pos = [self.x_coor - self.node_size / 2, self.y_coor - self.node_size / 2]", "def move(self):\n #The goal here is to have a bouncing movement.\n #So the first part of the code checks if the entity has\n #reached any of the screen's edges. If so, it changes to\n #the opposite direction.\n width, height = self.get_size()\n if self.x - width/2 <=0 and self.horizontal_dir == -1:\n self.horizontal_dir = 1\n elif self.x + width/2 >= SCREEN_WIDTH and self.horizontal_dir == 1:\n self.horizontal_dir = -1\n if self.y - height/2 <=0 and self.vertical_dir == -1:\n self.vertical_dir = 1\n elif self.y + height/2 >= SCREEN_HEIGHT and self.vertical_dir == 1:\n self.vertical_dir = -1\n\n #This is the movement part.\n self.x+=self.horizontal_dir*self.speed\n self.y+=self.vertical_dir*self.speed", "def pixelMove():\n pass", "def change_fleet_direction(ai_settings, lynels):\n for lynel in lynels.sprites():\n lynel.rect.y += ai_settings.horde_drop_speed\n ai_settings.horde_direction *= -1", "def move(self):\r\n if random.random() < 0.5:\r\n self._y = (self._y + 1) % 300\r\n else:\r\n self._y = (self._y - 1) % 300\r\n \r\n if random.random() < 0.5:\r\n self._x = (self._x + 1) % 300\r\n else:\r\n self._x = (self._x - 1) % 300", "def move(self):\n # TODO 11: Change the y position of this Raindrop by its speed.\n pass", "def move(self):\n self.tick_count += 1\n\n # equation for displacement of bird, downward acceleration\n d = self.vel * self.tick_count + 1.5 * self.tick_count ** 2\n\n # setting terminal velocity, so we don't go move too fast\n if d > self.TERMINAL_VEL:\n d = self.TERMINAL_VEL\n\n # before moving upward move a bit more\n if d < 0:\n d -= 2\n\n # change y position based on displacement\n self.y = self.y + d\n\n # make bird point upward if bird above starting point\n if d < 0 or self.y < (self.height + 50):\n if self.tilt < self.MAX_ROTATION:\n self.tilt = self.MAX_ROTATION\n else: # tilt down\n if self.tilt > -90:\n self.tilt -= self.ROT_VEL", "def GoToPosition(x,y):\n Movement.GoStraightToPoint(x,y)", "def changeCoordinates(self, x, y, draw) -> None:\n\n self.rect.x += x\n\n #checks if the player will go below the bottom of the screen\n if self.rect.y + y + 2*self.radius < self.WIN.get_height() and self.vector.y != 0:\n self.rect.y += y\n else:\n self.rect.y = self.WIN.get_height() - (2*self.radius) - 1\n self.vector.y = 0\n self.streak = 0\n self.friction(0.3)\n self.currentSpeed = self.baseSpeed\n self.jump = True\n\n #Draws the player on the window\n if draw: pygame.draw.circle(self.WIN, aquaGreen, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)", "def update(self):\n self.x=self.x + self.movingDirection[0]*menu.settings_ballSpeed # To move, add movingDirection times the ball speed by the previous coordinate, movingDirection gets updated after a bounce\n self.rect.x = self.rect.x + self.movingDirection[0]*menu.settings_ballSpeed\n self.y = 
self.y + self.movingDirection[1]*menu.settings_ballSpeed\n self.rect.y = self.rect.y + self.movingDirection[1]*menu.settings_ballSpeed", "def _move_actor(self,actor):\n px = actor.center_x\n vx = actor.change_x\n actor.center_x = 1 + (px + vx - 1) % (constants.MAX_X - 1)\n py = actor.center_y\n vy = actor.change_y\n actor.center_y = 1 + (py + vy - 1) % (constants.MAX_Y - 1)", "def robot_right(self):\r\n\t self.x = self.x + 1\r\n\t if self.x > 9:\r\n\t\t self.x = 9", "def move(x, y):\r\n turtle.goto(x, y)", "def move(self, direction):\n if direction == Direction.north:\n self.y -= 1\n elif direction == Direction.west:\n self.x -= 1\n elif direction == Direction.south:\n self.y += 1\n elif direction == Direction.east:\n self.x += 1", "def update_pos(self):\n self.last_x = self.x\n self.last_y = self.y\n self.x += self.direction[0] * BLOCK_SIZE\n self.y += self.direction[1] * BLOCK_SIZE", "def avanzar(self):\r\n \r\n self.mover_x = 8\r\n self.direccion = \"R\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate git repo access (via HTTP GET) [EXPERIMENTAL]
url /magen/policy/v2/validation/repository//
request.args['username'] scm system username
request.args['client_id'] magen client id
request.args['application'] e.g. git
request.args['action'] e.g. clone
def policy_validate_repository_access(repositoryId):
    pstate = PolicyState()
    pstate.test_mode = True
    logger = logging.getLogger(LogDefaults.default_log_name)
    logger.debug("validate_repo_access: request: %s request.args: %s", request, request.args)
    args_ok, badargs_cause = pstate.rest_api_required_args_validate(
        request.args, ['application', 'client_id', 'username', 'action'])
    if not args_ok:
        return RestServerApis.respond(
            HTTPStatus.NOT_FOUND, "SCM validation",
            {"success": False, "cause": badargs_cause})
    assetId = repositoryId
    application = request.args['application']
    mc_id = request.args['client_id']
    username = request.args['username']
    action = request.args['action']
    response, partial_event = PolicyValidationApi.scm_action_validation_v2(
        mc_id, username, assetId, action, application)
    kwgs = dict(
        action=action,
        application=application,
        resource_id=assetId,
        client_id=mc_id
    )
    DDPolicyEventsWrapper.create_and_submit(response, kwgs, partial_event, logger)
    return RestServerApis.respond(HTTPStatus.OK, "log message", response)
[ "def test_rejects_non_github_urls():\r\n\tassert sanityCheck(\"https://google.com/angular/angular.git\") == False", "def test_accepts_github_urls_only():\r\n\tassert sanityCheck(\"https://github.com/facebook/react.git\") == True\r\n\tassert sanityCheck(\"https://github.com/angular/angular.git\") == True", "def test_missing_git_and_slash_url(self, _, __):\n self.assertTrue(detect_repo.check_for_repo_name('fake-path', 'syzkaller'))", "def __init__(self,repo,valid):\n self._repo = repo\n self._valid = valid", "def validate_repository_name(app: \"ToolShedApp\", name, user):\n if name in [\"None\", None, \"\"]:\n return \"Enter the required repository name.\"\n if name in [\"repos\"]:\n return f\"The term '{name}' is a reserved word in the Tool Shed, so it cannot be used as a repository name.\"\n check_existing = get_repository_by_name_and_owner(app, name, user.username)\n if check_existing is not None:\n if check_existing.deleted:\n return f\"You own a deleted repository named <b>{escape(name)}</b>, please choose a different name.\"\n else:\n return f\"You already own a repository named <b>{escape(name)}</b>, please choose a different name.\"\n if len(name) < 2:\n return \"Repository names must be at least 2 characters in length.\"\n if len(name) > 80:\n return \"Repository names cannot be more than 80 characters in length.\"\n if not (VALID_REPOSITORYNAME_RE.match(name)):\n return \"Repository names must contain only lower-case letters, numbers and underscore.\"\n return \"\"", "def validate(request):\n try:\n\n expected_hash = request.headers.get('X-Hub-Signature').replace(\"sha1=\", \"\")\n\n calculated_hash = hmac.new(os.environ.get('WEBHOOK_KEY', '').encode(\"utf-8\"), msg=request.data,\n digestmod=hashlib.sha1).hexdigest()\n\n is_allowed = hmac.compare_digest(calculated_hash, expected_hash)\n is_master = (request.json['ref'] == 'refs/heads/master')\n\n if is_allowed and is_master:\n os.system('/home/pi/server/PortfolioAng/deploy.sh')\n return \"OK\", 200\n\n return f'Bad request: Allowed: {is_allowed}, Master: {is_master}', 400\n\n except Exception as e:\n return 'Forbidden', 403", "def test_normal_style_repo_url(self, _, __):\n self.assertTrue(detect_repo.check_for_repo_name('fake-path', 'syzkaller'))", "def ValidateGitRepo(url, directory, clobber_mismatch=False, logger=None):\n if logger is None:\n logger = log_tools.GetConsoleLogger()\n git_dir = os.path.join(directory, '.git')\n if os.path.exists(git_dir):\n try:\n if IsURLInRemoteRepoList(url, directory, include_fetch=True,\n include_push=False):\n return\n\n logger.warn('Local git repo (%s) does not track url (%s)',\n directory, url)\n except:\n logger.error('Invalid git repo: %s', directory)\n\n if not clobber_mismatch:\n raise InvalidRepoException(url, 'Invalid local git repo: %s', directory)\n else:\n logger.debug('Clobbering invalid git repo %s' % directory)\n file_tools.RemoveDirectoryIfPresent(directory)\n elif os.path.exists(directory) and len(os.listdir(directory)) != 0:\n if not clobber_mismatch:\n raise InvalidRepoException(url,\n 'Invalid non-empty repository destination %s',\n directory)\n else:\n logger.debug('Clobbering intended repository destination: %s', directory)\n file_tools.RemoveDirectoryIfPresent(directory)", "def test_with_scmclient_errors_from_get_repository_info(self):\n tempdir = make_tempdir()\n git_dir = os.path.realpath(os.path.join(tempdir, 'git-repo'))\n\n e = Exception('oh no')\n\n execute(['git', 'init', git_dir])\n\n self.spy_on(GitClient.get_repository_info,\n owner=GitClient,\n 
op=kgb.SpyOpRaise(e))\n\n scan_result = scan_scmclients_for_path(\n path=git_dir,\n scmclient_kwargs={\n 'options': {},\n })\n\n self.assertFalse(scan_result.found)\n self.assertIsNone(scan_result.local_path)\n self.assertIsNone(scan_result.scmclient)\n\n # Check the candidates.\n self.assertEqual(len(scan_result.candidates), 1)\n\n candidate = scan_result.candidates[0]\n self.assertEqual(candidate.local_path, git_dir)\n self.assertIsInstance(candidate.scmclient, GitClient)\n\n # Check the errors.\n self.assertEqual(scan_result.scmclient_errors, {\n 'git': e,\n })", "def check_repo(self):\n _check_repo([\"svn\", \"info\"], folder=self.folder)", "def test_repository_with_invalid_user_and_repo(self):\n self.instance.repository(None, None)\n\n assert self.session.get.called is False", "def check_repository(\n self,\n path: str,\n username: Optional[str],\n password: Optional[str],\n scmtool_class: Type[SCMTool],\n local_site_name: Optional[str],\n *args,\n **kwargs,\n ) -> None:\n scmtool_class.check_repository(path, username, password,\n local_site_name)", "def test_repositories(self):\n self.assert_requires_auth(self.instance.repositories)", "def main():\n print(\"#\")\n print(\"# [\\u2713] = Committed [\\u2717] = Dirty [?] = Not a git repository\")\n print(\"#\")\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n print(\"# Checking {}\".format(path))\n gitcheck.check_repos(path)\n else:\n print(\"# Checking {}\".format(os.getcwd()))\n gitcheck.check_repos(os.getcwd())", "def check_repository(cls, path, username=None, password=None,\n local_site_name=None):\n super(SVNTool, cls).check_repository(path, username, password,\n local_site_name)\n\n if path.startswith('https://'):\n client = cls.build_client(path, username, password,\n local_site_name=local_site_name)[1]\n client.accept_ssl_certificate(path, cls.on_ssl_failure)", "def test_create_repository(self):\n self.assert_requires_auth(self.instance.create_repository, \"repo\")", "def test_repository_with_invalid_repo(self):\n self.instance.repository(\"user\", None)\n\n assert self.session.get.called is False", "def test_repo_get_git_hook(self):\n pass", "def test_repository(self):\n self.instance.repository(\"user\", \"repo\")\n\n self.session.get.assert_called_once_with(url_for(\"repos/user/repo\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of entitlements (policy instances) for client (via HTTP GET) [TROUBLESHOOTING]
url /magen/policy/v2/entitlements/
request.args['midToken'] magen_id token, to filter to client (mandatory)
request.args['action'] access action, to filter by action (optional)
request.args['application'] application for which access is wanted, to filter by application (optional)
def policy_entitlements_get_by_client():
    pstate = PolicyState()
    # pstate.test_mode = True
    logger = logging.getLogger(LogDefaults.default_log_name)
    logger.debug("get entitlements v2: request: %s request.args: %s", request, request.args)
    args_ok, badargs_cause = pstate.rest_api_required_args_validate(
        request.args, ['midToken'])
    if not args_ok:
        return RestServerApis.respond(
            HTTPStatus.NOT_FOUND, "Client Entitlements",
            {"success": False, "cause": badargs_cause})
    midToken = request.args.get('midToken')
    p_id_svc = PlibIdSvc()
    mc_id = p_id_svc.auth_clt_mcid_from_midtoken(midToken)
    filterBy = {}
    if 'action' in request.args:
        filterBy['action'] = request.args.get('action')
    if 'application' in request.args:
        filterBy['application'] = request.args.get('application')
    # Other filters go here
    success, response = PolicyValidationApi.render_entitlements_v2(
        midToken, mc_id, filterBy)
    if not success:
        return RestServerApis.respond(HTTPStatus.OK, "Entitlements", {
            "success": False, "cause": response})
    return RestServerApis.respond(HTTPStatus.OK, "Entitlements", response)
[ "def policy_entitlements_get_one_by_pi():\n pstate = PolicyState()\n\n args_ok, badargs_cause = pstate.rest_api_required_args_validate(\n request.args, ['midToken', 'pi_uuid'])\n if not args_ok:\n return RestServerApis.respond(\n HTTPStatus.NOT_FOUND, \"Client Entitlement\",\n {\"success\": False, \"cause\": badargs_cause})\n\n midToken = request.args.get('midToken')\n pi_uuid = request.args.get('pi_uuid')\n\n p_id_svc = PlibIdSvc()\n mc_id = p_id_svc.auth_clt_mcid_from_midtoken(midToken)\n\n response = PolicyValidationApi.render_single_entitlement_v2(mc_id, pi_uuid)\n return RestServerApis.respond(HTTPStatus.OK, \"log message\", response)", "def getContentEntitlements(self):\n lst = []\n rhns = self.redhat()\n entitlements = rhns.find('2.*.1.1')\n for ent in entitlements:\n oid = ent[0]\n root = oid.rtrim(1)\n ext = rhns.branch(root)\n lst.append(Content(ext))\n return lst", "def test_get_entitlement_items(self):\n pass", "def getEntitlements(self):\n return self.getContentEntitlements() \\\n + self.getRoleEntitlements()", "def get_all_extensions(organization_dict, token):\n\n headers = {\"Authorization\": \"token {0}\".format(token)}\n response = requests.get(\"https://api.elis.rossum.ai/v1/hooks?organization={0}\".format(organization_dict[\"id\"]),\n headers=headers)\n\n if response.status_code == 200:\n print(\"Fetching extensions - OK\")\n else:\n print(\"Fetching extensions - ERROR\")\n\n return response.json()[\"results\"]", "def get_enterprise_proxy_enterprises(self) -> list:\n resp = self.request('enterpriseProxy/getEnterpriseProxyEnterprises', {})\n return resp.json() if resp is not None else None", "def get(ciAppliance, appid, check_mode=False, force=False):\n if appid != '':\n return ciAppliance.invoke_get(\"Updates entitlements to an application.\", \"/v1.0/owner/applications/\"+ appid +\"/entitlements\")\n else:\n from ibmsecurity.appliance.ibmappliance import IBMFatal\n raise IBMFatal(\"!! entitlements.get: appid = none\")", "def get_ea_attributes():\n reattrib = None\n try:\n reattrib = requests.get(PAYLOAD['url'] + \"extensibleattributedef?\",\n auth=(PAYLOAD['username'],\n PAYLOAD['password']),\n verify=False)\n reattrib.raise_for_status()\n except requests.exceptions.ConnectionError as eaerrt:\n print(\"Can't reach IPAM! 
Check your VPN or Local access\", eaerrt)\n exit()\n except requests.exceptions.HTTPError as eahrrt:\n print('Check your credentials!', eahrrt)\n exit()\n\n rutfeattrib = reattrib.content.decode('utf-8')\n rjsoneattrib = json.loads(rutfeattrib)\n eattl = []\n for att in rjsoneattrib:\n for key, value in att.items():\n if key == 'name':\n eattl.append(value)\n return eattl", "def get_entities(self):\n\n\t\tself.entity_key_values = self.req_dict[\"result\"].get(\"parameters\")\n\t\treturn self.entity_key_values", "def getRoleEntitlements(self):\n lst = []\n rhns = self.redhat()\n entitlements = rhns.find('3.*.1')\n for ent in entitlements:\n oid = ent[0]\n root = oid.rtrim(1)\n ext = rhns.branch(root)\n lst.append(Role(ext))\n return lst", "def fetch_incidents_command(client: Client, params: Dict):\n last_run = demisto.getLastRun()\n incidents, next_run = client.fetch_incidents(params, last_run)\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)", "def list_entity_owners(ipaddress, entity_owner_list):\n entity = \":8181/restconf/operational/entity-owners:entity-owners\"\n url = \"http://\" + ipaddress + entity\n resp = requests.get(url, headers=con_header, auth=authentication)\n if resp.status_code != RESP_GET_SUCCESS:\n print(\"controller is down, resp_code\", resp.status_code)\n print(\"response content\", resp.content)\n sys.exit(1)\n data = json.loads(resp.content)\n ovsdb = data[\"entity-owners\"][\"entity-type\"]\n print(\"\\n\\n=================== Entity Details ===================\\n\")\n for e_type in ovsdb:\n entities = e_type[\"entity\"]\n for entity in entities:\n id = entity[\"id\"]\n if len(entity[\"owner\"]) > 0:\n print(\"NODE ID\", str(id[id.rindex(\"=\") + 2 : len(id) - 2]))\n print(\"OWNER\", str(entity[\"owner\"]))\n for owner in entity_owner_list:\n owner_role = owner.split(\":\")\n if entity[\"owner\"] == owner_role[1]:\n print(\"IP Address\", str(owner_role[0]))\n print(\"\\n\")", "def query_entitlements_1(\n active_only: Optional[bool] = None,\n item_ids: Optional[List[str]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = QueryEntitlements1.create(\n active_only=active_only,\n item_ids=item_ids,\n limit=limit,\n offset=offset,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def _get_applications(self) -> List[dict]:\n print('Getting application list...')\n\n #\n # This filter is a bit of a hack. I tried to pick a filter that would\n # return all applications. 
Without the filter, the command will\n # print a warning message stating that the result set will be\n # limited.\n #\n return self._run_az(['ad', 'app', 'list',\n '--filter=signInAudience eq \\'AzureADMyOrg\\''])", "def List(apig):\n\t\t\t\treturn apig.client.get_api_keys()['items']", "def get_revocation_list() -> List[str]:\n cache_key = \"authent:revocation_list\"\n cached = cache.get(cache_key)\n if cached:\n return cached\n\n try:\n apps.get_app_config(\"authent\")\n except LookupError:\n raise NotImplementedError(\n \"Calling this function requires mds.authent in INSTALLED_APPS.\"\n )\n\n token_ids = [\n str(t)\n for t in models.AccessToken.objects.filter(\n revoked_after__lt=timezone.now()\n ).values_list(\"jti\", flat=True)\n ]\n cache.set(cache_key, token_ids, timeout=60) # seconds\n return token_ids", "def ListInvitations(limit=1000):\r\n invitation = db_models.GaSuperProxyUserInvitation.all()\r\n return invitation.run(limit=limit)", "def vendor_accounts_list (server, form):\n\n reply_fn = lambda: _send_response (server, FORBIDDEN)\n\n # get the account and circuit list from a file in the data folder\n try:\n with open(os.path.join(settings.DATA_FOLDER, settings.ACCOUNTS_LIST), 'r') as f:\n account_id_list = f.read().splitlines()\n\n with open(os.path.join(settings.DATA_FOLDER, settings.CIRCUITS_LIST), 'r') as f:\n circuit_id_list = f.read().splitlines()\n\n data = []\n # produce some random results for each account\n for account_id in account_id_list:\n data.append({ 'cid': circuit_id_list[ int(random() * len(circuit_id_list)) ],\n 'aid': account_id,\n 'cr': \"%0.2f\" % (random() * 1000),\n 'status': (random() > 0.49) })\n\n reply_fn = _send_response (server, json.dumps(data), content_type=APP_JSON, rc=response_code_number(ALLISWELL))\n\n except IOError:\n pass\n\n _with_valid_device (form, reply_fn, lambda: _send_response (server, FORBIDDEN))", "async def query_entitlements_1_async(\n active_only: Optional[bool] = None,\n item_ids: Optional[List[str]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = QueryEntitlements1.create(\n active_only=active_only,\n item_ids=item_ids,\n limit=limit,\n offset=offset,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return client entitlement for supplied policy instance (via HTTP GET) [TROUBLESHOOTING] url /magen/policy/v2/entitlements/entitlement? request.args['midToken'] magen_id token, to filter to client (mandatory) request.args['pi_uuid'] policy instance identifier
def policy_entitlements_get_one_by_pi(): pstate = PolicyState() args_ok, badargs_cause = pstate.rest_api_required_args_validate( request.args, ['midToken', 'pi_uuid']) if not args_ok: return RestServerApis.respond( HTTPStatus.NOT_FOUND, "Client Entitlement", {"success": False, "cause": badargs_cause}) midToken = request.args.get('midToken') pi_uuid = request.args.get('pi_uuid') p_id_svc = PlibIdSvc() mc_id = p_id_svc.auth_clt_mcid_from_midtoken(midToken) response = PolicyValidationApi.render_single_entitlement_v2(mc_id, pi_uuid) return RestServerApis.respond(HTTPStatus.OK, "log message", response)
[ "def policy_entitlements_get_by_client():\n pstate = PolicyState()\n # pstate.test_mode = True\n logger = logging.getLogger(LogDefaults.default_log_name)\n logger.debug(\"get entitlements v2: request: %s request.args: %s\", request, request.args)\n\n args_ok, badargs_cause = pstate.rest_api_required_args_validate(\n request.args, ['midToken'])\n if not args_ok:\n return RestServerApis.respond(\n HTTPStatus.NOT_FOUND, \"Client Entitlements\",\n {\"success\": False, \"cause\": badargs_cause})\n\n midToken = request.args.get('midToken')\n\n p_id_svc = PlibIdSvc()\n mc_id = p_id_svc.auth_clt_mcid_from_midtoken(midToken)\n\n filterBy = {}\n if 'action' in request.args:\n filterBy['action'] = request.args.get('action')\n if 'application' in request.args:\n filterBy['application'] = request.args.get('application')\n \n # Other filters go here\n\n success, response = PolicyValidationApi.render_entitlements_v2(\n midToken, mc_id, filterBy)\n if not success:\n return RestServerApis.respond(HTTPStatus.OK, \"Entitlements\", {\n \"success\": False, \"cause\": response})\n\n return RestServerApis.respond(HTTPStatus.OK, \"Entitlements\", response)", "def test_get_entitlement_item(self):\n pass", "def get_entitlement(self, entitlement_id):\n name = self._get_entitlement_name(entitlement_id)\n request = self.service.providers().entitlements().get(name=name)\n try:\n response = request.execute()\n return response\n except HttpError as err:\n if err.resp.status == 404:\n return None", "def get(ciAppliance, appid, check_mode=False, force=False):\n if appid != '':\n return ciAppliance.invoke_get(\"Updates entitlements to an application.\", \"/v1.0/owner/applications/\"+ appid +\"/entitlements\")\n else:\n from ibmsecurity.appliance.ibmappliance import IBMFatal\n raise IBMFatal(\"!! 
entitlements.get: appid = none\")", "def entitlement_management(self):\n return self.properties.get('entitlementManagement',\n RbacApplication(self.context,\n ResourcePath(\"entitlementManagement\", self.resource_path)))", "def test_get_entitlement_items(self):\n pass", "def test_get_entitlement_template(self):\n pass", "def test_use_user_entitlement_item(self):\n pass", "def getPolicybyGuid(self):\n computersURL = '/policies/89912c9e-8dbd-4c2b-a1d8-dee8a0c2bb29'\n apiRequest = Wrapper_API()\n apiResponse = apiRequest.send_api_request(computersURL)\n return apiResponse", "def verify_policy(self, device, **kwargs):\n return_value = self._common_search_processing(\n device=device,\n previous_entry_list_keyword=\"policy_entry_list\",\n get_entry_method=self.get_policy,\n kwargs=kwargs,\n )\n device.log(message=\"{} return value: {}\".format(self.tool.get_current_function_name(), return_value))\n return return_value", "def GetInvitation(email):\r\n invitation = db_models.GaSuperProxyUserInvitation.all()\r\n invitation.filter('email = ', email)\r\n return invitation.get()", "def get_entitlement(\n entitlement_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetEntitlement.create(\n entitlement_id=entitlement_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def __get_policy__(self, agent):\n msg = comm.RequestPolicyMessage(agent.agent_id)\n reply_msg = agent.communicate(msg)\n return reply_msg.policy", "def fetch_policy(self, device, **kwargs):\n all_entry_list = self._common_get_processing(device=device, cmd_keyword=\"policy\", kwargs=kwargs)\n device.log(message=\"{} return value:\\n{}\".format(self.tool.get_current_function_name(), self.tool.pprint(all_entry_list)))\n return all_entry_list", "def ensure_entitled(request, app_name, logger):\n\n entitlement_key = \"insights\"\n if enable_smart_mgmt_check:\n entitlement_key = \"smart_management\"\n\n # TODO: Blueprint.before_request was not working as expected, using\n # before_app_request and checking URL here instead.\n if _is_mgmt_url(request.path) or _is_openapi_url(request.path, app_name):\n return # allow request\n\n auth_key = get_key_from_headers(request.headers)\n if auth_key:\n entitlements = json.loads(base64.b64decode(auth_key)).get(\"entitlements\", {})\n if entitlement_key in entitlements:\n if entitlements[entitlement_key].get(\"is_entitled\"):\n logger.debug(\"enabled entitlement found on header\")\n return # allow request\n else:\n logger.debug(\"identity header not sent for request\")\n\n # if we got here, reject the request\n logger.debug(\"entitlement not found for account.\")\n raise HTTPError(\n HTTPStatus.BAD_REQUEST, message=\"Entitlement not found for account.\"\n )", "def test_check_user_entitlement_item(self):\n pass", "def runpolicy(volume, agent, temp, server, authtoken):\n headers1 = {'Content-Type': 'application/json; charset=UTF-8', 'Authorization': authtoken, }\n idnum = tierid(volume, agent, temp)\n data1 = {\"id\": idnum}\n server = \"https://\" + server + \":8344/api/v1/policy/tiers/trigger_job\"\n\n response = requests.post(server, json=data1, headers=headers1, verify=False)\n token = json.loads(response.text)\n print(token)\n return", "def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Get the status of the federation partner 
templates\",\n uri,\n requires_modules=requires_modules,\n requires_version=requires_version)", "def get_dedicated_job(self, *, organization_id: str, instance_id: str) -> GetProviderJobModel:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
execute the conditions of the device. If all conditions are true, add time to true_time. If the true time exceeds the threshold time (mht), flag the excess operation
def on_schedule(self): conditions = self.condition_list.get("conditions") if all([parse_expr(condition).subs(self.condition_data)\ for condition in conditions]): self.device_true_time += self.interval self.device_status = True _log.debug('All condition true time {}'.format(self.device_true_time)) else: self.device_status = False _log.debug("one of the condition is false") rthr = self.device_true_time/ 3600 if rthr > self.mht: self.excess_operation = True if self.is_midnight(self.input_datetime): self.device_true_time = 0 for device_topic in self.device_topic_list: print(device_topic) self.publish(device_topic)
[ "def cond_test(self):\n self.vert_cond.home()\n self.horz_cond.home()\n # 4000 is the right step for cond_probe horizontal move to analyse\n self.horz_cond.move_to(4000)\n self.vert_cond.move_to(40000)\n print('conductivity analysing')\n time.sleep(10)\n self.vert_cond.home()\n time.sleep(10)\n # 34000 is the right step for cond_probe horizontal move to clean\n self.horz_cond.move_to(40000)\n self.vert_cond.move_to(40000)\n print('conductivity probe is cleaning')\n time.sleep(10)\n self.vert_cond.home()", "def timer_check(self, event):\n if not self.driver:\n return\n \n if self.last_cmd_vel_time is None:\n return\n \n if (event.current_real-self.last_cmd_vel_time).to_sec() > 1.0 and (self.last_speed > 0):\n self.driver.drive(0,0)\n self.last_speed = 0\n self.last_cmd_vel_time = event.current_real", "def do_run_until_condition (self, sample_rate):\n print termstyle.green(\"*** Starting T-Rex run until condition is satisfied scenario ***\")\n\n def condition (result_obj):\n return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000\n\n if not sample_rate: # use default sample rate if not passed\n sample_rate = 5\n try:\n sample_rate = int(sample_rate)\n ret = self.trex.start_trex(**self.run_params)\n ret_val = self.trex.sample_until_condition(condition, sample_rate)\n print ret_val\n print termstyle.green(\"*** End of T-Rex run ***\")\n except ValueError as inst:\n print termstyle.magenta(\"Provided illegal sample rate value. Please try again.\\n[\", inst,\"]\")\n except TRexException as inst:\n print termstyle.red(inst)", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold):\n self.spikeTimes.append(currentTau)\n self.AP()\n self.voltage -= abs(self.threshold)\n self.refractCount = self.refractory\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def __call__(self, data: np.ndarray, threshold: float):\n t_list = []\n time = 0\n # Find all threshold crossings\n data_thresh = data[data[:, 2] >= threshold, :]\n while time < self.max_time:\n # Find threshold crossings less than \"time\" before the time of event\n inds = np.logical_and(data_thresh[:, 1] >= (time), data_thresh[:, 1] <= (time + self.step_size))\n # Store a boolean indicating if a warning was ever \"On\"\n t_list.append(any(inds))\n time += self.step_size\n return t_list", "def _apply_time_limit(self, args, thisTask, cmd_args, payload, setup):\n if (not (thisTask.time is None)) and thisTask.time > 0:\n cmd_args.append(\"-l\")\n cmd_args.append(\"walltime=\" + str(int(thisTask.time) * 60))\n return True", "def check_if_time_quota_of_hand_equal(wo_computer_id,sp_computer_id,family_id,process_id):\n command='diagste -u %s -f %s -p %s -r han -t idata -m hum'%(wo_computer_id,family_id,process_id)\n out_wo = connections.execute_mml_without_check(command)\n match = re.search(r\"\\brtimer expired time time \\s*([0-9.]*)\", out_wo, re.I)\n s = match.group(1)\n l_time = s.split('.')\n ms_wo_time = int(l_time[0])*1000 + int(l_time[1])\n\n command='diagste -u %s -f %s -p %s -r han -t idata -m hum'%(sp_computer_id,family_id,process_id)\n out_sp = connections.execute_mml_without_check(command)\n match = re.search(r\"\\brtimer expired time time \\s*([0-9.]*)\", out_sp, re.I)\n s = match.group(1)\n l_time = s.split('.')\n ms_sp_time = int(l_time[0])*1000 + int(l_time[1])\n diff_time = ms_sp_time - ms_wo_time\n 
print diff_time\n if (diff_time <= 250 and diff_time >= 200 ):\n return 'success'\n else:\n return 'failure'", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold or random.random()*1000<self.rateConstant):\n self.spikeTimes.append(currentTau)\n #self.AP()\n self.voltage = 0\n self.refractCount = self.refractory\n self.voltageHistory[len(self.voltageHistory)-1] = self.voltageHistory[len(self.voltageHistory)-1] +1\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def detect_time_threshold(self, ):\n if self.threshold_timestamp:\n return time.time() - self.threshold_timestamp > self.time_threshold\n return False", "def check_if_timer_value_equal(wo_computer_id,sp_computer_id,family_id):\n command='diagste -u %s -f %s -r tim -t idata -m hum'%(wo_computer_id,family_id)\n out_wo = connections.execute_mml_without_check(command)\n\n match = re.search(r\"\\btime \\s*([0-9.]*)\", out_wo, re.I)\n print match.group(1)\n s = match.group(1)\n l_time = s.split('.')\n ms_wo_time = int(l_time[0])*1000 + int(l_time[1])\n \n match = re.search(r\"\\bproc \\s*([0-9]*)\", out_wo, re.I)\n proc_wo = match.group(1)\n print proc_wo\n \n command='diagste -u %s -f %s -r tim -t idata -m hum'%(sp_computer_id,family_id)\n out_sp = connections.execute_mml_without_check(command)\n match = re.search(r\"\\btime \\s*([0-9.]*)\", out_sp, re.I)\n print match.group(1)\n s = match.group(1)\n l_time = s.split('.')\n ms_sp_time = int(l_time[0])*1000 + int(l_time[1])\n\n match = re.search(r\"\\bproc \\s*([0-9]*)\", out_sp, re.I)\n proc_sp = match.group(1)\n print proc_sp\n \n diff_time = ms_sp_time - ms_wo_time\n print diff_time\n if (diff_time <= 250 and diff_time >= 200 )and(proc_wo == proc_sp):\n return 'success'\n else:\n return 'failure'", "def test_timing(self):\n\n import_file = os.path.join(\"..\", \"mlx75027.csv\")\n self.assertTrue(os.path.isfile(import_file))\n reg_dict = mlx.csv_import(import_file)\n mlx75027 = True\n\n pretime = mlx.calc_pretime(reg_dict, mlx75027)\n\n # Set some preheat on\n preheat = np.zeros(8, dtype=np.bool)\n preheat[0] = True\n mlx.set_preheat(reg_dict, preheat)\n\n mlx.set_pretime(reg_dict, pretime, mlx75027)\n pretime1 = mlx.calc_pretime(reg_dict, mlx75027)\n self.assertEqual(pretime, pretime1)\n return", "def cutting_time_prediction(self, job):\r\n # print('job :', job)\r\n cutting_time = 10\r\n return cutting_time", "def set_timer_needless_warming_and_receive_time_quota(computer_id,family_id,process_id,time_interval,wait_ack_time):\n command='ILWMTestCli -- -r %s %s %s -t %s 1 2 %s'%(computer_id,family_id,process_id,time_interval,wait_ack_time)\n out = connections.execute_mml_without_check(command)\n \n if out.count('set timer success') == 1:\n return 'success'\n elif out.count('set timer failure') == 1:\n return 'failure'\n else:\n return 'the return is wrong'", "def check_if_time_quota_exist(computer_id,family_id,process_id):\n command='diagste -u %s -f %s -p %s -r han -t idata -m hum'%(computer_id,family_id,process_id)\n out = connections.execute_mml_without_check(command)\n \n if out.count('refresh hand timer YES') == 1:\n return 'success'\n elif out.count('refresh hand timer NO') == 1:\n return 'failure'\n else:\n return 'the return is wrong'", "def 
set_timer_needing_warming_and_receive_time_quota(computer_id,family_id,process_id,time_interval,wait_ack_time):\n command='ILWMTestCli -- -r %s %s %s -t %s 2 2 %s'%(computer_id,family_id,process_id,time_interval,wait_ack_time)\n out = connections.execute_mml_without_check(command)\n \n if out.count('set timer success') == 1:\n return 'success'\n elif out.count('set timer failure') == 1:\n return 'failure'\n else:\n return 'the return is wrong'", "def _condition_met(self, event):\n if self.condition_function is None:\n return True\n return self.condition_function(event)", "def run_process(self):\n prepare_boiling = self.water_heater.prepare_to_boiling(MilkTank.WATER_FOR_LATHER)\n prepare_pressure_pump = self.water_heater.prepare_water_for_pressure_pump()\n if prepare_boiling and prepare_pressure_pump:\n milk_for_lather = self.milk_tank.get_amount_from_container(self.CAPACITY)\n if milk_for_lather:\n for second in range(10):\n pass\n return True\n else:\n self.add_error(self.ERROR_EMPTY_MILK_TANK)\n return False\n if not prepare_boiling:\n self.add_error(self.water_heater.ERROR_NOT_ENOUGH_WATER_TO_BOIL)\n if not prepare_pressure_pump:\n self.add_error(\"Pump\")\n return False", "def above_threshold(\n db: cosem_db.MongoCosemDB, query: Dict[str, Any], by: int = 500000\n) -> bool:\n qy = query.copy()\n qy[\"metric\"] = \"mean_false_distance\"\n qy[\"value\"] = {\"$gt\": 0}\n qy[\"iteration\"] = {\"$mod\": [25000, 0], \"$lte\": by}\n eval_col = db.access(\"evaluation\", (db.training_version, db.gt_version))\n return not (eval_col.find_one(qy) is None)", "def onNoMedicationOrTrabeculectomy(self,time):\n self.params['SideEffect'] = 0\n #IOP is supposed to increase 0.5% annually, without medication\n if self.medicalRecords['OnTrabeculectomy'] == True or self.medicalRecords['OnImplant'] == True:\n self.CorrectAttributesUpdate(time,1+2.5/100)\n self.medicalRecords['MedicationIntake'] += 1 \n else:\n self.CorrectAttributesUpdate(time,1 + 2.5/100)\n # Update continuously until end of simulation even IOP < target\n self.UpdateMedicationCombination()\n if self.medicalRecords['MedicationIntake'] == 0:\n self.medicalRecords['MedicationIntake'] += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate trajectory using SciPy ode integrator
def scipy_trajectory(self): self.xv = odeint(self.derivative, self.xv0, self.tarray)
[ "def integrate(f, x0, times, algorithm='lsoda'):\n integrator = spint.ode(algorithm)\n integrator.set_initial_value(x0)\n trajectory = np.empty((times.shape[0], x0.shape[0]))\n for i, t in enumerate(times):\n trajectory[i] = integrator.integrate(t)\n if not integrator.successful():\n raise CustomErrors.IntegrationError\n return trajectory", "def main():\r\n\r\n def dxdt_equals_x(t, x):\r\n \"\"\"\r\n Function defining ODE dxdt = x\r\n :param t: t value\r\n :param x: x value\r\n :return: returns value of dxdt at (t,x)\r\n \"\"\"\r\n dxdt = x\r\n return dxdt\r\n\r\n def dxdt_equals_x_true(t):\r\n \"\"\"\r\n Returns true values of x for the ODE dxdt = x for given values of t\r\n :param t: t value(s) to return solution for\r\n :return: Returns true values of x for the ODE dxdt = x for given values of t\r\n \"\"\"\r\n x = np.exp(t)\r\n return x\r\n\r\n t = np.linspace(0, 1, 100)\r\n \"\"\"\r\n Euler, h = 0.01\r\n \"\"\"\r\n ex1_euler_sol = solve_ode(dxdt_equals_x, 1, t, 'euler', 0.01, False)\r\n\r\n \"\"\"\r\n 4th Order Runge-Kutta, h = 0.01\r\n \"\"\"\r\n ex1_rk4_sol = solve_ode(dxdt_equals_x, 1, t, 'rk4', 0.01, False)\r\n\r\n \"\"\"\r\n Plotting solutions and true solution\r\n \"\"\"\r\n plt.plot(t, ex1_euler_sol, label='Euler')\r\n plt.plot(t, ex1_euler_sol, label='RK4')\r\n plt.plot(t, dxdt_equals_x_true(t), label='True')\r\n plt.xlabel('t')\r\n plt.ylabel('x')\r\n plt.legend()\r\n plt.show()\r\n\r\n \"\"\"\r\n Example 2 - System of ODEs\r\n\r\n d2x/dt2 = -x, initial condition x(0) = 1\r\n \r\n This is equivalent to the system of ODEs:\r\n \r\n dx/dt = y, dy/dt = -x, initial conditions x(0) = 1, y(0) = 1\r\n\r\n Solving for t = 0 to t = 10\r\n \"\"\"\r\n\r\n def d2xdt2_equals_minus_x(t, u):\r\n \"\"\"\r\n Function defining system of ODEs dx/dt = y, dy/dt = -x\r\n :param t: t value\r\n :param u: vector u = [x, y]\r\n :return: returns value of dx/dt and dy/dt at (t,u)\r\n \"\"\"\r\n x = u[0]\r\n y = u[1]\r\n\r\n dxdt = y\r\n dydt = -x\r\n\r\n return np.array([dxdt, dydt])\r\n\r\n def d2xdt2_equals_minus_x_true(t):\r\n \"\"\"\r\n Function returning true value of system of ODEs dxdt = y, dy/dt = -x\r\n :param t: t value\r\n :return: returns true value of x and y at t\r\n \"\"\"\r\n x = np.sin(t) + np.cos(t)\r\n y = np.cos(t) - np.sin(t)\r\n return np.array([x, y])\r\n\r\n t = np.linspace(0, 10, 100)\r\n \"\"\"\r\n Euler, h = 0.01\r\n \"\"\"\r\n ex2_euler_sol = solve_ode(d2xdt2_equals_minus_x, [1, 1], t, 'rk4', 0.01, True)\r\n ex2_euler_sol_x = ex2_euler_sol[0]\r\n ex2_euler_sol_y = ex2_euler_sol[1]\r\n\r\n \"\"\"\r\n 4th Order Runge-Kutta, h = 0.01\r\n \"\"\"\r\n ex2_rk4_sol = solve_ode(d2xdt2_equals_minus_x, [1, 1], t, 'rk4', 0.01, True)\r\n ex2_rk4_sol_x = ex2_rk4_sol[0]\r\n ex2_rk4_sol_y = ex2_rk4_sol[1]\r\n\r\n \"\"\"\r\n Plotting solutions and true solution\r\n \"\"\"\r\n true = d2xdt2_equals_minus_x_true(t)\r\n true_x = true[0]\r\n true_y = true[1]\r\n\r\n plt.subplot(2, 1, 1)\r\n plt.plot(t, ex2_euler_sol_x, label='Euler')\r\n plt.plot(t, ex2_rk4_sol_x, label='RK4')\r\n plt.plot(t, true_x, label='True')\r\n plt.legend()\r\n plt.xlabel('t')\r\n plt.ylabel('x')\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.plot(t, ex2_euler_sol_y, label='Euler')\r\n plt.plot(t, ex2_rk4_sol_y, label='RK4')\r\n plt.plot(t, true_y, label='True')\r\n plt.legend()\r\n plt.xlabel('t')\r\n plt.ylabel('y (dx/dt)')\r\n plt.show()", "def traj2(t,y):\r\n\r\n\r\n x=y[0]\r\n ay=y[1]\r\n V=y[2]\r\n gamma=y[3]\r\n m=y[4]\r\n\r\n lr=y[8]\r\n lg=y[9]\r\n lv=y[10]\r\n\r\n\r\n sa=-lg/(V*sqrt((lg/V)**2+lv**2))\r\n 
ca=-lv/sqrt((lg/V)**2+lv**2)\r\n\r\n\r\n g=Ue/(ay+Re)**2\r\n TM=T/m\r\n r=ay+Re\r\n \r\n dx=V*cos(gamma)\r\n dy=V*sin(gamma)\r\n dV=TM*ca-g*sin(gamma)\r\n dgamma=TM*sa/V+(V/r-g/V)*cos(gamma)\r\n dm=-T/ISP/g0\r\n dvg=g*sin(gamma)\r\n dD=0\r\n dva=TM-TM*ca\r\n\r\n dlr=V*lg*cos(gamma)/r**2-(2*Ue*lv*sin(gamma)+2*Ue*lg*cos(gamma)/V)/r**3\r\n dlg=-lr*cos(gamma)*V+Ue*lv*cos(gamma)/r**2+lg*sin(gamma)*(V/r-Ue/(r**2*V))\r\n dlv=-lr*sin(gamma)-lg*(cos(gamma)*(1/r+Ue/((r**2)*(V**2)))-TM/V**2*sa)\r\n\r\n #print(dlr,dlv,dlg)\r\n \r\n return [dx,dy,dV,dgamma,dm,dvg,dD,dva,dlr,dlg,dlv]", "def ode_system(x, y):\n y1, y2 = y[:, 0:1], y[:, 1:]\n dy1_x = dde.grad.jacobian(y, x, i=0)\n dy2_x = dde.grad.jacobian(y, x, i=1)\n return [dy1_x - y2, dy2_x + y1]", "def ode_twobody(t, states, p):", "def evaluation_step(self):\n current_step = self.n\n # first ode: d beta(t) = (beta0(t) + beta1(t)beta(t))dt\n beta0 = [-(self.b_f + self.c_f*self.p1_grid[current_step-1][t]**2) for t in range(len(self.time))]\n beta1 = [-(2*self.b + 2*self.c*self.p1_grid[current_step-1][t]) for t in range(len(self.time))]\n if self.solver=='Euler':\n self.beta.append(self._solve_ode_euler(beta0, beta1, self.gamma)) # beta is a funcation lambda\n else:\n self.beta.append(self._solve_ode_explicit(beta0, beta1, self.gamma)) # beta is a funcation lambda\n \n # second ode: d delta(t) = (delta0(t) + delta1(t)delta(t))dt\n delta0 = [-(2*self.c_f * self.p1_grid[current_step-1][t] * self.p2_grid[current_step-1][t] + 2*self.c*self.beta[current_step-1][t]*self.p2_grid[current_step-1][t]) for t in range(len(self.time))]\n delta1 = [-(self.b + self.c*self.p1_grid[current_step-1][t]) for t in range(len(self.time))]\n if self.solver == 'Euler':\n self.delta.append(self._solve_ode_euler(delta0, delta1, 0)) # delta is a function lambda\n else:\n self.delta.append(self._solve_ode_explicit(delta0, delta1, 0)) # delta is a function lambda\n \n # third ode: d phi = (phi0(t) + phi1(t)phi(t))dt\n phi0 = [-(self.sigma**2*self.beta[current_step-1][t] + self.c_f*self.p2_grid[current_step-1][t]**2 + self.c*self.delta[current_step-1][t]*self.p2_grid[current_step-1][t]) for t in range(len(self.time))]\n phi1 = [0]*len(self.time)\n if self.solver == 'Euler':\n self.phi.append(self._solve_ode_euler(phi0, phi1, 0)) # phi is a function lambda`A\n else:\n self.phi.append(self._solve_ode_explicit(phi0, phi1, 0)) # phi is a function lambda`A\n \n \n # we update p1 and p2:\n p1_new = np.array([-self.c/(2*self.c_f)*2*self.beta[current_step-1][t] for t in range(len(self.time))])\n p2_new = np.array([-self.c/(2*self.c_f)*self.delta[current_step-1][t] for t in range(len(self.time))])\n self.p1_grid.append(p1_new)\n self.p2_grid.append(p2_new)\n self.n += 1", "def GilmoreEick_ode(R0_in, v0_in, Requ, \\\r\n t_start, t_end, t_step, \\\r\n T_l=20.):\r\n\r\n global T\r\n global T_gas_0, sc_pvapour\r\n\r\n # initial gas temperature inside bubble [K]\r\n T_gas_0 = T0_Kelvin + T_l\r\n\r\n # Compute vapour pressure using liquid temperature T_l\r\n pvapour_in = get_vapour_pressure(T_l)\r\n print \"pv = \", pvapour_in\r\n\r\n # scale initial conditions and parameters\r\n set_scale(Requ)\r\n\r\n # parameters\r\n scale_parameters(pvapour_in)\r\n\r\n # initial conditions\r\n scale_initconds(R0_in, v0_in, Requ, pvapour_in)\r\n\r\n # solve system of ODEs\r\n T = np.zeros(0)\r\n# t_data = create_tdata(t_start, t_end, t_step)\r\n\r\n o = ode(GilmoreEick_equation).set_integrator('dopri5',\r\n# atol=[1e-6, 1e0],\r\n# rtol=[1e-3, 1e-3],\r\n# first_step=1e-9,\r\n# verbosity=1,\r\n )\r\n 
o.set_initial_value([R0, v0, p0], t_start)\r\n\r\n nsteps = (t_end - t_start) / t_step + 1\r\n t = np.zeros(nsteps)\r\n R = np.zeros(nsteps)\r\n R_dot = np.zeros(nsteps)\r\n pg = np.zeros(nsteps)\r\n i = 0\r\n R_prev = R0\r\n growing = False\r\n while o.successful() and o.t < t_end:\r\n o.integrate(o.t + t_step)\r\n# print(\"%g\\t%g\\t%g\\t%g\" % (o.t, o.y[0], o.y[1], o.y[2]))\r\n t[i] = o.t * scale_t\r\n R[i] = o.y[0] * scale_R\r\n R_dot[i] = o.y[1] * scale_U\r\n pg[i] = o.y[2] * scale_p\r\n i += 1\r\n \r\n if o.y[0] >= R_prev:\r\n growing = True\r\n# print('Bubble is growing...')\r\n elif o.y[0] < R_prev and growing:\r\n # max. reached\r\n print('Max. radius in rebound reached!')\r\n \r\n # decrease Requ (condensation, diffusion)\r\n R0_in = o.y[0] * scale_R\r\n v0_in = o.y[1] * scale_U\r\n Requ = 0.60 * Requ\r\n set_scale(Requ)\r\n scale_parameters(pvapour_in)\r\n scale_initconds(R0_in, v0_in, Requ, pvapour_in)\r\n o.set_initial_value([R0, v0, p0], o.t)\r\n \r\n growing = False\r\n R_prev = o.y[0]\r\n\r\n# plt.figure()\r\n# plt.axis([0, 100, 0, 600])\r\n# plt.plot(t / 1e-6, R / 1e-6, '.')\r\n# plt.show()\r\n\r\n T = np.reshape(T, (-1, 2))\r\n\r\n return t, R, R_dot, pg, T", "def integrate_orbit(self, t_end = 1.0E17, dt=1.0E11,\n verbose=True, **kwargs):\n nsteps = int(np.ceil(t_end / dt))\n\n print \"integrating orbit for \" + self.name\n print \"for %5.4e Myr\"%(t_end/cgs.Myr)\n print \"Using %2.2e timesteps at dt = %5.4e\"%(nsteps,dt)\n\n \n\n t,x,v = leapfrog_integrate(self.acceleration_function, self.x0,\n self.v0, dt, nsteps, verbose, kwargs)\n\n\n self.t = t\n self.x = x\n self.v = v\n\n self.r = np.sqrt(np.sum(x**2, axis=-1)).flatten()\n self.vr = np.sqrt(np.sum(v**2, axis=-1)).flatten()", "def get_field_lines(self):\n self.point_list = []\n print(\"Calculating points\")\n for vec in self.start_vector:\n y = numpy.ndarray(shape=(0, 3))\n for tstep in self.tsteps:\n try:\n y += scipy.integrate.odeint(get_field_line_derivative,\n vec,\n [tstep],\n (self.field_map,),\n full_output=0)\n self.point_list.append(y)\n #print(y)\n except Exception:\n sys.excepthook(*sys.exc_info())\n #print(self.point_list[0][0], self.point_list[0][0])\n #print(self.point_list[-1][0], self.point_list[-1][-1])", "def _ode_dVdt(self, V, t, u_t0, u_t1, sigma):\n alpha = (self.dt - t) / self.dt\n beta = t / self.dt\n x = V[self.x_ind]\n u = u_t0 + (t / self.dt) * (u_t1 - u_t0)\n\n # using \\Phi_A(\\tau_{k+1},\\xi) = \\Phi_A(\\tau_{k+1},\\tau_k)\\Phi_A(\\xi,\\tau_k)^{-1}\n # and pre-multiplying with \\Phi_A(\\tau_{k+1},\\tau_k) after integration\n Phi_A_xi = np.linalg.inv(V[self.A_bar_ind].reshape((self.n_x, self.n_x)))\n\n A_subs = sigma * self.A(x, u)\n B_subs = sigma * self.B(x, u)\n f_subs = self.f(x, u)\n\n dVdt = np.zeros_like(V)\n dVdt[self.x_ind] = sigma * f_subs.transpose()\n dVdt[self.A_bar_ind] = np.matmul(A_subs, V[self.A_bar_ind].reshape((self.n_x, self.n_x))).reshape(-1)\n dVdt[self.B_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * alpha\n dVdt[self.C_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * beta\n dVdt[self.S_bar_ind] = np.matmul(Phi_A_xi, f_subs).transpose()\n z_t = -np.matmul(A_subs, x) - np.matmul(B_subs, u)\n dVdt[self.z_bar_ind] = np.matmul(Phi_A_xi, z_t)\n\n return dVdt", "def ode_solver(self, train_index: tf.Tensor, tensors: List[tf.Tensor]) -> tf.Tensor:\n # Unpack and define the training example data\n n0 = tensors[0][train_index, :] # Initial PSD\n process_variables_initial = tensors[1][train_index, 0, :] # Initial PV's\n process_variables_derivative = 
tensors[1][train_index, 1, :] # PV time-derivatives\n dt = tensors[2][train_index, :] # Time horizon\n nucleation_rates = tensors[3][train_index, :] # Rate tensor for nucleation rate\n growth_rates = tensors[4][train_index, :] # Rate tensor for growth rate\n shrinkage_rates = tensors[5][train_index, :] # Rate tensor for shrinkage rate\n agglomeration_rates = tensors[6][train_index, :] # Rate tensor for agglomeration rate\n breakage_rates = tensors[7][train_index, :] # Rate tensor for breakage rate\n # Initial state (merge n0 and initial process variables)\n x0 = tf.concat([n0, process_variables_initial], axis=0)\n # Fixed time-steps for ODE solver (linear grid)\n t0 = tf.zeros([], dtype=tf.float32)\n tspan = tf.linspace(t0, tf.reshape(dt, []), num=self.system.ode_settings.time_steps)\n (x1, report) = ts.integrate.odeint(lambda n, t: self.ode(n, t, process_variables_derivative,\n nucleation_rates, growth_rates,\n shrinkage_rates, agglomeration_rates,\n breakage_rates),\n x0,\n tspan,\n method='dopri5',\n full_output=True,\n rtol=self.system.ode_settings.rel_tol,\n atol=self.system.ode_settings.abs_tol)\n # Extract solution for t=t+dt\n x1 = tf.reshape(x1[-1, :], [tf.size(x0)])\n return x1", "def trajectory(self, x:torch.Tensor, s_span:torch.Tensor):\n x = super()._prep_odeint(x)\n sol = torchdiffeq.odeint(self.defunc, x, s_span,\n rtol=self.rtol, atol=self.atol, method=self.solver)\n return sol", "def Gilmore_ode(R0_in, v0_in, Requ, \\\r\n t_start, t_end, t_step, \\\r\n T_l=20.):\r\n \r\n global p_gas\r\n\r\n # Compute vapour pressure using liquid temperature T_l\r\n pvapour_in = get_vapour_pressure(T_l)\r\n print \"pv = \", pvapour_in\r\n\r\n # scale initial conditions and parameters\r\n set_scale(Requ)\r\n\r\n # parameters\r\n scale_parameters(pvapour_in)\r\n# print pvapour_in, sc_pvapour\r\n\r\n # initial conditions\r\n scale_initconds(R0_in, v0_in, Requ, pvapour_in)\r\n# print scale_R, R0\r\n\r\n # solve system of ODEs\r\n p_gas = np.zeros(0)\r\n t_data = create_tdata(t_start, t_end, t_step)\r\n\r\n# print (R0, v0)\r\n\r\n #xsol, i = odeint(Gilmore_deriv, (R0, v0), t_data, full_output = True)\r\n o = ode(Gilmore_equation).set_integrator('dopri5',\r\n# atol=[1e-6, 1e0],\r\n# rtol=[1e-3, 1e-3],\r\n# first_step=1e-9,\r\n# verbosity=1,\r\n )\r\n o.set_initial_value([R0, v0], t_start)\r\n\r\n nsteps = (t_end - t_start) / t_step + 1\r\n t = np.zeros(nsteps)\r\n R = np.zeros(nsteps)\r\n R_dot = np.zeros(nsteps)\r\n i = 0\r\n R_prev = R0\r\n growing = False\r\n while o.successful() and o.t < t_end:\r\n o.integrate(o.t + t_step)\r\n print(\"%g\\t%g\\t%g\" % (o.t, o.y[0], o.y[1]))\r\n t[i] = o.t * scale_t\r\n R[i] = o.y[0] * scale_R\r\n R_dot[i] = o.y[1] * scale_U\r\n i += 1\r\n \r\n if o.y[0] >= R_prev:\r\n growing = True\r\n# print('Bubble is growing...')\r\n elif o.y[0] < R_prev and growing:\r\n # max. 
reached\r\n print('max!')\r\n \r\n # decrease Requ (condensation, diffusion)\r\n R0_in = o.y[0] * scale_R\r\n v0_in = o.y[1] * scale_U\r\n Requ = 0.6 * Requ\r\n set_scale(Requ)\r\n scale_parameters(pvapour_in)\r\n scale_initconds(R0_in, v0_in, Requ, pvapour_in)\r\n o.set_initial_value([R0, v0], o.t)\r\n \r\n growing = False\r\n R_prev = o.y[0]\r\n\r\n plt.figure()\r\n# plt.axis([0, 100, 0, 600])\r\n plt.plot(t / 1e-6, R / 1e-6, '.')\r\n plt.show()\r\n \r\n# R = xsol[:, 0] * scale_R\r\n# R_dot = xsol[:, 1] * scale_U\r\n# p_gas = np.reshape(p_gas, (-1, 2))\r\n# t = t_data * scale_t\r\n\r\n return t, R, R_dot", "def __init__(self,ode):\n self.ode = ode\n # the number of steps is 1 for a one-step integrator\n self.s = 1", "def ode_rhs(self):\n\n #: Bandpass l_ce\n #b, a = signal.butter(2, 50, 'low', analog=True)\n #l_ce_filt = signal.lfilter(b, a, self._l_ce.sym)\n\n l_ce_tol = cas.fmax(self._l_ce.sym, 0.0)\n _stim = cas.fmax(0.01, cas.fmin(self._stim.sym, 1.))\n\n #: Algrebaic Equation\n l_mtc = self._l_slack.val + self._l_opt.val + self._delta_length.sym\n l_se = l_mtc - l_ce_tol\n\n #: Muscle Acitvation Dynamics\n self._dA.sym = (\n _stim - self._activation.sym)/GeyerMuscle.tau_act\n\n #: Muscle Dynamics\n #: Series Force\n _f_se = (self._f_max.val * (\n (l_se - self._l_slack.val) / (\n self._l_slack.val * self.e_ref))**2) * (\n l_se > self._l_slack.val)\n\n #: Muscle Belly Force\n _f_be_cond = self._l_opt.val * (1.0 - self.w)\n\n _f_be = (\n (self._f_max.val * (\n (l_ce_tol - self._l_opt.val * (1.0 - self.w)) / (\n self._l_opt.val * self.w / 2.0))**2)) * (\n l_ce_tol <= _f_be_cond)\n\n #: Force-Length Relationship\n val = cas.fabs(\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))\n exposant = GeyerMuscle.c * val**3\n _f_l = cas.exp(exposant)\n\n #: Force Parallel Element\n _f_pe_star = (self._f_max.val * (\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))**2)*(\n l_ce_tol > self._l_opt.val)\n\n #: Force Velocity Inverse Relation\n _f_v_eq = ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star)\n\n f_v_cond = cas.logic_and(\n _f_v_eq < self.tol, _f_v_eq > -self.tol)\n\n _f_v = cas.if_else(f_v_cond, 0.0, (_f_se + _f_be) / ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star))\n\n f_v = cas.fmax(0.0, cas.fmin(_f_v, 1.5))\n\n self._v_ce.sym = cas.if_else(\n f_v < 1.0, self._v_max.sym * self._l_opt.val * (\n 1.0 - f_v) / (1.0 + f_v * GeyerMuscle.K),\n self._v_max.sym*self._l_opt.val * (f_v - 1.0) / (\n 7.56 * GeyerMuscle.K *\n (f_v - GeyerMuscle.N) + 1.0 - GeyerMuscle.N\n ))\n\n #: Active, Passive, Tendon Force Computation\n _f_v_ce = cas.if_else(\n self._v_ce.sym < 0.,\n (self._v_max.sym*self._l_opt.val - self._v_ce.sym) /\n (self._v_max.sym*self._l_opt.val + GeyerMuscle.K * self._v_ce.sym),\n GeyerMuscle.N + (GeyerMuscle.N - 1) * (\n self._v_max.sym*self._l_opt.val + self._v_ce.sym\n ) / (\n 7.56 * GeyerMuscle.K * self._v_ce.sym - self._v_max.sym*self._l_opt.val\n ))\n\n self._a_force = self._activation.sym * _f_v_ce * _f_l * self._f_max.val\n self._p_force = _f_pe_star*_f_v - _f_be\n self._t_force = _f_se\n\n self._alg_tendon_force.sym = self._z_tendon_force.sym - self._t_force\n self._alg_active_force.sym = self._z_active_force.sym - self._a_force\n self._alg_passive_force.sym = self._z_passive_force.sym - self._p_force\n self._alg_v_ce.sym = self._z_v_ce.sym - self._v_ce.sym\n self._alg_l_mtc.sym = self._z_l_mtc.sym - l_mtc\n self._alg_dact.sym = self._z_dact.sym - self._dA.sym\n\n return True", "def trajectory (x0,y0,v,theta,g = 9.8, npts = 
1000):\n vx = v * np.cos(np.deg2rad(theta))\n vy = v * np.sin(np.deg2rad(theta))\n tfinal = (vy/g) + np.sqrt((vy/g)**2 + 2*(y0)/g)\n t = np.linspace(0, tfinal, num = npts)\n x = x0 + vx*t\n y = y0 + vy*t - .5*g*(t**2)\n return x,y", "def controller_linear(qd, t, model_drone):\n \n k_pi = model_drone.k_pi\n k_di = model_drone.k_di\n \n k_p = model_drone.k_p\n k_d = model_drone.k_d\n \n u = np.zeros(4)\n\n # Compute error in world frame where error = current - desired\n e_pos = (qd.pos - qd.pos_des)\n e_vel = (qd.vel - qd.vel_des)\n\n r_acc_des = qd.acc_des - k_di * e_vel - k_pi * e_pos\n r_acc_total = r_acc_des + np.array([0, 0, 1]) * model_drone.grav\n\n # Limit max tilt angle\n tiltangle = np.arccos(r_acc_total[2] / np.sqrt(np.sum(r_acc_total**2)))\n if tiltangle > model_drone.maxangle:\n xy_mag = np.sqrt(np.sum(r_acc_total[:2]**2))\n xy_mag_max = r_acc_total[2] * np.tan(model_drone.maxangle)\n r_acc_total[:2] = r_acc_total[:2] / xy_mag * xy_mag_max\n\n # Compute desired rotations and Euler error\n psi_des = qd.yaw_des\n theta_des = (np.cos(psi_des) * r_acc_total[0] + np.sin(psi_des) * r_acc_total[1]) / model_drone.grav\n phi_des = (-np.cos(psi_des) * r_acc_total[1] + np.sin(psi_des) * r_acc_total[0]) / model_drone.grav\n euler_des = np.array([phi_des, theta_des, psi_des])\n \n e_euler = qd.euler - euler_des\n\n # Assume that drone is around hover point\n u[0] = r_acc_total[2] * model_drone.mass\n u[1:] = model_drone.I @ (- k_p * e_euler - k_d * qd.omega)\n\n # Thrust\n F = u[0]\n\n # print('F = {0:2f}'.format(F))\n \n # Moment\n M = u[1:] # note: params.I has the moment of inertia\n \n # Output trpy and drpy as in hardware\n trpy = np.array([F, phi_des, theta_des, psi_des])\n drpy = np.array([0, 0, 0, 0])\n \n return F, M, trpy, drpy", "def __call__(self,X,t):\n xvals = X[:3]-self.locs\n rvals = numpy.sqrt( (xvals**2).sum(1) )\n \n dVdt = sum([ self.halos[i].accel(rvals[i])*xvals[i]/rvals[i] \\\n for i in range(self.N) ])\n return numpy.concatenate([X[3:] * 1E3 * yr/kpc,\n dVdt])", "def forward_integrate_dynamics(ICs,U=None,**kwargs):\n assert np.shape(ICs)==(2,), \"ICs must be a numpy array of shape (2,).\"\n LocationStrings = [\"1st\", \"2nd\"]\n for i in range(2):\n assert str(type(ICs[i])) in [\"<class 'numpy.float'>\",\"<class 'int'>\",\"<class 'float'>\",\"<class 'numpy.int32'>\",\"<class 'numpy.int64'>\",\"<class 'numpy.float64'>\"],\\\n \"ICs must be numbers. 
Check the \" + LocationStrings[i] + \" element of IC\"\n\n dt = kwargs.get(\"dt\",0.01)\n assert str(type(dt)) in [\"<class 'numpy.float'>\",\"<class 'int'>\",\"<class 'float'>\",\"<class 'numpy.int32'>\",\"<class 'numpy.int64'>\",\"<class 'numpy.float64'>\"],\\\n \"dt must be a number.\"\n\n Horizon = kwargs.get(\"Horizon\",300)\n assert str(type(Horizon)) in [\"<class 'numpy.float'>\",\"<class 'int'>\",\"<class 'float'>\",\"<class 'numpy.int32'>\",\"<class 'numpy.int64'>\",\"<class 'numpy.float64'>\"],\\\n \"Horizon must be a number.\"\n\n UsingDegrees = kwargs.get(\"UsingDegrees\",False)\n assert type(UsingDegrees)==bool, \"UsingDegrees must be either True or False (Default).\"\n\n AnimateStates = kwargs.get(\"AnimateStates\",False)\n assert type(AnimateStates)==bool, \"AnimateStates must be either True or False (Default).\"\n\n PlotStates = kwargs.get(\"PlotStates\",False)\n assert type(PlotStates)==bool, \"PlotStates must be either True or False (Default).\"\n\n Time = np.arange(0,Horizon*dt,dt)\n X = np.zeros((2,Horizon))\n if U is None:\n U = np.zeros((2,Horizon-1))\n else:\n assert np.shape(U)==(2,Horizon-1), \"U must have shape = (2,Horizon-1).\"\n\n # ICs\n if UsingDegrees:\n X[0,0] = ICs[0]*(np.pi/180)\n X[1,0] = ICs[1]*(np.pi/180)\n else:\n X[0,0] = ICs[0]\n X[1,0] = ICs[1]\n\n for i in range(Horizon-1):\n X[0,i+1] = X[0,i] + F1(X[:,i],U[:,i])*dt\n X[1,i+1] = X[1,i] + F2(X[:,i],U[:,i])*dt\n\n\n if AnimateStates==False and PlotStates==False:\n return(X)\n else:\n if AnimateStates:\n animate_trajectory(Time,X,U)\n if PlotStates:\n plt.figure(figsize=(15,10))\n\n # ax1 = plt.subplot2grid((3,2),(0,0),colspan=2)\n ax1 = plt.subplot(222)\n ax1.plot(Time[:-1],U[0,:],'r')\n ax1.plot(Time[:-1],U[1,:],'g')\n ax1.set_xlabel('Time (s)')\n ax1.set_ylabel('Tendon Tension (N)')\n if max(abs(U[0,:] - U[0,0]))<1e-7 and max(abs(U[1,:] - U[1,0]))<1e-7:\n ax1.set_ylim([min(U[:,0]) - 5,max(U[:,0]) + 5])\n\n ax2 = plt.subplot(223)\n ax2.plot(Time,180*X[0,:]/np.pi,'b')\n ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Angle (deg)')\n if max(abs(180*X[0,:]/np.pi - 180*X[0,0]/np.pi))<1e-7:\n ax2.set_ylim([180*X[0,0]/np.pi - 5,180*X[0,0]/np.pi + 5])\n\n ax3 = plt.subplot(224)\n ax3.plot(Time,180*X[1,:]/np.pi,'b--')\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Angular Velocity (deg/s)')\n if max(abs(180*X[1,:]/np.pi-180*X[1,0]/np.pi))<1e-7:\n ax3.set_ylim([180*X[1,0]/np.pi-1,180*X[1,0]/np.pi+1])\n\n ax0 = plt.subplot(221)\n Pendulum_Width = 0.01*L1\n Pendulum_Length = L1\n\n Ground = plt.Rectangle(\n (-52*Pendulum_Width/4,-Pendulum_Length/4),\n 52*Pendulum_Width/4,\n Pendulum_Length/2,\n Color='#4682b4')\n ax0.add_patch(Ground)\n\n\n Pendulum, = ax0.plot(\n [\n 0,\n Pendulum_Length*np.sin((30*np.pi/180))\n ],\n [\n 0,\n -Pendulum_Length*np.cos((30*np.pi/180))\n ],\n Color='0.50',\n lw = 10,\n solid_capstyle='round'\n )\n\n Pendulum_neutral, = ax0.plot(\n [\n 0,\n 0\n ],\n [\n 0,\n -Pendulum_Length\n ],\n Color='k',\n lw = 1,\n linestyle='--'\n )\n\n Angle_indicator, = ax0.plot(\n Pendulum_Length*np.sin(\n np.linspace(0.05*(30*np.pi/180),0.95*(30*np.pi/180),20)\n ),\n -Pendulum_Length*np.cos(\n np.linspace(0.05*(30*np.pi/180),0.95*(30*np.pi/180),20)\n ),\n Color='b',\n lw = 2,\n solid_capstyle = 'round'\n )\n k = 0.075*Pendulum_Length\n Angle_indicator_arrow, = ax0.plot(\n Pendulum_Length*np.sin(0.95*(30*np.pi/180))\n + [\n -k*np.sin((120*np.pi/180) - 0.95*(30*np.pi/180)),\n 0,\n -k*np.sin((60*np.pi/180) - 0.95*(30*np.pi/180))\n ],\n -Pendulum_Length*np.cos(0.95*(30*np.pi/180))\n + [\n 
-k*np.cos((120*np.pi/180) - 0.95*(30*np.pi/180)),\n 0,\n -k*np.cos((60*np.pi/180) - 0.95*(30*np.pi/180))\n ],\n Color='b',\n lw = 2,\n solid_capstyle='round'\n )\n Angle_damping_indicator, = ax0.plot(\n 0.50*Pendulum_Length*np.sin(\n np.linspace(\n 0.45*(30*np.pi/180),\n 1.55*(30*np.pi/180),\n 20\n )\n ),\n -0.50*Pendulum_Length*np.cos(\n np.linspace(\n 0.45*(30*np.pi/180),\n 1.55*(30*np.pi/180),\n 20\n )\n ),\n Color='#ffa500',\n lw = 2,\n solid_capstyle = 'round'\n )\n Angle_damping_indicator_arrow, = ax0.plot(\n 0.50*Pendulum_Length*np.sin(0.45*(30*np.pi/180))\n + [\n k*np.sin(0.45*(30*np.pi/180) + (60*np.pi/180)),\n 0,\n k*np.sin(0.45*(30*np.pi/180) + (120*np.pi/180))\n ],\n -0.50*Pendulum_Length*np.cos(0.45*(30*np.pi/180))\n + [\n -k*np.cos(0.45*(30*np.pi/180) + (60*np.pi/180)),\n 0,\n -k*np.cos(0.45*(30*np.pi/180) + (120*np.pi/180))\n ],\n Color='#ffa500',\n lw = 2,\n solid_capstyle='round'\n )\n\n tau1_indicator, = ax0.plot(\n 0.75*Pendulum_Length*np.sin(\n np.linspace(\n 1.05*(30*np.pi/180),\n 1.05*(30*np.pi/180)+(45*np.pi/180),\n 20\n )\n ),\n -0.75*Pendulum_Length*np.cos(\n np.linspace(\n 1.05*(30*np.pi/180),\n 1.05*(30*np.pi/180)+(45*np.pi/180),\n 20\n )\n ),\n Color='r',\n lw = 2,\n solid_capstyle = 'round'\n )\n tau1_indicator_arrow, = ax0.plot(\n 0.75*Pendulum_Length*np.sin(1.05*(30*np.pi/180)+(45*np.pi/180))\n + [\n -k*np.sin((120*np.pi/180) - 1.05*(30*np.pi/180)-(45*np.pi/180)),\n 0,\n -k*np.sin((60*np.pi/180) - 1.05*(30*np.pi/180)-(45*np.pi/180))\n ],\n -0.75*Pendulum_Length*np.cos(1.05*(30*np.pi/180)+(45*np.pi/180))\n + [\n -k*np.cos((120*np.pi/180) - 1.05*(30*np.pi/180)-(45*np.pi/180)),\n 0,\n -k*np.cos((60*np.pi/180) - 1.05*(30*np.pi/180)-(45*np.pi/180))\n ],\n Color='r',\n lw = 2,\n solid_capstyle='round'\n )\n\n tau2_indicator, = ax0.plot(\n 0.75*Pendulum_Length*np.sin(\n np.linspace(\n 0.95*(30*np.pi/180)-(45*np.pi/180),\n 0.95*(30*np.pi/180),\n 20\n )\n ),\n -0.75*Pendulum_Length*np.cos(\n np.linspace(\n 0.95*(30*np.pi/180)-(45*np.pi/180),\n 0.95*(30*np.pi/180),\n 20\n )\n ),\n Color='g',\n lw = 2,\n solid_capstyle = 'round'\n )\n tau2_indicator_arrow, = ax0.plot(\n 0.75*Pendulum_Length*np.sin(0.95*(30*np.pi/180)-(45*np.pi/180))\n + [\n k*np.sin((15*np.pi/180) + 0.95*(30*np.pi/180)),\n 0,\n k*np.sin((75*np.pi/180) + 0.95*(30*np.pi/180))\n ],\n -0.75*Pendulum_Length*np.cos(0.95*(30*np.pi/180)-(45*np.pi/180))\n + [\n -k*np.cos((15*np.pi/180) + 0.95*(30*np.pi/180)),\n 0,\n -k*np.cos((75*np.pi/180) + 0.95*(30*np.pi/180))\n ],\n Color='g',\n lw = 2,\n solid_capstyle='round'\n )\n\n\n Pendulum_Attachment = plt.Circle((0,0),50*Pendulum_Width/4,Color='#4682b4')\n ax0.add_patch(Pendulum_Attachment)\n\n Pendulum_Rivet, = ax0.plot(\n [0],\n [0],\n c='k',\n marker='o',\n lw=2\n )\n\n ax0.get_xaxis().set_ticks([])\n ax0.get_yaxis().set_ticks([])\n ax0.set_frame_on(True)\n ax0.set_xlim([-0.60*Pendulum_Length,1.00*Pendulum_Length])\n ax0.set_ylim([-1.10*Pendulum_Length,0.30*Pendulum_Length])\n\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n # ax0.text(0.05, 0.95, r\"$b_1$ = \" + str(b1) + \"\\n\" + r\"$b_2$ = \" + str(b2), transform=ax0.transAxes, fontsize=14,\n # verticalalignment='top', bbox=props)\n ax0.legend(\n (Angle_damping_indicator,tau1_indicator,tau2_indicator),\n (r\"$b_1\\dot{\\theta}$\", r\"$R_1(\\theta)u_1$\", r\"$R_2(\\theta)u_2$\"),\n loc='upper left',\n facecolor='wheat',\n framealpha=0.5,\n title=\"Torques\")\n ax0.set_aspect('equal')\n\n plt.show()", "def solve_ode_system(h,force,E,I,N=1000):\r\n \r\n # Define initial position and dependent 
variable (distance from clamp)\r\n y0 = [0,0] # [initial position at x=0,derivative at x=0]\r\n x = np.linspace(0,h,N)\r\n # Solve for shape of cantilever\r\n output = odeint(cantilever_ode_system,y0,x,args=(force,E,I))\r\n y = output[:,0]\r\n dy_dx = output[:,1]\r\n # Compute theta in degrees at the end of the beam\r\n theta = -np.arctan(dy_dx[-1])/np.pi*180\r\n # Compute arc length\r\n length = get_arc_length(x,dy_dx)\r\n \r\n return x, y, theta, length" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description for Group Length element
def test_description_group_length(self): elem = DataElement(0x00100000, 'LO', 12345) assert 'Group Length' == elem.description()
[ "def __len__(self) -> int:\n return len(self.groups[0])", "def test_grouping_attribute() -> None:\n g = Grouping()\n assert g._groups == []", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def getDataUnitCount(self):\n\t\treturn 1", "def ngroups(self):\n return len(self.elems)", "def __len__(self) -> \"int\":\n return _coin.SoGroup___len__(self)", "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def testTagFunctionLen(self):\n template = '[numbers|len]'\n self.assertEqual(self.parse(template, numbers=range(12)), \"12\")", "def test_size():\n assert Packet12.size == 1", "def get_group_cardinality_info(self, feature):\n isKeywordCardinality = feature.find('.//c1:GroupCard/c1:IsKeyword', _namespaces).text == 'true' \n intervalMin = int(feature.find('.//c1:GroupCard/c1:Interval/c1:Min/c1:IntLiteral', _namespaces).text)\n intervalMax = int(feature.find('.//c1:GroupCard/c1:Interval/c1:Max/c1:IntLiteral', _namespaces).text)\n return (isKeywordCardinality, intervalMin, intervalMax)", "def describe_data_shape(data):\n def helper(data):\n if not isinstance(data, (list, tuple)):\n return 0, type(data).__name__\n else:\n result = type(data).__name__\n result += \" [{}]\".format(len(data))\n if len(data) > 0:\n child = data[0]\n child_nesting, child_result = helper(child)\n result += \" of \" + child_result\n else:\n child_nesting = 0\n return (child_nesting + 1), result\n\n nesting, result = helper(data)\n return \"Level {}: {}\".format(nesting, result)", "def test_size():\n assert Packet2.size == 6", "def get_test_kw_child_text_length_y(data):\r\n\r\n title = data[data['y'].apply(lambda x: True if x in \"CEML__TITLE\" else False)].child_text_length\r\n price = data[data['y'].apply(lambda x: True if x in \"CEML__PRICE\" else False)].child_text_length\r\n desc = data[data['y'].apply(lambda x: True if x in \"CEML__DESCRIPTION\" else False)].child_text_length\r\n list = data[data['y'].apply(lambda x: True if x in \"CEML__DESCRIPTION__LIST__ITEMS\" else False)].child_text_length\r\n noisy = data[data['y'].apply(lambda x: True if x in \"CEML__NOISY\" else False)].child_text_length\r\n sample_size = len(noisy)\r\n title = np.random.choice(title, sample_size)\r\n desc = np.random.choice(desc, sample_size)\r\n list = np.random.choice(list, sample_size)\r\n price = np.random.choice(price, sample_size)\r\n M = np.transpose(np.array([title, price, desc, list, noisy]))\r\n M = pd.DataFrame(M, columns=['CEML__TITLE', 'CEML__PRICE', 'CEML__DESCRIPTION', 'CEML__DESCRIPTION__LIST__ITEMS', 'CEML__NOISY'])\r\n H, pval = mstats.kruskalwallis(M['CEML__TITLE'].tolist(), M['CEML__PRICE'].tolist(), M['CEML__DESCRIPTION'].tolist(), M['CEML__DESCRIPTION__LIST__ITEMS'].tolist(), M['CEML__NOISY'].tolist())\r\n print(\"H-statistic:\", H)\r\n print(\"P-Value:\", pval)\r\n if pval < 0.05: print(\"Reject NULL hypothesis - Significant differences exist between groups.\")\r\n if pval > 0.05: print(\"Accept NULL hypothesis - No significant difference between groups.\")\r\n\r\n return data", "def test_extract_data_length():\n\n test_string = \"end of data@end_of_data:@\"\n with pytest.raises(ValueError, match=\"Couldn't find data length in \"\n \"string\"):\n _extract_data_length(test_string, 'data')\n\n test_string = \"all data@end_of_data:8@\"\n output = 
_extract_data_length(test_string, 'data')\n assert output == 8", "def test_group_even_length():\n assert group(\"test\", 2) == ['te', 'st']", "def test_description_unknown_private(self):\n elem = DataElement(0x00110010, 'LO', 12345)\n elem.private_creator = 'TEST'\n assert 'Private tag data' == elem.description()\n elem = DataElement(0x00110F00, 'LO', 12345)\n assert elem.tag.is_private\n assert elem.private_creator is None\n assert 'Private tag data' == elem.description()", "def test_group_odd_length():\n assert group(\"example\", 2) == ['ex', 'am', 'pl', 'e']", "def test_team_builder_config_product_groups_count_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description with an unknown private element
def test_description_unknown_private(self): elem = DataElement(0x00110010, 'LO', 12345) elem.private_creator = 'TEST' assert 'Private tag data' == elem.description() elem = DataElement(0x00110F00, 'LO', 12345) assert elem.tag.is_private assert elem.private_creator is None assert 'Private tag data' == elem.description()
[ "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def test_private_repeater_tag(self):\n ds = Dataset()\n ds[0x60210012] = RawDataElement(\n Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True)\n ds[0x60211200] = RawDataElement(\n Tag(0x60211200), None, 6, b'123456', 0, True, True)\n private_creator_data_elem = ds[0x60210012]\n assert 'Private Creator' == private_creator_data_elem.name\n assert 'LO' == private_creator_data_elem.VR\n\n private_data_elem = ds[0x60211200]\n assert '[Overlay ID]' == private_data_elem.name\n assert 'UN' == private_data_elem.VR", "def test_description_group_length(self):\n elem = DataElement(0x00100000, 'LO', 12345)\n assert 'Group Length' == elem.description()", "def test_parse_description_elements(self):\n d = parse_description(\"val %n string %s bool %b menu %m.my_menu and editable %d.my_other_menu\",\n my_menu=[\"a\", \"b\"], my_other_menu=[\"c\", \"d\"])\n self.assertRaises(AttributeError, lambda e: e.elements, d[0])\n self.assertRaises(AttributeError, lambda e: e.elements, d[1])\n self.assertRaises(AttributeError, lambda e: e.elements, d[2])\n self.assertSetEqual({\"a\", \"b\"}, d[3].elements)\n self.assertSetEqual({\"c\", \"d\"}, d[4].elements)\n\n d = parse_description(\"val %n string %s bool %b menu %m.my_menu and editable %d.my_other_menu\",\n my_menu={\"a\": \"A\", \"b\": \"B\"}, my_other_menu={\"c\": \"C\", \"d\": \"D\"})\n self.assertRaises(AttributeError, lambda e: e.elements, d[0])\n self.assertRaises(AttributeError, lambda e: e.elements, d[1])\n self.assertRaises(AttributeError, lambda e: e.elements, d[2])\n self.assertSetEqual({\"a\", \"b\"}, d[3].elements)\n self.assertSetEqual({\"c\", \"d\"}, d[4].elements)", "def test_description_attr(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")", "def test_item_description(self):\n self.assertEqual(self.Cup.description, \"[No description provided.]\")\n self.assertEqual(self.Money.description, \"[No description provided.]\")\n self.assertEqual(self.GoldPiece.description,\n \"The highest denomination in the empire.\")\n self.assertEqual(self.SilverPiece.description,\n \"[No description provided.]\")\n self.assertEqual(self.CopperPiece.description,\n \"An even lesser currency, used only for small items.\")\n self.assertEqual(self.Sword.description, \"An equippable sword\\n\"\n \"Here's another line\")", "def render_datadesc(self, datadesc):\n\n dataDesc = Element(\"dataDesc\")\n\n SubElement(dataDesc, \"primaryData\", datadesc.primaryData)\n\n annotations = SubElement(dataDesc, \"annotations\")\n\n for ann in datadesc.annotations_list:\n SubElement(annotations, \"annotation\", ann)\n\n return dataDesc", "def _get_private_creator_tag(self, data_element):\n group = data_element.tag.group\n element = (data_element.tag.element & 0xff00) >> 8\n return dicom.tag.Tag(group, element)", "def test_private_tag_in_repeater_range(self):\n # regression test for #689\n ds = Dataset()\n ds[0x50f10010] = RawDataElement(\n Tag(0x50f10010), None, 8, b'FDMS 1.0', 0, True, True)\n ds[0x50f1100a] = RawDataElement(\n Tag(0x50f1100a), None, 6, b'ACC0.6', 0, True, True)\n private_creator_data_elem = ds[0x50f10010]\n assert 'Private Creator' == private_creator_data_elem.name\n assert 'LO' == private_creator_data_elem.VR\n\n private_data_elem = ds[0x50f1100a]\n assert '[FNC Parameters]' == private_data_elem.name\n assert 'UN' == private_data_elem.VR", "def _description_string(self) -> 
str:", "def test_get_name_link_html_mdash_for_blank_description(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='')\n self.assertIsInstance(dataset.get_name_link_html(), str)\n self.assertIn('&mdash;', dataset.get_name_link_html())", "def test_description_column(self):\n command = (\n \"\"\"\n SELECT data_type, is_nullable\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE table_name = 'flashcards' and column_name='description';\n \"\"\")\n data = ExecuteCommandFetchData().execute_command(command)\n self.assertEqual(data[0][0], 'text')\n self.assertEqual(data[0][1], 'YES')", "def test_docdata(self):\n self.assertTrue(hasattr(self.instance, \"increasing\"))\n self.assertNotEqual(\n \"\", self.cls.__doc__.splitlines()[0].strip(), msg=\"First line of docstring should not be blank\"\n )\n self.assertIsNotNone(get_docdata(self.instance), msg=\"No docdata available\")\n self.assertIsNotNone(getattr_or_docdata(self.cls, \"link\"))\n self.assertIsNotNone(getattr_or_docdata(self.cls, \"name\"))\n self.assertIsNotNone(getattr_or_docdata(self.cls, \"description\"))\n self.assertIsNotNone(self.instance.key)", "def test_pi_with_non_attribute_data(self):\n pi_data = u\"\"\" \\t keyword att1=\"value1\" \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"keyword\": None, u\"att1\": u\"value1\"})", "def set_description(description):", "def test_requestWitDescription(self):\n self.assertWellFormedRequest({\"description\": \"xyzzy\"})", "def test_description(self):\n print(f'{self.test_description.__name__}'\n f': Executing unit test for property \"description\" of class \"SlashNextUrlScan\".')\n\n self.assertEqual(self.url_scan_action.description, self.description)", "def set_description(self):\n if \"description\" not in self.data:\n logger.debug(\"Adding empty descriptions to root\")\n self.data[\"description\"] = \"\"", "def test_write_description_tag():\n data = random_data('uint8', (2, 219, 301))\n description = \"Created by TestTiffWriter\\nLorem ipsum dolor...\"\n with TempFileName('description_tag') as fname:\n imwrite(fname, data, description=description)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].description == description\n assert tif.pages[0].description1 == '{\"shape\": [2, 219, 301]}'\n assert 'ImageDescription' not in tif.pages[1].tags\n assert__str__(tif)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description with an unknown element
def test_description_unknown(self): elem = DataElement(0x00000004, 'LO', 12345) assert '' == elem.description()
[ "def test_description_unknown_private(self):\n elem = DataElement(0x00110010, 'LO', 12345)\n elem.private_creator = 'TEST'\n assert 'Private tag data' == elem.description()\n elem = DataElement(0x00110F00, 'LO', 12345)\n assert elem.tag.is_private\n assert elem.private_creator is None\n assert 'Private tag data' == elem.description()", "def test_parse_description_elements(self):\n d = parse_description(\"val %n string %s bool %b menu %m.my_menu and editable %d.my_other_menu\",\n my_menu=[\"a\", \"b\"], my_other_menu=[\"c\", \"d\"])\n self.assertRaises(AttributeError, lambda e: e.elements, d[0])\n self.assertRaises(AttributeError, lambda e: e.elements, d[1])\n self.assertRaises(AttributeError, lambda e: e.elements, d[2])\n self.assertSetEqual({\"a\", \"b\"}, d[3].elements)\n self.assertSetEqual({\"c\", \"d\"}, d[4].elements)\n\n d = parse_description(\"val %n string %s bool %b menu %m.my_menu and editable %d.my_other_menu\",\n my_menu={\"a\": \"A\", \"b\": \"B\"}, my_other_menu={\"c\": \"C\", \"d\": \"D\"})\n self.assertRaises(AttributeError, lambda e: e.elements, d[0])\n self.assertRaises(AttributeError, lambda e: e.elements, d[1])\n self.assertRaises(AttributeError, lambda e: e.elements, d[2])\n self.assertSetEqual({\"a\", \"b\"}, d[3].elements)\n self.assertSetEqual({\"c\", \"d\"}, d[4].elements)", "def test_item_description(self):\n self.assertEqual(self.Cup.description, \"[No description provided.]\")\n self.assertEqual(self.Money.description, \"[No description provided.]\")\n self.assertEqual(self.GoldPiece.description,\n \"The highest denomination in the empire.\")\n self.assertEqual(self.SilverPiece.description,\n \"[No description provided.]\")\n self.assertEqual(self.CopperPiece.description,\n \"An even lesser currency, used only for small items.\")\n self.assertEqual(self.Sword.description, \"An equippable sword\\n\"\n \"Here's another line\")", "def test_description_column(self):\n command = (\n \"\"\"\n SELECT data_type, is_nullable\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE table_name = 'flashcards' and column_name='description';\n \"\"\")\n data = ExecuteCommandFetchData().execute_command(command)\n self.assertEqual(data[0][0], 'text')\n self.assertEqual(data[0][1], 'YES')", "def test_description_group_length(self):\n elem = DataElement(0x00100000, 'LO', 12345)\n assert 'Group Length' == elem.description()", "def test_description_attr(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")", "def test_get_name_link_html_mdash_for_blank_description(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='')\n self.assertIsInstance(dataset.get_name_link_html(), str)\n self.assertIn('&mdash;', dataset.get_name_link_html())", "def render_datadesc(self, datadesc):\n\n dataDesc = Element(\"dataDesc\")\n\n SubElement(dataDesc, \"primaryData\", datadesc.primaryData)\n\n annotations = SubElement(dataDesc, \"annotations\")\n\n for ann in datadesc.annotations_list:\n SubElement(annotations, \"annotation\", ann)\n\n return dataDesc", "def test_description(title_page):\n if len(title_page.description.text_blocks) == 1:\n description = title_page.description.text_blocks[0].text\n else:\n description = \"\".join(map(lambda x: x.text, title_page.description.text_blocks))\n\n regex = \"\\n\".join(\n [\n \"^A (?:Dissertation|Thesis) Presented in Partial Fulfillment\",\n \"of the Requirements for the Degree\",\n \"(.*?)\",\n ]\n )\n\n assert re.match(regex, description), \"Description is valid\"", "def 
test_non_string(self):\n datatagger = DataTagger(\n container=self.container,\n field_name='foobar'\n )\n actual = datatagger._get_value(self.alert)\n expected = None\n self.assertEqual(actual, expected)", "def test_pi_with_non_attribute_data(self):\n pi_data = u\"\"\" \\t keyword att1=\"value1\" \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"keyword\": None, u\"att1\": u\"value1\"})", "def test_descripcion(self):\n self.assertEqual(self.message.description, 'Factura/Remito 0001-00336393')", "def test_write_description_tag():\n data = random_data('uint8', (2, 219, 301))\n description = \"Created by TestTiffWriter\\nLorem ipsum dolor...\"\n with TempFileName('description_tag') as fname:\n imwrite(fname, data, description=description)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].description == description\n assert tif.pages[0].description1 == '{\"shape\": [2, 219, 301]}'\n assert 'ImageDescription' not in tif.pages[1].tags\n assert__str__(tif)", "def _description_string(self) -> str:", "def description(description_element: str) -> str:\n log.info(\"Try to delete all html in description\")\n return BeautifulSoup(description_element, \"html.parser\").getText()", "def test_get_name_link_html_blank_description(self):\n trait = factories.HarmonizedTraitFactory.create(i_description='')\n self.assertIsInstance(trait.get_name_link_html(), str)\n self.assertIn('&mdash;', trait.get_name_link_html())", "def test_homepage_description(self):\n assert self.wp_homepage.site_description == self.wp_lib.site_description['text']\n assert self.wp_homepage.site_description_element.is_displayed() is True", "def describe_item(self):\n if self.name_item is not None:\n print(\"\\nLook! It seems there is \" + self.desc_item + \"!\")\n\n else:\n print(\"\")", "def set_description(description):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__ne__ for standard element
def test_inequality_standard(self):
    dd = DataElement(0x00100010, 'PN', 'ANON')
    assert not dd != dd
    assert DataElement(0x00100010, 'PN', 'ANONA') != dd

    # Check tag
    assert DataElement(0x00100011, 'PN', 'ANON') != dd

    # Check VR
    assert DataElement(0x00100010, 'SH', 'ANON') != dd
[ "def __neq__(self, block_data):\n return not self == block_data", "def __ne__(self, node2):\n\t\t#return self._element == node2._element and self._name == node2._name\n\t\treturn not self == node2", "def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append(lambda elem: elem.attrib[name] != value)", "def __ne__(self, vec: 'itkVersorD') -> \"bool\":\n return _itkVersorPython.itkVersorD___ne__(self, vec)", "def test_no_update_on_data_element(self):\n no_update = self.spellgen.data.attrib['noupdate']\n self.assertEqual(no_update, '1', 'Incorrect noupdate flag')", "def __ne__(self, x):\n return _core.SwigPyIterator___ne__(self, x)", "def __ne__(self, field: 'SoMFNode') -> \"SbBool\":\n return _coin.SoMFNode___ne__(self, field)", "def assert_no_data_value(self, nodata=999.99):\n if nodata:\n xy = self.get_xy(xtime=False)\n assert ~np.isin(nodata, xy[\"x\"]), (\n \"Values of {0} have been found in data. Be sure to remove no \"\n \"data values\"\n ).format(nodata)\n assert ~np.isin(nodata, xy[\"y\"]), (\n \"Values of {0} have been found in data. Be sure to remove no \"\n \"data values\"\n ).format(nodata)", "def __ne__(self, x):\n return _almathswig.SwigPyIterator___ne__(self, x)", "def __ne__(self, other: SimpleWave) -> bool:\r\n\r\n return not self.__eq__(other)", "def __ne__(self, field: 'SoMFVec4ub') -> \"SbBool\":\n return _coin.SoMFVec4ub___ne__(self, field)", "def __ne__(self, field: 'SoSFNode') -> \"int\":\n return _coin.SoSFNode___ne__(self, field)", "def __ne__(self, *args):\n return _snap.TIntHSI___ne__(self, *args)", "def test_attribute_noteq(self):\n attr1 = Attribute(\"device\", \"read\")\n attr2 = Attribute(\"device\", \"write\")\n assert attr1 != attr2", "def __ne__(self, field: 'SoSFVec4ub') -> \"int\":\n return _coin.SoSFVec4ub___ne__(self, field)", "def __ne__(self, *args):\n return _wiimote.wiimote___ne__(self, *args)", "def __ne__(self, field: 'SoMFVec4us') -> \"SbBool\":\n return _coin.SoMFVec4us___ne__(self, field)", "def __ne__(self, field: 'SoMFVec3d') -> \"SbBool\":\n return _coin.SoMFVec3d___ne__(self, field)", "def _should_skip_number_elem(data, elem):\n number_system = elem.get('numberSystem', 'latn')\n\n if number_system != 'latn':\n data['unsupported_number_systems'].add(number_system)\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__ne__ for sequence element
def test_inequality_sequence(self):
    dd = DataElement(0x300A00B0, 'SQ', [])
    assert not dd != dd
    assert not DataElement(0x300A00B0, 'SQ', []) != dd
    ee = DataElement(0x300A00B0, 'SQ', [Dataset()])
    assert ee != dd

    # Check value
    dd.value = [Dataset()]
    dd[0].PatientName = 'ANON'
    ee[0].PatientName = 'ANON'
    assert not ee != dd
    ee[0].PatientName = 'ANONA'
    assert ee != dd
[ "def test_inequality_standard(self):\n dd = DataElement(0x00100010, 'PN', 'ANON')\n assert not dd != dd\n assert DataElement(0x00100010, 'PN', 'ANONA') != dd\n\n # Check tag\n assert DataElement(0x00100011, 'PN', 'ANON') != dd\n\n # Check VR\n assert DataElement(0x00100010, 'SH', 'ANON') != dd", "def __ne__(self, x):\n return _core.SwigPyIterator___ne__(self, x)", "def __neq__(self, block_data):\n return not self == block_data", "def __ne__(self, x):\n return _almathswig.SwigPyIterator___ne__(self, x)", "def __ne__(self, node2):\n\t\t#return self._element == node2._element and self._name == node2._name\n\t\treturn not self == node2", "def __ne__(self, field: 'SoMFVec4ub') -> \"SbBool\":\n return _coin.SoMFVec4ub___ne__(self, field)", "def __ne__(self, field: 'SoMFVec4i32') -> \"SbBool\":\n return _coin.SoMFVec4i32___ne__(self, field)", "def __ne__(self, *args):\n return _snap.TIntHSI___ne__(self, *args)", "def __ne__(self, other):\n if isinstance(other, Timeline):\n return (len(self) != len(other)) or \\\n any([segment != other[s] for s, segment in enumerate(self)])\n else:\n return True", "def __ne__(self, vec: 'itkVersorD') -> \"bool\":\n return _itkVersorPython.itkVersorD___ne__(self, vec)", "def __ne__(self, field: 'SoMFVec3i32') -> \"SbBool\":\n return _coin.SoMFVec3i32___ne__(self, field)", "def __ne__(self, field: 'SoMFVec4s') -> \"SbBool\":\n return _coin.SoMFVec4s___ne__(self, field)", "def __ne__(self, u: 'SbDPMatrix') -> \"int\":\n return _coin.SbDPMatrix___ne__(self, u)", "def __ne__(self, field: 'SoMFNode') -> \"SbBool\":\n return _coin.SoMFNode___ne__(self, field)", "def __ne__(self, field: 'SoMFVec4us') -> \"SbBool\":\n return _coin.SoMFVec4us___ne__(self, field)", "def __ne__(self: bitlist, other: bitlist) -> bool:\n # Ignores leading zeros in representation.\n return int(self) != int(other)", "def __ne__(self, other: Callable[['ParserState'], bool]) -> bool:\r\n\t\treturn self.NextState is not other", "def __ne__(self, field: 'SoMFVec4ui32') -> \"SbBool\":\n return _coin.SoMFVec4ui32___ne__(self, field)", "def __ne__(self, field: 'SoSFVec4ub') -> \"int\":\n return _coin.SoSFVec4ub___ne__(self, field)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test hash(DataElement) raises TypeError
def test_hash(self):
    with pytest.raises(TypeError, match=r"unhashable"):
        hash(DataElement(0x00100010, 'PN', 'ANON'))
[ "def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True", "def test_hashable(self):\n\n test = 'test'\n\n result = hashiter(test)\n\n self.assertEqual(result, hash(test))", "def __hash__(self):\n raise TypeError(\"%s objects are unhashable\" % self.__class__.__name__)", "def _hashable(item):\n try:\n hash(item)\n except TypeError:\n return util_hash.hash_data(item)\n else:\n return item", "def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")", "def __hash__(self) -> hash:\n if self.empty:\n return hash(())\n else:\n return hash((self.data, self.left, self.right))", "def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.atom1, self.atom2, self.atom3, self.atom4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.atom1, self.atom2, self.atom3, self.atom4}), 4)", "def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)", "def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98", "def hash_data(data):\n return hashlib.md5(data).hexdigest()", "def test__AutoModerationActionMetadataBase__hash():\n metadata = AutoModerationActionMetadataBase()\n \n vampytest.assert_instance(hash(metadata), int)", "def __hash__(self):", "def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.bond1, self.bond2, self.bond3, self.bond4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.bond1, self.bond2, self.bond3, self.bond4}), 4)", "def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.mol1, self.mol2, self.mol3])), 2)\n\n # Test set behavior\n self.assertEqual(len({self.mol1, self.mol2, self.mol3}), 2)", "def get_hash(self, descriptor):", "def __check_hash__(self) -> None:\n state = self.__dict__.copy()\n event_hash = state.pop(\"__event_hash__\")\n method_name = state.get(\"__event_hash_method_name__\", \"__hash_object_v1__\")\n hash_method = getattr(self, method_name)\n if event_hash != hash_method(state):\n raise EventHashError()", "def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)", "def __hash__(self):\n # see if there is an 
available hash value\n # if you are seeing cache bugs this is the thing\n # to try eliminating because it is very likely that\n # someone somewhere is modifying the data without\n # setting `self._hash = None`\n hashed = getattr(self, '_hash', None)\n if hashed is not None:\n return hashed\n\n hashed = hash_fast(\n (''.join(str(hash(k)) + v.get('geometry', '')\n for k, v in self.edge_data.items()) +\n ''.join(str(k) + v.get('geometry', '')\n for k, v in self.node_data.items())).encode('utf-8') +\n b''.join(v['matrix'].tobytes()\n for v in self.edge_data.values()\n if 'matrix' in v))\n self._hash = hashed\n return hashed", "def test_list(self):\n\n test = ['test', 1, list()]\n\n result = hashiter(test)\n\n self.assertEqual(\n result,\n hash(list) +\n (hash('test') + 1) * 1 +\n (hash(1) + 1) * 2 + (hashiter([]) + 1) * 3\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__str__ output with no VR
def test_str_no_vr(self):
    elem = DataElement(0x00100010, 'PN', 'ANON')
    assert "(0010, 0010) Patient's Name" in str(elem)
    assert "PN: 'ANON'" in str(elem)
    elem.showVR = False
    assert "(0010, 0010) Patient's Name" in str(elem)
    assert 'PN' not in str(elem)
[ "def as_string(self, element):\n raise NotImplementedError()", "def reprLRData(s):\n return repr(s)", "def test_str_method(self):\n sq8 = Square(2, id=99)\n str_s = sq8.__str__()\n self.assertEqual(str_s, '[Square] (99) 0/0 - 2')", "def test_repr(self):\n attr = Attribute(\"device\", \"name\")\n assert repr(attr) == '<Attribute(\"device\", \"name\")>'", "def print_element(self):\n\t\tif self.state == DISCOVERED:\n\t\t\tif self.content != \"0\":\n\t\t\t\treturn self.content\n\t\t\telse :\t\t\t\t\n\t\t\t\treturn \" \"\n\t\telse :\n\t\t\treturn self.state", "def __str__(self):\n\t\treturn 'vector( '+', '.join(map(str, self.data))+' )'", "def testStr(self):\n f4 = self.f4\n self.assertEqual(str(f4), 'Finite field of order 2^2')", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def __repr__(self):\n return 'Stratum({})'.format(self.to_native())", "def test_str(self):\n\n atts_list = [\n \"koAngles_SolarPanel\",\n \"ko_dtStep\",\n \"settlingTime\",\n \"thrust\",\n \"slewIsp\",\n \"scMass\",\n \"slewMass\",\n \"skMass\",\n \"twotanks\",\n \"dryMass\",\n \"coMass\",\n \"occulterSep\",\n \"skIsp\",\n \"defburnPortion\",\n \"checkKeepoutEnd\",\n \"forceStaticEphem\",\n \"constTOF\",\n \"occ_dtmin\",\n \"occ_dtmax\",\n \"maxdVpct\",\n \"dVtot\",\n \"dVmax\",\n \"flowRate\",\n \"havejplephem\",\n \"slewEff\",\n \"skEff\",\n ]\n\n for mod in self.allmods:\n if \"__str__\" not in mod.__dict__:\n continue\n\n with RedirectStreams(stdout=self.dev_null):\n if \"SotoStarshade\" in mod.__name__:\n obj = mod(f_nStars=4, **copy.deepcopy(self.spec))\n else:\n obj = mod(**copy.deepcopy(self.spec))\n original_stdout = sys.stdout\n sys.stdout = StringIO()\n # call __str__ method\n result = obj.__str__()\n # examine what was printed\n contents = sys.stdout.getvalue()\n self.assertEqual(type(contents), type(\"\"))\n # attributes from ICD\n for att in atts_list:\n self.assertIn(\n att, contents, \"{} missing for {}\".format(att, mod.__name__)\n )\n sys.stdout.close()\n # it also returns a string, which is not necessary\n self.assertEqual(type(result), type(\"\"))\n # put stdout back\n sys.stdout = original_stdout", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = (\n \"<xbout.BoutDataset>\\n\"\n + \"Contains:\\n{}\\n\".format(str(self.data))\n + \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n )\n if self.options:\n text += \"Options:\\n{}\".format(styled(self.options))\n return text", "def toString(self) -> \"SbString\":\n return _coin.SbVec3d_toString(self)", "def getStr(self):\r\n return _osgDB.Field_getStr(self)", "def test_Point_repr():\n point = Point(10, 5)\n assert repr(point) == 'Point(10, 5)'", "def convertToString(self, str: 'SbString') -> \"void\":\n return _coin.ScXMLRealDataObj_convertToString(self, str)", "def test_tag_string_representation(self):\n self.assertEqual(\n str(self.tag),\n \"Novel\"\n )", "def test_repr_success(self):\r\n expected_res = 'Car(price=1.0, producer=\"Ford\", car_type=\"Diesel\", ' \\\r\n 'number=\"65c11813-3eb5-4d48-b62b-3da6ef951f53\", mileage=1.0, garage_numb=None)'\r\n self.assertIsInstance(self.car1.__repr__(), str)\r\n self.assertEqual(self.car1.__repr__(), expected_res)", "def test_str_valid_data(self):\n self.assertTrue('data' in ''.join(self.a.render()))", "def _elem2str(elemDict):\n try:\n elemStr = [elemDict.length, 0, 0, data.elem_type.find_key(elemDict.type)]\n except:\n elemStr = [0.0, 0, 0, data.elem_type.find_key(elemDict.type)]\n if 
elemDict.type == 'drift':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.pipe_radius)\n\n elif elemDict.type in 'quad':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.Kx)\n elemStr.append(elemDict.file_id)\n elemStr.append(elemDict.pipe_radius)\n if 'misalign_x' in elemDict:\n elemStr.append(elemDict.misalign_x)\n if 'misalign_y' in elemDict:\n elemStr.append(elemDict.misalign_y)\n if 'rotation_x' in elemDict:\n elemStr.append(elemDict.rotation_x)\n if 'rotation_y' in elemDict:\n elemStr.append(elemDict.rotation_y)\n if 'rotation_z' in elemDict:\n elemStr.append(elemDict.rotation_z)\n \n elif elemDict.type == 'quad_hardedge':\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.Kx)\n if elemDict.flagEntrance == True:\n elemStr.append(0.0)\n else:\n elemStr.append(1.0)\n \n elif elemDict.type == 'const_focusing':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.kx2)\n elemStr.append(elemDict.ky2)\n elemStr.append(elemDict.kz2)\n elemStr.append(elemDict.pipe_radius)\n\n elif elemDict.type == 'solenoid':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.Bz)\n elemStr.append(elemDict.file_id)\n elemStr.append(elemDict.pipe_radius)\n if 'misalign_x' in elemDict:\n elemStr.append(elemDict.misalign_x)\n if 'misalign_y' in elemDict:\n elemStr.append(elemDict.misalign_y)\n if 'rotation_x' in elemDict:\n elemStr.append(elemDict.rotation_x)\n if 'rotation_y' in elemDict:\n elemStr.append(elemDict.rotation_y)\n if 'rotation_z' in elemDict:\n elemStr.append(elemDict.rotation_z)\n \n elif elemDict.type == 'dipole':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.bending_angle)\n elemStr.append(elemDict.k1)\n elemStr.append(elemDict.file_id)\n elemStr.append(elemDict.pipe_radius)\n if 'entrance_angle' in elemDict:\n elemStr.append(elemDict.entrance_angle)\n if 'exit_angle' in elemDict:\n elemStr.append(elemDict.exit_angle)\n if 'entrance_curvature' in elemDict:\n elemStr.append(elemDict.entrance_curvature)\n if 'exit_curvature' in elemDict:\n elemStr.append(elemDict.exit_curvature)\n if 'fringe_field_integration' in elemDict:\n elemStr.append(elemDict.fringe_field_integration)\n\n elif elemDict.type == 'multipole_thin':\n elemStr[0]=0.0\n elemStr.append(0.0)\n elemStr.append(elemDict.KL_dipole)\n elemStr.append(elemDict.KL_quad)\n elemStr.append(elemDict.KL_sext)\n if 'KL_oct' in elemDict:\n elemStr.append(elemDict.KL_oct)\n if 'KL_deca' in elemDict:\n elemStr.append(elemDict.KL_deca)\n if 'KL_dodeca' in elemDict:\n elemStr.append(elemDict.KL_dodeca)\n\n\n elif elemDict.type == 'linear_matrix_map':\n elemStr.append(0.0)\n elemStr.append(elemDict.nonlinear_insert_length)\n elemStr.append(elemDict.nonlinear_insert_tuneAdvance)\n elemStr.append(elemDict.tune_advance_x)\n elemStr.append(elemDict.tune_advance_y)\n\n elif elemDict.type in ['nonlinear_insert','nonlinear_insert_smooth_focusing']:\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.strength_t)\n elemStr.append(elemDict.transverse_scale_c)\n if elemDict.type == 'nonlinear_insert':\n elemStr.append(elemDict.tune_advance)\n else:\n elemStr.append(elemDict.betx)\n elemStr.append(elemDict.pipe_radius)\n \n elif elemDict.type == 'nonlinear_insert_sliced':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.strength_t)\n elemStr.append(elemDict.transverse_scale_c)\n elemStr.append(elemDict.tune_advance)\n 
elemStr.append(elemDict.total_length)\n elemStr.append(elemDict.start_position)\n elemStr.append(elemDict.pipe_radius)\n\n elif elemDict.type == 'DTL':\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.field_scaling)\n elemStr.append(elemDict.frequency)\n elemStr.append(elemDict.phase)\n elemStr.append(elemDict.file_id)\n elemStr.append(elemDict.pipe_radius)\n elemStr.append(elemDict.quad1_length)\n elemStr.append(elemDict.quad1_B1)\n elemStr.append(elemDict.quad2_length)\n elemStr.append(elemDict.quad2_B1)\n if 'misalign_x' in elemDict:\n elemStr.append(elemDict.misalign_x)\n if 'misalign_y' in elemDict:\n elemStr.append(elemDict.misalign_y)\n\n elif elemDict.type == 'loop':\n elemStr.append(0.0)\n elemStr.append(elemDict.turns)\n \n elif elemDict.type in ['CCDTL','CCL','SCRF','solenoidRF','EMfld']:\n elemStr[1]=elemDict.n_sckick\n elemStr[2]=elemDict.n_map\n elemStr.append(elemDict.field_scaling)\n elemStr.append(elemDict.frequency)\n elemStr.append(elemDict.phase)\n elemStr.append(elemDict.file_id)\n elemStr.append(elemDict.pipe_radius)\n if 'misalign_x' in elemDict:\n elemStr.append(elemDict.misalign_x)\n if 'misalign_y' in elemDict:\n elemStr.append(elemDict.misalign_y)\n if 'rotation_x' in elemDict:\n elemStr.append(elemDict.rotation_x)\n if 'rotation_y' in elemDict:\n elemStr.append(elemDict.rotation_y)\n if 'rotation_z' in elemDict:\n elemStr.append(elemDict.rotation_z)\n if elemDict.type == 'solenoidRF':\n elemStr.append(elemDict.Bz)\n\n elif elemDict.type == 'centroid_shift':\n elemStr.append(1.0)\n elemStr.append(elemDict.x)\n elemStr.append(elemDict.px)\n elemStr.append(elemDict.y)\n elemStr.append(elemDict.py)\n elemStr.append(elemDict.z)\n elemStr.append(elemDict.pz)\n \n \n elif elemDict.type == 'RFkick':\n elemStr.append(1.0)\n elemStr.append(elemDict.vmax)\n elemStr.append(elemDict.phi0)\n elemStr.append(elemDict.harmonic_number)\n \n \n elif elemDict.type == '-8':\n elemStr[2]=elemDict.file_id\n elemStr.append(elemDict.value)\n \n elif elemDict.type == 'write_raw_ptcl':\n elemStr[2]=elemDict.file_id\n elemStr.append(elemDict.format_id)\n elemStr.append(elemDict.turn)\n if 'sample_period' in elemDict:\n elemStr.append(elemDict.sample_period)\n \n elif elemDict.type in ['TBT','TBT_multiple_file']:\n elemStr[2]=elemDict.file_id\n elemStr.append(elemDict.pID_begin)\n elemStr.append(elemDict.pID_end)\n if elemDict.type == 'TBT_multiple_file':\n elemStr.append(elemDict.n_files)\n\n elif elemDict.type == 'pipe_override':\n elemStr.append(data.pipe_shape.find_key(elemDict.pipe_shape))\n elemStr.append(elemDict.xmax)\n elemStr.append(elemDict.ymax)\n \n elif elemDict.type in ['TBT_integral','TBT_integral_onMomentum']:\n elemStr[2]=elemDict.file_id\n elemStr.append(elemDict.betx)\n elemStr.append(elemDict.alfx)\n elemStr.append(elemDict.strength_t)\n elemStr.append(elemDict.transverse_scale_c)\n elemStr.append(elemDict.pID_begin)\n elemStr.append(elemDict.pID_end)\n \n for i in range(len(elemStr)):\n elemStr[i] = str(elemStr[i])\n\n return elemStr", "def __repr__(self):\n return str(self.values[0]) if self.settled else '_'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__repr__ with a sequence
def test_repr_seq(self):
    elem = DataElement(0x300A00B0, 'SQ', [Dataset()])
    elem[0].PatientID = '1234'
    assert repr(elem) == repr(elem.value)
[ "def test_repr(self):\n attr = Attribute(\"device\", \"name\")\n assert repr(attr) == '<Attribute(\"device\", \"name\")>'", "def test_sparsearray_repr():\n sa = SparseArray(LIST1)\n\n print(repr(LIST1))\n assert repr(sa) == repr(LIST1)", "def reprLRData(s):\n return repr(s)", "def test_repr(self):\n self.assertEqual(repr(self.named), \\\n 'PairMatrix('+ repr(self.named._data) + ',' +\\\n repr(self.ab_pairs)+\",'name')\")", "def test_repr():\n op = qml.FlipSign([0, 1], wires=(\"a\", \"b\"))\n expected = \"FlipSign([0, 1], wires=['a', 'b'])\"\n assert repr(op) == expected", "def test_Point_repr():\n point = Point(10, 5)\n assert repr(point) == 'Point(10, 5)'", "def test___repr__in_list(self):\n expected = ['Country(\\\"Country_One\\\", 100, 1000)']\n actual = [repr(self.test_country_1)]\n self.assertEqual(expected, actual)", "def test_repr_value(self):\n self.assertIn(\n repr(self.pdf.pages[0]['Resources']['ColorSpace']['CS0']),\n (\n \"['ICCBased', <IndirectObject(62, 0)>]\",\n \"[u'ICCBased', <IndirectObject(62, 0)>]\",\n ))", "def __repr__(self):\n if isinstance(self.item, YAMLNode):\n return self.item.__repr__()\n elif isinstance(self.item, list):\n return \"[{0}]\".format(\", \".join([x.__repr__() for x in self.item]))\n elif isinstance(self.item, dict):\n item = {}\n for x, y in self.item.items():\n item[x] = y\n return str(item)\n elif isinstance(self.item, int):\n return str(self.item)\n else:\n return \"'{0}'\".format(self.item)", "def test_print():\n linked_list = LL.LinkedList()\n linked_list.insert(u\"test_val_1\")\n linked_list.insert(u\"test_val_2\")\n linked_list.insert(u\"test_val_3\")\n assert linked_list.__str__() == u\"(test_val_3, test_val_2, test_val_1)\"", "def test_random_repr(self, asymgame):\n expected_repr = \"Axelrod game with matrices: {}\".format((asymgame.A, asymgame.B))\n self.assertEqual(expected_repr, asymgame.__repr__())\n self.assertEqual(expected_repr, str(asymgame))", "def test_repr_success(self):\r\n expected_res = 'Car(price=1.0, producer=\"Ford\", car_type=\"Diesel\", ' \\\r\n 'number=\"65c11813-3eb5-4d48-b62b-3da6ef951f53\", mileage=1.0, garage_numb=None)'\r\n self.assertIsInstance(self.car1.__repr__(), str)\r\n self.assertEqual(self.car1.__repr__(), expected_res)", "def test___repr__(self):\n expected = 'Country(\\\"Country_One\\\", 100, 1000)'\n actual = repr(self.test_country_1)\n self.assertEqual(expected, actual)", "def __repr__(self):\n\n result = \"\"\n for dessert in self.desserts:\n result += f\"{dessert}\\n\"\n return result", "def test_command_repr(self):\n cmd = Command(\"device\", \"command name\", 1, \"def\", 3, kw1=\"abc\")\n assert (\n repr(cmd) == \"<Command('device', 'command name', 1, 'def', 3, kw1='abc')>\"\n )", "def __repr__(self: object) -> str:\n return \"Sequence(%s)\" % self.path", "def test_repr(self) -> None:\n msg0_str = \"OatmealMsg('DISR', token='XY')\"\n msg1_str = \"OatmealMsg('RUNR', 1.23, True, 'Hi!', [1, 2], token='aa')\"\n msg2_str = \"OatmealMsg('XYZA', 101, [0, 42], token='zZ')\"\n msg3_str = \"OatmealMsg('LOLR', 123, True, 99.9, token='Oh')\"\n msg4_str = \"OatmealMsg('TSTR', 1, 'abc', [], token='xy')\"\n msg5_str = \"OatmealMsg('QWER', '', token='AZ')\"\n self.assertEqual(repr(eval(msg0_str)), msg0_str)\n self.assertEqual(repr(eval(msg1_str)), msg1_str)\n self.assertEqual(repr(eval(msg2_str)), msg2_str)\n self.assertEqual(repr(eval(msg3_str)), msg3_str)\n self.assertEqual(repr(eval(msg4_str)), msg4_str)\n self.assertEqual(repr(eval(msg5_str)), msg5_str)", "def test_repr_type(self):\n self.assertIsInstance(\n 
repr(self.pdf.pages[0]['Resources']['ColorSpace']['CS0']),\n str)", "def test_repr_fail(self):\r\n expected_res = 'CAR(price=1.0, producer=\"Ford\", car_type=\"Diesel\", ' \\\r\n 'number=\"65c11813-3eb5-4d48-b62b-3da6ef951f53\", mileage=1.0, garage_numb=None)'\r\n self.assertIsInstance(self.car1.__repr__(), str)\r\n self.assertFalse(self.car1.__repr__() == expected_res)", "def test_repr_success(self):\r\n expected_res_1 = \"\"\"Garage(\"{'places': 15, 'owner': 'be23adf5-3d7f-43f1-9874-e60d61c84522', 'number': 1, 'cars': [Car(price=1.0, producer=\"Ford\", car_type=\"Diesel\", number=\"65c11813-3eb5-4d48-b62b-3da6ef951f53\", mileage=1.0, garage_numb=None)], 'town': 'Amsterdam'}\")\"\"\"\r\n expected_res_2 = \"\"\"Garage(\"{'places': 15, 'owner': 'be23adf5-3d7f-43f1-9874-e60d61c84523', 'number': 2, 'cars': [Car(price=1.0, producer=\"Ford\", car_type=\"Diesel\", number=\"65c11813-3eb5-4d48-b62b-3da6ef951f55\", mileage=1.0, garage_numb=None)], 'town': 'Kiev'}\")\"\"\"\r\n self.assertEqual(self.garage1.__repr__(), expected_res_1)\r\n self.assertEqual(self.garage2.__repr__(), expected_res_2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__getitem__ raises if the value is not indexable
def test_getitem_raises(self):
    elem = DataElement(0x00100010, 'LO', 12345)
    with pytest.raises(TypeError):
        elem[0]
[ "def test_getitem_setitem_not_implemented():", "def testAccessIncorrectIndex(self):\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, (47, 18))\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, \"nothing\")", "def test_getitem_existing(self):\n self.assertEqual('v1', self.record.data_values['k1'])\n self.assertEqual('v2', self.record.data_values['k2'])\n self.assertEqual(123, self.record.data_values['k3'])", "def __getitem__(self, given):\n return self.dataset[given]", "def test_getitem(self):\n catalog = readEvents()\n self.assertEqual(catalog[0], catalog.events[0])\n self.assertEqual(catalog[-1], catalog.events[-1])\n self.assertEqual(catalog[2], catalog.events[2])\n # out of index should fail\n self.assertRaises(IndexError, catalog.__getitem__, 3)\n self.assertRaises(IndexError, catalog.__getitem__, -99)", "def __getitem__(self, i):\n return self._data[i]", "def __getitem__(self, idx: Union[int, slice]):\n if isinstance(idx, int):\n if idx < 0 and len_or_none(self) is None:\n raise ValueError(neg_idx_msg)\n return self.get(idx)\n elif isinstance(idx, slice):\n return self.islice(idx.start, idx.stop, idx.step)\n else:\n raise TypeError(\n \"FIt indices must be integers or slices, not \" + type(idx).__name__\n )", "def __getitem__(self, key):\n\n if is_idx(key):\n # Index out of range protection\n if key >= self.nrows:\n raise IndexError, \"Index out of range\"\n if key < 0:\n # To support negative values\n key += self.nrows\n (start, stop, step) = self._processRange(key, key+1, 1)\n return self.read(start, stop, step)[0]\n elif isinstance(key, slice):\n start, stop, step = self._processRange(\n key.start, key.stop, key.step)\n return self.read(start, stop, step)\n # Try with a boolean or point selection\n elif type(key) in (list, tuple) or isinstance(key, numpy.ndarray):\n coords = self._pointSelection(key)\n return self._readCoordinates(coords)\n else:\n raise IndexError(\"Invalid index or slice: %r\" % (key,))", "def __getitem__(self, ind):\n if isinstance(ind, slice):\n return self.TAA[ind]\n else:\n return self.TAA[ind, 0]", "def test_getitem_out_of_bounds() -> None:\n ll1 = setup_linked_list([108, 148, 165])\n # One way to test this, there are likely other (perhaps better) ways,\n # but this will do given what we know so far.\n try:\n ll1[3]\n assert False\n except IndexError:\n assert True\n except:\n assert False", "def __getitem__(self, x):\n if isinstance(x, int):\n #return self._rowList.__getitem__(x)\n return self._rowList[x]\n\n elif isinstance(x, str):\n try:\n #return self._rowList[0][x]\n ii=self.getAttributeIndex(x)\n return self._rowList[0][ii]\n except (IndexError, KeyError):\n raise KeyError\n raise TypeError(x)", "def test_setitem_out_of_range(self, index):\n ds = DatasetList([0])\n\n with pytest.raises(IndexError):\n ds[index] = 1", "def test_getitem_root(self):\n x = IndexedVariable(name='x', index=1)\n self.assertIs(x[()], x)", "def __getitem__(self,key):\n # See if key is a column name\n try:\n i = self.names.index(key)\n return self.data[i]\n except ValueError:\n # Not a column name\n # See if it's an integer index\n try:\n i = int(key)\n except ValueError:\n # Not an integer\n raise KeyError, \"column '%s' not found\" % key\n try:\n return self.data[i]\n except IndexError:\n # Integer but out of range\n raise IndexError, \"integer index out of range for '%s'\" % key", "def __getitem__(self, index):\n return self.unknowns[index]", "def __getitem__(self, item):\n if isinstance(item, (int, np.integer)):\n item = (item,) # 
though the branches might differ...\n elif isinstance(item, slice):\n item = (item,)\n if any(not isinstance(i, (int, np.integer)) for i in item):\n return self.derivative_tensor(len(item), item)\n else:\n d = self.compute_derivatives(len(item), item, lazy=False)\n return d[0]", "def test_index_error_with_data():\n test_list = OffByOneList([])\n for k in (0, 4):\n with pytest.raises(IndexError):\n test_list[k]", "def __getitem__(self, index: int) -> Cell:\n\n if index[0] <= self.N and index[1] <= self.N:\n return self._safe_get(index)\n return None", "def __getitem__(self, *args) -> \"uval_t\":\n return _ida_pro.uval_array___getitem__(self, *args)", "def key_safe_data_access(data, key):\n try:\n return data[key]\n except (KeyError, IndexError):\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't return a huge string for a large value
def test_repval_large_elem(self):
    elem = DataElement(0x00820003, 'UT', 'a' * 1000)
    assert len(elem.repval) < 100
[ "def test_repval_large_vm(self):\n elem = DataElement(0x00080054, 'AE', 'a\\\\' * 1000 + 'a')\n assert len(elem.repval) < 100", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def _get_cleaned_value(self, data_element):\n if data_element.VR == 'UI':\n return self._generate_uuid(data_element.value)\n if data_element.VR == 'DT' or data_element.VR == 'TM':\n return \"000000.00\"\n elif data_element.VR == 'DA':\n return \"20000101\"\n return \"no value\"", "def _reduce_response(data: str) -> str:\n try:\n data = dumps(loads(data))\n except ValueError:\n pass\n size = sys.getsizeof(data)\n if size > 2 * 10 ** 4:\n start = data[:10 ** 1]\n end = data[-(10 ** 1):]\n data = f'{start}\\n...\\n<Response was {size} bytes. Log was reduced>\\n...\\n{end}'\n return data", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def test_string(self):\n bandwidth_value = random.randint(0, 10000000)\n self._bw.change(bandwidth_value)\n self.assertEqual(str(bandwidth_value), str(bandwidth_value))", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def getDataUnitCount(self):\n\t\treturn 1", "def test_description_group_length(self):\n elem = DataElement(0x00100000, 'LO', 12345)\n assert 'Group Length' == elem.description()", "def test_big_number(self):\n feed = \"i bought 10,000 cookies\"\n expected = \"i bought cookies\"\n\n result = Parser().parse_numbers(feed)\n self.assertEqual(expected, result)", "def testDataIsString(self):\n registry_value = fake.FakeWinRegistryValue(\n u'MRUListEx', data_type=definitions.REG_BINARY)\n\n self.assertFalse(registry_value.DataIsString())\n\n registry_value = fake.FakeWinRegistryValue(\n u'MRU', data_type=definitions.REG_SZ)\n\n self.assertTrue(registry_value.DataIsString())", "def reprLRData(s):\n return repr(s)", "def get_text(self):\n # FIXME Finish comments\n if self.value is None:\n result_str = str(self.value)\n else:\n format_str = '%%.%dg' % self.precision\n result_str = '['\n result_str += ', '.join([format_str % f for f in self.value])\n result_str += ']'\n return result_str", "def get_decimal64(self):\n return pn_data_get_decimal64(self._data)", "def fixDuplicateEndingLabVIEWBug(data):\n duplicateString = '''g\":[\n ]\n}g\":[\n ]\n}'''\n if(data[-len(duplicateString):] == duplicateString):\n data = data[:-12]\n return data", "def exhaust_iterator(iterator):\r\n data = b('')\r\n\r\n try:\r\n chunk = b(next(iterator))\r\n except StopIteration:\r\n chunk = b('')\r\n\r\n while len(chunk) > 0:\r\n data += chunk\r\n\r\n try:\r\n chunk = b(next(iterator))\r\n except StopIteration:\r\n chunk = b('')\r\n\r\n return data", "def test_human_readable_numbers(self):\n self.assert_has_content(\n '<td class=\"col-md-1 rate\">1.0 kB / 0 B</td>',\n )", "def new_store_data():\n return random_string(8), random_string(random.randrange(1000))", "def _get_large_test_payload(content_type):\n if content_type == 'text/csv':\n return 'timestamp,value\\n'+\"1\"*17*1024*1024\n else:\n return '{\"values\": ['+\"1\"*17*1024*1024+']}'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't return a huge string for a large VM
def test_repval_large_vm(self):
    elem = DataElement(0x00080054, 'AE', 'a\\' * 1000 + 'a')
    assert len(elem.repval) < 100
[ "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def _reduce_response(data: str) -> str:\n try:\n data = dumps(loads(data))\n except ValueError:\n pass\n size = sys.getsizeof(data)\n if size > 2 * 10 ** 4:\n start = data[:10 ** 1]\n end = data[-(10 ** 1):]\n data = f'{start}\\n...\\n<Response was {size} bytes. Log was reduced>\\n...\\n{end}'\n return data", "def testRmStress(self) :\n\n\t\trandom.seed( 19 )\n\n\t\tdataPresent = set()\n\n\t\tf = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Write)\n\t\tf = f.subdirectory(\"data\", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )\n\t\tbuf = f.buffer() # Fails under gcc 3.3.4 when no data has been written, as a result of the way in which std::ostringstream::seekp(0) works when the stream is currently empty. Fixed in gcc 3.4.x and later.\n\t\tf = None\n\t\tf = IECore.MemoryIndexedIO(buf, [], IECore.IndexedIO.OpenMode.Append)\n\t\tf = f.subdirectory( \"data\" )\n\n\t\tnumLoops = 500\n\t\tmaxSize = 1000\n\n\t\tfor i in range( 0, numLoops ) :\n\n\t\t\tfor i in range( 0, maxSize ) :\n\n\t\t\t\tindex = int( random.random() * maxSize )\n\n\t\t\t\tif not index in dataPresent :\n\n\t\t\t\t\tf.write( \"data\"+str(index), i )\n\t\t\t\t\tdataPresent.add( index )\n\n\t\t\t\telse :\n\n\t\t\t\t\tf.remove( \"data\"+str(index) )\n\t\t\t\t\tdataPresent.remove( index )\n\n\n\t\t\t# Reopen the file every now and then, to exercise the index reading/writing\n\t\t\tif random.random() > 0.8 :\n\n\t\t\t\tbuf = f.buffer()\n\t\t\t\tf = None\n\t\t\t\tf = IECore.MemoryIndexedIO(buf, [\"data\"], IECore.IndexedIO.OpenMode.Append)\n\n\t\t\tentryNames = f.entryIds()\n\n\t\t\tfor i in range( 0, maxSize ) :\n\n\t\t\t\tdataName = \"data\"+str(i)\n\t\t\t\tif dataName in entryNames :\n\n\t\t\t\t\tself.assertTrue( i in dataPresent )\n\n\t\t\t\telse :\n\n\t\t\t\t\tself.assertFalse( i in dataPresent )\n\n\t\t\tself.assertEqual( len(entryNames), len(dataPresent) )", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def new_store_data():\n return random_string(8), random_string(random.randrange(1000))", "def test_getMPRemainingData(self):\n self.assertEquals(\n self.getMP('\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01foo'),\n (1, 'foo'))", "def test_data_str_1(self):\n var1 = variables.Variable(name='test1', pre_transform=lambda x: str(x[1]))\n var2 = variables.Variable(name='test2', pre_transform=lambda x: str(x[2]))\n var3 = variables.Variable(name='test3', pre_transform=lambda x: str(x[0]))\n\n model_vars = variables.ModelVariables(independent=[var2, var3], dependent=[var1], schema=[var1, var2, var3])\n output = model_vars.data_str([100, 200, 300])\n expected = '200\t300\t100'\n\n self.assertEqual(output, expected)", "def returnMemopsText(value):\n \n if value:\n \n wordString = value[:254]\n \n return wordString\n \n else:\n \n return value", "def test_slow_response(self):\n adapter = LsResponseAdapter()\n for response in self.valid_lsp_responses:\n for every_len in [3, 6, 12, 20]:\n i = 0\n output = b''\n while i < len(response):\n this_input = response[i:(i + 
every_len)]\n output = output + adapter.adapt_response(this_input)\n i = i + every_len\n self.assertEqual(response, output)", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def test__parse_text_large():\n for input_data, expected_output in (\n ({}, None),\n ({'large_text': None}, None),\n ({'large_text': ''}, None),\n ({'large_text': 'a'}, 'a'),\n ):\n output = parse_text_large(input_data)\n vampytest.assert_eq(output, expected_output)", "def test_string(self):\n bandwidth_value = random.randint(0, 10000000)\n self._bw.change(bandwidth_value)\n self.assertEqual(str(bandwidth_value), str(bandwidth_value))", "def test_22_info(self, r):\n info = (\n \"allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330,\"\n \"13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020,\"\n \"20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303,\"\n \"27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160,\"\n \"34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523,\"\n \"41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171,\"\n \"49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332,\"\n \"58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30,\"\n \"67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25,\"\n \"76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46,\"\n \"85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20,\"\n \"94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15,\"\n \"103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52,\"\n \"111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54,\"\n \"119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52,\"\n \"127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62,\"\n \"135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7,\"\n \"144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1,\"\n \"155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2,\"\n \"172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3,\"\n \"187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1,\"\n \"207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2,\"\n \"220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1,\"\n \">=256=203\"\n )\n parsed = parse_info(info)\n assert \"allocation_stats\" in parsed\n assert \"6\" in parsed[\"allocation_stats\"]\n assert \">=256\" in parsed[\"allocation_stats\"]", "def test_repr(self):\n namespace = {}\n exec('thermodata = {0!r}'.format(self.thermodata), globals(), namespace)\n self.assertIn('thermodata', namespace)\n thermodata = namespace['thermodata']\n self.assertEqual(self.thermodata.Tdata.value.shape, thermodata.Tdata.value.shape)\n for T, T0 in zip(self.thermodata.Tdata.value, thermodata.Tdata.value):\n self.assertAlmostEqual(T, T0, 4)\n self.assertEqual(self.thermodata.Tdata.units, thermodata.Tdata.units)\n self.assertEqual(self.thermodata.Cpdata.value.shape, thermodata.Cpdata.value.shape)\n for Cp, Cp0 in zip(self.thermodata.Cpdata.value, thermodata.Cpdata.value):\n self.assertAlmostEqual(Cp, Cp0, 3)\n self.assertEqual(self.thermodata.Cpdata.units, thermodata.Cpdata.units)\n self.assertAlmostEqual(self.thermodata.H298.value, thermodata.H298.value, 4)\n self.assertEqual(self.thermodata.H298.units, thermodata.H298.units)\n self.assertAlmostEqual(self.thermodata.S298.value, thermodata.S298.value, 2)\n self.assertEqual(self.thermodata.S298.units, thermodata.S298.units)\n self.assertAlmostEqual(self.thermodata.Cp0.value, thermodata.Cp0.value, 4)\n self.assertEqual(self.thermodata.Cp0.units, 
thermodata.Cp0.units)\n self.assertAlmostEqual(self.thermodata.CpInf.value, thermodata.CpInf.value, 3)\n self.assertEqual(self.thermodata.CpInf.units, thermodata.CpInf.units)\n self.assertAlmostEqual(self.thermodata.Tmin.value, thermodata.Tmin.value, 4)\n self.assertEqual(self.thermodata.Tmin.units, thermodata.Tmin.units)\n self.assertAlmostEqual(self.thermodata.Tmax.value, thermodata.Tmax.value, 4)\n self.assertEqual(self.thermodata.Tmax.units, thermodata.Tmax.units)\n self.assertAlmostEqual(self.thermodata.E0.value, thermodata.E0.value, 4)\n self.assertEqual(self.thermodata.E0.units, thermodata.E0.units)\n self.assertEqual(self.thermodata.label, thermodata.label)\n self.assertEqual(self.thermodata.comment, thermodata.comment)", "def testDataIsString(self):\n registry_value = fake.FakeWinRegistryValue(\n u'MRUListEx', data_type=definitions.REG_BINARY)\n\n self.assertFalse(registry_value.DataIsString())\n\n registry_value = fake.FakeWinRegistryValue(\n u'MRU', data_type=definitions.REG_SZ)\n\n self.assertTrue(registry_value.DataIsString())", "def reprLRData(s):\n return repr(s)", "def _get_large_test_payload(content_type):\n if content_type == 'text/csv':\n return 'timestamp,value\\n'+\"1\"*17*1024*1024\n else:\n return '{\"values\": ['+\"1\"*17*1024*1024+']}'", "def test_ipv6network_repr(self):\n n = 10**5\n net = ip.IPv6Network('1:2:3:4::/120')\n time1, result1 = timefn(n, net.__repr__)\n enet = eip.IPv6Network('1:2:3:4::/120')\n time2, result2 = timefn(n, enet.__repr__)\n results = (time1, result1), (time2, result2)\n self.report_6n.report(fn_name(), n, results, net)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't break with bad types
def test_repval_strange_type(self):
    elem = DataElement(0x00020001, 'OB', 0)
    assert len(elem.repval) < 100
[ "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def test_repval_large_vm(self):\n elem = DataElement(0x00080054, 'AE', 'a\\\\' * 1000 + 'a')\n assert len(elem.repval) < 100", "def test_inequality_sequence(self):\n dd = DataElement(0x300A00B0, 'SQ', [])\n assert not dd != dd\n assert not DataElement(0x300A00B0, 'SQ', []) != dd\n ee = DataElement(0x300A00B0, 'SQ', [Dataset()])\n assert ee != dd\n\n # Check value\n dd.value = [Dataset()]\n dd[0].PatientName = 'ANON'\n ee[0].PatientName = 'ANON'\n assert not ee != dd\n ee[0].PatientName = 'ANONA'\n assert ee != dd", "def test_datatype():\n hist1, hist2, hist3 = get_histograms()\n hist0 = hg.Count()\n\n assert isinstance(None, hist0.datatype)\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [np.number, str])\n np.testing.assert_array_equal(hist3.datatype, [np.datetime64, np.number, str])", "def test_dictdata_001_02(self):\n\n # field_b, float, '1234.4567'\n self.assertTrue(self.org_values[1].startswith(self.field_values[1]))\n self.assertEqual(type(self.org_values[1]), type(self.field_values[1]))\n self.assertTrue(isinstance(self.my_dict_data.field_b, float))\n\n # field_h, float, '9.8765'\n self.assertTrue(self.org_values[7].startswith(self.field_values[7]))\n self.assertEqual(type(self.org_values[7]), type(self.field_values[7]))\n self.assertTrue(isinstance(self.my_dict_data.field_h, float))", "def test_Regression_dtype():\n some = \"A wrong data type of type string\" \n with pytest.raises(TypeError):\n Regression(some)", "def __init__(self, data):\n if type(data) is not int and type(data) is not float and type(data) is not long and type(data) is not str:\n raise TypeError(\"Wrong type of data\")\n\n else:\n self.value = data", "def test_non_string(self):\n datatagger = DataTagger(\n container=self.container,\n field_name='foobar'\n )\n actual = datatagger._get_value(self.alert)\n expected = None\n self.assertEqual(actual, expected)", "def test_data(self):\n self.assertEqual(self.node.data, 10)\n self.assertNotEqual(self.node.data, 5)", "def _element_check(data):\n if isinstance(data, etree.Element):\n logging.debug(\"attempting to convert to xml string\")\n return etree.tostring(data)\n else:\n return data", "def fix_data_type(data):\n try:\n converted_data = int(data)\n except ValueError:\n try:\n converted_data = float(data)\n except:\n # converted_data = unicode(data)\n converted_data = str(data) \n return converted_data", "def test_setitem_check_new_valid_type(dictionary):\n\n val = list(dictionary.values())[0]\n matching = BaseMatching(dictionary)\n assert matching._check_new_valid_type(val, str) is None\n\n with pytest.raises(ValueError):\n matching._check_new_valid_type(val, float)", "def test_pandas_boolean_native_type_error(data):\n data = pd.Series(data)\n dtype = pandas_engine.Engine.dtype(\"boolean\")\n\n with pytest.raises(TypeError):\n dtype.coerce(data)\n\n for _, value in data.iteritems():\n with pytest.raises(TypeError):\n dtype.coerce_value(value)", "def test_generate_simulation_data_types(self):\n \n seq = list(simdat.generate_simulation_data_types([\"fmiString\", \\\n simdat.SimulationDataType.INTEGER, \"fmiBoolean\", \"fmiReal\"]))\n ref = [simdat.SimulationDataType.STRING, \\\n simdat.SimulationDataType.INTEGER, \\\n simdat.SimulationDataType.BOOLEAN, simdat.SimulationDataType.REAL]\n \n self.assertSequenceEqual(seq, ref)\n \n try:\n it = iter((simdat.generate_simulation_data_types([\"nope\"])))\n next(it)\n 
self.assertTrue(False)\n except ValueError:\n pass", "def check_dtype(self):\n # for data\n if not isinstance(self.data, torch.Tensor):\n if isinstance(self.data, np.ndarray):\n self.data = torch.from_numpy(self.data)\n else:\n raise TypeError(f\"invalid dtype {type(self.data)}\")\n # for label\n if not isinstance(self.label, torch.Tensor):\n if isinstance(self.label, np.ndarray):\n self.label = torch.from_numpy(self.label).long()\n elif isinstance(self.label, list):\n self.label = torch.tensor(self.label, dtype=torch.long)\n else:\n raise TypeError(f\"invalid dtype {type(self.data)}\")", "def _ensure_correct_dtype(self, data):\n try:\n data = np.asarray(data, dtype=self.POINT_DTYPE)\n except TypeError:\n raise TypeError('The input data cannot be converted into an array'\n ' with dtype=%s' % repr(self.POINT_DTYPE))\n return data", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def test_dictdata_001_05(self):\n\n # field_e, timestamp, '1558110396'\n self.assertEqual(self.org_values[4], self.field_values[4])\n self.assertEqual(type(self.org_values[4]), type(self.field_values[4]))\n self.assertTrue(isinstance(self.my_dict_data.field_e, datetime))", "def test02_set_attributes_wrong_type(self):\r\n\r\n _values = (1, 0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, None, 0)\r\n\r\n (_error_code,\r\n _error_msg) = self.DUT.set_attributes(_values)\r\n self.assertEqual(_error_code, 10)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an unknown private tag (e.g. a tag not in the private dictionary) in the repeater range is not handled as a repeater tag if using Implicit Little Endian transfer syntax.
def test_private_tag_in_repeater_range(self):
    # regression test for #689
    ds = Dataset()
    ds[0x50f10010] = RawDataElement(
        Tag(0x50f10010), None, 8, b'FDMS 1.0', 0, True, True)
    ds[0x50f1100a] = RawDataElement(
        Tag(0x50f1100a), None, 6, b'ACC0.6', 0, True, True)
    private_creator_data_elem = ds[0x50f10010]
    assert 'Private Creator' == private_creator_data_elem.name
    assert 'LO' == private_creator_data_elem.VR

    private_data_elem = ds[0x50f1100a]
    assert '[FNC Parameters]' == private_data_elem.name
    assert 'UN' == private_data_elem.VR
[ "def test_private_repeater_tag(self):\n ds = Dataset()\n ds[0x60210012] = RawDataElement(\n Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True)\n ds[0x60211200] = RawDataElement(\n Tag(0x60211200), None, 6, b'123456', 0, True, True)\n private_creator_data_elem = ds[0x60210012]\n assert 'Private Creator' == private_creator_data_elem.name\n assert 'LO' == private_creator_data_elem.VR\n\n private_data_elem = ds[0x60211200]\n assert '[Overlay ID]' == private_data_elem.name\n assert 'UN' == private_data_elem.VR", "def verifyTag(tag):\n check = [\"P\", \"Y\", \"L\", \"Q\", \"G\", \"R\", \"J\", \"C\", \"U\", \"V\", \"0\", \"2\", \"8\", \"9\"]\n if len(tag) > 15:\n return False\n if any(i not in check for i in tag):\n return False\n\n return True", "def test_description_unknown_private(self):\n elem = DataElement(0x00110010, 'LO', 12345)\n elem.private_creator = 'TEST'\n assert 'Private tag data' == elem.description()\n elem = DataElement(0x00110F00, 'LO', 12345)\n assert elem.tag.is_private\n assert elem.private_creator is None\n assert 'Private tag data' == elem.description()", "def testLoopAbsentIndex(self):\n template = '{{ for item in [tag:absent] }} x {{ endfor }}'\n self.assertFalse(self.parse(template, tag='absent'))", "def test_bad_bytestring(self):\n bytestring = b'\\x10\\x00'\n convert_tag(bytestring, True)", "def test_inequality_standard(self):\n dd = DataElement(0x00100010, 'PN', 'ANON')\n assert not dd != dd\n assert DataElement(0x00100010, 'PN', 'ANONA') != dd\n\n # Check tag\n assert DataElement(0x00100011, 'PN', 'ANON') != dd\n\n # Check VR\n assert DataElement(0x00100010, 'SH', 'ANON') != dd", "def testWrongTagDependency(self):\n self.assertRaises(AttributeError, lambda: self.connector.Packet(\"test\", wait_tags=\"test\"))", "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def is_valid_tag(tag):\n return tag.lower() in _ohlcfield_lookup", "def is_ignored_tag(self, tag):\n return any(r.match(tag) for r in IGNORED_TAGS)", "def test_invalid_tag_name(self):\n control_data = control_dict_for_testing(\n \"\"\"\n classic-tag-block:\n 1-13:\n artist: \"Hello there!\"\n general: kenobi\n \"\"\"\n )\n controller = NakloController([None] * 13)\n self.assertRaisesRegex(\n ValueError, \"^invalid tag name: ``general''$\",\n controller.add_tag_blocks, control_data)\n\n control_data = control_dict_for_testing(\n \"\"\"\n inverted-tag-block:\n artist:\n 1-13: \"Hello there!\"\n General:\n 1-13: \"Kenobi\"\n \"\"\"\n )\n controller = NakloController([None] * 13)\n self.assertRaisesRegex(\n ValueError, \"^invalid tag name: ``General''$\",\n controller.add_tag_blocks, control_data)", "def test_starttag_bad_closing():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.starttag(b'x', [], _test.badbool)", "def _should_skip_number_elem(data, elem):\n number_system = elem.get('numberSystem', 'latn')\n\n if number_system != 'latn':\n data['unsupported_number_systems'].add(number_system)\n return True\n\n return False", "def correct_overflow(timetags, valid):\n overflow = 2**16 # 2**timetag_bits\n overflow_idx = np.where(valid==0)[0]\n for i, (idx1, idx2) in enumerate(zip(overflow_idx[:-1], overflow_idx[1:])):\n timetags[idx1:idx2] += (i + 1)*overflow\n timetags[idx2:] += (i + 2)*overflow", "def test_unclosed_tags_get_closed(self):\n ...", "def _should_skip_elem(elem, type=None, dest=None):\n if 'draft' in elem.attrib or 'alt' in elem.attrib:\n if dest is None or type in dest:\n return True", 
"def validateTag(tag):\n\n if tag == None:\n raise OTCodecError(\"Invalid argument: None\")\n\n # Recognize exceptional sfntVersion tag:\n if tag == b'\\x00\\x01\\x00\\x00':\n return 0\n\n errors = 0\n\n # Test against normal rules\n\n if len(tag) != 4:\n errors += 0x01\n for c in tag:\n if ord(c) < 0x20 or ord(c) > 0x7E:\n errors += 0x02\n\n # check for non-trailing spaces: remove all spaces and compare with rstrip\n if re.sub(\" \", \"\", tag) != tag.rstrip():\n errors += 0x04\n \n return errors", "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00111010].VR\n assert 'Διονυσιος'.encode('iso_ir_126') == ds[0x00111010].value", "def test_offset(self):\n bytestring = b'\\x12\\x23\\x10\\x00\\x20\\x00\\x34\\x45'\n assert convert_tag(bytestring, True, 0) == Tag(0x2312, 0x0010)\n assert convert_tag(bytestring, True, 2) == Tag(0x0010, 0x0020)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a known private tag in the repeater range is correctly handled using Implicit Little Endian transfer syntax.
def test_private_repeater_tag(self):
    ds = Dataset()
    ds[0x60210012] = RawDataElement(
        Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True)
    ds[0x60211200] = RawDataElement(
        Tag(0x60211200), None, 6, b'123456', 0, True, True)
    private_creator_data_elem = ds[0x60210012]
    assert 'Private Creator' == private_creator_data_elem.name
    assert 'LO' == private_creator_data_elem.VR

    private_data_elem = ds[0x60211200]
    assert '[Overlay ID]' == private_data_elem.name
    assert 'UN' == private_data_elem.VR
[ "def test_private_tag_in_repeater_range(self):\n # regression test for #689\n ds = Dataset()\n ds[0x50f10010] = RawDataElement(\n Tag(0x50f10010), None, 8, b'FDMS 1.0', 0, True, True)\n ds[0x50f1100a] = RawDataElement(\n Tag(0x50f1100a), None, 6, b'ACC0.6', 0, True, True)\n private_creator_data_elem = ds[0x50f10010]\n assert 'Private Creator' == private_creator_data_elem.name\n assert 'LO' == private_creator_data_elem.VR\n\n private_data_elem = ds[0x50f1100a]\n assert '[FNC Parameters]' == private_data_elem.name\n assert 'UN' == private_data_elem.VR", "def test_offset(self):\n bytestring = b'\\x12\\x23\\x10\\x00\\x20\\x00\\x34\\x45'\n assert convert_tag(bytestring, True, 0) == Tag(0x2312, 0x0010)\n assert convert_tag(bytestring, True, 2) == Tag(0x0010, 0x0020)", "def test_little_endian(self):\n bytestring = b'\\x10\\x00\\x20\\x00'\n assert convert_tag(bytestring, True) == Tag(0x0010, 0x0020)", "def test_big_endian(self):\n # VM 1\n bytestring = b'\\x00\\x10\\x00\\x20'\n assert convert_ATvalue(bytestring, False) == Tag(0x0010, 0x0020)\n\n # VM 3\n bytestring += b'\\x00\\x10\\x00\\x30\\x00\\x10\\x00\\x40'\n out = convert_ATvalue(bytestring, False)\n assert Tag(0x0010, 0x0020) in out\n assert Tag(0x0010, 0x0030) in out\n assert Tag(0x0010, 0x0040) in out", "def test_big_endian(self):\n bytestring = b'\\x00\\x10\\x00\\x20'\n assert convert_tag(bytestring, False) == Tag(0x0010, 0x0020)", "def test_little_endian(self):\n # VM 1\n bytestring = b'\\x10\\x00\\x20\\x00'\n assert convert_ATvalue(bytestring, True) == Tag(0x0010, 0x0020)\n\n # VM 3\n bytestring += b'\\x10\\x00\\x30\\x00\\x10\\x00\\x40\\x00'\n out = convert_ATvalue(bytestring, True)\n assert Tag(0x0010, 0x0020) in out\n assert Tag(0x0010, 0x0030) in out\n assert Tag(0x0010, 0x0040) in out", "def test_description_unknown_private(self):\n elem = DataElement(0x00110010, 'LO', 12345)\n elem.private_creator = 'TEST'\n assert 'Private tag data' == elem.description()\n elem = DataElement(0x00110F00, 'LO', 12345)\n assert elem.tag.is_private\n assert elem.private_creator is None\n assert 'Private tag data' == elem.description()", "def test_bad_bytestring(self):\n bytestring = b'\\x10\\x00'\n convert_tag(bytestring, True)", "def MatchProxy_bit_from_14_to_16_test(self):\n m = self.m\n self.assertEqual(m.resign_offer, 0)", "def test_decode_misinterpretation(self):\n data = tlv8.encode([\n tlv8.Entry(1, 16843330),\n tlv8.Entry(2, b'\\x01')\n ])\n result = tlv8.deep_decode(data)\n expected_data = tlv8.EntryList([\n tlv8.Entry(1, tlv8.EntryList([\n tlv8.Entry(66, b'\\x01\\x01')\n ])),\n tlv8.Entry(2, b'\\x01')\n ])\n self.assertEqual(result, expected_data)", "def test_packed_response_type_FAILS():\n value = Decimal(\"1.0\")\n r1 = ValueType(abi_type=\"ufixed64x9\", packed=True)\n bytes_val = r1.encode(value)\n assert bytes_val.hex() == \"000000003b9aca00\"\n int_val = int.from_bytes(bytes_val, \"big\", signed=False)\n assert int_val == 10 ** 9\n\n with pytest.raises(InsufficientDataBytes):\n decoded = r1.decode(bytes_val)\n print(decoded)", "def test_to_uint(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n num = 128 # larger than a byte\n rlp_encoded_item = rlp.encode(num)\n\n assert contract.functions.testToUint(rlp_encoded_item).call() == num", "def test_uint16(structure_app):\n dataset = open_url(\"http://localhost:8001/\", structure_app)\n assert (dataset.types.ui16.dtype == np.dtype(\">u2\"))", "def test_int16(structure_app):\n dataset = open_url(\"http://localhost:8001/\", structure_app)\n assert (dataset.types.i16.dtype == 
np.dtype(\">i2\"))", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def MatchProxy_bit_from_22_to_37_test(self):\n m = self.m\n self.assertEqual(m.match_length, 9)", "def verifyTag(tag):\n check = [\"P\", \"Y\", \"L\", \"Q\", \"G\", \"R\", \"J\", \"C\", \"U\", \"V\", \"0\", \"2\", \"8\", \"9\"]\n if len(tag) > 15:\n return False\n if any(i not in check for i in tag):\n return False\n\n return True", "def test_from_raw(good_values):\n values = extract_values(good_values)\n\n packed_be = struct.pack(FMT_PACKET_BE, *values)\n packet_be = PacketBE.from_raw(packed_be)\n packed_le = struct.pack(FMT_PACKET_LE, *values)\n packet_le = PacketLE.from_raw(packed_le)\n\n check_packet(packet_be, good_values)\n check_packet(packet_le, good_values)", "def test_range_test(self):\n range_filter = AddressFilter.Range(\"2-16\")\n assert range_filter.match(10)\n assert range_filter.match(2)\n assert range_filter.match(16)\n assert not range_filter.match(1)\n assert not range_filter.match(17)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Known tags with VR UN are correctly decoded.
def test_known_tags_with_UN_VR(self, replace_un_with_known_vr):
        ds = Dataset()
        ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126')
        ds[0x00100010] = DataElement(0x00100010, 'UN',
                                     'Διονυσιος'.encode('iso_ir_126'))
        ds.decode()
        assert 'CS' == ds[0x00080005].VR
        assert 'PN' == ds[0x00100010].VR
        assert 'Διονυσιος' == ds[0x00100010].value

        ds = Dataset()
        ds[0x00080005] = DataElement(0x00080005, 'UN',
                                     b'ISO 2022 IR 100\\ISO 2022 IR 126')
        ds[0x00100010] = DataElement(0x00100010, 'UN',
                                     b'Dionysios=\x1b\x2d\x46'
                                     + 'Διονυσιος'.encode('iso_ir_126'))
        ds.decode()
        assert 'CS' == ds[0x00080005].VR
        assert 'PN' == ds[0x00100010].VR
        assert 'Dionysios=Διονυσιος' == ds[0x00100010].value
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00111010].VR\n assert 'Διονυσιος'.encode('iso_ir_126') == ds[0x00111010].value", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def test_reading_ds_with_known_tags_with_UN_VR(\n self, replace_un_with_known_vr):\n test_file = get_testdata_file('explicit_VR-UN.dcm')\n ds = dcmread(test_file)\n assert 'CS' == ds[0x00080005].VR\n assert 'TM' == ds[0x00080030].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'DA' == ds[0x00100030].VR", "def decodeTag(cls, value):\n raise NotImplementedError", "def _decode_seq_tags(self):\n if self.decode_group_size > 0:\n raise NotImplementedError('Unsupported cnn group for CRF')\n else:\n self._decode_with_seq_encodes()\n # self._decode_cnn_pooling_all()\n # self._decode_sim_WX_B()\n self._compute_seqtag_scores_and_loss()\n self._add_weight_decay_regularizer()", "def _ptu_read_tag(s, offset, tag_type_r):\n # Struct fields: 32-char string, int32, uint32, int64\n tag_struct = struct.unpack('32s i I q', s[offset:offset + 48])\n offset += 48\n # and save it into a dict\n tagname = tag_struct[0].rstrip(b'\\0').decode()\n keys = ('idx', 'type', 'value')\n tag = {k: v for k, v in zip(keys, tag_struct[1:])}\n # Recover the name of the type (a string)\n tag['type'] = tag_type_r[tag['type']]\n\n # Some tag types need conversion\n if tag['type'] == 'tyFloat8':\n tag['value'] = np.int64(tag['value']).view('float64')\n elif tag['type'] == 'tyBool8':\n tag['value'] = bool(tag['value'])\n elif tag['type'] == 'tyTDateTime':\n TDateTime = np.uint64(tag['value']).view('float64')\n t = time.gmtime(_ptu_TDateTime_to_time_t(TDateTime))\n tag['value'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", t)\n\n # Some tag types have additional data\n if tag['type'] == 'tyAnsiString':\n tag['data'] = s[offset: offset + tag['value']].rstrip(b'\\0').decode()\n offset += tag['value']\n elif tag['type'] == 'tyFloat8Array':\n tag['data'] = np.frombuffer(s, dtype='float', count=tag['value'] / 8)\n offset += tag['value']\n elif tag['type'] == 'tyWideString':\n # WideString use type WCHAR in the original C++ demo code.\n # WCHAR size is not fixed by C++ standard, but on windows\n # is 2 bytes and the default encoding is UTF-16.\n # I'm assuming this is what the PTU requires.\n tag['data'] = s[offset: offset + tag['value'] * 2].decode('utf16')\n offset += tag['value']\n elif tag['type'] == 'tyBinaryBlob':\n tag['data'] = s[offset: offset + tag['value']]\n offset += tag['value']\n\n return tagname, tag, offset", "def decodeTag(tag):\n tag = ord(tag)\n if TYPE.has_key(tag):\n return TYPE[tag]\n else:\n raise SnmplibUnknownType, \"decodeTag(): Unknown tag: %02X\" % tag", "def decode(self, data):\n\t\traise NotImplementedError()", "def get_value_decoded(self): # real signature unknown; restored from __doc__\n pass", "def tags(self):\n\n return self.video_data.get('tags')", "def extract(self):\n tags = mutagen.File(self.input_file)\n \n ext = os.path.splitext(self.input_file)[1].lower()\n if ext in self.exts:\n for tag, key in 
self.__tag_mapping[ext].items():\n if key in tags:\n self.tags[tag] = tags[key][0]\n elif tag == 'lyrics' and key == 'USLT':\n for id3tag in tags:\n if id3tag.startswith(key):\n self.tags[tag] = tags[id3tag].text\n \n # Handle info tags specially\n self.tags['length'] = int(tags.info.length)\n self.tags['bitrate'] = (tags.info.bitrate \n if hasattr(tags.info, 'bitrate') \n else int(os.path.getsize(path) * 8 / tags.info.length)) / 1000\n \n # Convert string values to integers for certain tags, ignoring \n # any non-integer characters.\n for key in ['year', 'tracknumber', 'discnumber']:\n if self.tags[key] is not None:\n match = re.match('\\d+', str(self.tags[key]))\n if match:\n self.tags[key] = int(match.group(0))\n \n for key in ['title', 'artist', 'album']:\n self.tags[key] = self.tags[key].strip()", "def auto_decode(self):\r\n return True", "def decoding(vector2string):\n # decodes the first trigram, i.e, the first 3 symbols of a verb\n decoded = str(checkcandidates_beg(vector2string)['decoded'])\n\n # finds a new compatible list of wickelfeatures, i.e., an intersection with the first decoded trigram\n\n new_wicklftrs = find_compatible(checkcandidates_beg(vector2string)['wickelfeatures'], vector2string)\n\n while len(new_wicklftrs) > 16:\n\n # decodes the next phoneme\n phoneme = competition(new_wicklftrs, 2)\n\n # does this until last phoneme is decoded\n new_wicklftrs = find_compatible(new_wicklftrs, vector2string)\n\n # sums the new phoneme to the\n decoded = decoded + phoneme\n\n return decoded", "def decode(self):\n # Find specific character set. 'ISO_IR 6' is default\n # May be multi-valued, but let dicom.charset handle all logic on that\n dicom_character_set = self.get('SpecificCharacterSet', \"ISO_IR 6\")\n\n # shortcut to the decode function in dicom.charset\n decode_data_element = dicom.charset.decode\n\n # sub-function callback for walk(), to decode the chr strings if necessary\n # this simply calls the dicom.charset.decode function\n def decode_callback(ds, data_element):\n decode_data_element(data_element, dicom_character_set)\n # Use the walk function to go through all elements in the dataset and convert them\n self.walk(decode_callback)", "def test_retrieve_supported_tags_response_structure_is_as_expected(client):\n response = client.retrieve_supported_tags().json()\n schema = S({\"data\": Partial([S({\"tag\": str, \"description\": str})])})\n assert response == schema", "def decode(self, frame):\n try: pos = frame.index(\"LAME\")\n except: return\n\n # check the info tag crc. if it's not valid, no point parsing much more.\n lamecrc = bin2dec(bytes2bin(frame[190:192]))\n if self._crc16(frame[:190]) != lamecrc:\n #TRACE_MSG('Lame tag CRC check failed')\n # read version string from the first 30 bytes, up to any\n # non-ascii chars, then strip padding chars.\n #\n # XXX (How many bytes is proper to read? 
madplay reads 20, but I've\n # got files with longer version strings)\n lamever = []\n for c in frame[pos:pos + 30]:\n if ord(c) not in range(32, 127):\n break\n lamever.append(c)\n self['encoder_version'] = ''.join(lamever).rstrip('\\x55')\n TRACE_MSG('Lame Encoder Version: %s' % self['encoder_version'])\n return\n\n TRACE_MSG('Lame info tag found at position %d' % pos)\n\n # Encoder short VersionString, 9 bytes\n self['encoder_version'] = lamever = frame[pos:pos + 9].rstrip()\n TRACE_MSG('Lame Encoder Version: %s' % self['encoder_version'])\n pos += 9\n\n # Info Tag revision + VBR method, 1 byte\n self['tag_revision'] = bin2dec(bytes2bin(frame[pos:pos + 1])[:5])\n vbr_method = bin2dec(bytes2bin(frame[pos:pos + 1])[5:])\n self['vbr_method'] = self.VBR_METHODS.get(vbr_method, 'Unknown')\n TRACE_MSG('Lame info tag version: %s' % self['tag_revision'])\n TRACE_MSG('Lame VBR method: %s' % self['vbr_method'])\n pos += 1\n\n # Lowpass filter value, 1 byte\n self['lowpass_filter'] = bin2dec(bytes2bin(frame[pos:pos + 1])) * 100\n TRACE_MSG('Lame Lowpass filter value: %s Hz' % self['lowpass_filter'])\n pos += 1\n\n # Replay Gain, 8 bytes total\n replaygain = {}\n\n # Peak signal amplitude, 4 bytes\n peak = bin2dec(bytes2bin(frame[pos:pos + 4])) << 5\n if peak > 0:\n peak /= float(1 << 28)\n db = 20 * log10(peak)\n replaygain['peak_amplitude'] = peak\n TRACE_MSG('Lame Peak signal amplitude: %.8f (%+.1f dB)' % (peak, db))\n pos += 4\n\n # Radio and Audiofile Gain, AKA track and album, 2 bytes each\n for gaintype in ['radio', 'audiofile']:\n name = bin2dec(bytes2bin(frame[pos:pos + 2])[:3])\n orig = bin2dec(bytes2bin(frame[pos:pos + 2])[3:6])\n sign = bin2dec(bytes2bin(frame[pos:pos + 2])[6:7])\n adj = bin2dec(bytes2bin(frame[pos:pos + 2])[7:]) / 10.0\n if sign:\n adj *= -1\n # XXX Lame 3.95.1 and above use 89dB as a reference instead of 83dB\n # as defined by the Replay Gain spec. 
Should this be compensated for?\n #if lamever[:4] == 'LAME' and lamevercmp(lamever[4:], '3.95') > 0:\n # adj -= 6\n if orig:\n name = self.REPLAYGAIN_NAME.get(name, 'Unknown')\n orig = self.REPLAYGAIN_ORIGINATOR.get(orig, 'Unknown')\n replaygain[gaintype] = {'name': name, 'adjustment': adj,\n 'originator': orig}\n TRACE_MSG('Lame %s Replay Gain: %s dB (%s)' % (name, adj, orig))\n pos += 2\n if replaygain:\n self['replaygain'] = replaygain\n\n # Encoding flags + ATH Type, 1 byte\n encflags = bin2dec(bytes2bin(frame[pos:pos + 1])[:4])\n self['encoding_flags'], self['nogap'] = self._parse_encflags(encflags)\n self['ath_type'] = bin2dec(bytes2bin(frame[pos:pos + 1])[4:])\n TRACE_MSG('Lame Encoding flags: %s' % ' '.join(self['encoding_flags']))\n if self['nogap']:\n TRACE_MSG('Lame No gap: %s' % ' and '.join(self['nogap']))\n TRACE_MSG('Lame ATH type: %s' % self['ath_type'])\n pos += 1\n\n # if ABR {specified bitrate} else {minimal bitrate}, 1 byte\n btype = 'Constant'\n if 'Average' in self['vbr_method']:\n btype = 'Target'\n elif 'Variable' in self['vbr_method']:\n btype = 'Minimum'\n # bitrate may be modified below after preset is read\n self['bitrate'] = (bin2dec(bytes2bin(frame[pos:pos + 1])), btype)\n TRACE_MSG('Lame Bitrate (%s): %s' % (btype, self['bitrate'][0]))\n pos += 1\n\n # Encoder delays, 3 bytes\n self['encoder_delay'] = bin2dec(bytes2bin(frame[pos:pos + 3])[:12])\n self['encoder_padding'] = bin2dec(bytes2bin(frame[pos:pos + 3])[12:])\n TRACE_MSG('Lame Encoder delay: %s samples' % self['encoder_delay'])\n TRACE_MSG('Lame Encoder padding: %s samples' % self['encoder_padding'])\n pos += 3\n\n # Misc, 1 byte\n sample_freq = bin2dec(bytes2bin(frame[pos:pos + 1])[:2])\n unwise_settings = bin2dec(bytes2bin(frame[pos:pos + 1])[2:3])\n stereo_mode = bin2dec(bytes2bin(frame[pos:pos + 1])[3:6])\n self['noise_shaping'] = bin2dec(bytes2bin(frame[pos:pos + 1])[6:])\n self['sample_freq'] = self.SAMPLE_FREQUENCIES.get(sample_freq, 'Unknown')\n self['unwise_settings'] = bool(unwise_settings)\n self['stereo_mode'] = self.STEREO_MODES.get(stereo_mode, 'Unknown')\n TRACE_MSG('Lame Source Sample Frequency: %s' % self['sample_freq'])\n TRACE_MSG('Lame Unwise settings used: %s' % self['unwise_settings'])\n TRACE_MSG('Lame Stereo mode: %s' % self['stereo_mode'])\n TRACE_MSG('Lame Noise Shaping: %s' % self['noise_shaping'])\n pos += 1\n\n # MP3 Gain, 1 byte\n sign = bytes2bin(frame[pos:pos + 1])[0]\n gain = bin2dec(bytes2bin(frame[pos:pos + 1])[1:])\n if sign:\n gain *= -1\n self['mp3_gain'] = gain\n db = gain * 1.5\n TRACE_MSG('Lame MP3 Gain: %s (%+.1f dB)' % (self['mp3_gain'], db))\n pos += 1\n\n # Preset and surround info, 2 bytes\n surround = bin2dec(bytes2bin(frame[pos:pos + 2])[2:5])\n preset = bin2dec(bytes2bin(frame[pos:pos + 2])[5:])\n if preset in range(8, 321):\n if self['bitrate'] >= 255:\n # the value from preset is better in this case\n self['bitrate'] = (preset, btype)\n TRACE_MSG('Lame Bitrate (%s): %s' % (btype, self['bitrate'][0]))\n if 'Average' in self['vbr_method']:\n preset = 'ABR %s' % preset\n else:\n preset = 'CBR %s' % preset\n else:\n preset = self.PRESETS.get(preset, preset)\n self['surround_info'] = self.SURROUND_INFO.get(surround, surround)\n self['preset'] = preset\n TRACE_MSG('Lame Surround Info: %s' % self['surround_info'])\n TRACE_MSG('Lame Preset: %s' % self['preset'])\n pos += 2\n\n # MusicLength, 4 bytes\n self['music_length'] = bin2dec(bytes2bin(frame[pos:pos + 4]))\n TRACE_MSG('Lame Music Length: %s bytes' % self['music_length'])\n pos += 4\n\n # MusicCRC, 2 
bytes\n self['music_crc'] = bin2dec(bytes2bin(frame[pos:pos + 2]))\n TRACE_MSG('Lame Music CRC: %04X' % self['music_crc'])\n pos += 2\n\n # CRC-16 of Info Tag, 2 bytes\n self['infotag_crc'] = lamecrc # we read this earlier\n TRACE_MSG('Lame Info Tag CRC: %04X' % self['infotag_crc'])\n pos += 2", "def test_decode_misinterpretation(self):\n data = tlv8.encode([\n tlv8.Entry(1, 16843330),\n tlv8.Entry(2, b'\\x01')\n ])\n result = tlv8.deep_decode(data)\n expected_data = tlv8.EntryList([\n tlv8.Entry(1, tlv8.EntryList([\n tlv8.Entry(66, b'\\x01\\x01')\n ])),\n tlv8.Entry(2, b'\\x01')\n ])\n self.assertEqual(result, expected_data)", "def test_decode_invalid_pair_errors_replace(self):\n self.assertEqual(\n decode(b'ZJVYUGTDRPDYFGFXMK', 'trytes', 'replace'),\n b'??\\xd2\\x80??\\xc3??',\n )", "def test_add_reverse_iptag_then_not_locate_tag(self):\n tag_info = Tags()\n reverse_iptag = ReverseIPTag(\"\", 1, 23, 0, 0, 1, 1)\n machine_vertex = SimpleMachineVertex(None, \"\")\n machine_vertex2 = SimpleMachineVertex(None, \"\")\n tag_info.add_reverse_ip_tag(reverse_iptag, machine_vertex2)\n gotton_tag = tag_info.get_reverse_ip_tags_for_vertex(\n machine_vertex)\n self.assertEqual(gotton_tag, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Known tags with VR UN are correctly read.
def test_reading_ds_with_known_tags_with_UN_VR(
            self, replace_un_with_known_vr):
        test_file = get_testdata_file('explicit_VR-UN.dcm')
        ds = dcmread(test_file)
        assert 'CS' == ds[0x00080005].VR
        assert 'TM' == ds[0x00080030].VR
        assert 'PN' == ds[0x00100010].VR
        assert 'PN' == ds[0x00100010].VR
        assert 'DA' == ds[0x00100030].VR
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00111010].VR\n assert 'Διονυσιος'.encode('iso_ir_126') == ds[0x00111010].value", "def test_known_tags_with_UN_VR(self, replace_un_with_known_vr):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Διονυσιος' == ds[0x00100010].value\n\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN',\n b'ISO 2022 IR 100\\\\ISO 2022 IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n b'Dionysios=\\x1b\\x2d\\x46'\n + 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Dionysios=Διονυσιος' == ds[0x00100010].value", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def _read_tags(self):\n t1 = self.tok.reveal_next_token(1)\n \n if t1 != None and t1[0] == \"TAG_MARKER\":\n self.tok.consume_next_token()\n self.tok.consume_next_token()\n token = self.tok.consume_next_token()\n \n while (token != None):\n self.tags.append(token[1])\n token = self.tok.consume_next_token()\n\n return True\n else:\n return False", "def test_resource_tag_resource_find_tags_get(self):\n pass", "def tags(self) -> List:", "def extract(self):\n tags = mutagen.File(self.input_file)\n \n ext = os.path.splitext(self.input_file)[1].lower()\n if ext in self.exts:\n for tag, key in self.__tag_mapping[ext].items():\n if key in tags:\n self.tags[tag] = tags[key][0]\n elif tag == 'lyrics' and key == 'USLT':\n for id3tag in tags:\n if id3tag.startswith(key):\n self.tags[tag] = tags[id3tag].text\n \n # Handle info tags specially\n self.tags['length'] = int(tags.info.length)\n self.tags['bitrate'] = (tags.info.bitrate \n if hasattr(tags.info, 'bitrate') \n else int(os.path.getsize(path) * 8 / tags.info.length)) / 1000\n \n # Convert string values to integers for certain tags, ignoring \n # any non-integer characters.\n for key in ['year', 'tracknumber', 'discnumber']:\n if self.tags[key] is not None:\n match = re.match('\\d+', str(self.tags[key]))\n if match:\n self.tags[key] = int(match.group(0))\n \n for key in ['title', 'artist', 'album']:\n self.tags[key] = self.tags[key].strip()", "def _ptu_read_tag(s, offset, tag_type_r):\n # Struct fields: 32-char string, int32, uint32, int64\n tag_struct = struct.unpack('32s i I q', s[offset:offset + 48])\n offset += 48\n # and save it into a dict\n tagname = tag_struct[0].rstrip(b'\\0').decode()\n keys = ('idx', 'type', 'value')\n tag = {k: v for k, v in zip(keys, tag_struct[1:])}\n # Recover the name of the type (a string)\n tag['type'] = tag_type_r[tag['type']]\n\n # Some tag types need conversion\n if tag['type'] == 'tyFloat8':\n tag['value'] = np.int64(tag['value']).view('float64')\n elif tag['type'] == 'tyBool8':\n tag['value'] = bool(tag['value'])\n elif tag['type'] == 'tyTDateTime':\n TDateTime = np.uint64(tag['value']).view('float64')\n t = 
time.gmtime(_ptu_TDateTime_to_time_t(TDateTime))\n tag['value'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", t)\n\n # Some tag types have additional data\n if tag['type'] == 'tyAnsiString':\n tag['data'] = s[offset: offset + tag['value']].rstrip(b'\\0').decode()\n offset += tag['value']\n elif tag['type'] == 'tyFloat8Array':\n tag['data'] = np.frombuffer(s, dtype='float', count=tag['value'] / 8)\n offset += tag['value']\n elif tag['type'] == 'tyWideString':\n # WideString use type WCHAR in the original C++ demo code.\n # WCHAR size is not fixed by C++ standard, but on windows\n # is 2 bytes and the default encoding is UTF-16.\n # I'm assuming this is what the PTU requires.\n tag['data'] = s[offset: offset + tag['value'] * 2].decode('utf16')\n offset += tag['value']\n elif tag['type'] == 'tyBinaryBlob':\n tag['data'] = s[offset: offset + tag['value']]\n offset += tag['value']\n\n return tagname, tag, offset", "def _extract_tags(self, tags, ifd, ifd_name, tags_library=None, relative=0):\n\n if tags_library is None:\n tags_library = EXIF_TAGS\n\n ## The number of tags we expect to read\n entries_count = self._read_int(ifd, 2)\n\n for i in xrange(entries_count):\n ## Entry is index of start of this IFD in the file\n entry = ifd + 2 + (12 * i)\n tag = self._read_int(entry, 2)\n\n ## Get tag name early to avoid errors, help debug\n tag_entry = tags_library.get(tag)\n if tag_entry:\n tag_name = tag_entry[0]\n else:\n tag_name = 'Tag 0x{:04X}'.format(tag)\n\n ## ignore certain tags for faster processing\n # if not (not self.detailed and tag in IGNORE_TAGS): # <-- WTF?\n if self.detailed or tag not in IGNORE_TAGS:\n field_type = self._read_int(entry + 2, 2)\n\n if field_type not in FIELD_TYPES:\n ## We found an unknown field type\n message = 'Unknown type {:d} in tag 0x{:04X}' \\\n ''.format(field_type, tag)\n if self.strict:\n raise ValueError(message)\n else:\n warnings.warn(message)\n continue # Just skip\n\n ## Get the field length for this type\n type_len = FIELD_TYPES[field_type][0]\n\n ## Amount of values for this field\n values_count = self._read_int(entry + 4, 4)\n\n ## Adjust for tag id/type/value_count (2+2+4 bytes)\n ## Now we point at either the data or the 2nd level offset\n offset = entry + 8\n\n ## If the value fits in 4 bytes, it is inlined, else we\n ## need to jump ahead again.\n if (values_count * type_len) > 4:\n ## offset is not the value; it's a pointer to the value\n ## if relative we set things up so s2n will seek to the\n ## right place when it adds self.offset.\n ## Note that this 'relative' is for the Nikon type 3\n ## makernote.\n ## Other cameras may use other relative offsets, which\n ## would have to be computed here slightly differently.\n if relative:\n tmp_offset = self._read_int(offset, 4)\n offset = tmp_offset + ifd - 8\n if self.fake_exif:\n offset += 18\n else:\n offset = self._read_int(offset, 4)\n\n field_offset = offset\n values = None\n\n if field_type == FT_ASCII:\n ## Special case: null-terminated ASCII string\n ## todo: investigate\n ## Sometimes gets too big to fit in int value (in Python??)\n\n if values_count > 0:\n ## Was: and value_count < (2**31):\n ## but 2E31 is hardware dependant. 
--gd\n try:\n self.file.seek(self.offset + offset)\n values = self.file.read(values_count)\n ## Drop any garbage after a null.\n try:\n zeroidx = values.index('\\x00')\n except ValueError: # No zero in values\n pass\n else:\n values = values[:zeroidx]\n values = [values] # Must be a list..\n\n except OverflowError: # Why??\n values = []\n\n else:\n signed_types = (\n FT_SIGNED_BYTE,\n FT_SIGNED_SHORT,\n FT_SIGNED_LONG,\n FT_SIGNED_RATIO,\n )\n values = []\n signed = (field_type in signed_types)\n\n if values_count > 1000:\n ## todo: investigate this:\n ## some entries get too big to handle could be malformed\n ## file or problem with self.s2n\n #values_count = 1000\n if tag_name != 'MakerNote':\n warnings.warn(\n \"Encountered tag {} with > 1000 values \"\n \"({} found). Limiting to 1000.\"\n \"\".format(tag_name, values_count))\n values_count = 1000\n\n for dummy in xrange(values_count):\n if field_type in (FT_RATIO, FT_SIGNED_RATIO):\n ## This is a ratio\n value = Ratio(\n self._read_int(offset, 4, signed),\n self._read_int(offset + 4, 4, signed))\n else:\n ## This is an int\n value = self._read_int(offset, type_len, signed)\n\n values.append(value)\n offset += type_len\n\n\n # ## Now 'values' is either a string or an array\n # ## todo: WTF???\n # if value_count == 1 and field_type != FT_ASCII:\n # printable = str(values[0])\n #\n # elif value_count > 50 and len(values) > 20:\n # printable = str(values[0:20])[0:-1] + \", ... ]\"\n #\n # else:\n # printable = str(values)\n #\n # ## Compute printable version of values\n # ## todo: this should be generated by the tag at need...\n # if tag_entry:\n # if len(tag_entry) != 1:\n # # optional 2nd tag element is present\n # if callable(tag_entry[1]):\n # # call mapping function\n # printable = tag_entry[1](values)\n # else:\n # printable = ''\n # for i in values:\n # # use lookup table for this tag\n # printable += tag_entry[1].get(i, repr(i))\n\n _tag_name = '{} {}'.format(ifd_name, tag_name)\n\n new_tag = IFD_Tag(\n tag=tag,\n field_type=field_type,\n values=values,\n field_offset=field_offset,\n field_length=values_count * type_len,\n tag_entry=tag_entry)\n\n logger.debug('Added tag: {}: {!r}'.format(tag_name, new_tag))\n\n tags[_tag_name] = new_tag", "def test_storage_project_iso_tag_get(self):\n pass", "def _tag_exists(self):\n return self.metadata and self.metadata.get('Tags', None)", "def tag_seen_callback(llrpMsg):\n global tagReport, accessId, OpSpecsIdx, hexFileIdx\n tags = llrpMsg.msgdict['RO_ACCESS_REPORT']['TagReportData']\n\n if tags:\n smokesignal.emit('rfid', {\n 'tags': tags,})\n\n if len(tags):\n for tag in tags:\n # logger.info('saw!! 
tag(s): {}'.format(pprint.pformat(tags)))\n if(\"OpSpecResult\" in tags[0]):\n for ops in tag[\"OpSpecResult\"]:\n logger.info('saw tag(s): {}'.format(pprint.pformat(tags)))\n if (\"ReadData\" in tag[\"OpSpecResult\"][ops]):\n logger.info(\"Readdata = \" + tag[\"OpSpecResult\"][ops][\"ReadData\"] + \" accessType :\" + accessType)\n\n if (accessType == 'readWisp') :\n # AsscessSpec Reading message for WISP5\n logger.info(\"OpSpecsIdx : \" + str(OpSpecsIdx) + \" OpSpecs.__len__(): \" + str(OpSpecs.__len__()) )\n smokesignal.emit('rfid', {\n 'readWispTags': [{'readWisp' : tag[\"OpSpecResult\"][ops][\"ReadData\"]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"] }],})\n\n if(OpSpecsIdx < OpSpecs.__len__()) :\n logger.info(\"ReadWisp : \")\n accessId += 1\n fac.nextAccessSpec(opSpecs = [OpSpecs[OpSpecsIdx], OpSpecs[OpSpecsIdx+1]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n OpSpecsIdx += 2\n\n else :\n # Result for Normal tags\n smokesignal.emit('rfid', {\n 'readTags': [{'read' : tag[\"OpSpecResult\"][ops][\"ReadData\"]\n , 'EPCvalue' : tag[\"EPC-96\"] }],})\n\n\n elif(0 == tag[\"OpSpecResult\"][ops][\"NumWordsWritten\"]):\n if (accessType == 'readWisp') :\n OpSpecsIdx -= 2\n fac.nextAccessSpec(opSpecs = [OpSpecs[OpSpecsIdx], OpSpecs[OpSpecsIdx+1]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n OpSpecsIdx += 2\n elif(accessType == 'writeWisp'):\n smokesignal.emit('rfid', {\n 'writeWispTags': [{'writeWisp' : hexFileLines[hexFileIdx]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"]\n , 'status' : 'Failed'} ],})\n\n elif(2 < tag[\"OpSpecResult\"][ops][\"NumWordsWritten\"]):\n if (accessType == 'writeWisp') :\n # AsscessSpec Writing message for WISP5\n logger.info(\"hexFileLines : \" + hexFileLines[hexFileIdx] + \" hexFileIdx size: \" + str(hexFileIdx) + \" OpSpecSize : \" + str(len(OpSpecs)))\n\n smokesignal.emit('rfid', {\n 'writeWispTags': [{'writeWisp' : hexFileLines[hexFileIdx]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"]\n , 'status' : 'Success'} ],})\n\n if (hexFileIdx == (len(OpSpecs) - 1)):\n logger.info(\" EOF reached.\")\n else:\n logger.info(\"WriteWisp : \" + str(hexFileIdx))\n accessId += 1\n hexFileIdx += 1\n fac.nextAccessSpec(opSpecs = [OpSpecs[hexFileIdx]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n\n print getTimeMeasurement()\n else:\n logger.info('no tags seen')\n return\n for tag in tags:\n tagReport += tag['TagSeenCount'][0]", "def tags(self):\n\n return self.video_data.get('tags')", "def test_vrfs_read(self):\n pass", "def find_ruuvitags(bt_device=''):\n\n log.info('Finding RuuviTags. Stop with Ctrl+C.')\n\n datas = dict()\n for new_data in RuuviTagSensor._get_ruuvitag_datas(bt_device=bt_device):\n if new_data[0] in datas:\n continue\n datas[new_data[0]] = new_data[1]\n log.info(new_data[0])\n log.info(new_data[1])\n\n return datas", "def test_new_tag_info(self):\n Tags()", "def test_resource_tag_resource_get_tag_get(self):\n pass", "def get_known_tags(self, model_idx=0): \n return self.trainers[model_idx].get_known_tags()", "def _get_tags(self):\n with open('input/tags.txt', 'rb') as f:\n tags = dd(set)\n for line in f.readlines():\n tag, language = line.decode().strip().replace(' ', '').split(',')\n tags[language].add(tag)\n return dict(tags)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unknown tags with VR UN are not decoded.
def test_unknown_tags_with_UN_VR(self):
        ds = Dataset()
        ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')
        ds[0x00111010] = DataElement(0x00111010, 'UN',
                                     'Διονυσιος'.encode('iso_ir_126'))
        ds.decode()
        assert 'UN' == ds[0x00111010].VR
        assert 'Διονυσιος'.encode('iso_ir_126') == ds[0x00111010].value
[ "def test_known_tags_with_UN_VR(self, replace_un_with_known_vr):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Διονυσιος' == ds[0x00100010].value\n\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN',\n b'ISO 2022 IR 100\\\\ISO 2022 IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n b'Dionysios=\\x1b\\x2d\\x46'\n + 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Dionysios=Διονυσιος' == ds[0x00100010].value", "def test_reading_ds_with_known_tags_with_UN_VR(\n self, replace_un_with_known_vr):\n test_file = get_testdata_file('explicit_VR-UN.dcm')\n ds = dcmread(test_file)\n assert 'CS' == ds[0x00080005].VR\n assert 'TM' == ds[0x00080030].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'DA' == ds[0x00100030].VR", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def decodeTag(cls, value):\n raise NotImplementedError", "def non_eng(self):\n return self.raw.get(\"tags\", {\"language\": \"eng\"}).get(\"language\", \"eng\").lower() != \"eng\"", "def decodeTag(tag):\n tag = ord(tag)\n if TYPE.has_key(tag):\n return TYPE[tag]\n else:\n raise SnmplibUnknownType, \"decodeTag(): Unknown tag: %02X\" % tag", "def tag_is_unknown(state):\n return state == None or state[0] == UNKNOWN", "def test_decode_invalid_pair_errors_ignore(self):\n self.assertEqual(\n decode(b'ZJVYUGTDRPDYFGFXMK', 'trytes', 'ignore'),\n b'\\xd2\\x80\\xc3',\n )", "def _decode_unknown_me(self, msg):\n from struct import unpack\n\n (tid, msg_type, framing) = unpack('!HBB', msg[0:4])\n\n assert framing == 0xa, 'Only basic OMCI framing supported at this time'\n msg = msg[4:]\n\n # TODO: Commented out items below are future work (not expected for VOLTHA v2.0)\n (msg_class, kwargs) = {\n # OmciCreateResponse.message_id: (OmciCreateResponse, None),\n # OmciDeleteResponse.message_id: (OmciDeleteResponse, None),\n # OmciSetResponse.message_id: (OmciSetResponse, None),\n # OmciGetResponse.message_id: (OmciGetResponse, None),\n # OmciGetAllAlarmsNextResponse.message_id: (OmciGetAllAlarmsNextResponse, None),\n OmciMibUploadNextResponse.message_id: (OmciMibUploadNextResponse,\n {\n 'entity_class': unpack('!H', msg[0:2])[0],\n 'entity_id': unpack('!H', msg[2:4])[0],\n 'object_entity_class': unpack('!H', msg[4:6])[0],\n 'object_entity_id': unpack('!H', msg[6:8])[0],\n 'object_attributes_mask': unpack('!H', msg[8:10])[0],\n 'object_data': {\n UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[10:-4])\n },\n }),\n # OmciAlarmNotification.message_id: (OmciAlarmNotification, None),\n OmciAttributeValueChange.message_id: (OmciAttributeValueChange,\n {\n 'entity_class': unpack('!H', msg[0:2])[0],\n 'entity_id': unpack('!H', msg[2:4])[0],\n 'data': {\n UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[4:-8])\n },\n }),\n # OmciTestResult.message_id: (OmciTestResult, None),\n }.get(msg_type, None)\n\n if msg_class is None:\n raise TypeError('Unsupport Message Type for Unknown Decode: {}',\n msg_type)\n\n return 
OmciFrame(transaction_id=tid, message_type=msg_type,\n omci_message=msg_class(**kwargs))", "def test_bad_bytestring(self):\n bytestring = b'\\x10\\x00'\n convert_tag(bytestring, True)", "def test_add_reverse_iptag_then_not_locate_tag(self):\n tag_info = Tags()\n reverse_iptag = ReverseIPTag(\"\", 1, 23, 0, 0, 1, 1)\n machine_vertex = SimpleMachineVertex(None, \"\")\n machine_vertex2 = SimpleMachineVertex(None, \"\")\n tag_info.add_reverse_ip_tag(reverse_iptag, machine_vertex2)\n gotton_tag = tag_info.get_reverse_ip_tags_for_vertex(\n machine_vertex)\n self.assertEqual(gotton_tag, None)", "def unknownPacket(con, kind, data):", "def auto_decode(self):\r\n return True", "def test_decode_invalid_pair_errors_replace(self):\n self.assertEqual(\n decode(b'ZJVYUGTDRPDYFGFXMK', 'trytes', 'replace'),\n b'??\\xd2\\x80??\\xc3??',\n )", "def test013ReadUnknownEncoding(self):\n self.assertRaises(ChunkDataError, self.region.get_nbt, 11, 0)", "def decode(self, data):\n\t\traise NotImplementedError()", "def fix_tags(tags):\n if tags.subclasses[2][0].value != 'AcDbVertex':\n tags.subclasses.insert(2, EMPTY_VERTEX_SUBCLASS)", "def _disable_native_tag(self, interface):\n url = self._construct_url(interface, suffix='trunk/tag/native-vlan')\n self._make_request('DELETE', url, acceptable_error_codes=(404,))", "def tag(self, tag):\n try:\n self.data = re.findall(r'\\w?[^.?!]*{0}[^.?!]*[.?!]'.format(tag), self.data)\n if len(self.data) == 0:\n self.data = None\n except TypeError:\n self.status = 400" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tag with length > 64kb with VR UN is not changed.
def test_tag_with_long_value_UN_VR(self):
        ds = Dataset()
        ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')

        single_value = b'123456.789012345'
        large_value = b'\\'.join([single_value] * 4500)
        ds[0x30040058] = DataElement(0x30040058, 'UN',
                                     large_value,
                                     is_undefined_length=False)
        ds.decode()
        assert 'UN' == ds[0x30040058].VR
[ "def encodeTagLength(cls, tag, length):\n raise NotImplementedError", "def set_vlen(self, vec_length):\n return _radio_astro_swig.detect_set_vlen(self, vec_length)", "def set_len(self, length):\n pass", "def get_tag(self, tag_len=16):\n tag = _FFI.new(\"unsigned char []\", tag_len)\n ret = _C.EVP_CIPHER_CTX_ctrl(\n self.ctx, _C.EVP_CTRL_GCM_GET_TAG, tag_len, tag)\n _check(ret)\n s = bytes(_FFI.buffer(tag)[:])\n return s", "def set_length(self, length):\n self.length = length\n self.changed = True", "def _is_too_long(self, new_length=0):\n return get_embed_size(self.cur_embed) + new_length > Limits.EMBED_TOTAL", "def getVersionLength(self) -> int:\n ...", "def _set_length(self, val):\n self.Data.Length = val", "def max_length(self) -> float:", "def ByteLength(self) -> _n_2_t_29:", "def extension_length(self, extension_length):\n\n self._extension_length = extension_length", "def set_vlen(self, vec_length):\n return _radio_astro_swig.detect_sptr_set_vlen(self, vec_length)", "def __len__(self):\n return(len(self.nucleotides))", "def min_length(self) -> float:", "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def testUpdateTlen(self):\n a = self.build_read()\n oldlen = a.template_length\n oldlen *= 2\n a.template_length = oldlen\n self.assertEqual(a.template_length, oldlen)", "def cl_10_5_4_1_fillet_weld_effective_length(fillet_size, available_length):\n # TODO : if available_length >= 4 * fillet_size\n effective_length = available_length - 2 * fillet_size\n return effective_length", "def _set_length(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"length\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint16', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"length must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"length\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint16', is_config=True)\"\"\",\n })\n\n self.__length = t\n if hasattr(self, '_set'):\n self._set()", "def any_attribute_long_than(\n self, text: str, length: int, start: str | None = None\n ) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test converting a raw element with unknown VR
def test_unknown_vr(self):
        raw = RawDataElement(Tag(0x00080000), 'AA', 8, b'20170101', 0,
                             False, True)
        with pytest.raises(NotImplementedError):
            DataElement_from_raw(raw, default_encoding)
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00111010].VR\n assert 'Διονυσιος'.encode('iso_ir_126') == ds[0x00111010].value", "def test_reading_ds_with_known_tags_with_UN_VR(\n self, replace_un_with_known_vr):\n test_file = get_testdata_file('explicit_VR-UN.dcm')\n ds = dcmread(test_file)\n assert 'CS' == ds[0x00080005].VR\n assert 'TM' == ds[0x00080030].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'DA' == ds[0x00100030].VR", "def test_known_tags_with_UN_VR(self, replace_un_with_known_vr):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Διονυσιος' == ds[0x00100010].value\n\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN',\n b'ISO 2022 IR 100\\\\ISO 2022 IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n b'Dionysios=\\x1b\\x2d\\x46'\n + 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'CS' == ds[0x00080005].VR\n assert 'PN' == ds[0x00100010].VR\n assert 'Dionysios=Διονυσιος' == ds[0x00100010].value", "def test_decode_torch(self):\n\t\tvoxel = v2lt.decode_shape(self.shlt)\n\t\t# sio.savemat('decode_torch.mat', {'voxel':voxel.numpy(), 'gt':self.voxelt.numpy()})\n\t\tself.assertTrue((voxel == self.voxelt).all())\n\t\tpass", "def test_roundtrip_torch(self):\n\t\tvoxel = v2lt.decode_shape(v2lt.encode_shape(self.voxelt, 2))\n\t\tself.assertTrue((voxel == self.voxelt).all())\n\t\tpass", "def test_transformer(self) -> None:\n elemType = 'TRANSFORMER'\n elemName = '1200KV/400KV BINA-ICT-1'\n elemVoltLvl = extractVoltFromName(elemType, elemName)\n # print(elemVoltLvl)\n self.assertTrue(elemVoltLvl == '1200KV/400KV')", "def _testBee(self, source):\n pass\n source.send('at',command='vr')\n return source.wait_read_frame()", "def test_g_et_pixe2eid(self):\n pass", "def test_video_vob_should_return_true(self):\n\n video_name : str = \"video.vob\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_rmvb_should_return_true(self):\n\n video_name : str = \"video.rmvb\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def convertSolidTx(node ,attributeobject, uvSetName=\"string\", fillTextureSeams=bool, uvRange=float, antiAlias=bool, samplePlaneRange=float, camera=\"string\", shadows=bool, alpha=bool, name=\"string\", resolutionY=int, samplePlane=bool, fullUvRange=bool, reuseDepthMap=bool, fileFormat=\"string\", backgroundMode=\"string\", resolutionX=int, componentRange=bool, force=bool, backgroundColor=int, doubleSided=bool, fileImageName=\"string\", uvBBoxIntersect=bool, pixelFormat=\"string\"):\n pass", "def test_inequality_standard(self):\n dd = DataElement(0x00100010, 'PN', 'ANON')\n assert not dd != dd\n assert DataElement(0x00100010, 'PN', 'ANONA') != dd\n\n # Check tag\n assert DataElement(0x00100011, 'PN', 'ANON') != dd\n\n # Check VR\n assert DataElement(0x00100010, 'SH', 'ANON') != dd", "def nodeToElement(node,result):\n\tname=node.getId()\n\ttype=node.getType()\n\tposition=node.getPosition()\n\tlength=node.getLength()\n\t#print \"==n2E==\",name,type,position,length\n\n\t#thick 
elements\n\tif node.isKindOf(\"dh\"):\n\t\tresult.add(Dipole(position,length,name))\n\telif node.isKindOf(\"qh\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\telif node.isKindOf(\"qv\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\telif node.isKindOf(\"pq\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\n\t#thin elements within nonzero drift space (quasi thick elements)\n\telif node.isKindOf(\"rfgap\"):\n\t\tfor i in RFGap(position,length,name).asTuple(): result.add(i)\n\telif node.isKindOf(\"bcm\"):\n\t\tfor i in BCMonitor(position,length,name).asTuple(): result.add(i)\n\n\t#thin elements\n\telif node.isKindOf(\"dch\"):\n\t\tresult.add(HSteerer(position,length,name))\n\telif node.isKindOf(\"dcv\"):\n\t\tresult.add(VSteerer(position,length,name))\n\telif node.isKindOf(\"bpm\"):\n\t\tresult.add(BPMonitor(position,length,name))\n\telif node.isKindOf(\"ws\"):\n\t\tresult.add(WScanner(position,length,name))\n\telse:\n\t\tprint node.getId(),\"is unknown node type.\"\n\t\tsys.exit(-1)", "def test_no_spw():\n uvobj = UVData()\n testfile_no_spw = os.path.join(DATA_PATH, \"zen.2456865.60537.xy.uvcRREAAM.ms\")\n uvobj.read(testfile_no_spw, use_future_array_shapes=True)\n del uvobj", "def test_tag_with_long_value_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n\n single_value = b'123456.789012345'\n large_value = b'\\\\'.join([single_value] * 4500)\n ds[0x30040058] = DataElement(0x30040058, 'UN',\n large_value,\n is_undefined_length=False)\n ds.decode()\n assert 'UN' == ds[0x30040058].VR", "def isUVReversed(*args, **kwargs):\n \n pass", "def test_resnet_v1_encode():\n client = ResnetV12Vec()\n sample = client.read('https://getvectorai.com/assets/logo-square.png')\n result = client.encode(sample)\n assert np.array(result).shape == (2048,)", "def test_video_divx_should_return_true(self):\n\n video_name : str = \"video.divx\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def matches(self, element: 'SoElement') -> \"SbBool\":\n return _coin.SoTextureOverrideElement_matches(self, element)", "def test_lighting_transform_no_errors(self):\n dataset = self.get_test_image_dataset()\n\n config = [{\"name\": \"ToTensor\"}, {\"name\": \"lighting\"}]\n transform = build_field_transform_default_imagenet(config)\n sample = dataset[0]\n try:\n # test that lighting has been registered and runs without errors\n transform(sample)\n except Exception:\n self.fail(\"LightingTransform raised an exception\")\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change working directory to project folder and call mainloop.
def main():
    srcDir = os.path.dirname(__file__)
    os.chdir(srcDir)
    Application().mainloop()
[ "def main_loop(self):\n # Start main loop thread (loop() handler)\n while True:\n if self._looping:\n # Call loop() handler\n self._berry.loop_client()", "def run(self):\r\n self.root.after(3000, self.__my_mainloop)\r\n self.root.mainloop()", "def main_loop():\n \n ConsoleViewController.isMainLoopRunning = True\n \n while (ConsoleViewController.isMainLoopRunning):\n continue", "def mainLoop(self):\n\n while self.running:\n if self.state == \"START\":\n self.startLoop()\n elif self.state == \"GAME\":\n self.gameLoop()\n elif self.state == \"END\":\n self.endLoop()", "def main():\n start()", "def main_cycle():\n server_config = get_config_params()\n directory = os.getcwd()\n server_config.read(f\"{directory}/{'server_config.ini'}\")\n\n address, port, no_gui = get_launch_params(\n server_config['SETTINGS']['listen_address'],\n server_config['SETTINGS']['default_port']\n )\n database = ServerDatabase(\n os.path.join(\n server_config['SETTINGS']['db_path'],\n server_config['SETTINGS']['db_file']\n )\n )\n\n server = MessagingServer(address, port, database)\n server.setDaemon(True)\n server.start()\n\n if no_gui:\n while True:\n prompt = input('Для выхода введите exit: ')\n if prompt == 'exit':\n server.working = False\n server.join()\n break\n else:\n server_app = QApplication(argv)\n server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton)\n main_window = MainWindow(server, database, server_config)\n\n server_app.exec_()\n\n server.working = False", "def start(self):\n print(\"=\"*80)\n print(\"Inspecting\", self.folder)\n print(\"=\"*80)\n\n self.unzipProject()\n self.moveToProject()\n self.showFilesToInspect()\n self.executeProject()\n self.performJUnitTests()\n self.listAllFilesByExtension(self, \"java\");\n self.returnToBaseDirectory()\n print(\"=\"*80)\n print(\"Concluding\", self.folder)\n print(\"=\"*80)", "def MainLoop(self):\n\t\tself.__frame.Layout()\n\t\tself.__frame.Show()\n\t\tself.__app.MainLoop()", "def main():\n log.info('beginning INDRA machine runner')\n\n for subdirectory in sorted(os.listdir(HERE)):\n run_one(os.path.join(HERE, subdirectory))", "def run(self):\n try:\n self.root.mainloop()\n except (KeyboardInterrupt, SystemExit):\n self.kill()\n return", "def run(self):\n # First setup board\n self.setup()\n # Loop forever\n self.loop()", "def exec(self) -> None:\n self.start_message_loop()", "def main():\n event_handler = FileConverter('*.edf')\n\n observer = Observer()\n observer.schedule(event_handler, BASE_DIR)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()", "def setupRunDirectory(self):\n\t\tif self.params['rundir'] is None:\n\t\t\tapDisplay.printWarning(\"run directory not defined, automatically setting it\")\n\t\t\tself.setProcessingDirName()\n\t\t\tself.setRunDir()\n\t\t\tif self.params['rundir'] is None:\n\t\t\t\tapDisplay.printError(\"No run directory was set\")\n\n\t\tif self.quiet is False:\n\t\t\tapDisplay.printMsg(\"Run directory: \"+self.params['rundir'])\n\n\t\tif not os.path.isdir(self.params['rundir']):\n\t\t\t\tapDisplay.printError(\"run directory must exist for FileScript run\")\n\n\t\tos.chdir(self.params['rundir'])", "def mainloop(self):\n while self.running:\n self.updateview();\n self.handlekey(self.scr.getch());", "def run(self):\n self.root.title(\"SATRO\")\n self.root.mainloop()", "def main():\n app = WoJ()\n app.run()\n pygame.quit()", "def run(self):\n cherrypy.engine.SIGHUP = None\n cherrypy.engine.SIGTERM = None\n cherrypy.engine.autoreload_on = False\n\n # 
User config file if specified\n if self.configFile:\n cherrypy.config.update(self.configFile)\n # Override explicitly passed config options\n cherrypy.config.update(self.configDict)\n \n cherrypy.tree.mount(self.httpTree)\n cherrypy.server.quickstart()\n cherrypy.engine.start(blocking=False)\n \n # Loop till done\n finished = False\n while not finished:\n time.sleep(5)\n finished = self.exitFlag\n \n # When done, exit gracefully\n self._suicide()", "def chdir_local() -> None:\n os.chdir(os.path.dirname(__file__))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch UDHR xml bundle from unicode.org to fetch_dir.
def fetch_udhr(fetch_dir):
  fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)
  dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
  result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)
  print 'Fetched: ' + result[0]
[ "def update_udhr(udhr_dir, fetch_dir, in_repo):\n\n zippath = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)\n tool_utils.check_file_exists(zippath)\n\n if in_repo and os.path.isdir(udhr_dir) and not tool_utils.git_is_clean(udhr_dir):\n raise ValueError('Please clean %s.' % udhr_dir)\n\n if os.path.isdir(udhr_dir):\n shutil.rmtree(udhr_dir)\n os.makedirs(udhr_dir)\n tool_utils.zip_extract_with_timestamp(zippath, udhr_dir)\n\n # dos line endings, sheesh\n tool_utils.dos2unix(udhr_dir, ['*.xml', '*.rnc', '*.rng'])\n\n if in_repo:\n tool_utils.git_add_all(udhr_dir)\n\n date = datetime.datetime.now().strftime('%Y-%m-%d')\n dst = 'in %s ' % udhr_dir if not in_repo else ''\n print 'Update UDHR files %sfrom %s as of %s.' % (dst, fetch_dir, date)", "def scrape_ucddb():\n BASE_URL = \"http://physionet.org/physiobank/database/ucddb/\"\n contents = download(BASE_URL)\n tree = etree.HTML(contents)\n urls = tree.xpath(\"//pre/a/@href\")\n urls = filter_urls(urls)\n absolute_urls = [urljoin(BASE_URL, url) for url in urls]\n\n for url in absolute_urls:\n save_contents(url)", "def _fetchUnicodes(glif):\n parser = _FetchUnicodesParser()\n parser.parse(glif)\n return parser.unicodes", "def unpackFile(uri, fetchTarget, sourceBaseDir, sourceSubDir, foldSubDir):\n\n\tsourceDir = sourceBaseDir + '/' + sourceSubDir \\\n\t\tif sourceSubDir else sourceBaseDir\n\tif uri.endswith('#noarchive'):\n\t\tif os.path.isdir(fetchTarget):\n\t\t\tshutil.copytree(fetchTarget, sourceDir, symlinks=True)\n\t\telse:\n\t\t\tif not os.path.isdir(sourceDir):\n\t\t\t\tos.makedirs(sourceDir)\n\t\t\tshutil.copy(fetchTarget, sourceDir)\n\telse:\n\t\tactualSubDir = sourceSubDir\n\t\tif actualSubDir:\n\t\t\tif foldSubDir:\n\t\t\t\tactualSubDir += '/' + foldSubDir\n\t\telse:\n\t\t\tactualSubDir = foldSubDir\n\t\tunpackArchive(fetchTarget, sourceBaseDir, actualSubDir)\n\t\tif foldSubDir:\n\t\t\tfoldSubdirIntoSourceDir(foldSubDir, sourceDir)", "def fetch_local(path):\n tempdir = mkdtemp()\n destination = \"{}/bundle\".format(tempdir)\n copytree(path, destination)\n return destination", "def get_local_filepath(ucb_url):\n content_dir = \"/apps/content/raw_files/UCSF/JapaneseWoodblocks/\"\n\n # example: http://nma.berkeley.edu/ark:/28722/bk0000m7z5r\n real_url = ucb_url.replace('nma.berkeley.edu', 'vm172.lib.berkeley.edu:8080/resolver')\n parsed_url = urlparse(ucb_url)\n ark = parsed_url.path.split('/ark:/')[1]\n dir = os.path.join(content_dir, ark)\n try:\n # look in the local cache of ARK->filename\n filename = [files for root, dirs, files in os.walk(dir)][0][0]\n except:\n # do the lookup\n r = requests.head(real_url, allow_redirects=False)\n url_we_want = r.headers['Location']\n path, filename = os.path.split(urlparse(url_we_want).path)\n dest_dir = os.path.join(content_dir, ark)\n dest_path = os.path.join(dest_dir, filename)\n _mkdir(dest_dir)\n # just touch the files; no need to download\n # (in fact, some are fobidden from download)\n with open(dest_path, 'a'): # http://stackoverflow.com/a/1160227/1763984\n os.utime(dest_path, None)\n print \"Touched file:\", filename\n return dir, filename", "def _fetch_url(self, word):\n\t\t\n\t\tif os.path.isfile(RAW_DATA_PATH + word + \".html\"):\n\t\t\thtml_file = open(RAW_DATA_PATH + word + \".html\", \"r\")\n\t\t\tcontent = html_file.read()\n\t\t\thtml_file.close()\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\turl = \"http://dictionary.reference.com/browse/\" + word\n\t\t\t\tcontent = urllib2.urlopen(url).read()\n\t\t\texcept urllib2.URLError as err:\n\t\t\t\tprint \"Word not in database, please connect to the 
internet.\"\n\t\t\t\texit()\n\n\t\t#Saving html content just in case I'll need to make some changes in the future.\n\t\tcontent_file = open(RAW_DATA_PATH + word + \".html\", \"w\")\n\t\tcontent_file.write(content)\n\t\tcontent_file.close()\n\t\t\n\t\treturn BeautifulSoup(content)", "def download_and_unpack(self, download_dir):\n pass", "def get_dpkg(name, release, dir):\n\n debian_repo = 'http://ftp.es.debian.org/debian/'\n sources_url = debian_repo + 'dists/' + release + '/source/Sources.gz'\n sources_file = os.path.join(dir, 'Sources.gz')\n urllib.request.urlretrieve(sources_url, sources_file)\n pkg_data = get_dpkg_data(sources_file, name)\n for file in pkg_data['components']:\n file_url = debian_repo + pkg_data['directory'] + \"/\" + file\n file_path = os.path.join(dir, file)\n logging.info (\"Downloading {} from {}\".format(file, file_url))\n urllib.request.urlretrieve(file_url, file_path)\n return os.path.join(dir, pkg_data['dsc'])", "def download_word_embeddings_nl() -> None:\n print('--- Beginning word embedding file download ---')\n url = 'https://www.clips.uantwerpen.be/dutchembeddings/combined-320.tar.gz'\n with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,\n desc=url.split('/')[-1]) as t:\n file_tmp = urlretrieve(url, filename=None, reporthook=t.update_to)[0]\n t.total = t.n\n\n base_name = os.path.basename(url)\n file_name, file_extension = os.path.splitext(base_name)\n tar = tarfile.open(file_tmp)\n tar.extractall(ROOT_DIR+'/resources/word_embeddings/'+file_name)\n return None", "def update_addon(uid,wow_dir):\n url = 'http://www.wowinterface.com/patcher/info-%d.xml' % uid\n dom = minidom.parse(urllib2.urlopen(url))\n \n if dom.getElementsByTagName('error'):\n if int(dom.getElementsByTagName('id')[0].firstChild.nodeValue) == 403:\n print 'The file is still being checked by mods, update will be downloaded next time you run this script.' #This function shouldn't print.\n return False\n else:\n print 'Please give this info to the addon author: <%d> - %s' % (int(dom.getElementsByTagName('id')[0].firstChild.nodeValue),\n str(dom.getElementsByTagName('message')[0].firstChild.nodeValue))\n return False\n file_location = str(dom.getElementsByTagName('UIFileURL')[0].firstChild.nodeValue)\n size = int(dom.getElementsByTagName('UISize')[0].firstChild.nodeValue)\n if size > 1048576: #If size is lager then 1mb\n print 'Downloading big file, this may take more then few seconds' #This function shouldn't print. This is just a workaround. Again.\n f = urllib2.urlopen(file_location)\n data = StringIO(f.read())\n f.close()\n data = zipfile.ZipFile(data)\n addon_dirs = []\n for f in data.namelist():\n dir = str(f.split('/',1)[0])\n if not (dir in addon_dirs):\n addon_dirs.append(dir)\n wuiup_removedir(os.path.join(wow_dir, dir))\n wuiup_unzip(data,wow_dir)\n data.close()\n return True", "async def load_eu_data():\n eu_url = \"https://sourceforge.net/projects/miuimix/rss?path=/\"\n async with ClientSession() as session:\n stable = eT.fromstring(await fetch(session, f'{eu_url}/weekly'))\n weekly = eT.fromstring(await fetch(session, f'{eu_url}/stable'))\n stable_links = [i.find('link').text for i in stable[0].findall('item')]\n weekly_links = [i.find('link').text for i in weekly[0].findall('item')]\n return [*stable_links, *weekly_links]", "def fetch_episode_xml(supla_id):\n # We happen to know this is where the XML is stored. 
Hacky, in that\n # sense\n url = f\"https://gatling.nelonenmedia.fi/media-xml-cache?id={supla_id}\"\n ref = f\"https://www.supla.fi/supla/{supla_id}\"\n\n return ElementTree.fromstring(requests.get(url, headers={\"Referer\": ref}).text)", "def fetch(data_dir, dest=\"wmt14\"):\n # Create folder\n wmt_dir = os.path.join(data_dir, dest)\n utils.create_folder(wmt_dir)\n\n # Download all datasets\n for f, url in CORPORA.items():\n utils.urlretrieve(url, os.path.join(wmt_dir, f))\n\n return wmt_dir", "def fetch_and_parse_xml(url, auth_info=None):\n return ET.parse(fetch_resource(url, auth_info))", "def _extract_dir_content(url, infile):\n parser = xml.sax.make_parser()\n\n handler = DavListDirHandler()\n handler.set_url(url)\n parser.setContentHandler(handler)\n try:\n parser.parse(infile)\n except xml.sax.SAXParseException, e:\n raise errors.InvalidHttpResponse(\n url, msg='Malformed xml response: %s' % e)\n # Reformat for bzr needs\n dir_content = handler.dir_content\n (dir_name, is_dir) = dir_content[0][:2]\n if not is_dir:\n raise errors.NotADirectory(url)\n dir_len = len(dir_name)\n elements = []\n for (href, is_dir, size, is_exec) in dir_content[1:]: # Ignore first element\n if href.startswith(dir_name):\n name = href[dir_len:]\n if name.endswith('/'):\n # Get rid of final '/'\n name = name[0:-1]\n # We receive already url-encoded strings so down-casting is\n # safe. And bzr insists on getting strings not unicode strings.\n elements.append((str(name), is_dir, size, is_exec))\n return elements", "def DownstreamBinaryLoadUnit(self):\r\n\t\treturn self._get_attribute('downstreamBinaryLoadUnit')", "def do_fetchdoc(self, line):\n if not line.strip():\n print \"usage: fetchdoc <document_id>\\n\"\n\n key = line.strip()\n url = self.base_doc_url % (key[:3], key)\n\n print \"fetchdoc: %s\" % url\n doc = lxml.html.parse(url).getroot()\n content = lxml.html.tostring(doc, encoding='utf-8')\n\n filedir = os.path.join(self.root_dir, \"raw\", key[:3])\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n filepath = os.path.join(filedir, \"%s.html\" % key)\n\n f_out = open(filepath, 'w')\n f_out.write(content)\n f_out.close()", "def uci_load(self, ucistr):\n self.execute(\"echo \" + ucistr + \" | uci import \")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete udhr_dir and rebuild with files extracted from udhr_xml.zip in fetch_dir. Stage if udhr_dir is in the repo.
def update_udhr(udhr_dir, fetch_dir, in_repo):
  zippath = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
  tool_utils.check_file_exists(zippath)

  if in_repo and os.path.isdir(udhr_dir) and not tool_utils.git_is_clean(udhr_dir):
    raise ValueError('Please clean %s.' % udhr_dir)

  if os.path.isdir(udhr_dir):
    shutil.rmtree(udhr_dir)
  os.makedirs(udhr_dir)
  tool_utils.zip_extract_with_timestamp(zippath, udhr_dir)

  # dos line endings, sheesh
  tool_utils.dos2unix(udhr_dir, ['*.xml', '*.rnc', '*.rng'])

  if in_repo:
    tool_utils.git_add_all(udhr_dir)

  date = datetime.datetime.now().strftime('%Y-%m-%d')
  dst = 'in %s ' % udhr_dir if not in_repo else ''
  print 'Update UDHR files %sfrom %s as of %s.' % (dst, fetch_dir, date)
[ "def fetch_udhr(fetch_dir):\n fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)\n dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)\n result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)\n print 'Fetched: ' + result[0]", "def remove_update_files(self):\n tar_file = os.path.join(htpc.RUNDIR, '%s.tar.gz' % self.repo)\n update_folder = os.path.join(htpc.RUNDIR, 'update')\n\n if os.path.exists(tar_file):\n os.remove(tar_file)\n\n if os.path.exists(update_folder):\n shutil.rmtree(update_folder)", "def cleanup_files(base_dir, builder):\n builder.run_root('rm -rf /build')", "def unpackFile(uri, fetchTarget, sourceBaseDir, sourceSubDir, foldSubDir):\n\n\tsourceDir = sourceBaseDir + '/' + sourceSubDir \\\n\t\tif sourceSubDir else sourceBaseDir\n\tif uri.endswith('#noarchive'):\n\t\tif os.path.isdir(fetchTarget):\n\t\t\tshutil.copytree(fetchTarget, sourceDir, symlinks=True)\n\t\telse:\n\t\t\tif not os.path.isdir(sourceDir):\n\t\t\t\tos.makedirs(sourceDir)\n\t\t\tshutil.copy(fetchTarget, sourceDir)\n\telse:\n\t\tactualSubDir = sourceSubDir\n\t\tif actualSubDir:\n\t\t\tif foldSubDir:\n\t\t\t\tactualSubDir += '/' + foldSubDir\n\t\telse:\n\t\t\tactualSubDir = foldSubDir\n\t\tunpackArchive(fetchTarget, sourceBaseDir, actualSubDir)\n\t\tif foldSubDir:\n\t\t\tfoldSubdirIntoSourceDir(foldSubDir, sourceDir)", "def gsd_file_reduction(self):\n\n self.remove_file_dir(self.gsd_file_path, is_dir=True)\n self.my_zip.unzip_file(self.zip_file, add_dir=True)\n self.remove_file_dir(self.zip_file)", "def download_and_unpack(self, download_dir):\n pass", "def lxd_pull_files(lxd_dir):\n\n from tempfile import mkdtemp\n import subprocess\n\n tmp_dir = mkdtemp()\n cmdlist = ['lxc', 'file', 'pull', '--recursive', lxd_dir, tmp_dir]\n subprocess.check_call(cmdlist)\n\n # Typically this will pull the directory user.sandcats.io, find the files\n # and move them to tmp_dir\n lfiles = os.listdir(tmp_dir)\n if len(lfiles) == 1:\n single_item = join(tmp_dir, lfiles[0])\n if isdir(single_item):\n subprocess.check_call('mv {}/* {}'.format(single_item, tmp_dir),\n shell = True)\n os.rmdir(single_item)\n\n return tmp_dir", "def clean_dir(dl_dir):\n failed_downloads = get_fails()\n failed_targets = set([f[4:14] for f in failed_downloads])\n\n dl_files = os.listdir(dl_dir)\n for file in dl_files:\n if file[:10] in failed_targets:\n rem = dl_dir+'/'+file\n os.remove(rem)\n print(\"removed {}\".format(rem))\n\n os.remove('failed_downloads.log')\n open('failed_downloads.log','w').close()", "def cleanup(self) -> None:\n info('<<lightyellow>>Remove copied files... ', newline=False)\n shutil.rmtree(self.target)\n # restore pywikibot en.json file\n filename = 'en.json'\n self.target.mkdir()\n shutil.copy(self.source / filename, self.target / filename)\n info('<<lightyellow>>done')", "def clean_packmol_dir(envpath):\r\n # copy resulting .xyz to project dir\r\n try:\r\n os.replace(\"./PR_initcell.xyz\", f\"{envpath}/initcell.xyz\")\r\n except OSError:\r\n print(\"!!!!!Can't copy resulting .xyz file! 
Check packmol.log!!!!!\")\r\n exit()\r\n\r\n # clear the packmol directory of temporary .xyz and .inp files\r\n for i in glob.glob(f\"{PATH}/packmol/*.xyz\"):\r\n os.remove(i)\r\n for i in glob.glob(f\"{PATH}/packmol/*.inp\"):\r\n os.remove(i)", "def update():\n import os\n os.chdir(path.scriptdir)\n tools.run(\"git\", \"pull\")", "def tar_update(self):\n self.logger.info(\"Trying update through tar-download\")\n tar_file = os.path.join(htpc.RUNDIR, '%s.tar.gz' % self.repo)\n update_folder = os.path.join(htpc.RUNDIR, 'update')\n\n try:\n self.logger.debug(\"Downloading from https://github.com/%s/%s/tarball/%s\"\n % (self.user, self.repo, self.branch))\n self.logger.debug(\"Downloading to \" + tar_file)\n url = urllib2.urlopen('https://github.com/%s/%s/tarball/%s'\n % (self.user, self.repo, self.branch))\n file_obj = open(tar_file, 'wb')\n file_obj.write(url.read())\n file_obj.close()\n except:\n self.logger.error(\"Unable to fetch tar-file. Aborting and removing left overs.\")\n self.remove_update_files()\n return False\n\n try:\n self.logger.debug(\"Extracting tar file to \" + update_folder)\n tar = tarfile.open(tar_file)\n tar.extractall(update_folder)\n tar.close()\n except:\n self.logger.error(\"Unable to extract tar-file. Aborting and removing left overs.\")\n self.remove_update_files()\n return False\n\n latest = self.latest_commit()\n root_src_dir = os.path.join(update_folder, '%s-%s-%s'\n % (self.user, self.repo, latest[:7]))\n\n try:\n self.logger.debug(\"Replacing the old files with the updated files.\")\n for src_dir, dirs, files in os.walk(root_src_dir):\n dst_dir = src_dir.replace(root_src_dir, htpc.RUNDIR)\n if not os.path.exists(dst_dir):\n os.mkdir(dst_dir)\n for file_ in files:\n src_file = os.path.join(src_dir, file_)\n dst_file = os.path.join(dst_dir, file_)\n if os.path.exists(dst_file):\n os.remove(dst_file)\n shutil.move(src_file, dst_dir)\n except:\n self.logger.debug(\"Unable to replace the old files. Aborting and removing left overs.\")\n self.remove_update_files()\n return False\n\n self.logger.debug(\"Update successful. 
Removing left overs.\")\n self.remove_update_files()\n return True", "def rm_pes_dir(self, dirname):\n self.parallel_environment_manager.rm_object_dir(dirname)", "def cleanup(base_dir):\n for root, dirs, files in os.walk(base_dir, topdown=False):\n for name in files:\n fname = os.path.join(root, name)\n if fname not in __FETCHED:\n print('{}Deleting: {}{}'.format(YELLOW, RESET, fname))\n os.unlink(fname)\n\n for name in dirs:\n dname = os.path.join(root, name)\n if not os.listdir(dname):\n print('{}Deleting: {}{}'.format(YELLOW, RESET, dname))\n os.rmdir(dname)", "def test20RemoveUnzipped(self):\n self.om.download_file(\"INTL.IVYDB.{}D.zip\".format(self.good_day_str))\n self.om.unzip_file(self.good_day)\n self.om.remove_unzipped(self.good_day)\n self.assertEquals(len(glob.glob(\n self.om.savepath+\"INTL.IVY*.{}D.txt\".format(self.good_day_str))),\n 0) # No .txt files for that date.", "def rm_hgrps_dir(self, dirname):\n self.host_group_manager.rm_object_dir(dirname)", "def rm_prjs_dir(self, dirname):\n self.project_manager.rm_object_dir(dirname)", "def setUpCrosswalk(self):\n if self.harvestInfo['xsl_file'] is not None and self.harvestInfo['xsl_file'] != '':\n self.storeFileExtension = 'tmp'\n # clean up previous crosswalk and import content\n self.outputDir = self.harvestInfo['data_store_path'] + str(self.harvestInfo['data_source_id'])\n self.outputDir = self.outputDir + os.sep + str(self.harvestInfo['batch_number'])\n for file in os.listdir(self.outputDir):\n if file.endswith(self.resultFileExtension) or \\\n file.endswith(self.resultFileExtension + \".validated\") or \\\n file.endswith(self.resultFileExtension + \".processed\"):\n try:\n if os.path.isfile(self.outputDir + os.sep + file):\n os.unlink(self.outputDir + os.sep + file)\n else:\n self.emptyDirectory(self.outputDir + os.sep + file)\n os.rmdir(self.outputDir + os.sep + file)\n except PermissionError as e:\n self.logger.logMessage(\"Unable to remove %s\" % (self.outputDir + os.sep + file), \"ERROR\")", "def unar(to_unar, dstdir, delarchives, test):\n print('[.] extracting %s to %s' % (to_unar, dstdir))\n if not test:\n subprocess.run(['unar', '-f', '-o', dstdir, to_unar])\n if delarchives == True:\n print('[.] deleting archive: %s' % (to_unar))\n del_file(to_unar, test)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the index.xml file in src_dir and return a map from bcp to a set of file codes. Skip files at stages 1 (missing) or 2 (not started). Stage 3 files have article 1, which is what we want. Stage 4 and 5 are ok, the vast majority are unreviewed (4). In some cases more than one file is mapped to the same bcp47 code, this gets dealt with in fix_index.
def parse_index(src_dir):
  tree = ET.parse(os.path.join(src_dir, 'index.xml'))
  bcp_to_codes = collections.defaultdict(set)
  code_to_ohchr = {}
  for e in tree.getroot().iter('udhr'):
    s = int(e.attrib.get('stage'))
    if s < 3:
      continue
    code = e.attrib.get('f')
    bcp = e.attrib.get('bcp47')
    if not bcp:
      # don't know what to do with this, maybe we could supply a mapping.
      print 'no bcp for %s' % code
      continue
    ohchr = e.attrib.get('ohchr')
    bcp_to_codes[bcp].add(code)
    # we use the ohchr code to identify an attribution
    if ohchr:
      code_to_ohchr[code] = ohchr
  return bcp_to_codes, code_to_ohchr
[ "def source_index(self):\n return os.path.join(self.data_directory, 'sources')", "def load_source_files(self):\n self.stage = [f for f in self.stage_filenames]\n\n for stage in self.STAGES:\n if self.stage_filenames[stage]:\n with open(self.stage_filenames[stage]) as f:\n self.sources[stage] = f.read()\n else:\n self.sources[stage] = None\n\n # Update our mtimes to match the last time we read from source files\n self.prev_mtimes = self.mtimes()", "def mapFile(self, driverXML):\n stepDict = {}\n fileDict = {}\n mapDict = {}\n stepCount = 0 \n tree = ET.parse(driverXML)\n root = tree.getroot()\n for stepsXML in root.getiterator('Steps'):\n for inputXML in stepsXML.getiterator('Input'): \n #print (fileCount)\n #print (inputXML.attrib)\n #print (inputXML.text)\n stepDict[inputXML.text.lower()] = stepCount\n stepCount = stepCount + 1 \n for filesXML in root.getiterator('Files'):\n for inputXML in filesXML.getiterator('Input'): \n fileDict[inputXML.attrib.get('type').lower()] = inputXML.text.lower()\n #for typeName, fileName in fileDict.item():\n # for stepName, mapNumber in stepDict():\n mapDict = {k:stepDict[v] for k,v in fileDict.iteritems()} \n #print (fileDict)\n #print (mapDict)\n return mapDict", "def _code_into_dict(src_dir: str, extensions: dict) -> dict:\n data = {}\n files = os.listdir(src_dir)\n for filename in files:\n path = os.path.join(src_dir, filename)\n ext = filename.split(\".\")[1]\n lang = extensions[ext]\n with open(path) as f:\n code = f.read().strip()\n data[lang] = code\n return data", "def get_index_dictionary(parent_dir):\n # Open the index file\n try:\n index_file = open(join(parent_dir, \".lgit/index\"), \"r+\")\n except PermissionError as e:\n print(e.filename + \":\", \"index file open failed: PermissionDenied\")\n return None\n # Save its content\n content = index_file.readlines()\n index_info_list = []\n char_count = 0\n # Split each line of the content into multiple fields\n for order, index_line in enumerate(content):\n index_info_list.append((index_line[:14],\n index_line[15:55],\n index_line[56:96],\n index_line[97:137],\n index_line[138:].strip(),\n char_count))\n char_count += len(index_line)\n # Create the dictionary\n index_dict = {index_info[4]: index_info for index_info in index_info_list}\n return index_dict", "def processFiles(pathList):\n lettersRegEx = re.compile('^\\D+', re.IGNORECASE)\n files = []\n masterList = []\n \n for i in range(len(pathList)):\n data = map(lambda l: l.strip().split('\\t'), open(pathList[i],'rU'))\n if data[0][0].startswith('#'): data.pop(0) # get rid of headers\n tmpDict = {}\n for i in range(len(data)):\n tmpDict[data[i][0]] = combinePos1and2(data[i][1:])\n \n \n # Add new hitDict to the master list\n masterList.append(tmpDict)\n return masterList", "def ProcessCodeCorpus(fileList, basePath, \n errorCount, args,\n vocabulary, projectFiles):\n i = 0\n for path in fileList:\n #if(i > 1000):\n # break\n print(str(i) + \":\" + path)\n #try:\n if(True):\n (vocabulary, projectFiles, status) = \\\n ProcessCodeFile(path, basePath, errorCount, args, \n i, vocabulary, projectFiles)\n print(status)\n if(status):\n i += 1\n\n return(vocabulary, projectFiles, i)", "def _get_index_file ( self ) :\n\n\t\tif not os.path.isdir( self.absolute_index_dir ):\n\t\t\tself.env.log.warning ('index directory does not exist')\n\t\t\treturn None\n\n # Read all the matching index.* files into a dictionary\n all = {}\n for item in dircache.listdir( self.absolute_index_dir ) :\n path = os.path.join ( self.absolute_index_dir , item )\n\n 
prefix_pattern = re.compile('^index\\.swish-e\\.(.*)$') \n\t\t\tprefix = prefix_pattern.match ( item )\n if prefix :\n # Can be index.xxxx or index.xxxx.prop or index.xxxx.temp\n \tkey = prefix.group(1)\n\n \tif re.compile('^.*\\.temp$').match ( key ) :\n \t# Ignore files ending with *.temp\n \tbreak\n\n if not re.compile('^.*\\.prop$').match( key ):\n # This is an index file ...\n # ... add last modification time\n all[path] = os.path.getmtime(path)\n\n\n\t\t# Do we have indexes in the 'all' dictionary?\n\t\tif not all:\n\t\t\tself.env.log.warning ('attachments/index does not contain any index file')\n\t\t\treturn None\n\n # Sort the indexes dictionary by increasing value\n sorted_dict = list(all.iteritems())\n sorted_dict.sort(lambda i1, i2: cmp(i1[1], i2[1]))\n\n\t\t# Get last tuple\n \tlast_index = sorted_dict.pop()\n\t\treturn last_index[0]", "def getSrcFiles(dir, src):\n\tdirs = []\n\tfor x in os.listdir(dir):\n\t\tif os.path.isdir(pj(dir, x)):\n\t\t\tgetSrcFiles(pj(dir, x), src)\n\t\tif x[-4:] == '.cpp':\n\t\t\tsrc.append('#' + pj(dir, x))", "def _process_tagdir(dirname, output_dir, frame_size, grayscale, file_num=0,\n augment=True):\n mappings = []\n for idx in os.listdir(dirname):\n datadir = os.path.join(dirname, idx)\n if not idx.isdigit() or not os.path.isdir(datadir):\n continue\n vidfile, syncfile, cmdfile = [\n os.path.join(datadir, fname)\n for fname in (\"video.avi\", \"sync.txt\", \"commands.txt\")\n ]\n file_num = _process_files(\n vidfile, syncfile, cmdfile, output_dir, frame_size, grayscale,\n augment=augment, file_num=file_num\n )\n return file_num", "def _file_indexer(cat_list, file_list):\n\n # Determine the number of elements per category\n num_files = len(file_list)\n num_cat = len(cat_list)\n\n stim_per_cat = int(num_files / num_cat)\n\n # Create the index\n file_index = {key: file_list[key] for cat in range(num_cat)\n for key in range(cat * stim_per_cat, (cat + 1) * stim_per_cat)}\n\n return file_index", "def _index_files(self):\n fnames = [self.idx_ids, self.idx_files, self.idx_dates]\n for step in PROCESSING_STEPS:\n fnames.append(\"%s%sidx-processed-%s.txt\" % (self.idx_dir, os.sep, step))\n return fnames", "def _index_files(path):\n with zipfile.ZipFile(path) as zf:\n names = sorted(zf.namelist())\n names = [nn for nn in names if nn.endswith(\".tif\")]\n phasefiles = []\n for name in names:\n with zf.open(name) as pt:\n fd = io.BytesIO(pt.read())\n if SingleRawOAHTif.verify(fd):\n phasefiles.append(name)\n return phasefiles", "def get_index_files(self):\n return", "def merge_indexes(index_files):\n index = {}\n for f in index_files:\n print f\n part_index = pickle.load(file(f))\n index.update(part_index)\n\n return index", "def get_src_files(sdfg):\n sourcefiles = []\n for node, _ in sdfg.all_nodes_recursive():\n if (isinstance(node, (nodes.AccessNode, nodes.Tasklet, nodes.LibraryNode, nodes.Map, nodes.NestedSDFG))\n and node.debuginfo is not None):\n\n filename = node.debuginfo.filename\n if not filename in sourcefiles:\n sourcefiles.append(filename)\n\n elif (isinstance(node, (nodes.MapEntry, nodes.MapExit)) and node.map.debuginfo is not None):\n\n filename = node.map.debuginfo.filename\n if not filename in sourcefiles:\n sourcefiles.append(filename)\n\n return sourcefiles", "def process_dir(src):\n\n for name in os.listdir(src):\n path = os.path.join(src, name)\n if not os.path.isfile(path):\n continue\n\n extensions = []\n while True:\n (name, ext) = os.path.splitext(name)\n if ext == '':\n break\n\n extensions.append(ext)\n\n dbname = 
re.sub(r'[^a-z0-9]', '_', name)\n filters = reversed([EXTFILTERS[ext] for ext in extensions if ext not in EXTIGNORE])\n process_file(path, dbname, filters)", "def get_code_dict_using_path(full_path):\n code_dict = {}\n\n p = Path(full_path)\n #path_list = [x for x in p.iterdir() if x.is_dir()]\n #path_list.sort()\n \n #print(path_list)\n #print(len(path_list)) \n \n count_1 = 0\n count_2 = 0 \n \n # Create a list of all python files. Note: list is PosixPath() \n file_list = list(p.glob('**/*.py')) # 304\n file_list.sort()\n #print(file_list)\n \n # Remove the _rc or rc_ files from the file_list. \n length = len(file_list)\n #print(length) #304\n for index, file in enumerate(reversed(file_list)):\n file_name = PurePath(file).name\n if \"_rc\" in file_name or \"rc_\" in file_name:\n #print(file_name)\n file_list.pop((length -1) - index)\n #print(len(file_list)) # 284 - removed 20 x rc files from list.\n \n # For each file add it to the dictionary\n #print(file_list)\n for file in file_list:\n count_1 +=1\n #print(file)\n #TODO: Make this independent of Pyside6 installation:\n #position = len(\"/home/ian/venv-pyside6/lib/python3.10/site-packages/PySide6/examples\")\n position = len(full_path) \n #print(position)\n file_path = Path(file).as_posix() # Strip PosixPath() off the path\n file_path = \"/\" + file_path\n #print(file_path[position:])\n #print(PurePath(file).name)\n \n file_name = PurePath(file).name\n #file_name = \"/\" + file_name\n count_3 = 0\n \n code_list = [] \n with open(file, \"r\") as fin:\n temp_list = fin.readlines() \n for temp_line in temp_list:\n # Build the lines of code, but remove copyright lines\n if not temp_line.startswith(\"#\"): \n code_list.append(temp_line[:-1]) # strip off extra \\n\n \n #count_2 += len(temp) # 304 3,209,120\n #print(count_1, len(temp_list), count_2, count_3)\n\n # Add key as truncated path/filename code as a list to the dictionary. \n code_dict[file_path[position:]] = code_list \n #print(count_1, len(temp_list), count_2, count_3)\n\n #print(len(file_list), count_2)\n \n return code_dict", "def extract_index_hrefs(source, base_url=None):\n root = ET.parse(source, parser=None)\n return [\n # the \".//*/a[@class='index-file']\" expression does not seem to work\n # with Python 2.6s XPath engine. Workaround:\n (el.attrib[\"href\"].split(\"/\")[-1], urljoin(base_url, el.attrib[\"href\"]))\n for el in itertools.chain(\n root.findall(\".//*/{http://www.w3.org/1999/xhtml}a\"),\n root.findall(\".//*/a\"),\n ) if el.attrib.get(\"class\") == \"index-file\"\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add script subtags where they are not present in the bcp code.
def add_likely_scripts(bcp_to_code):
  result= {}
  for bcp, code in bcp_to_code.iteritems():
    if code in CODE_TO_BCP:
      new_bcp = CODE_TO_BCP[code]
    else:
      new_bcp = bcp
      parts = bcp.split('-')
      try:
        script = generate_website_data.find_likely_script(parts[0])
        if len(parts) == 1:
          new_bcp = '%s-%s' % (bcp, script)
        elif len(parts[1]) != 4 or parts[1].isdigit():
          # assume a region or variant. Some 4-char values are years, e.g. '1996'
          new_bcp = '%s-%s-%s' % (parts[0], script, '-'.join(parts[1:]))
        # otherwise, we assume the 4-char value is a script, and leave it alone.
      except KeyError:
        # if we can't provide a script, it's no use for a script sample, so exclude it
        print 'no likely subtag (script) data for %s, excluding' % parts[0]
        continue
    result[new_bcp] = code
  return result
[ "def _filter_script_tags(input_xml):\n output_lines = []\n in_script = False\n for line in input_xml.splitlines():\n if \"<script>\" in line:\n in_script = True\n if not in_script:\n output_lines.append(line)\n if \"</script>\" in line:\n in_script = False\n return '\\n'.join(output_lines)", "def script(self):\n if 'Suppress-Script' in self.data['record']:\n return Subtag(self.data['record']['Suppress-Script'], 'script')\n return None", "def load_scripts(pkg):\n\n script_ul = SOUP.find(\"ul\", {\"id\": \"scripts\"})\n script_ul.contents = []\n\n for f in os.listdir(pkg):\n if splitext(f)[1] != '.pkg':\n continue\n\n script_dir = join(pkg, f, 'Scripts')\n script_list = Tag(SOUP, 'ul')\n\n for script in os.listdir(script_dir):\n if script == \"Tools\":\n continue\n\n script_li = Tag(SOUP, 'li')\n script_li['class'] = 'code'\n script_path = join(script_dir, script)\n\n if isfile(script_path):\n script_li.append(join(f, 'Scripts', script))\n script_li.append(anchor_for_name(script_path))\n script_pre = Tag(SOUP, 'pre')\n script_pre.append(NavigableString(open(script_path).read()))\n script_li.append(script_pre)\n elif isdir(script_path):\n subscript_files = os.listdir(script_path)\n if not subscript_files:\n continue\n\n script_li.append(\"%s Scripts\" % join(f, 'Scripts', script))\n subscripts = Tag(SOUP, 'ul')\n\n for subscript in subscript_files:\n subscript_path = join(script_path, subscript)\n subscript_li = Tag(SOUP, 'li')\n subscript_li.append(subscript)\n subscript_li.append(anchor_for_name(subscript_path))\n\n subscript_pre = Tag(SOUP, 'pre')\n subscript_pre.append(NavigableString(open(subscript_path).read()))\n subscript_li.append(subscript_pre)\n\n subscripts.append(subscript_li)\n\n script_li.append(subscripts)\n\n script_list.append(script_li)\n\n if script_list.contents:\n new_scripts = Tag(SOUP, 'li')\n new_scripts.append(NavigableString(\"%s Scripts\" % f))\n new_scripts.append(script_list)\n script_ul.append(new_scripts)", "def add_script(self, script):\n globpath = os.path.join(self.hooks_dir, script + '*')\n for s in glob.glob(globpath):\n if os.path.isfile(s) and os.access(s, os.X_OK) and s not in self.scripts:\n self.scripts.append(s)", "def enable_add_script(self):\n self.rules.add_script = re.compile(r'\\$add_script\\((.+?)\\)')\n self.default_rules.insert(0, 'add_script')", "def add_script(self, script, raw=False):\n if raw:\n self.raw_scripts.append(script)\n\n else:\n self.scripts.append(script)", "def process_scripts(cfg, obj):\n\n keys = cfg.keys()\n for key in keys:\n\n ## Look for script snippets that begin with 'on_'\n if key.startswith('on_'):\n event_name = key\n\n ## Issue warnings for events that don't match class methods\n if event_name not in obj.__class__.__dict__:\n THE_LOG.add(\"?? Warning unknown event '%s' in file %s.\" %\n (event_name, obj.filename))\n\n else:\n script = cfg.pop(event_name)\n\n try:\n code = compile_script(script)\n\n except BogScriptError, error:\n THE_LOG.add('!! Script error in file %s' % obj.filename)\n THE_LOG.add(\"!! for event '%s'\" % key)\n THE_LOG.add('!! 
%s' % error)\n sys.exit(1)\n\n ## Map the event name to the compiled bytecode\n obj.scripts[event_name] = code\n\n return cfg", "def addScript(self, state: 'ScXMLScriptElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addScript(self, state)", "def INLINE_SCRIPT_BASE_RESTRICTION():\n if Directive._inlineScriptBaseRestriction is None:\n Directive._inlineScriptBaseRestriction = Directive(\"[inline script base restriction]\", ())\n Directive._inlineScriptBaseRestriction._isRegularDirective = False\n return Directive._inlineScriptBaseRestriction", "def obtener_tags_script(self, p_id_script):\n bd = MySQLConnector.MySQLConnector()\n consulta = \"SELECT IdTag FROM Tag_Script WHERE IdScript=%s\", (p_id_script, )\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd", "def removeIncludes(self, lines):\n\t\t\n\t\tfor index, line in enumerate(lines):\n\t\t\t\n\t\t\tif re.match('^\\+(STIL|TEMA|JAVASCRIPT)', line):\n\t\t\t\tlines[index] = ''\n\t\t\t\t\t\n\t\treturn lines", "def get_scripts(text):\n\tstart = text.find(\"OTHER SCRIPTS\")\n\tend = text.find(\"\\n\", start)\n\treturn text[start:end].strip()", "def test_remove_asset_tag(self):\n pass", "def scriptNode(attributeList, ignoreReferenceEdits=bool, scriptType=int, executeBefore=bool, afterScript=\"string\", beforeScript=\"string\", sourceType=\"string\", executeAfter=bool, name=\"string\"):\n pass", "def get_scripts(self):\n return []", "def clean_script_files(self, remove_template=True):\n # Remove last script file\n self.Script.clean_script_file()\n # Remove template file\n if remove_template and self.inp_script.exists():\n print('Removing {} ...'.format(str(self.inp_script)))\n os.remove(self.inp_script)\n return", "def remove_JS(string):\n return re.sub('<script.*?</script>', '', string, re.S)", "def removeScript(self, state: 'ScXMLScriptElt') -> \"void\":\n return _coin.ScXMLScxmlElt_removeScript(self, state)", "def test_acceptance_stripe_script_has_been_inserted(self):\n pattern = re.compile(r'<script src=\"https://js.stripe.com/v3\"></script>', re.I | re.M)\n res = re.search(pattern, self.dom_str)\n self.assertTrue(res.group())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When we query this data, typically we have only language and script. Some of the bcp codes have variants or regions as well. Select one of these to be the default when we have only language and script.
def add_default_lang_script(bcp_to_code):
  options = collections.defaultdict(set)
  long_keys = {}
  for key in bcp_to_code:
    tags = key.split('-')
    if len(tags) > 2:
      long_keys[key] = tags
  for key in sorted(long_keys):
    tags = long_keys[key]
    lang_scr = tags[0] + '-' + tags[1]
    if lang_scr in bcp_to_code:
      print 'have default for long tag %s: %s' % (key, bcp_to_code[lang_scr])
    else:
      options[lang_scr].add(key)
  for lang_scr in sorted(options):
    print '%s options: %s' % (lang_scr, options[lang_scr])
    if not lang_scr in OPTION_MAP:
      print 'missing from option map: %s' % lang_scr
    elif not OPTION_MAP[lang_scr] in options[lang_scr]:
      print 'selected option for %s (%s) not available' % (
          lang_scr, OPTION_MAP[lang_scr])
    else:
      value = bcp_to_code[OPTION_MAP[lang_scr]]
      print 'adding %s for %s' % (value, lang_scr)
      bcp_to_code[lang_scr] = value
  return bcp_to_code
[ "def get_default_value_in_Libgen_translate_script_by_hw_type():\n file_root_and_name = '/opt/nokiasiemens/configure/py/Libgen_translate.py'\n command='echo $HW_PLATFORM'\n out = connections.execute_mml_without_check(command)\n hw_platform = out.strip()\n\n command1=\"\"\"sed -n \"/ATCA/,/^'''/p\" %s\"\"\"%(file_root_and_name)\n\n command2=\"\"\"sed -n \"/FTLB/,/^'''/p\" %s\"\"\"%(file_root_and_name)\n\n command3=\"\"\"sed -n \"/^else:/,/^'''/p\" %s\"\"\"%(file_root_and_name)\n\n if hw_platform == 'ATCA':\n out = connections.execute_mml_without_check(command1)\n elif hw_platform == 'FTLB':\n out = connections.execute_mml_without_check(command2)\n else:\n out = connections.execute_mml_without_check(command3)\n other_hw_platform = re.search(r'else:.*\\n.*header(.*)[default](.*)',out,re.I)\n if other_hw_platform is None:\n exceptions.raise_ILError(\"ILCommandExecuteError\", \"failed to get other hw_platform default values\")\n\n values_in_script = []\n line_list = out.split(\"[default]\")\n line_list = line_list[1].split(\"\\r\\n\")\n for line in line_list:\n if line != '':\n if line.count('=') == 1:\n values_in_script.append(line.strip())\n return values_in_script", "def preferred(self):\n if 'Preferred-Value' in self.data['record']:\n preferred = self.data['record']['Preferred-Value']\n type = self.data['type']\n if type == 'extlang':\n type = 'language'\n return Subtag(preferred, type)\n return None", "def _default_sepa_origid_issr(self):\n if self.partner_id.country_id.code == 'BE':\n return 'KBO-BCE'", "def by_code_or_alias_or_none(self, code):\r\n try:\r\n return self.by_code_or_alias(code)\r\n except Language.DoesNotExist:\r\n return None", "def getDefaultCode(self): #$NON-NLS-1$\r", "def specific_or_default(label):\n return sb_params.get(label, sb_defaults.get(label, \"\"))", "def get_default_query(): # pragma: no cover\r\n\r\n sqlfile = resource_filename('pybmpdb.data', 'default.sql')\r\n with open(sqlfile, 'r') as sql:\r\n sqlquery = sql.read()\r\n return sqlquery", "def _get_default_treebank(self) -> str:\n stanza_default_treebanks = default_treebanks # type: Dict[str, str]\n return stanza_default_treebanks[self.stanza_code]", "def default_flavor(self):\n return self.get('default_flavor')", "def default_cc( # pylint:disable=unused-argument\n arch: str, platform: Optional[str] = None, language: Optional[str] = None\n) -> Optional[Type[SimCC]]:\n\n if arch in DEFAULT_CC:\n return DEFAULT_CC[arch]\n\n alias = unify_arch_name(arch)\n return DEFAULT_CC.get(alias)", "def get_default_languagecode():\n default_languagecode = getattr(_active, 'default_languagecode', None)\n return default_languagecode or settings.LANGUAGE_CODE", "def categorize_source(ds_md: dict) -> str:\n igo = [\"FAO\", \"World Bank\", \"ILO\", \"United Nations\", \"International Monetary Fund\"]\n go = [\"U.S. 
Energy Information Administration\"]\n ngo = [\"UNICEF\", \"IFPRI\"]\n company = [\"CBOT\"]\n\n quelle = ds_md[\"Quelle\"]\n\n try:\n if any(x in quelle for x in igo):\n gw_quelle = \"Zwischenstaatliche Organisation\"\n elif any(x in quelle for x in ngo):\n gw_quelle = \"Nichtregierungsorganisation\"\n elif any(x in quelle for x in go):\n gw_quelle = \"Staatliche Organisation\"\n elif any(x in quelle for x in company):\n gw_quelle = \"Unternehmen\"\n else:\n gw_quelle = \"N/A\"\n except TypeError:\n gw_quelle = \"N/A\"\n return gw_quelle", "def test_portals_id_builder_config_default_get(self):\n pass", "def get_default_tz_from_db(engine):\n sql = (\"SELECT option_name, option_value FROM vdc_options WHERE \"\n \"option_name like '%s';\")\n pattern = 'Default%TimeZone'\n try:\n res = engine.db.psql(sql, pattern)\n return {r[0]: r[1] for r in res}\n except errors.ExecuteDBQueryError:\n return dict()", "def default_cloud(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_cloud\")", "def by_code_or_alias(self, code):\r\n if not code:\r\n raise Language.DoesNotExist(\"No language matched the query.\")\r\n lang = cache.get('languages:code_or_alias:%s' % code, None)\r\n if lang is None:\r\n lang = Language.objects.get(\r\n models.Q(code=code) |\r\n models.Q(code_aliases__contains=' %s ' % code)\r\n )\r\n cache.set('languages:code_or_alias:%s' % code, lang)\r\n return lang", "def _get_base0(self, base0: bool) -> bool:\n if base0 is None:\n base0 = self.base0\n self.base0 = get_option(\"index.base.0\", base0)\n return self.base0", "def _select_language(self, lang):\n log.info('Filtering kernels to {}'.format(lang))\n assert self._select_drop_down('language', lang)", "def bq_default_prod_project():\n return os.environ.get('BIGQUERY_PROD_PROJECT', 'copper-actor-127213')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create samples in sample_dir from the sources in udhr_dir, based on the bcp_to_code mapping. Stage if sample_dir is in the repo. If sample_dir is in the repo, don't overwrite samples whose most recent log entry does not start with 'Updated by tool'
def update_samples(sample_dir, udhr_dir, bcp_to_code_attrib, in_repo):
  tool_utils.check_dir_exists(udhr_dir)

  if in_repo and os.path.isdir(sample_dir) and not tool_utils.git_is_clean(sample_dir):
    raise ValueError('Please clean %s.' % sample_dir)

  if in_repo:
    repo, subdir = os.path.split(sample_dir)
    tool_samples = frozenset(tool_utils.get_tool_generated(repo, subdir))
    print 'only allowing overwrite of:\n %s' % '\n '.join(sorted(tool_samples))

  comments = [
      '# Attributions for sample excerpts:',
      '# original - in the public domain, no attribution',
      '# UN - UN, OHCHR, or affiliate, attribute to UN',
      '# other - not a UN translation',
      '# none - not on ohchr, not a UN translation'
  ]
  sample_attrib_list = []
  sample_dir = tool_utils.ensure_dir_exists(sample_dir)
  count = 0
  for bcp, (code, attrib) in bcp_to_code_attrib.iteritems():
    text = None
    src_file = 'udhr_%s.xml' % code
    dst_file = '%s_udhr.txt' % bcp
    src_path = os.path.join(udhr_dir, src_file)
    dst_path = os.path.join(sample_dir, dst_file)
    sample = extract_para(src_path)
    if not sample:
      print 'unable to get sample from %s' % src_file
      return
    if in_repo and os.path.isfile(dst_path) and dst_file not in tool_samples:
      print 'Not overwriting modified file %s' % dst_file
    else:
      sample = fix_sample(sample, bcp)
      with codecs.open(dst_path, 'w', 'utf8') as f:
        f.write(sample)
      print 'created sample %s from %s' % (dst_file, src_file)
      count += 1
    sample_attrib_list.append('%s: %s' % (dst_file, attrib))
  print 'Created %d samples' % count

  # Some existing samples that we don't overwrite are not in bcp_to_code_attrib,
  # so they're not listed. Readers of the attributions.txt file will need to
  # default these to 'none'.
  attrib_data = '\n'.join(comments + sorted(sample_attrib_list)) + '\n'
  with open(os.path.join(sample_dir, 'attributions.txt'), 'w') as f:
    f.write(attrib_data)

  if in_repo:
    tool_utils.git_add_all(sample_dir)

  date = datetime.datetime.now().strftime('%Y-%m-%d')
  dst = 'in %s ' % sample_dir if not in_repo else ''
  noto_ix = udhr_dir.find('nototools')
  src = udhr_dir if noto_ix == -1 else udhr_dir[noto_ix:]
  # prefix of this sample commit message indicates that these were tool-generated
  print 'Updated by tool - sample files %sfrom %s as of %s.' % (dst, src, date)
[ "def test_setup_merged_samples(self):\n flist = find_samples(j_doe_00_05)\n setup_merged_samples(flist, **{'dry_run':False})\n with open(os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3-bcbb-config.yaml\")) as fh:\n conf = yaml.load(fh)\n self.assertEqual(conf[\"details\"][0][\"files\"][0], os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz\"))", "def prep_source_directory(options):\n if not options.update:\n wipe_directory(options.source, options.dry_run)\n create_directory(options.source, options.dry_run)", "def read_sample_dir(db_dir, sample_dir):\n # save startdir\n startdir = os.getcwd()\n\n # specify set of samples using dir name\n os.chdir(os.path.join(db_dir, sample_dir))\n cwd = os.getcwd()\n\n # read files in cwd\n files = os.listdir('.')\n\n # make sure all files are sample files and sort before return\n def check_sample_file(filename):\n if filename.startswith('sample') and filename.endswith('pickle'):\n return True\n else:\n return Fale\n\n sample_files = [f for f in sorted(files) if check_sample_file(f)]\n\n # change directory back to start dir\n os.chdir(startdir)\n\n return sample_files", "def update_source(self, source, skll=False, file_type=\"output\", input_source=None):\n # locate the updated outputs for the experiment under the given\n # outputs directory, locate the existing experiment outputs\n # and define how we will refer to the test\n if file_type == \"output\":\n updated_output_path = self.updated_outputs_directory / source / \"output\"\n existing_output_path = self.tests_directory / \"data\" / \"experiments\" / source / \"output\"\n test_name = source\n else:\n updated_output_path = self.updated_outputs_directory / input_source / \"output\"\n existing_output_path = (\n self.tests_directory / \"data\" / \"experiments\" / source / input_source / \"output\"\n )\n test_name = f\"{source}/{input_source}\"\n\n # if the directory for this source does not exist on the updated output\n # side, then that's a problem and something we should report on later\n try:\n assert updated_output_path.exists()\n except AssertionError:\n self.missing_or_empty_sources.append(test_name)\n return\n\n # if the existing output path does not exist, then create it\n try:\n assert existing_output_path.exists()\n except AssertionError:\n sys.stderr.write(f'\\nNo existing output for \"{test_name}\". Creating directory ...\\n')\n existing_output_path.mkdir(parents=True)\n\n # get a comparison betwen the two directories\n dir_comparison = dircmp(updated_output_path, existing_output_path)\n\n # if no output was found in the updated outputs directory, that's\n # likely to be a problem so save that source\n if not dir_comparison.left_list:\n self.missing_or_empty_sources.append(test_name)\n return\n\n # first delete the files that only exist in the existing output directory\n # since those are likely old files from old versions that we do not need\n existing_output_only_files = dir_comparison.right_only\n for file in existing_output_only_files:\n remove(existing_output_path / file)\n\n # Next find all the NEW files in the updated outputs.\n new_files = dir_comparison.left_only\n\n # We also define several types of files we exclude.\n # 1. we exclude OLS summary files\n excluded_suffixes = [\"_ols_summary.txt\", \".ols\", \".model\", \".npy\"]\n\n # 2. 
for output files we exclude all json files.\n # We keep these files if we are dealing with input files.\n if file_type == \"output\":\n excluded_suffixes.extend(\n [\n \"_rsmtool.json\",\n \"_rsmeval.json\",\n \"_rsmsummarize.json\",\n \"_rsmcompare.json\",\n \"_rsmxval.json\",\n ]\n )\n\n new_files = [\n f for f in new_files if not any(f.endswith(suffix) for suffix in excluded_suffixes)\n ]\n\n # 3. We also exclude files related to model evaluations for SKLL models.\n if skll:\n new_files = [f for f in new_files if not self.is_skll_excluded_file(f)]\n\n # next we get the files that have changed and try to figure out if they\n # have actually changed beyond a tolerance level that we care about for\n # tests. To do this, we run the same function that we use when comparing\n # the files in the actual test. However, for non-tabular files, we just\n # assume that they have really changed since we have no easy way to compare.\n changed_files = dir_comparison.diff_files\n really_changed_files = []\n for changed_file in changed_files:\n include_file = True\n updated_output_filepath = updated_output_path / changed_file\n existing_output_filepath = existing_output_path / changed_file\n file_format = updated_output_filepath.suffix.lstrip(\".\")\n if file_format in [\"csv\", \"tsv\", \"xlsx\"]:\n try:\n check_file_output(\n str(updated_output_filepath),\n str(existing_output_filepath),\n file_format=file_format,\n )\n except AssertionError:\n pass\n else:\n include_file = False\n\n if include_file:\n really_changed_files.append(changed_file)\n\n # Copy over the new files as well as the really changed files\n new_or_changed_files = new_files + really_changed_files\n for file in new_or_changed_files:\n copyfile(updated_output_path / file, existing_output_path / file)\n\n # Update the lists with files that were changed for this source\n self.deleted_files.extend([(test_name, file) for file in existing_output_only_files])\n self.updated_files.extend([(test_name, file) for file in new_or_changed_files])", "def write_data_source_files(self) -> None:\n data_sources_dir = self.ids.additional_output_dir / f\"{self.ids.short_polarity}_data_sources\"\n if len(list(data_sources_dir.glob(\"*\"))) >= 4:\n logger.warning(\n (\n \"Data sources directory already populated from previous work on this analysis. 
\"\n \"Not overwriting.\"\n )\n )\n else:\n shutil.rmtree(data_sources_dir, ignore_errors=True)\n logger.info(\"Writing data source files to %s.\", data_sources_dir)\n ma_data.make_data_sources_tables(\n self.ids.groups, self.atlas, self.ids.additional_output_dir, self.ids.short_polarity\n )", "def add_samples(self, samples):\n new_samples = load_samples(samples)\n for s in new_samples:\n s.subsample_events(self.subsample_count)\n if s.original_filename in self.sample_lut:\n # sample ID may have been added via a FlowJo workspace,\n # check if Sample value is None\n if self.sample_lut[s.original_filename] is not None:\n warnings.warn(\"A sample with this ID already exists...skipping\")\n continue\n self.sample_lut[s.original_filename] = s\n\n # all samples get added to the 'default' group\n self.assign_sample(s.original_filename, 'default')", "def setup_samples(parent_dir):\n print \"Reading directory %s ...\" % parent_dir\n\n # Container to keep sample objects\n samples = []\n\n # Get subdirectories in parent dir\n subdirs = [os.path.join(parent_dir, s) for s in os.listdir(parent_dir) if os.path.isdir(os.path.join(parent_dir, s))]\n for sd in subdirs:\n # Loop files in sample directory\n abs_sample_path = os.path.abspath(os.path.join(parent_dir, sd))\n\n # Create sample object\n sample = Sample(abs_sample_path, os.path.abspath(parent_dir))\n\n # Add to samples collection\n samples.append(sample)\n\n # Return all samples\n return samples", "def _build_sample_directory(context, components_dict):\n config = context.template.template_config\n client_section = config.client_section\n root = components_dict[\"root\"]\n\n sample_dir = DirTemplateComponent(\"sample\")\n root.add_child(sample_dir)\n\n ClientTemplate._copy_sample_files(context, components_dict, sample_dir)\n\n file_comp = FileTemplateComponent(\"common.py\", \"../../app/static/sample/common.py.tmpl\")\n sample_dir.add_child(file_comp)\n\n sample_basic_dir = DirTemplateComponent(\"basic\")\n sample_dir.add_child(sample_basic_dir)\n\n include_example = client_section.include_example_method\n basic_sample_comp = FileTemplateComponent(\"basic_sample.py\", \"sample/basic/basic_sample.py.tmpl\",\n {\"clientClassName\": client_section.client_class_name,\n \"name\": client_section.name,\n \"additionalImports\": (\n \"from dxlbootstrap.util import MessageUtils\\n\"\n if include_example else \"\")})\n if include_example:\n comp = CodeTemplateComponent(\"sample/basic/code/invoke_example_method.code.tmpl\")\n comp.indent_level = 1\n basic_sample_comp.add_child(comp)\n\n sample_basic_dir.add_child(basic_sample_comp)", "def prepare_snippets(root_dir: str):\n\n # load mapping of extensions to language names\n extensions = load_extensions(root_dir)\n\n # load list of dirs in root_folder, filter out auto-generated folders\n dirs = [x for x in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, x))]\n dirs = [x for x in dirs if x[0] not in [\".\", \"_\"]]\n\n # update data (JSON) files in each code dir\n for d in dirs:\n code_dir = os.path.join(root_dir, d)\n _process_code_files(code_dir, extensions)", "def _copy_sample_files(context, components_dict, dir_comp):\n del context\n del components_dict\n\n file_comp = FileTemplateComponent(\"dxlclient.config\", \"../../app/static/config/dxlclient.config.tmpl\")\n dir_comp.add_child(file_comp)", "def copy_samples():\n lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'front_view_lane_line_for_training')\n lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 
'top_view_lane_line_for_training')\n non_lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'tmp')\n non_lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'top_view_non_lane_line_for_training')\n\n lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/front_view')\n lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/top_view')\n non_lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/front_view')\n non_lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/top_view')\n\n for parents, _, filenames in os.walk(lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(lane_line_top_src_dir, filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying lane line samples {:d}/{:d} {:s}'.format(index+1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n for parents, _, filenames in os.walk(non_lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(non_lane_line_top_src_dir, filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(non_lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(non_lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying non lane line samples {:d}/{:d} {:s}'.format(index + 1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n return", "def test_merge_sample_config(self):\n flist = find_samples(j_doe_00_05)\n fdict = _group_samples(flist)\n out_d = os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\")\n if not os.path.exists(out_d):\n os.makedirs(out_d)\n newconf = merge_sample_config(fdict[\"P001_101_index3\"].values(), \"P001_101_index3\", out_d=out_d, dry_run=False)\n self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz\" )))\n self.assertTrue(os.path.exists(os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3_C003CCCXX_TGACCA_L001_R1_001.fastq.gz\" )))", "def build_from_dir(cls, d_path, sample=None, clean=False, build='grch37'):\n pattern = \"*.cov\"\n if sample:\n pattern = sample + pattern\n\n p = pathlib.Path(d_path)\n file_list = sorted(p.glob(pattern))\n name = cls.name_from_file(file_list[0])\n\n df = cls._build_dataframe(file_list, build)\n\n if clean:\n [os.remove(str(f)) for f in file_list]\n\n # chrom_list = cls.chroms_from_files(file_list, build)\n\n return cls(df, name)", "def _create_samples(session, match):\n all_samples = defaultdict(data_models.Sample)\n project_id = match.get('project_id')\n project_status = match.get('project_status', 'open')\n sample_id = match.get('sample_id')\n sample_time_since = match.get('createddate')\n process_limit_date = match.get('process_limit_date')\n detailed = request.args.get('detailed') in ['true', 'True', True]\n if detailed:\n 
list_process_complete = None\n list_process_queued = None\n else:\n list_process_complete = list(status_cfg.step_completed_to_status) \\\n + list(status_cfg.additional_step_completed) \\\n + list(status_cfg.library_type_step_completed) \\\n + status_cfg.started_steps\n list_process_queued = status_cfg.step_queued_to_status\n udfs_to_fields = {\n 'Prep Workflow': 'planned_library',\n 'Species': 'species',\n 'Required Yield (Gb)': 'required_yield',\n 'Coverage (X)': 'coverage'\n }\n for result in queries.get_sample_info(session, project_id, sample_id, project_status=project_status,\n time_since=sample_time_since, udfs=list(udfs_to_fields)):\n (pjct_name, sample_name, container, wellx, welly, udf_name, udf_value) = result\n s = all_samples[sanitize_user_id(sample_name)]\n s.sample_name = sanitize_user_id(sample_name)\n s.project_name = pjct_name\n s.plate_name = container\n s.original_name = sample_name\n if udf_name in udfs_to_fields:\n setattr(all_samples[sanitize_user_id(sample_name)], udfs_to_fields[udf_name], udf_value)\n\n for result in queries.get_samples_and_processes(session, project_id, sample_id, project_status=project_status,\n workstatus='COMPLETE', list_process=list_process_complete,\n time_since=sample_time_since, process_limit_date=process_limit_date):\n (pjct_name, sample_name, process_name, process_status, date_run, process_id) = result\n all_samples[sanitize_user_id(sample_name)].add_completed_process(process_name, date_run, process_id)\n\n for result in queries.get_sample_in_queues_or_progress(\n session, project_id, sample_id, list_process=list_process_queued,\n time_since=sample_time_since, project_status=project_status, process_limit_date=process_limit_date):\n pjct_name, sample_name, process_name, queued_date, queue_id, process_id, process_date = result\n if not process_id:\n all_samples[sanitize_user_id(sample_name)].add_queue_location(process_name, queued_date, queue_id)\n else:\n all_samples[sanitize_user_id(sample_name)].add_inprogress(process_name, process_date, process_id)\n\n return all_samples.values()", "def setup():\n for file in glob.glob('{}*'.format(TEST_FILE_DIR)):\n new_dest = file.replace(TEST_FILE_DIR, '')\n shutil.copy(file, new_dest)", "def test_setup_samples(self):\n flist = find_samples(j_doe_00_05)\n for f in flist:\n setup_sample(f, **{'analysis':'Align_standard_seqcap', 'genome_build':'rn4', 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'num_cores':8, 'distributed':False})\n for f in flist:\n with open(f, \"r\") as fh:\n config = yaml.load(fh)\n if config[\"details\"][0].get(\"multiplex\", None):\n self.assertEqual(config[\"details\"][0][\"multiplex\"][0][\"genome_build\"], \"rn4\")\n else:\n self.assertEqual(config[\"details\"][0][\"genome_build\"], \"rn4\")\n\n with open(f.replace(\"-bcbb-config.yaml\", \"-post_process.yaml\")) as fh:\n config = yaml.load(fh)\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"hybrid_bait\"], 'rat_baits.interval_list')\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"hybrid_target\"], 'rat_targets.interval_list')\n self.assertEqual(config[\"algorithm\"][\"num_cores\"], 8)\n \n for f in flist:\n setup_sample(f, **{'analysis':ANALYSIS_TYPE, 'genome_build':'rn4', 'dry_run':False,\n 'no_only_run':True, 'google_report':True,\n 'dry_run':False, 'baits':'rat_baits.interval_list', 'targets':'rat_targets.interval_list', 'amplicon':True, 'num_cores':8, 'distributed':False})\n with open(f, \"r\") as fh:\n config = yaml.load(fh)\n if 
config[\"details\"][0].get(\"multiplex\", None):\n self.assertEqual(config[\"details\"][0][\"multiplex\"][0][\"genome_build\"], \"rn4\")\n else:\n self.assertEqual(config[\"details\"][0][\"genome_build\"], \"rn4\")\n with open(f.replace(\"-bcbb-config.yaml\", \"-post_process.yaml\")) as fh:\n config = yaml.load(fh)\n self.assertEqual(config[\"algorithm\"][\"mark_duplicates\"], False)\n self.assertEqual(config[\"custom_algorithms\"][ANALYSIS_TYPE][\"mark_duplicates\"], False)", "def format_and_include_new_output():\n update_output_in_java_files()\n for new_version in config.example_dir.rglob(\"*.java\"):\n insert_example_in_book(new_version)", "def _StageSourceAndConfigFiles(self, args, messages, build):\n\n project = properties.VALUES.core.project.Get(required=True)\n safe_project = project.replace(':', '_')\n safe_project = safe_project.replace('.', '_')\n # The string 'google' is not allowed in bucket names.\n safe_project = safe_project.replace('google', 'elgoog')\n\n gcs_client = storage_api.StorageClient()\n\n default_bucket_name = '{}_cloudbuild'.format(safe_project)\n\n gcs_staging_dir_name = (\n args.gcs_staging_dir if args.gcs_staging_dir else\n 'gs://{}/deploy'.format(default_bucket_name))\n\n try:\n gcs_staging_dir = resources.REGISTRY.Parse(\n gcs_staging_dir_name, collection='storage.objects')\n gcs_staging_dir_obj = gcs_staging_dir.object\n except resources.WrongResourceCollectionException:\n gcs_staging_dir = resources.REGISTRY.Parse(\n gcs_staging_dir_name, collection='storage.buckets')\n gcs_staging_dir_obj = None\n\n gcs_client.CreateBucketIfNotExists(gcs_staging_dir.bucket)\n\n if args.gcs_staging_dir is None:\n # Check that the default bucket is also owned by the project (b/33046325)\n bucket_list_req = gcs_client.messages.StorageBucketsListRequest(\n project=project, prefix=default_bucket_name)\n bucket_list = gcs_client.client.buckets.List(bucket_list_req)\n\n if not any(\n bucket.id == default_bucket_name for bucket in bucket_list.items):\n raise c_exceptions.RequiredArgumentException(\n '--gcs-staging-dir',\n 'A bucket with name {} already exists and is owned by '\n 'another project. 
Specify a bucket using '\n '--gcs-staging-dir.'.format(default_bucket_name))\n\n if args.source:\n suffix = '.tgz'\n if args.source.startswith('gs://') or os.path.isfile(args.source):\n _, suffix = os.path.splitext(args.source)\n\n staged_source = 'source/{stamp}-{uuid}{suffix}'.format(\n stamp=times.GetTimeStampFromDateTime(times.Now()),\n uuid=uuid.uuid4().hex,\n suffix=suffix,\n )\n\n if gcs_staging_dir_obj:\n staged_source = gcs_staging_dir_obj + '/' + staged_source\n gcs_source_staging = resources.REGISTRY.Create(\n collection='storage.objects',\n bucket=gcs_staging_dir.bucket,\n object=staged_source)\n\n staged_source_obj = None\n\n if args.source.startswith('gs://'):\n gcs_source = resources.REGISTRY.Parse(\n args.source, collection='storage.objects')\n staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)\n else:\n if not os.path.exists(args.source):\n raise c_exceptions.BadFileException(\n 'could not find source [{src}]'.format(src=args.source))\n elif os.path.isdir(args.source):\n source_snapshot = snapshot.Snapshot(args.source)\n size_str = resource_transform.TransformSize(\n source_snapshot.uncompressed_size)\n log.status.Print(\n 'Creating temporary tarball archive of {num_files} file(s)'\n ' totalling {size} before compression.'.format(\n num_files=len(source_snapshot.files), size=size_str))\n staged_source_obj = source_snapshot.CopyTarballToGCS(\n gcs_client, gcs_source_staging)\n elif os.path.isfile(args.source):\n unused_root, ext = os.path.splitext(args.source)\n if ext not in _ALLOWED_SOURCE_EXT:\n raise c_exceptions.BadFileException(\n 'Local file [{src}] is none of '.format(src=args.source) +\n ', '.join(_ALLOWED_SOURCE_EXT))\n log.status.Print('Uploading local file [{src}] to '\n '[gs://{bucket}/{object}].'.format(\n src=args.source,\n bucket=gcs_source_staging.bucket,\n object=gcs_source_staging.object,\n ))\n staged_source_obj = gcs_client.CopyFileToGCS(args.source,\n gcs_source_staging)\n\n build.source = messages.Source(\n storageSource=messages.StorageSource(\n bucket=staged_source_obj.bucket,\n object=staged_source_obj.name,\n generation=staged_source_obj.generation,\n ))\n if gcs_staging_dir_obj:\n config_path = gcs_staging_dir.bucket + '/' + gcs_staging_dir_obj\n else:\n config_path = gcs_staging_dir.bucket\n\n build.artifacts = messages.Artifacts(\n objects=messages.ArtifactObjects(\n location='gs://{}/config/$BUILD_ID/expanded'.format(config_path),\n paths=['output/expanded/*'],\n ))\n\n build.steps.append(\n messages.BuildStep(\n name='gcr.io/cloud-builders/gsutil',\n args=[\n 'cp', '-r', 'output/suggested',\n 'gs://{}/config/$BUILD_ID/suggested'.format(config_path)\n ],\n ))\n return", "def read_samples(args, db):\n db[\"samples\"] = []\n for sample_file in args.samples:\n with open(sample_file, \"r\") as fin:\n for line in fin:\n if line.startswith(\"#\"):\n continue\n newsample = line.strip()\n if len(newsample) == 0:\n continue\n db[\"samples\"].append(newsample)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the set of scripts in this text. Excludes some common chars.
def get_scripts(text):
  # ignore these chars, we assume they are ok in any script
  exclusions = {0x00, 0x0A, 0x0D, 0x20, 0xA0, 0xFEFF}
  zyyy_chars = set()
  scripts = set()
  ustr = unicode(text, 'utf8')
  for cp in ustr:
    if ord(cp) in exclusions:
      continue
    script = unicode_data.script(cp)
    if script == 'Zyyy':  # common/undetermined
      zyyy_chars.add(cp if cp < '\u00fe' else ord(cp))
    elif not script == 'Zinh':  # inherited
      scripts.add(script)
  return scripts, zyyy_chars
[ "def get_scripts(text):\n\tstart = text.find(\"OTHER SCRIPTS\")\n\tend = text.find(\"\\n\", start)\n\treturn text[start:end].strip()", "def get_scripts(self):\n return []", "def script(self):\n return [\n p.text.strip()\n for p in self.xml.findall('p')\n if p.text and p.text.strip() and not _is_technical_note(p)\n ]", "def get_scripts_list(self, state):\n return scripts_in_path(self.script_dir, state + \".d\")", "def _filter_script_tags(input_xml):\n output_lines = []\n in_script = False\n for line in input_xml.splitlines():\n if \"<script>\" in line:\n in_script = True\n if not in_script:\n output_lines.append(line)\n if \"</script>\" in line:\n in_script = False\n return '\\n'.join(output_lines)", "def _get_script_paths_from_scripts_node(self) -> list:\r\n paths: list = []\r\n\r\n script_nodes = ElementHelper.get(self.root_node, 'Scripts')\r\n if script_nodes is None:\r\n return []\r\n\r\n for script_node in script_nodes:\r\n if not script_node.tag.endswith('Script'):\r\n continue\r\n\r\n psc_path: str = self.parse(script_node.text)\r\n\r\n if ':' in psc_path:\r\n psc_path = psc_path.replace(':', os.sep)\r\n\r\n paths.append(os.path.normpath(psc_path))\r\n\r\n return PathHelper.uniqify(paths)", "def get_all_scripts(self, key=None):\r\n if key:\r\n script = []\r\n dbref = self.dbref(key)\r\n if dbref or dbref == 0:\r\n script = self.dbref_search(dbref)\r\n if not script:\r\n script = self.filter(db_key=key)\r\n return script\r\n return self.all()", "def removeIncludes(self, lines):\n\t\t\n\t\tfor index, line in enumerate(lines):\n\t\t\t\n\t\t\tif re.match('^\\+(STIL|TEMA|JAVASCRIPT)', line):\n\t\t\t\tlines[index] = ''\n\t\t\t\t\t\n\t\treturn lines", "def exclude(text):\n\n unwanted = [\"\"\"\"\"\"]", "def select_script(scripts):\n max_len = len(scripts[0].text)\n index = 0\n\n for i in range(1, len(scripts)):\n l = len(scripts[i].text)\n if l > max_len:\n max_len = l\n index = i\n\n return scripts[index].text", "def displayScripts(self): \n for iid in sorted(self.__scripts.keys()): \n char_re = re.compile(\"【(.+)】\")\n \n comment, orig, trans = self.__scripts[iid]\n \n char_match = char_re.search(comment)\n if char_match:\n char = char_match.group(1)\n else:\n char = \"\"\n \n state = \"translated\" if comment.endswith(\"*\") else \"untranslated\"\n \n self.__tree.insert(\"\", \"end\", iid=iid, values=(char, orig, trans),\n tags = state)", "def all(self, scriptid=None):\r\n return ScriptDB.objects.get_all_scripts_on_obj(self.obj, key=scriptid)", "def _get_implicit_script_imports(self) -> list:\r\n implicit_paths: list = []\r\n\r\n for psc_path in self.psc_paths:\r\n for import_path in self.import_paths:\r\n relpath = os.path.relpath(os.path.dirname(psc_path), import_path)\r\n test_path = os.path.normpath(os.path.join(import_path, relpath))\r\n if os.path.exists(test_path):\r\n implicit_paths.append(test_path)\r\n\r\n return PathHelper.uniqify(implicit_paths)", "def list_scripts() -> dict:\n endpoint_url = '/real-time-response/entities/scripts/v1'\n response = http_request('GET', endpoint_url)\n return response", "def list(self):\n return self.connection.get(self.service + \"/AllScripts\")", "def texts(self):\n texts_list = [self.window_text(), ]\n return texts_list", "def get_script_files():\n path = folder + \"all\\\\\"\n files = []\n for root, dirnames, filenames in os.walk(path):\n for filename in filenames:\n if filename.endswith(\".js\"):\n files.append(os.path.join(root, filename))\n\n return files", "def filter_input(self, forced=False):\n content = []\n for hunk in 
self.hunks(forced):\n # If a file ends with a function call, say, console.log()\n # but doesn't have a semicolon, and the next file starts with\n # a (, the individual files are ok, but when combined you get an\n # error like TypeError...\n # Forcing a semicolon in between fixes it.\n if settings.COMPRESS_ENABLED or forced:\n hunk += \";\"\n content.append(hunk)\n return content", "def read_script(fname):\n from corpkit.constants import OPENER\n with OPENER(fname, 'r') as fo:\n data = fo.read()\n data = data.splitlines()\n data = [i for i in data if i.strip() and i.strip()[0] != '#']\n \n # turn off concordancing if it's never used in the script\n if 'concordance' not in ' '.join(data):\n objs._do_conc = False\n\n return list(reversed(data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report on differences between samples in source and target directories. The trg_to_src_name fn takes a target file name and returns the source file name to use in the comparisons.
def compare_samples(src_dir, trg_dir, trg_to_src_name=lambda x: x, opts=None):
    if not os.path.isdir(src_dir):
        print 'Original sample dir \'%s\' does not exist' % src_dir
        return
    if not os.path.isdir(trg_dir):
        print 'New sample dir \'%s\' does not exist' % trg_dir
        return

    print 'Base dir: %s' % src_dir
    print 'Target dir: %s' % trg_dir

    show_missing = opts and 'missing' in opts
    show_diffs = opts and 'diffs' in opts

    for trg_name in os.listdir(trg_dir):
        if trg_name == 'attributions.txt':
            continue
        trg_path = os.path.join(trg_dir, trg_name)
        if not (os.path.isfile(trg_path) and trg_name.endswith('.txt')):
            continue
        src_name = trg_to_src_name(trg_name)
        src_path = os.path.join(src_dir, src_name)
        if not os.path.exists(src_path):
            if show_missing:
                print 'source does not exist: %s' % src_name
            continue

        src_text = None
        trg_text = None
        with codecs.open(src_path, 'r', 'utf8') as f:
            src_text = f.read()
        with codecs.open(trg_path, 'r', 'utf8') as f:
            trg_text = f.read()
        if not src_text:
            print 'source text (%s) is empty' % src_name
            continue
        if not trg_text:
            print 'target text is empty: %s' % trg_path
            continue

        if src_text.find(trg_text) == -1:
            print 'target (%s) text not in source (%s)' % (src_name, trg_name)
            if show_diffs:
                # In scripts that use space for word break it might be better to compare
                # word by word, but this suffices.
                sm = difflib.SequenceMatcher(None, src_text, trg_text, autojunk=False)
                lines = []
                for tag, i1, i2, j1, j2 in sm.get_opcodes():
                    if tag == 'delete':
                        lines.append('[%s/]' % src_text[i1:i2])
                    elif tag == 'equal':
                        lines.append(src_text[i1:i2])
                    elif tag == 'insert':
                        lines.append('[/%s]' % trg_text[j1:j2])
                    else:
                        lines.append('[%s/%s]' % (src_text[i1:i2], trg_text[j1:j2]))
                print ''.join(lines)
[ "def _make_source_file_name(self):\n source = self.target\n if isinstance(self.target_suffix, tuple):\n if self.target_suffix[0] and self.source_suffix:\n source = rreplace(source, self.target_suffix[0], self.source_suffix, 1)\n else:\n if self.target_suffix and self.source_suffix:\n source = rreplace(source, self.target_suffix, self.source_suffix, 1)\n if not self.label:\n return source\n if source.count(self.label) > 1:\n logger.warn(\"label '{}' found multiple times in target '{}'; this could be intentional\".format(self.label, source))\n elif source.count(self.label) == 0:\n logger.warn(\"label '{}' not found in target '{}'; are you sure your target is correctly formatted?\".format(self.label, source))\n return rreplace(source, self.label, \"\", 1)", "def GetVtsTargetTestSourceFileName(self):\n test_binary_name = self._test_module_name + 'Test.cpp'\n return os.path.join(self.GetHalInterfacePath(), 'vts/functional',\n test_binary_name)", "def extractDifferenceFromTraces(sourceDir, datatype):\n try:\n # Retrieve original programs\n prettyPrint(\"Loading the list of original programs\")\n originalFiles = list(set(glob.glob(\"%s/*.%s\" % (sourceDir, datatype))) - set(glob.glob(\"%s/*_*.%s\" % (sourceDir, datatype))))\n prettyPrint(\"Successfully retrieved %s original programs\" % len(originalFiles))\n counter = 0\n allTraces = [] # To hold the difference sequences for TF-IDF extraction\n for originalFile in originalFiles:\n # Retrieve obfuscated versions of each original file\n obfuscatedVersions = glob.glob(\"%s_*.%s\" % (originalFile.replace(\".%s\" % datatype, \"\"), datatype))\n prettyPrint(\"Successfully retrieved %s obfuscated versions for \\\"%s\\\"\" % (len(obfuscatedVersions), originalFile), \"debug\")\n originalTrace = list(open(originalFile).read())\n for obfuscated in obfuscatedVersions:\n obfuscatedTrace = list(open(obfuscated).read()) \n # Calculate the difference between two sequences\n indexMax = min(len(originalTrace), len(obfuscatedTrace))\n diffTrace = [] + obfuscatedTrace\n for index in range(indexMax):\n if originalTrace[index] == diffTrace[index]:\n diffTrace[index] = \"_\"\n diffFile = open(obfuscated.replace(datatype, \"%sdiff\" % datatype), \"w\")\n for instruction in list(diffTrace):\n diffFile.write(\"%s\\n\" % instruction)\n diffFile.close()\n if os.path.exists(obfuscated.replace(datatype, \"%sdiff\" % datatype)) and os.path.getsize(obfuscated.replace(datatype, \"%sdiff\" % datatype)) > 0:\n # Make sure it exists and not empty\n counter += 1\n \n prettyPrint(\"Successfully generated %s difference files\" % counter)\n \n sourceFiles = glob.glob(\"%s/*_*.%sdiff\" % (sourceDir, datatype))\n for targetFile in sourceFiles:\n allTraces.append(open(targetFile).read())\n \n # Now perform TF-IDF on them\n vectorizer = TfidfVectorizer(max_df=1.0, min_df=1, max_features=1000, stop_words=[\",\",\"%\",\"(\",\")\",\",\",\":\",\"\\n\",\"$\"], norm='l2', smooth_idf=True, use_idf=True, sublinear_tf=False)\n X = vectorizer.fit_transform(allTraces)\n for targetFile in sourceFiles:\n # Get the feature vector\n featureVector = X.toarray()[ sourceFiles.index(targetFile),:].tolist()\n # Save it to file \n featureFile = open(targetFile.replace(\"%sdiff\" % datatype, \"%sdiffidf\" % datatype), \"w\")\n featureFile.write(str(featureVector))\n featureFile.close()\n \n except Exception as e:\n prettyPrint(\"Error encountered: %s\" % e, \"error\")\n return False\n\n return True", "def compare_traces(dir1: str, dir2: str):\n get_traces = lambda dir: [\n f for f in os.listdir(dir) if 
f.startswith(\"test_\") and f.endswith(\".trace\")\n ]\n\n traces1 = get_traces(dir1)\n traces2 = get_traces(dir2)\n\n traces1.sort()\n traces2.sort()\n\n print(\"### Comparing traces: \")\n print(f\"dir1 - {dir1} :\")\n print(\", \".join(traces1))\n print()\n print(f\"dir2 - {dir2} :\")\n print(\", \".join(traces2))\n\n for t1, t2 in zip(traces1, traces2):\n path1 = os.path.join(dir1, t1)\n path2 = os.path.join(dir2, t2)\n\n with open(path1) as fp1, open(path2) as fp2:\n if fp1.read() != fp2.read():\n print(f\"Files {t1} and {t2} differs.\")\n else:\n print(f\"Files {t1} and {t2} matches.\")", "def get_viable_similarities_file_name(gt_id, target_horizon, history, lag, metric, model):\n return os.path.join(\n get_cache_dir(model),\n '{}-viable_similarities-{}-{}-hist{}-lag{}.h5'.format(\n metric, gt_id,target_horizon,history,lag))", "def tax_maps_not_in_source_etl():\r\n start_time = datetime.datetime.now()\r\n LOG.info(\r\n \"Start: Compile table of tax maps not mirrored between the Lane County & RLID\"\r\n \" repositories.\\nAny tax maps in RLID not mirrored in the county repositoryare\"\r\n \" likely tax maps that no longer exist, and should be researched (and perhaps\"\r\n \" retired).\"\r\n )\r\n conn = credential.UNCPathCredential(\r\n path.RLID_DATA_SHARE, **credential.RLID_DATA_SHARE\r\n )\r\n with conn:\r\n check_time = start_time.strftime(\"%Y-%m-%d %H:%M\")\r\n file_names = {\r\n \"County\": {\r\n fixed_file_name(name)\r\n for _, _, filenames in os.walk(REPO_PATH[\"tax-map-staging\"])\r\n for name in filenames\r\n if name.lower().endswith(\".pdf\")\r\n },\r\n \"RLID\": {\r\n fixed_file_name(name)\r\n for name in os.listdir(REPO_PATH[\"tax-map\"])\r\n if name.lower().endswith(\".pdf\")\r\n },\r\n }\r\n for repo, other in permutations([\"County\", \"RLID\"]):\r\n LOG.info(\"Checking %s repository for tax maps not mirrored.\", repo)\r\n unmirrored_file_names = sorted(file_names[repo] - file_names[other])\r\n csv_path = os.path.join(\r\n REPO_PATH[\"tax-map\"], \"In_{}_Not_{}.csv\".format(repo, other)\r\n )\r\n csv_file = open(csv_path, \"wb\")\r\n with csv_file:\r\n csv_ = csv.writer(csv_file)\r\n csv_.writerow((\"file_name\", \"check_time\"))\r\n for file_name in unmirrored_file_names:\r\n csv_.writerow((file_name, check_time))\r\n LOG.info(\r\n \"Found %s tax maps in %s repository not mirrored in %s.\",\r\n len(unmirrored_file_names),\r\n repo,\r\n other,\r\n )\r\n LOG.info(\"End: Compile.\")\r\n elapsed(start_time, LOG)", "def link_files(tags, src_session, trg_session, src_data_dir, trg_data_dir):\n\n src_dir = os.path.join(src_data_dir,\n src_session.get_full_subjectid_with_timepoint())\n\n trg_dir = os.path.join(trg_data_dir,\n trg_session.get_full_subjectid_with_timepoint())\n\n logger.info(\"Making links in {} for tagged files in {}\".format(trg_dir,\n src_dir))\n\n for root, dirs, files in os.walk(src_dir):\n for filename in files:\n try:\n ident, file_tag, series, description = \\\n dm.scanid.parse_filename(filename)\n except dm.scanid.ParseException:\n continue\n if file_tag in tags:\n # need to create the link\n # first need to capture the file extension as it gets lost\n # when using dm.scanid to make the new name\n ext = dm.utils.get_extension(filename)\n\n trg_name = dm.scanid.make_filename(trg_session, file_tag,\n series, description)\n src_file = os.path.join(root, filename)\n trg_file = os.path.join(trg_dir, trg_name) + ext\n\n make_link(src_file, trg_file)\n add_link_to_dbase(src_file, trg_file)", "def neuro_transfer(self, src, trg):\n \n self.src = src\n self.trg 
= trg\n for roots, subFolders, files in os.walk(src):\n for file in files:\n subFolder = os.path.join(trg, file[:8])\n if not os.path.isdir(subFolder):\n os.makedirs(subFolder)\n print('making new dir...' + subFolder)\n shutil.copy(os.path.join(roots, file), subFolder)\n print ('copying ' + file + ' to '+ subFolder)\n return True", "def get_logical_test_file_paths(test_file, output_dir):\n #eg d:/dev/data-dev/tableau-tests/tdvt/logicaltests/setup/calcs\n expected_base_dir = os.path.split(test_file)[0]\n expected_base_dir, logical_subdir = os.path.split(expected_base_dir)\n #Split again to remove the 'setup' dir.\n expected_base_dir = os.path.split(expected_base_dir)[0]\n #eg d:/dev/data-dev/tableau-tests/tdvt/logicaltests/expected/calcs\n expected_base_dir = os.path.join(expected_base_dir, 'expected', logical_subdir)\n expected_output_dir = expected_base_dir\n\n #eg setup.bugs.b1713.dbo.xml\n expected_base_filename = os.path.split(test_file)[1]\n #Get the abstract test name without the datasource specific customization.\n #eg setup.bugs.b1713.xml\n new_base_filename = \".\".join(expected_base_filename.split(\".\")[:-2]) + \".xml\"\n #eg setup.bugs.b1713.dbo-combined.xml\n expected_output_filename = expected_base_filename.replace('.xml', '-combined.xml')\n\n temp_output_dir = output_dir if output_dir else expected_base_dir\n #eg full path to above file.\n existing_output_filepath = os.path.join(temp_output_dir, expected_output_filename)\n #if not os.path.isfile( existing_output_filepath ):\n #The filename and full path to the expected output from tabquery.\n new_output_filename = \"actual.\" + new_base_filename\n new_output_filepath = os.path.join(temp_output_dir, new_output_filename)\n #Full path the expected file.\n new_base_filepath = os.path.join(expected_base_dir, new_base_filename)\n\n return existing_output_filepath, new_output_filepath, new_base_filename, new_base_filepath, expected_output_dir", "def get_model_file_name(orig: str, dest: str):\n return f\"pta_{orig}{dest}.pickle\"", "def generate_src_files(numsim, source_file, srcdir,\n prefix, hypo_rand,\n variation, multiseg,\n segment, first_seg_dir):\n src_props = bband_utils.parse_properties(source_file)\n # Delete \"seed\" and \"common_seed\" from the property set\n if \"seed\" in src_props:\n src_props.pop(\"seed\")\n if \"common_seed\" in src_props:\n src_props.pop(\"common_seed\")\n # Get FAULT_LENGTH and FAULT_WIDTH from the SRC file\n try:\n flen = float(src_props[\"fault_length\"])\n fwid = float(src_props[\"fault_width\"])\n except KeyError:\n raise bband_utils.ParameterError(\"Cannot read fault_length/fault_width\"\n \" parameters from SRC file!\")\n if hypo_rand:\n # Delete HYPO_ALONG_STK and HYPO_DOWN_DIP\n if \"hypo_along_stk\" in src_props:\n src_props.pop(\"hypo_along_stk\")\n if \"hypo_down_dip\" in src_props:\n src_props.pop(\"hypo_down_dip\")\n # Create common list of keys for all files\n output = \"\"\n for key in src_props:\n output = output + \"%s = %s\\n\" % (key.upper(), src_props[key])\n common_seeds = []\n # Check if we are doing a multi-segment run\n if multiseg and first_seg_dir is not None:\n # Read common seeds from seed file\n seed_file = open(os.path.join(first_seg_dir, \"Src\", \"seeds.txt\"), 'r')\n first_seg_sims = int(seed_file.readline().strip())\n if first_seg_sims != numsim:\n print(\"ERROR: Number of simulations must match across segments!\")\n sys.exit(1)\n for line in seed_file:\n common_seeds.append(int(line.strip()))\n seed_file.close()\n # Generate the numsim SRC files\n all_seeds = 
[]\n for sim in range(0, numsim):\n random.seed((sim + 1) + (variation - 1) * 500)\n seed = int(math.exp(7 * math.log(10.0)) * random.random())\n all_seeds.append(seed)\n hypo_along_stk = flen * (0.2 + 0.6 * random.random() - 0.5)\n hypo_down_dip = fwid * (0.2 + 0.6 * random.random())\n if multiseg:\n srcfile = os.path.join(srcdir,\n \"%s-%04d_seg%02d.src\" %\n (prefix, sim, segment))\n else:\n srcfile = os.path.join(srcdir, \"%s-%04d.src\" % (prefix, sim))\n outfile = open(srcfile, 'w')\n outfile.write(output)\n if hypo_rand:\n outfile.write(\"HYPO_ALONG_STK = %.2f\\n\" % (hypo_along_stk))\n outfile.write(\"HYPO_DOWN_DIP = %.2f\\n\" % (hypo_down_dip))\n outfile.write(\"SEED = %d\\n\" % (seed))\n if multiseg and first_seg_dir is not None:\n outfile.write(\"COMMON_SEED = %d\\n\" % (common_seeds[sim]))\n outfile.close()\n # Check if we need to write file with all seeds\n if multiseg and first_seg_dir is None:\n # This is the first segment, write seeds file\n seed_file = open(os.path.join(srcdir, \"seeds.txt\"), 'w')\n seed_file.write(\"%d\\n\" % (numsim))\n for seed in all_seeds:\n seed_file.write(\"%d\\n\" % (seed))\n seed_file.close()", "def get_test_file_paths(root_directory, test_name, output_dir):\n\n #d:\\...\\tdvt\\exprtests\n test_path_base = os.path.join(root_directory, os.path.split(test_name)[0])\n test_name = os.path.split(test_name)[1]\n\n setupfile_path = os.path.join(test_path_base, test_name)\n actual_dir = output_dir if output_dir else test_path_base\n actualfile_path = os.path.join(actual_dir, test_name.replace('setup', 'actual.setup'))\n diff_file, diff_ext = os.path.splitext(actualfile_path)\n diff_file_path = diff_file + \"_diff\" + diff_ext\n\n expected_file_version = 0\n expected_filename = 'expected.' + test_name\n expected_file_path = test_path_base\n\n expected_file_path = os.path.join(expected_file_path, expected_filename)\n next_expected_file_path = ''\n expected_file_list = []\n while os.path.isfile(expected_file_path):\n expected_file_list.append(expected_file_path)\n\n expected_file_version += 1\n #Everything but the ending.\n expected_file_parts = expected_filename.split(\".\")[:-1]\n #Put the number in.\n expected_file_parts.append( str(expected_file_version) )\n #Add the ending again.\n expected_file_parts.append( expected_filename.split(\".\")[-1] )\n expected_file = \".\".join(expected_file_parts)\n\n expected_file_path = os.path.join(test_path_base, expected_file)\n next_expected_file_path = expected_file_path\n\n if not expected_file_list:\n #Always add the base expected file even if it doesn't exist. 
The callers will use this to copy the actual.\n expected_file_list.append(expected_file_path)\n\n for filepath in expected_file_list:\n logging.debug(\"Found expected filepath \" + filepath)\n return (actualfile_path, diff_file_path, setupfile_path, expected_file_list, next_expected_file_path)", "def get_source_names(df):\n df_keys = df.keys().str.strip().str.replace(' Corrected Flux Error', '').str.replace(' Image X', '').str.replace(' Image Y', '').str.replace(' Cutout X', '').str.replace(' Cutout Y', '').str.replace(' Centroid Warning', '').str.replace(' Corrected Flux', '').str.replace(\n ' ALC Weight', '').str.replace(' Background', '').str.replace(' Raw Flux', '').str.replace(' Bad Pixel Flag', '').str.replace(' Interpolation Flag', '').str.replace(' Flux', '').str.replace(' Flux Error', '').str.replace(' Error', '').str.replace(' Normalized', '')\n source_names = []\n for i in range(len(df_keys)):\n df_key = df_keys[i]\n if (df_key != 'Filename') and (df_key != 'Night Number') and (df_key != 'Block Number') and (df_key != 'Filter') and (df_key != 'Exptime') and (df_key != 'Seeing') and (df_key != 'Airmass') and (df_key != 'Time BJD TDB') and (df_key != 'Time (JD UTC)') and (df_key != 'Time UT') and (df_key != 'Time JD UTC') and (df_key != 'Sigma Clip Flag') and (df_key != 'Block Number') and (df_key != 'ALC') and (df_key not in source_names):\n source_names.append(df_key)\n return source_names", "def test_case_name(expected_output_path):\n basename = os.path.basename(expected_output_path)\n basename = basename.replace('-out-', '-')\n basename = basename.replace('-in-', '-')\n basename = basename.replace('.txt', '')\n return basename", "def suggest_name( src ):\n date = src.split(os.sep)[-2]\n basename = os.path.basename(src).split('.', 2)[0]\n if basename in hpcparse.FS_MAP_REV:\n return hpcparse.FS_MAP_REV[basename] + \"_\" + date + \".hdf5\"\n else:\n return basename + \"_\" + date + \".hdf5\"", "def test_origin_source_filename_is_correct(self):\n self.assertEqual(\n os.path.basename(self.job.origin.source.filename),\n \"test_testing_utils.py\")", "def get_output_names(self, inputFile):\n \n dir = os.path.dirname(__file__)\n newInputFile = os.path.join(dir, 'test_files', inputFile)\n outputFile = os.path.join(dir, 'test_files', inputFile[:-4] + '_result' + inputFile[-4:])\n \n #[:4] removes the .txt from the end of the filename below\n refFile = inputFile[:-4]\n ref = os.path.join(dir, 'reference_files', refFile + '_ref.txt')\n \n return (newInputFile, outputFile, ref)", "def tests_source_name(self, method):\n pass", "def copy_samples():\n lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'front_view_lane_line_for_training')\n lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'top_view_lane_line_for_training')\n non_lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'tmp')\n non_lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'top_view_non_lane_line_for_training')\n\n lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/front_view')\n lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/top_view')\n non_lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/front_view')\n non_lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/top_view')\n\n for parents, _, filenames in os.walk(lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(lane_line_top_src_dir, 
filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying lane line samples {:d}/{:d} {:s}'.format(index+1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n for parents, _, filenames in os.walk(non_lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(non_lane_line_top_src_dir, filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(non_lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(non_lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying non lane line samples {:d}/{:d} {:s}'.format(index + 1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over toplevel transients of this subgraph.
def top_level_transients(self):
    schildren = self.scope_children()
    sdfg = self.parent
    result = set()
    for node in schildren[self.entry]:
        if isinstance(node, nd.AccessNode) and node.desc(sdfg).transient:
            result.add(node.data)
    return result
[ "def all_toplevel_checkboxes(self):\n\t\tif not self.checkboxes:\n\t\t\traise StopIteration()\n\n\t\tc = self.first_checkbox\n\t\twhile c:\n\t\t\tyield c\n\t\t\tc = c.next_sibling\n\t\traise StopIteration()", "def walk(self, topdown=True):\n\n if topdown:\n yield (self, self.subcollections, self.data_objects)\n for subcollection in self.subcollections:\n new_root = subcollection\n for x in new_root.walk(topdown):\n yield x\n if not topdown:\n yield (self, self.subcollections, self.data_objects)", "def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets", "def iter_sub_categories(self):\n sub_categories = self.sub_categories\n if (sub_categories is not None):\n yield from sub_categories", "def __iter__(self):\n return iter(self._lineage_roots)", "def subobjs(self):\n if not hasattr(self, \"_subobjs\"):\n self._subobjs = []\n return self._subobjs", "def iterateDepthFirst(self):\r\n gen = self.__iterateDepthFirst(self.root)\r\n for n in gen:\r\n yield n", "def iter_sub_categories(self):\n command_categories = self._command_categories\n if (command_categories is not None):\n yield from command_categories.values()", "def iterSubTables(self) -> Iterator[SubTableEntry]:\n for conv in self.getConverters():\n name = conv.name\n value = getattr(self, name, None)\n if value is None:\n continue\n if isinstance(value, BaseTable):\n yield self.SubTableEntry(name, value)\n elif isinstance(value, list):\n yield from (\n self.SubTableEntry(name, v, index=i)\n for i, v in enumerate(value)\n if isinstance(v, BaseTable)\n )", "def children(self):\r\n c = self.child\r\n while c:\r\n yield c\r\n c = c.nxt", "def internal_nodes(self) -> NodeIterator:\n if not self.is_leaf:\n yield self\n for child in self.children:\n yield from child.internal_nodes()", "def children_iter(self):\n for child in self.children:\n if child:\n yield child", "def iter_parents(content: IResource) -> typing.Iterator[IResource]:\n content = getattr(content, '__parent__', None)\n while content is not None:\n yield content\n content = getattr(content, '__parent__', None)", "def iter_stations(self):\r\n for i in range(self.rowCount()):\r\n obstreeloop = self.child(i)\r\n for ii in range(obstreeloop.rowCount()):\r\n obstreestation = obstreeloop.child(ii)\r\n yield obstreestation", "def __iter__(self):\n\n v = self.root\n if v is None:\n return\n while True:\n while v.left is not None:\n v = v.left\n k = self.splay(v)\n if k.right is not None:\n v = k.right\n yield k\n else:\n yield k\n break", "def walk( self ):\n\t\tfor x in self.S.walk(): yield x", "def iter_siblings(self):\r\n if self._parent:\r\n for sibling in self._parent.iter_children():\r\n if sibling is not self:\r\n yield sibling\r\n else:\r\n raise StopIteration()", "def children(self) -> Iterator['Type']:\n raise NotImplementedError", "def _select_root_objects(state):\n if state.audioContent is not None:\n for audioObject in state.audioContent.audioObjects:\n yield audioObject\n else:\n for audioObject in _root_objects(state.adm):\n yield audioObject" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true iff scope of `node` contains the scope of `other_node`.
def scope_contains_scope(sdict: ScopeDictType, node: NodeType, other_node: NodeType) -> bool:
    curnode = other_node
    nodescope = sdict[node]
    while curnode is not None:
        curnode = sdict[curnode]
        if curnode == nodescope:
            return True
    return False
[ "def are_siblings(self, node, node_other):\n if node.parent is node_other.parent:\n return True\n else:\n return False", "def is_descendant_of(self, other, include_self=False):\n if other.pk == self.pk:\n return include_self\n\n return self._closure_model.objects.filter(\n parent=other,\n child=self\n ).exclude(pk=self.pk).exists()", "def _are_suitable_for_share(self, first_node: TreePlanNode, second_node: TreePlanNode):\n return first_node.is_equivalent(second_node)", "def nodes_equal(node1, node2):\r\n if type(node1) is not type(node2):\r\n return False\r\n if type(node1) == LocalNameTest:\r\n return node1.name == node2.name\r\n return True", "def is_connected(self, node1, node2):\n\n return node1 in self.__graph and node2 in self.__graph[node1]", "def isEqualToNode(self, other):\n is_lower = self.nodeName.lower() == other.nodeName.lower()\n same_name = self.namespace == other.namespace\n same_attrs = self.attributes == other.attributes\n is_equal = Node.isEqualToNode(self, other)\n return all([is_lower, same_name, same_attrs, is_equal])", "def containsNode(*args, **kwargs):\n \n pass", "def is_connected(self,node1,node2):\r\n return node1 in self.graph_dict and node2 in self.graph_dict[node1]", "def match_node_to_node(self, node, node_other):\n if isinstance(node.item, Group) and isinstance(node_other.item, Group):\n return (self.match_node_to_structure(node, node_other.item, atoms=node_other.item.get_all_labeled_atoms()) and\n self.match_node_to_structure(node_other, node.item, atoms=node.item.get_all_labeled_atoms()))\n elif isinstance(node.item, LogicOr) and isinstance(node_other.item, LogicOr):\n return node.item.match_logic_or(node_other.item)\n else:\n # Assume nonmatching\n return False", "def route_exists(self, node1, node2):\n stack = Stack()\n for node in self.get_nodes():\n node.visited = False\n stack.push(node1)\n while not stack.is_empty():\n node = stack.pop()\n if node:\n for child in node.get_children():\n if not child.visited:\n if child is node2:\n return True\n else:\n stack.push(child)\n node.visited = True\n return False", "def is_ancestor_of(self, other, include_self=False):\n return other.is_descendant_of(self, include_self=include_self)", "def containsNode(self, node: 'SoNode') -> \"SbBool\":\n return _coin.SoPath_containsNode(self, node)", "def __eq__(self, other):\n if self.nodes == other.nodes:\n return True\n else:\n return False", "def has_ancestor(self, node):\n if self is node:\n return True\n elif self._parent is None:\n return False\n else:\n return self._parent.has_ancestor(node)", "def HasNode(self, node):\n return self.connections.has_key(node)", "def containsNode(self, node: 'SoBaseKit') -> \"SbBool\":\n return _coin.SoNodeKitPath_containsNode(self, node)", "def variable_in_parent_scopes(self, variable_name):\n scope = self.parent\n\n while scope is not None:\n variables_set = set(use.name for use in scope.variable_uses\n if use.kind == VariableUsage.Kind.SET)\n if variable_name in variables_set:\n return True\n else:\n scope = scope.parent\n\n return False", "def isEqualToNode(self, other):\n if len(self.childNodes) != len(other.childNodes):\n return False\n\n for a, b in zip(self.childNodes, other.childNodes):\n if not a.isEqualToNode(b):\n return False\n\n return True", "def has_ancestor(self, other: 'Snapshot') -> bool:\n\t\treturn core.BNSnapshotHasAncestor(self.handle, other.handle)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a common parent scope for both input scopes, or None if the scopes are in different connected components.
def common_parent_scope(sdict: ScopeDictType, scope_a: NodeType, scope_b: NodeType) -> NodeType:
    if scope_a is scope_b:
        return scope_a

    # Scope B is in scope A
    if scope_contains_scope(sdict, scope_a, scope_b):
        return scope_a
    # Scope A is in scope B
    if scope_contains_scope(sdict, scope_b, scope_a):
        return scope_b

    # Disjoint scopes: prepare two paths and traverse in reversed fashion
    spath_a = _scope_path(sdict, scope_a)
    spath_b = _scope_path(sdict, scope_b)
    common = None
    for spa, spb in reversed(list(zip(spath_a, spath_b))):
        if spa is spb:
            common = spa
        else:
            break
    return common
[ "def parent(self) -> Optional[Scope]:\n return self._parent", "def common_ancestor(parent_list_0, parent_list_1):\n for b in parent_list_0[::-1]:\n if b in parent_list_1:\n return b\n return None", "def scope(self):\n if self._scope is None:\n with self._graph.as_default():\n if self.name is None:\n self._scope = self._parent_scope\n else:\n with variable_space(self._parent_scope):\n with variable_space(None, self.name) as scope:\n self._scope = scope\n return self._scope", "def top_scope(self) -> Optional[Scope]:\n if self.scope_stack:\n return self.scope_stack[0]\n return None", "def common_schema(self, left, right):\n left = self.get(left) or self.get(right)\n right = self.get(right) or self.get(left)\n left_schemata = list(left.schemata)\n right_schemata = list(right.schemata)\n if right in left_schemata:\n return left\n if left in right_schemata:\n return right\n\n # Find a common ancestor:\n for left in left_schemata:\n for right in right_schemata:\n if left == right:\n return left\n\n msg = \"No common ancestor: %s and %s\"\n raise InvalidData(msg % (left, right))", "def closest_parent(self):\n # type: () -> Optional[Tag]\n parent = self.parent\n while parent:\n if parent.name in self.PARENT_TAGS:\n return parent\n parent = parent.parent\n return None # pragma: no cover", "def lowest_common_parent(self, path):\n parent = Path()\n for n1, n2 in zip(self._path.split('.'), path._path.split('.')):\n if n1 == n2:\n parent.append(n1)\n else:\n return parent\n return parent", "def find_scope(self, scope_type):\n\n for scope in reversed(self.contexts):\n if scope.context_type == scope_type:\n return scope\n return self.contexts[0]", "def lowestCommonAncestor(root,p,q):\n\tif not root:\n\t\treturn None\n\t#root is p or q\n\tif root.data == q.data or root.data ==p.data:\n\t\treturn root\n\n\tL = lowestCommonAncestor(root.left, p, q)\n\tR = lowestCommonAncestor(root.right, p, q)\n\n\tif L and R:\n\t\treturn root\n\n\treturn L if L else R\n\t# ", "def parent(self):\n if self.parent_code:\n return CountrySubdivision(code=self.parent_code)\n return None", "def find_pyte_parent(self, pyte_widget: pyted_widget_types) -> pyted_widget_types:\n for w in self.widget_list:\n if w.name == pyte_widget.parent:\n parent_widget = w\n break\n else:\n parent_widget = None\n return parent_widget", "def lowest_common_ancestor(self, node1, node2):\n ca = self.common_ancestors(node1, node2)\n if len(ca) > 0:\n return ca[-1]\n return", "def cenancestor(self):\n ancestor = parent = self.parent\n while parent:\n ancestor = parent\n parent = getattr(parent, 'parent', None)\n return ancestor", "def common_ancestor(node_a, node_b):\n ancestors_a = ancestors(node_a)\n ancestors_b = ancestors(node_b)\n lowest_ancestors = ancestors_a if node_a.level > node_b.level else ancestors_b\n for _ in range(abs(node_a.level - node_b.level)):\n next(lowest_ancestors)\n same = (pa for pa, pb in zip(ancestors_a, ancestors_b) if pa == pb)\n return next(same)", "def getParent(selectionList):\r\n parentDict = dict()\r\n for obj in selectionList:\r\n parentDict[obj.semanticObject.getHierParent()] = obj\r\n for obj in selectionList:\r\n if(parentDict.has_key(obj.semanticObject)):\r\n del parentDict[obj.semanticObject]\r\n if(parentDict.has_key(None)):\r\n del parentDict[None]\r\n \r\n if(len(parentDict) == 0):\r\n topLayerList = selectionList[0].semanticObject.getHierTopLayer()\r\n for obj in selectionList:\r\n if(obj.semanticObject in topLayerList):\r\n return obj\r\n return None\r\n return parentDict.values()[0]", "def 
common_ancestor(n1, n2, head):\n count = 0 # How many nodes in {n1, n2} have been visited so far?\n ancestor = None\n\n def traverse(node):\n nonlocal count, ancestor\n if node is None or ancestor is not None: return\n count_at_entry = count\n if node is n1: count += 1\n if node is n2: count += 1\n traverse(node.left)\n traverse(node.right)\n if count_at_entry == 0 and count == 2 and ancestor is None:\n ancestor = node\n\n traverse(head)\n return ancestor", "def get_ancestor(self, cs1, cs2):\n raise NotImplementedError(\"Abstract method\")", "def find_tk_parent(self, pyte_widget: pyted_widget_types) -> tkinter.Widget:\n\n for w in self.widget_list:\n if w.name == pyte_widget.parent:\n parent_tk_widget = w.tk_name\n break\n else:\n parent_tk_widget = None\n return parent_tk_widget", "def _get_parent(*, schema: oa_types.Schema, schemas: oa_types.Schemas) -> str:\n ref = peek.ref(schema=schema, schemas=schemas)\n assert ref is not None\n parent, _ = ref_helper.get_ref(ref=ref, schemas=schemas)\n return parent" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within a certain set of scope schedules.
def is_in_scope(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType,
                schedules: List[dtypes.ScheduleType]) -> bool:
    while sdfg is not None:
        if state is not None and node is not None:
            sdict = state.scope_dict()
            scope = sdict[node]
            while scope is not None:
                if scope.schedule in schedules:
                    return True
                scope = sdict[scope]
        # Traverse up nested SDFGs
        if sdfg.parent is not None:
            parent = sdfg.parent_sdfg
            state = sdfg.parent
            node = sdfg.parent_nsdfg_node
            if node.schedule in schedules:
                return True
        else:
            parent = sdfg.parent
        sdfg = parent
    return False
[ "def check_schedule_against_stn(stn, schedule, epsilon=1e-10):\n for (u, v) in stn.edges():\n # Retrieve the STC for this edge\n lb, ub = stn[u][v]['stc']\n stc_satisfied = (schedule[v] - schedule[u] <= ub + epsilon) and (schedule[v] - schedule[u] >= lb - epsilon)\n if not stc_satisfied:\n return (False, \"There's a temporal constraint [{}, {}] from {} to {}, but those events were scheduled at {}={:0.4f} and {}={:0.4f} for a difference of {:0.4f}, violating the temporal constraint!\".format(lb, ub, u, v, u, schedule[u], v, schedule[v], schedule[v] - schedule[u]))\n # All edges satisfied\n return (True, \"Great!\")", "def scope_contains_scope(sdict: ScopeDictType, node: NodeType, other_node: NodeType) -> bool:\n curnode = other_node\n nodescope = sdict[node]\n while curnode is not None:\n curnode = sdict[curnode]\n if curnode == nodescope:\n return True\n return False", "def is_node_exists_in_elev_ranges(min, max):\n if len(Node.objects.filter(elevation__gte=min).filter(elevation__lte=max))==0:\n return False\n return True", "def is_valid(schedule: dict[str, tuple[str, str, tuple]]) -> bool:\n # Gives all the values of the dictionary\n sc_sections = [schedule[key] for key in schedule]\n return all([not sections_conflict(x, y) for x in sc_sections for y in sc_sections if x is not y])", "def is_in_region(position, aligns):\r\n\r\n for align in aligns:\r\n if align.start <= position < align.end: return True\r\n return False", "def can_search_schedule_slots(self):\n return # boolean", "def contains(self, esdc2):\n return contains(self.range, esdc2.range)", "def has_scopes(self, scopes):\n return set(scopes).issubset(set(self.scopes))", "def valid_in_scope(scoped, scope, exact=True):\n return in_ucs(scoped) or has_scope(scoped, scope, exact)", "def in_obstacle():\r\n x = node.x\r\n y = node.y\r\n return any([obstacle.contains_node(x, y, o) for o in obstacles])", "def can_lookup_schedules(self):\n return # boolean", "def inBounds(self, loc):\n if loc[0] in range(0, len(self._houses)) and loc[1] in range(0, len(self._houses[0])):\n return True\n else:\n return False", "def is_valid_root_setup(station, p_start, p_end, session_start, session_end, tree_node):\n member = \"__all__\"\n setup_start = session_start\n setup_end = session_end\n\n for s in tree_node:\n if s.tag == \"member\":\n member = s.text\n if s.tag == \"start\":\n setup_start = datetime.datetime.strptime(s.text, \"%Y.%m.%d %H:%M:%S\")\n if s.tag == \"end\":\n setup_end = datetime.datetime.strptime(s.text, \"%Y.%m.%d %H:%M:%S\")\n\n flag = True\n if not (member == \"__end__\" or member == station):\n flag = False\n if not (p_start >= setup_start and p_end <= setup_end):\n flag = False\n\n if setup_start < p_start < setup_end < p_end:\n Message.addMessage(\" ERROR: overlapping parameter setups!\")\n if p_start < setup_start < p_end < setup_end:\n Message.addMessage(\" ERROR: overlapping parameter setups!\")\n\n return flag", "def can_search_schedules(self):\n return # boolean", "def can_lookup_schedule_slots(self):\n return # boolean", "def contained_in(cc1, cc2):\n x2, y2, w2, h2 = cc2\n x1, y1, w1, h1 = cc1\n if x2 < x1 and y2 < y1 and x1 + w1 < x2 + w2 and y1 + h1 < y2 + h2:\n return True\n\n return False", "def check_bounds(next_rect, snek):\r\n if next_rect[0] < 0 or next_rect[1] < 0 or next_rect[0] >= 35 or next_rect[1] >= 30 or next_rect in snek:\r\n return False\r\n return True", "def is_non_overlapping(self):\n # Recalculate spannednodes here (instead of using\n # self.nodes_spanned()) since we need to add up the\n # sum of 
community sizes anyway.\n total_nodes = 0\n spannednodes = set()\n for ns in self.itervalues():\n spannednodes.update(ns)\n total_nodes += len(ns)\n return total_nodes == len(spannednodes)", "def test_is_node_schedulable(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n # Check if all master nodes are schedulable/unschedulable\n master_node_list = node_api_obj.get_master_nodes()\n for master_node_name in master_node_list.items:\n node_schedulable_status = node_api_obj.is_node_schedulable(node_name=master_node_name.metadata.name)\n if not node_schedulable_status:\n assert node_schedulable_status is False\n else:\n assert node_schedulable_status is True\n # Check if all worker nodes are schedulable/unschedulable\n worker_node_list = node_api_obj.get_worker_nodes()\n for worker_node_name in worker_node_list.items:\n node_schedulable_status = node_api_obj.is_node_schedulable(node_name=worker_node_name.metadata.name)\n if not node_schedulable_status:\n assert node_schedulable_status is False\n else:\n assert node_schedulable_status is True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within GPU devicelevel code.
def is_devicelevel_gpu(sdfg: 'dace.sdfg.SDFG',
                       state: 'dace.sdfg.SDFGState',
                       node: NodeType,
                       with_gpu_default: bool = False) -> bool:
    if with_gpu_default:
        schedules = dtypes.GPU_SCHEDULES + [dtypes.ScheduleType.GPU_Default]
    else:
        schedules = dtypes.GPU_SCHEDULES
    return is_in_scope(
        sdfg,
        state,
        node,
        schedules,
    )
[ "def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n is_parent_nested = (sdfg.parent is not None)\n if is_parent_nested:\n return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True)\n else:\n return is_devicelevel_gpu(state.parent, state, node, with_gpu_default=True)", "def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n from dace.sdfg.utils import is_fpga_kernel\n return (is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device])\n or (state is not None and is_fpga_kernel(sdfg, state)))", "def is_in_scope(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType,\n schedules: List[dtypes.ScheduleType]) -> bool:\n while sdfg is not None:\n if state is not None and node is not None:\n sdict = state.scope_dict()\n scope = sdict[node]\n while scope is not None:\n if scope.schedule in schedules:\n return True\n scope = sdict[scope]\n # Traverse up nested SDFGs\n if sdfg.parent is not None:\n parent = sdfg.parent_sdfg\n state = sdfg.parent\n node = sdfg.parent_nsdfg_node\n if node.schedule in schedules:\n return True\n else:\n parent = sdfg.parent\n sdfg = parent\n return False", "def is_on_device(maybe_symbol, gpu_fit, only_writes=False):\n try:\n functions = (maybe_symbol.function,)\n except AttributeError:\n assert maybe_symbol.is_Node\n iet = maybe_symbol\n functions = set(FindSymbols().visit(iet))\n if only_writes:\n expressions = FindNodes(Expression).visit(iet)\n functions &= {i.write for i in expressions}\n\n return all(not (f.is_TimeFunction and f.save is not None and f not in gpu_fit)\n for f in functions)", "def is_hpss_node(node):\n return node.address == \"HPSS\"", "def _node_in_my_heat_mem(self, node_id):\n layer = self._node_in_what_level_of_mem(node_id, self.heat_mem)\n return layer != -1", "def check_with_disk_graph(self, instance):\n not_covered_targets = self.find_not_covered_targets(instance)\n connected_components = self.find_connected_components(instance)\n return len(not_covered_targets) == 0 and len(connected_components) == 1", "def has_node() -> bool:\n return \"WASMFUN_NODE_EXE\" in os.environ\n\n # TODO: enable the code below.\n # On appveyor this failed:\n # https://ci.appveyor.com/project/WindelBouwman/ppci-786/build/1.0.537\n if hasattr(shutil, \"which\"):\n return bool(shutil.which(\"node\"))\n else:\n return False", "def has_logical_children(device):\n return device.model and device.model.type in (DeviceType.switch_stack,)", "def are_disks_used(self):\n disks = [node.disk for node in self.nodes]\n for disk in disks:\n if disk:\n return True\n return False", "def _is_model_on_gpu(self):\n return all([param.is_cuda for param in self._model.parameters()])", "def _check_that_node_from_body(node):\n n_ports = len(node.out_edges())\n internal_port_in_out_ports = ['internal_port_id' in edge for edge in node.out_edges()]\n return np.all(internal_port_in_out_ports) and n_ports", "def isPyNode(node):\r\n if re.search('pymel', str(node.__class__)):\r\n return 1\r\n else:\r\n return 0", "def confirm_gpu_availability():\n a = th.FloatTensor(1).cuda()\n # Just make sure a is not somehow removed by any smart compiling,\n # probably not necessary.\n return a is not None", "def is_node_with_weight(node: NNCFNode) -> bool:", "def __check_registered(self, source_address):\n if self.is_root:\n if self.stream.get_node_by_server(source_address[0], source_address[1]):\n if 
self.stream.get_node_by_server(source_address[0], source_address[1]).is_register():\n return True", "def containsNode(self, node: 'SoBaseKit') -> \"SbBool\":\n return _coin.SoNodeKitPath_containsNode(self, node)", "def SoSoundElement_sceneGraphHasSoundNode(state: 'SoState') -> \"SbBool\":\n return _coin.SoSoundElement_sceneGraphHasSoundNode(state)", "def has_nm3_simulator_node(self, unique_id):\n return unique_id in self._nm3_simulator_nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within an actual GPU kernel.
def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:
    is_parent_nested = (sdfg.parent is not None)
    if is_parent_nested:
        return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True)
    else:
        return is_devicelevel_gpu(state.parent, state, node, with_gpu_default=True)
[ "def is_devicelevel_gpu(sdfg: 'dace.sdfg.SDFG',\n state: 'dace.sdfg.SDFGState',\n node: NodeType,\n with_gpu_default: bool = False) -> bool:\n if with_gpu_default:\n schedules = dtypes.GPU_SCHEDULES + [dtypes.ScheduleType.GPU_Default]\n else:\n schedules = dtypes.GPU_SCHEDULES\n return is_in_scope(\n sdfg,\n state,\n node,\n schedules,\n )", "def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n from dace.sdfg.utils import is_fpga_kernel\n return (is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device])\n or (state is not None and is_fpga_kernel(sdfg, state)))", "def is_node_with_weight(node: NNCFNode) -> bool:", "def in_obstacle():\r\n x = node.x\r\n y = node.y\r\n return any([obstacle.contains_node(x, y, o) for o in obstacles])", "def _node_in_my_heat_mem(self, node_id):\n layer = self._node_in_what_level_of_mem(node_id, self.heat_mem)\n return layer != -1", "def containsNode(*args, **kwargs):\n \n pass", "def is_node_in_bounds(self, node):\n (x_coord, y_coord) = node\n\n if x_coord < 0 or x_coord >= self.width:\n return False\n elif y_coord < 0 or y_coord >= self.height:\n return False\n else:\n return True", "def is_member_of(self, node, cluster):\n return self.lookup[node] == cluster", "def is_in_scope(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType,\n schedules: List[dtypes.ScheduleType]) -> bool:\n while sdfg is not None:\n if state is not None and node is not None:\n sdict = state.scope_dict()\n scope = sdict[node]\n while scope is not None:\n if scope.schedule in schedules:\n return True\n scope = sdict[scope]\n # Traverse up nested SDFGs\n if sdfg.parent is not None:\n parent = sdfg.parent_sdfg\n state = sdfg.parent\n node = sdfg.parent_nsdfg_node\n if node.schedule in schedules:\n return True\n else:\n parent = sdfg.parent\n sdfg = parent\n return False", "def containsNode(self, node: 'SoBaseKit') -> \"SbBool\":\n return _coin.SoNodeKitPath_containsNode(self, node)", "def _is_model_on_gpu(self):\n return all([param.is_cuda for param in self._model.parameters()])", "def are_disks_used(self):\n disks = [node.disk for node in self.nodes]\n for disk in disks:\n if disk:\n return True\n return False", "def check_with_disk_graph(self, instance):\n not_covered_targets = self.find_not_covered_targets(instance)\n connected_components = self.find_connected_components(instance)\n return len(not_covered_targets) == 0 and len(connected_components) == 1", "def _check_that_node_from_body(node):\n n_ports = len(node.out_edges())\n internal_port_in_out_ports = ['internal_port_id' in edge for edge in node.out_edges()]\n return np.all(internal_port_in_out_ports) and n_ports", "def __contains__(self, key):\n for node in self.nodes:\n if node.key == key:\n return True\n return False", "def __contains__(self, id):\n\n return id in self.nodes", "def confirm_gpu_availability():\n a = th.FloatTensor(1).cuda()\n # Just make sure a is not somehow removed by any smart compiling,\n # probably not necessary.\n return a is not None", "def _is_inside_map(saliency_map: torch.Tensor, region: Tuple[int, int, int, int]):\n map_height, map_width = saliency_map.shape\n y, x, height, width = region\n return y >= 0 and x >= 0 and y + height <= map_height and x + width <= map_width", "def containsNodeExactly(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within FPGA devicelevel code.
def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:
    from dace.sdfg.utils import is_fpga_kernel
    return (is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device])
            or (state is not None and is_fpga_kernel(sdfg, state)))
[ "def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n is_parent_nested = (sdfg.parent is not None)\n if is_parent_nested:\n return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True)\n else:\n return is_devicelevel_gpu(state.parent, state, node, with_gpu_default=True)", "def is_in_scope(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType,\n schedules: List[dtypes.ScheduleType]) -> bool:\n while sdfg is not None:\n if state is not None and node is not None:\n sdict = state.scope_dict()\n scope = sdict[node]\n while scope is not None:\n if scope.schedule in schedules:\n return True\n scope = sdict[scope]\n # Traverse up nested SDFGs\n if sdfg.parent is not None:\n parent = sdfg.parent_sdfg\n state = sdfg.parent\n node = sdfg.parent_nsdfg_node\n if node.schedule in schedules:\n return True\n else:\n parent = sdfg.parent\n sdfg = parent\n return False", "def is_hpss_node(node):\n return node.address == \"HPSS\"", "def is_devicelevel_gpu(sdfg: 'dace.sdfg.SDFG',\n state: 'dace.sdfg.SDFGState',\n node: NodeType,\n with_gpu_default: bool = False) -> bool:\n if with_gpu_default:\n schedules = dtypes.GPU_SCHEDULES + [dtypes.ScheduleType.GPU_Default]\n else:\n schedules = dtypes.GPU_SCHEDULES\n return is_in_scope(\n sdfg,\n state,\n node,\n schedules,\n )", "def __check_registered(self, source_address):\n if self.is_root:\n if self.stream.get_node_by_server(source_address[0], source_address[1]):\n if self.stream.get_node_by_server(source_address[0], source_address[1]).is_register():\n return True", "def HasFDG(self):\n return self.__has('FDG')", "def SoSoundElement_sceneGraphHasSoundNode(state: 'SoState') -> \"SbBool\":\n return _coin.SoSoundElement_sceneGraphHasSoundNode(state)", "def containsNode(self, node: 'SoBaseKit') -> \"SbBool\":\n return _coin.SoNodeKitPath_containsNode(self, node)", "def sceneGraphHasSoundNode(state: 'SoState') -> \"SbBool\":\n return _coin.SoSoundElement_sceneGraphHasSoundNode(state)", "def has_node() -> bool:\n return \"WASMFUN_NODE_EXE\" in os.environ\n\n # TODO: enable the code below.\n # On appveyor this failed:\n # https://ci.appveyor.com/project/WindelBouwman/ppci-786/build/1.0.537\n if hasattr(shutil, \"which\"):\n return bool(shutil.which(\"node\"))\n else:\n return False", "def containsNode(self, node: 'SoNode') -> \"SbBool\":\n return _coin.SoPath_containsNode(self, node)", "def SoSoundElement_isPartOfActiveSceneGraph(state: 'SoState') -> \"SbBool\":\n return _coin.SoSoundElement_isPartOfActiveSceneGraph(state)", "def IsFamilyOrAssembly(self) -> bool:", "def check_nodes(nodes, codes):\n print(\"checking nodes...\")\n for i in codes:\n if i not in nodes.keys():\n return 1\n return 0", "def IsNestedFamily(self) -> bool:", "def has_logical_children(device):\n return device.model and device.model.type in (DeviceType.switch_stack,)", "def within_dunder_def(node: nodes.NodeNG) -> bool:\n parent = node.parent\n while parent is not None:\n if (\n isinstance(parent, nodes.FunctionDef)\n and parent.name.startswith(\"__\")\n and parent.name.endswith(\"__\")\n ):\n return True\n parent = parent.parent\n return False", "def is_in_defs(doc, element):\n if element is not None:\n defs = doc.find('defs', namespaces=inkex.NSS)\n if defs is not None:\n return linked_node in defs.iterdescendants()\n return False", "def HasScanNode(self, path_spec):\n return self._scan_nodes.get(path_spec, None) is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current threadblock size if the given node is enclosed in a GPU kernel, or None otherwise.
def devicelevel_block_size(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState',
                           node: NodeType) -> Tuple[symbolic.SymExpr]:
    from dace.sdfg import nodes as nd
    from dace.sdfg.sdfg import SDFGState

    while sdfg is not None:
        sdict = state.scope_dict()
        scope = sdict[node]
        while scope is not None:
            if scope.schedule == dtypes.ScheduleType.GPU_ThreadBlock:
                return tuple(scope.map.range.size())
            elif scope.schedule == dtypes.ScheduleType.GPU_Device:
                # No thread-block map, use config default
                return tuple(int(s) for s in Config.get('compiler', 'cuda', 'default_block_size').split(','))
            elif scope.schedule == dtypes.ScheduleType.GPU_ThreadBlock_Dynamic:
                # Dynamic thread-block map, use configured value
                return tuple(int(s) for s in Config.get('compiler', 'cuda', 'dynamic_map_block_size').split(','))
            scope = sdict[scope]

        # Traverse up nested SDFGs
        if sdfg.parent is not None:
            if isinstance(sdfg.parent, SDFGState):
                parent = sdfg.parent.parent
            else:
                parent = sdfg.parent
            state, node = next((s, n) for s in parent.nodes() for n in s.nodes()
                               if isinstance(n, nd.NestedSDFG) and n.sdfg.name == sdfg.name)
        else:
            parent = sdfg.parent
        sdfg = parent

    return None
[ "def node_size(self):\n return self._partitions[self.partition]", "def node_height(node):\n height = node.screenHeight()\n\n # In Nuke 7, a bug can prevent screenHeight() from reporting correctly.\n # In that case, it will return as 0.\n if not height:\n height = 18 if node.Class() != 'Dot' else 12\n\n return height", "def getSizeOfBlock(self) -> int:\n ...", "def node_width(node):\n width = node.screenWidth()\n\n # In Nuke 7, a bug can prevent screenWidth() from reporting correctly.\n # In that case, it will return as 0.\n if not width:\n width = 80 if node.Class() != 'Dot' else 12\n\n return width", "def pick_best_batch_size_for_gpu():\n if torch.cuda.is_available():\n _, available = torch.cuda.mem_get_info()\n availableGb = available / (1024 ** 3)\n if availableGb > 14:\n return 16\n elif availableGb > 10:\n return 8\n elif availableGb > 7:\n return 4\n if torch.backends.mps.is_available():\n import psutil\n available = psutil.virtual_memory().total\n availableGb = available / (1024 ** 3)\n if availableGb > 14:\n return 16\n elif availableGb > 10:\n return 8\n elif availableGb > 7:\n return 4\n return 1", "def get_node_block_height():\n\n response = requests.get(url=NODE_STATUS_ENDPOINT)\n if response.status_code != 200:\n logger.info(\"ConnectionError while requesting \" + NODE_STATUS_ENDPOINT)\n raise ConnectionError\n\n status = response.json()\n return status['result']['sync_info']['latest_block_height']", "def _node_size(node):\n size = 0\n if node:\n size += 1\n if node.left:\n size += node.left.size\n if node.right:\n size += node.right.size\n return size\n return size", "def get_number_gpu():\n n_gpu = len([x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU'])\n return n_gpu", "def get_node_vm_size(self) -> str:\n return self._get_node_vm_size()", "def get_blocksize():\n return C.blosc_get_blocksize()", "def physical_block_size_bytes(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"physical_block_size_bytes\")", "def Col_block_size(self):\n return self.Col_block", "def getGPURequired(self):\n if hasattr(self.data.application, \"gpu\"):\n if hasattr(self.data.application.gpu, \"gpuRequired\"):\n return self.data.application.gpu.gpuRequired\n return None", "def _compute_block_size(self):\n self._update_global_properties()\n self.block_size = self.block_scale_factor*self.glb_max_h", "def get_free_gpu():\n from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlDeviceGetCount\n nvmlInit()\n\n return np.argmax([\n nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(i)).free\n for i in range(nvmlDeviceGetCount())\n ])", "def gpu_num(self):\n return sum([len(gpu) for gpu in self.worker_vacant_gpus.values()])", "def node_size(rank_spec):", "def _gpu_info_subprocess():\n total_gpus = 0\n total_mem = 0\n try:\n import py3nvml.py3nvml\n py3nvml.py3nvml.nvmlInit()\n total_gpus = py3nvml.py3nvml.nvmlDeviceGetCount()\n\n import os\n cudavis = os.getenv(\"CUDA_VISIBLE_DEVICES\")\n if cudavis is not None:\n lencudavis = len(cudavis)\n if lencudavis == 0:\n total_gpus = 0\n else:\n total_gpus =\\\n min(total_gpus,\n os.getenv(\"CUDA_VISIBLE_DEVICES\").count(\",\") + 1)\n\n total_mem = \\\n min([py3nvml.py3nvml.nvmlDeviceGetMemoryInfo(\n py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(i)).total for i in\n range(total_gpus)])\n except NVMLError as e:\n print(\"No GPU, setting total_gpus=0 and total_mem=0\")\n print(e)\n sys.stdout.flush()\n return total_gpus, total_mem", "def _max_thread_width(thread):\n if not thread['children']:\n 
return 0\n return max(\n max([_max_thread_width(reply) for reply in thread['children']]),\n len(thread['children'])\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the aliases of this ModifyLeaderboardEvent.
def aliases(self): return self._aliases
[ "def get_aliases(self):\n return self.aliases", "def aliases(self):\n return self._names[1:]", "def getAliases(self):\n return self.__aliases;", "def aliases(self):\n\n return self._aliases.copy()", "def ask_amazon_for_account_aliases(self):\n self._get_info(get_cached=True)\n return getattr(self, \"account_aliases\", None)", "def aliases(self, obj: Author) -> str:\n return ', '.join(obj.aliases.names())", "def aliases(self, obj: Artist) -> str:\n return ', '.join(obj.aliases.names())", "def associated_aliases(self, account):\n aliases = []\n for spec in self.accounts[account]:\n aliases += spec.aliases\n return set(aliases)", "def alias_addresses(self):\n qset = (\n self.aliasrecipient_set.select_related(\"alias\")\n .filter(alias__internal=False)\n )\n aliases = [alr.alias.address for alr in qset]\n return aliases", "def aliases(self):\n raise AttributeError(\n \"Can not get aliases from unnamed units. Perhaps you meant to_string()?\"\n )", "def GetAliases(cls):\n return sorted(cls.meta.commands.keys())", "def get_channel_aliases(self, channel):\n chan_key = channel.key.lower()\n nicktuples = self.caller.nicks.get(category=\"channel\", return_tuple=True, return_list=True)\n if nicktuples:\n return [tup[2] for tup in nicktuples if tup[3].lower() == chan_key]\n return []", "def get_reversed_aliases(self):\n return dict((v, k) for k, v in self.aliases.items())", "def get_db_aliases():\n return [\n member[1] for member\n in inspect.getmembers(sys.modules[__name__])\n if member[0].startswith('ALIAS_')]", "def get(self):\n return SenderAlias_DB.query.all()", "def get_work_aliases(work):\n if hasattr(work, \"_alias\"):\n return [work._alias]\n if hasattr(work, \"_aliases\"):\n return work._aliases\n return []", "async def status_alias(self, ctx):\r\n\r\n if ctx.invoked_subcommand is None:\r\n\r\n aliases = _get(ctx.guild.id, 'status', 'aliases')\r\n if aliases is None:\r\n await ctx.send(f'No server aliases have been set for {ctx.guild.name}')\r\n return\r\n alias_list = discord.Embed(title=f'{ctx.guild.name} Status Aliases',colour=0x4b4740)\r\n for alias in aliases:\r\n alias_list.add_field(name=alias,value=aliases[alias])\r\n await ctx.send(embed=alias_list)", "async def aliases_command(self, ctx: Context) -> None:\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"• `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )", "def read_alias_definitions(self):\n self.alias = [] # map an alias to a sensor id\n try:\n aliaslist = self.owbase.read('/settings/alias/list').decode()\n for line in aliaslist.splitlines():\n sensor,alias = line.split('=')\n sensor=sensor[0:2]+'.'+sensor[2:-2] # set a dot and remove two hex digits from checksum at the end\n self.alias.append((sensor,alias))\n\n except Exception as e:\n self.logger.debug(\"Got an error {} while reading the alias definitions\".format(e))\n pass", "def addresses(self):\n addrs = {u.recieved_raw['ingress-address']\n for u in self.all_joined_units}\n return list(sorted(addrs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the aliases of this ModifyLeaderboardEvent.
def aliases(self, aliases): self._aliases = aliases
[ "def set_aliases (self, alias):\r\n self._check_alias_dict(alias, \"alias\")\r\n self.alias = alias", "def addAliases(self, aliases):\n assert isinstance(aliases, dict);\n\n for alias, identifier in aliases.items():\n self.setAlias(alias, identifier);", "def _add_aliases(cls,obs,aliases):\n for a in aliases:\n cls._alias_map[a.lower()] = obs.name\n for o in cls._registry.values():\n obs_aliases = []\n for alias, name in cls._alias_map.items():\n if name == o.name:\n obs_aliases.append(alias)\n o._aliases = obs_aliases", "def add_enabled_aliases(self, aliases):\n for alias, unit in aliases.items():\n if alias in self._registry and unit != self._registry[alias]:\n raise ValueError(\n f\"{alias} already means {self._registry[alias]}, so \"\n f\"cannot be used as an alias for {unit}.\"\n )\n if alias in self._aliases and unit != self._aliases[alias]:\n raise ValueError(\n f\"{alias} already is an alias for {self._aliases[alias]}, so \"\n f\"cannot be used as an alias for {unit}.\"\n )\n\n for alias, unit in aliases.items():\n if alias not in self._registry and alias not in self._aliases:\n self._aliases[alias] = unit", "def merge_aliases(self, mnamespace, aliases):\n for (key,val) in aliases.iteritems():\n self.add_alias(mnamespace, key, val)", "def set_alias(self,name,alias):\n self[alias]=self[name]", "async def aliases_command(self, ctx: Context) -> None:\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"• `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )", "async def status_alias(self, ctx):\r\n\r\n if ctx.invoked_subcommand is None:\r\n\r\n aliases = _get(ctx.guild.id, 'status', 'aliases')\r\n if aliases is None:\r\n await ctx.send(f'No server aliases have been set for {ctx.guild.name}')\r\n return\r\n alias_list = discord.Embed(title=f'{ctx.guild.name} Status Aliases',colour=0x4b4740)\r\n for alias in aliases:\r\n alias_list.add_field(name=alias,value=aliases[alias])\r\n await ctx.send(embed=alias_list)", "def aliases(self):\n\n return self._aliases.copy()", "def aliases(self):\n return self._names[1:]", "def set_alias(self, alias):\n self.send_command(api.set_alias, alias='IPCAM')", "def aliases(self, obj: Author) -> str:\n return ', '.join(obj.aliases.names())", "def aliases(self, obj: Artist) -> str:\n return ', '.join(obj.aliases.names())", "def aliases(self):\n raise AttributeError(\n \"Can not get aliases from unnamed units. Perhaps you meant to_string()?\"\n )", "def alias_event(self, event_id, alias_id):\n pass", "def alias(event, bot):\n\t# API is bit different from olde bot, and then group things\n\targ1, arg2 = argumentSplit(event.argument, 2)\n\n\tif arg1 == \"~del\":\n\t\tif arg2:\n\t\t\treturn del_alias(bot, arg2)\n\t\telse:\n\t\t\t# show help for del\n\t\t\treturn bot.say(functionHelp(alias, \"~del\"))\n\telif arg1 and arg2:\n\t\t#binding a new alias\n\t\tif arg2.lower() == \"me\": return bot.say(\"But you are already yourself.\")\n\t\t\n\t\tsource = USERS_MODULE.get_username(bot, arg1, source=event.nick, _inalias=True)\n\t\tif not source: \n\t\t\t# ATTEMPT GROUP\n\t\t\tif aliasgroup(bot, arg1, arg2) is False:\n\t\t\t\treturn bot.say(\"(%s) is not a group or a user I know. 
Try '.alias <existing_name> <new_alias>'\" % arg1)\n\t\t\telse: \n\t\t\t\treturn\n\t\t# else continue with normal user\n\t\treturn aliasuser(bot, arg1, arg2, source)\n\t\t\n\telif arg1:\n\t\t#querying an alias\n\t\tnick = lookup_alias(bot.dbQuery, arg1)\n\t\tif nick:\n\t\t\taliases = alias_list(bot.dbQuery, nick)\n\t\t\tif aliases:\n\t\t\t\tmsg = \"Aliases for (%s): %%s\" % arg1\n\t\t\t\ttitle = \"Aliases for (%s)\" % arg1\n\t\t\t\treturn pastehelper(bot, msg, items=aliases, altmsg=\"%s\", title=title)\n\t\t# unknown alias or no aliases:\n\t\treturn bot.say(\"No aliases for (%s)\" % arg1)\n\t\t\n\t# if none of the above, show help\t\n\tbot.say(functionHelp(alias))\n\treturn", "def reset(self):\n self.aliases = {}", "def get_aliases(self):\n return self.aliases", "def getAliases(self):\n return self.__aliases;" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the columns of this ModifyLeaderboardEvent.
def columns(self, columns): self._columns = columns
[ "def setColumns( self, names ):\n self.columns = names", "def setColumns(self, *args):\n if not args:\n self._column_to_role = {col: role for col, role in enumerate(itertools.chain(self._role_to_prop.keys(),\n self._ref_role_to_prop.keys()))}\n self._column_names = [prop for prop in itertools.chain(self._role_to_prop.values(),\n self._ref_role_to_prop.values())]\n return\n\n names = args[0].toVariant() if isinstance(args[0], qtc.QVariant) else list(map(lambda a: str(a), args))\n self._column_names = names\n\n self._column_to_role = {}\n for col, name in enumerate(names):\n try:\n role = next(filter(lambda rn: rn[1] == name, itertools.chain(self._role_to_prop.items(),\n self._ref_role_to_prop.items())))[0]\n except:\n continue\n\n self._column_to_role[col] = role", "def _update_columns(self, new_columns):\n for name, column in new_columns.items():\n self.columns[name] = column\n # Make sure the underlying dataframe is in sync in case series data has changed\n self._dataframe[name] = column._series", "def SetColumnOrder(self, columns):\n oldColsCount = self.__table.GetNumberCols()\n self.__table.SetColumnOrder(columns)\n newColsCount = self.__table.GetNumberCols()\n \n self.BeginBatch()\n \n if (oldColsCount > newColsCount): # deleted\n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED, newColsCount, \n oldColsCount - newColsCount)\n self.ProcessTableMessage(msg)\n elif (oldColsCount < newColsCount): # added\n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED, \n newColsCount - oldColsCount)\n self.ProcessTableMessage(msg)\n \n for i in range(newColsCount):\n wx.grid.Grid.SetColSize(self, i, \n self.__table.GetColSize(self.__table.GetColKey(i)))\n \n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n self.ProcessTableMessage(msg)\n \n self.EndBatch()", "def updateColumns(self):\r\n if self._table_name:\r\n Base.cursor.execute(f\"describe {self._table_name}\")\r\n results = Base.cursor.fetchall()\r\n if Base.cursor.rowcount>0:\r\n self._column_list = []\r\n for column in results:\r\n self._column_list.append(column[0])\r\n if column[3] == \"PRI\":\r\n self.pk = column[0]\r\n setattr(self,column[0],None)\r\n else:\r\n raise Exception(f\"Table {self._table_name} has no columns\")", "def SetGridCols(self, grid_name, no_of_cols):\r\n grid_name.ClearGrid() #clear all data first\r\n change_cols = no_of_cols - grid_name.GetNumberCols()\r\n if change_cols > 0:\r\n grid_name.AppendCols(change_cols) #always to end\r\n elif change_cols < 0:\r\n grid_name.DeleteRows(0, -change_cols) #from posn 0\r\n self.other_self.m_scrolledWindow3.SendSizeEvent() # make sure new size is fitted\r", "def setup_column_prefs( self ):\n\n\t\tpass", "def setSavedColumnsOnReset(self, columns):\n self._savedColumnsOnReset = columns", "def _toggle_columns(self, view: View, defx: Defx,\n context: Context) -> None:\n columns = (context.args[0] if context.args else '').split(':')\n if not columns:\n return\n current_columns = [x.name for x in view._columns]\n if columns == current_columns:\n # Use default columns\n columns = context.columns.split(':')\n view._init_columns(columns)", "def add_columns(self, *args):\n for (this_name, this_data) in args:\n assert(this_name not in self.column_list), 'column name %s already exists' % this_name\n assert(len(this_data) == self.n_row), 'expected %d rows but found %d' % (self.n_row, len(this_data))\n self.n_column += 1\n self.column_list.append(this_name)\n 
self.column_type[this_name] = type(this_data[0])\n self.data[this_name] = np.array(this_data)\n n_new = len(args)\n logger.info('added %d columns: %s', n_new, ' '.join(self.column_list[-n_new:]))", "def createColumns(self):\n self.tableWidget.insertColumn(self.tableWidget.columnCount())", "def delete_columns(self):\n self.focus()\n self.dispatch('DeleteColumns')", "def make_cols(self):\n column_types = self.config.column_types\n table = self.make_new_table()\n #update current table\n self.curr_table = table\n\n cols_to_add = []\n count = 0\n for column_type in column_types:\n num = int(self.MAX_COLS_TABLE * .8)\n cols_to_add += [(table.name+\"__\"+str(c), column_type) for c in range(count, count+num)]\n count += num\n\n values=[]\n for (name, col_type) in cols_to_add:\n values.append(\"ADD COLUMN `%s` %s\" % (name, col_type))\n\n values = \", \".join(values)\n qry = \"\"\"\n ALTER TABLE `{table}`\n {cols_to_add}\n \"\"\".format(table=table.name, cols_to_add=values)\n self.engine.execute(qry)\n\n \n #reflect table again to have update columns\n table = Table(table.name, MetaData(bind=self.engine), autoload=True, autoload_with=self.engine)\n self.tables[table.name] = table\n self.free_cols[table.name] = {}\n #for new column in the database, add it to free columns\n for (name, col_type) in cols_to_add:\n if col_type not in self.free_cols[table.name]:\n self.free_cols[table.name][col_type] = set([])\n\n col = DSMColumn(getattr(table.c, name), dsm_table=self)\n self.free_cols[table.name][col_type].add(col)", "def setcol(self,c,values):\n if len(values) != 9:\n raise TypeError(\"Columns require exactly 9 values.\")\n \n for r in xrange(9):\n self.M[str(r)+\",\"+str(c)] = values[r]\n self.row[r][c] = values[r]\n self.col[c][r] = values[r]\n self.sec[(r/3)*3 + c/3][c - (c/3)*3 + (r%3)*3] = values[r]", "def set_drop_columns(self, cols):\n if isinstance(cols, list):\n self.drop_columns = cols\n else:\n self.drop_columns = list(cols)", "def set_col(self, c: int, data: List[float]) -> None:\n start, end, step = self.__get_col_ids(c)\n self.data[start:end:step] = data", "def columns_moved(self, ind, old, new):\n idx = 0\n for k, v in self.header.items():\n v.position = self.hv.visualIndex(idx)\n idx += 1", "def _setup_arguments_on_columns(self):\n for prop in self.props:\n prop.active_history = self.active_history\n if self.deferred:\n prop.deferred = self.deferred\n prop.strategy_class = strategies.DeferredColumnLoader\n prop.group = self.group", "def make_all_columns_visible(self):\n self.make_table_columns_visible(table_ui=self.ui.h1_table)\n self.make_table_columns_visible(table_ui=self.ui.h2_table)\n self.make_table_columns_visible(table_ui=self.ui.h3_table)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the grid_search_view of this ModifyLeaderboardEvent.
def grid_search_view(self): return self._grid_search_view
[ "def grid_search_view(self, grid_search_view):\n \n self._grid_search_view = grid_search_view", "def get_gradebook_column_search_session(self):\n return # osid.grading.GradebookColumnSearchSession", "def get_grid_container(self):\n\t\treturn self._grid_container", "def get_gradebook_search_session(self):\n return # osid.grading.GradebookSearchSession", "def new_grid_search(self, params, **kwargs):\n hyper = self.client.api.hyper_grid_search(params, get_run_request(self.client, kwargs))\n return HyperSearch(self.client.api, hyper)", "def grid(self):\n return self.visa_ask(':GRID?')", "def get_event_search(self):\n return # osid.calendaring.EventSearch", "def get_gradebook_column_search_session_for_gradebook(self, gradebook_id):\n return # osid.grading.GradebookColumnSearchSession", "def get_grade_entry_search_session(self):\n return # osid.grading.GradeEntrySearchSession", "def get_index(self, model_class):\r\n try:\r\n return connections['default'].get_unified_index().get_index(model_class)\r\n except NotHandled:\r\n self.log.error(\"Couldn't find a SearchIndex for %s.\" % model_class)\r\n return None", "def get_grade_entry_search_session_for_gradebook(self, gradebook_id):\n return # osid.grading.GradeEntrySearchSession", "def get_log_entry_search(self):\n return # osid.logging.LogEntrySearch", "def get_gradebook_column_search_session_for_gradebook(self, gradebook_id, proxy):\n return # osid.grading.GradebookColumnSearchSession", "def grid_for_index(self, grid_index):\n\n assert 0 <= grid_index < len(self.grid_list)\n return self.grid_list[grid_index]", "def get_hierarchy_search(self):\n return # osid.hierarchy.HierarchySearch", "def get_grid_instance(name):\n return get_grid_class(name)()", "def belief_grid(self):\n return self._grid_belief_state", "def get_grade_entry_search_session(self, proxy):\n return # osid.grading.GradeEntrySearchSession", "def grid(self):\n if not self._segment_grid:\n self._segment_grid = segmentgrid.SegmentGrid(self)\n return self._segment_grid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the grid_search_view of this ModifyLeaderboardEvent.
def grid_search_view(self, grid_search_view): self._grid_search_view = grid_search_view
[ "def grid_search_view(self):\n return self._grid_search_view", "def set_SearchOn(self, value):\n super(GetSeasonGroupsInputSet, self)._set_input('SearchOn', value)", "def add_grid_search(self):\n # Here to apply ramdom search to pipeline, need to follow naming \"rgs__paramname\"\n params = {\"rgs__\" + k: v for k, v in self.model_params.items()}\n self.pipeline = RandomizedSearchCV(estimator=self.pipeline, param_distributions=params,\n n_iter=100, cv=3, verbose=2,\n random_state=42, n_jobs=-1)", "def set_search_result(self, result):\n self._search_result = result", "def set_Search(self, value):\n super(GetSeasonGroupsInputSet, self)._set_input('Search', value)", "def new_grid_search(self, params, **kwargs):\n hyper = self.client.api.hyper_grid_search(params, get_run_request(self.client, kwargs))\n return HyperSearch(self.client.api, hyper)", "def set_grid(self, grid):\n self.grid = grid\n self._update_dimensions()", "def set_grid_info(self, grid_info):\n self._grid_info = grid_info", "def update_search(self) -> None:\n return self._search.update()", "def set_grid_container(self, grid_container):\n\t\tself._grid_container=grid_container", "def search_position(self, search_position):\n\n self._search_position = search_position", "def __search (self, event):\n self.search_values = self.toolbar.get_search_values ( )\n self.__build_list ( )", "def on_control_search(self, ctrl):\n\t\tif ctrl is self.get_control('Search Entry Button'):\n\t\t\tself.search()\n\t\telif ctrl is self.get_control('Search History Button'):\n\t\t\tself.search_history()", "def enter_hit(self, event):\n self.search_button()", "def show_search(self, show_search):\n self._show_search = show_search", "def defaultsearch(self, event):\n self.variable.set('Search Ready!')", "def __update_grid_position(self):\n\n grid_position = self.game.level.convert_to_grid_position(self.center)\n self.grid_position = grid_position", "def searchbarChanged(self):\n tagString = self.searchBox.text().strip('\\n\\t ')\n\n self.searchTags(tagString)\n\n pass", "def toggle_search_headline_option(self, event: Event) -> None: # pragma: no cover (cmd)\n self.toggle_option('search_headline')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the numeric_channels of this ModifyLeaderboardEvent.
def numeric_channels(self): return self._numeric_channels
[ "def get_num_inchannels(self):\n return self.in_channels", "def get_channels(self):\n if self._channels is None:\n log.warn(\"get_channels called before check_for_update succeeded!\")\n return self._channels", "def get_channels_record(self):\n return self.channels_rcrds", "def get_channel_dict(self):\n return self.channels", "def _get_physical_channels(self):\n return self.__physical_channels", "def number(self, channel_number=(0, 0)):\n\n raise NotImplementedError", "def numeric_channels(self, numeric_channels):\n \n self._numeric_channels = numeric_channels", "def get_channels():", "def text_channels(self):\n return self._text_channels", "def number(self):\n chans = ctypes.c_int32()\n sdk.GetNumberADChannels(ctypes.byref(chans))\n return chans.value", "def getChannel(self):\n for evt in self.__events:\n if isinstance(evt, ChannelEvent):\n return evt.channel\n return None", "def get_number_of_channels(self):\n d = uInt32(0)\n CALL('GetTaskNumChans', self, ctypes.byref(d))\n return d.value", "def n_channels(self):\n return self.colours.shape[1]", "def open_channels(self):\n return self.channels.keys()", "def set_counter_channels(self, channel_list=None):\n\n if channel_list is None:\n return self.get_counter_channels()\n\n # Sanity check:\n all_channels = self._get_all_channels()\n if not set(channel_list).issubset(set(all_channels)):\n # self.log.error('set_counter_channels(): requested list of channels is invalid: '\n # 'some channels are not present on the device.'\n # 'requested list: {0} \\n'\n # 'available channels: {1}'\n # ''.format(channel_list, all_channels))\n return self.get_counter_channels()\n\n # Apply changes to internal variable self._channel_list\n self._channel_list = channel_list\n # Sort channel numbers, such that channel order does not depend\n # on order of numbers in the config file\n self._channel_list.sort()\n\n return self.get_counter_channels()", "def get_channel_list(self):\r\n channels = self.items()\r\n channels.sort()\r\n return [value for key, value in channels]", "def get_new_channel_id(self):\n return (max(self.channels) + 1) if self.channels else 0", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the numeric_channels of this ModifyLeaderboardEvent.
def numeric_channels(self, numeric_channels): self._numeric_channels = numeric_channels
[ "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def numeric_channels(self):\n return self._numeric_channels", "def set_counter_channels(self, channel_list=None):\n\n if channel_list is None:\n return self.get_counter_channels()\n\n # Sanity check:\n all_channels = self._get_all_channels()\n if not set(channel_list).issubset(set(all_channels)):\n # self.log.error('set_counter_channels(): requested list of channels is invalid: '\n # 'some channels are not present on the device.'\n # 'requested list: {0} \\n'\n # 'available channels: {1}'\n # ''.format(channel_list, all_channels))\n return self.get_counter_channels()\n\n # Apply changes to internal variable self._channel_list\n self._channel_list = channel_list\n # Sort channel numbers, such that channel order does not depend\n # on order of numbers in the config file\n self._channel_list.sort()\n\n return self.get_counter_channels()", "def number(self, channel_number=(0, 0)):\n\n raise NotImplementedError", "def set_channels_for_hdf5(self, channels=range(1,9)):\n # JOSH: proposed changes for new IOC\n self.hdf5.num_extra_dims.put(0)\n # does the next line mess up the new IOC?\n # yes\n # self.cam.num_channels.put(self.get_channel_count())\n\n # # The number of channel\n # for n in channels:\n # getattr(self, f'channel{n}').rois.read_attrs = 
['roi{:02}'.format(j) for j in range(1,17)]\n # self.hdf5.num_extra_dims.put(0)\n # self.settings.num_channels.put(len(channels))\n # #self.settings.num_channels.put(8)", "def text_channels(self, text_channels):\n \n self._text_channels = text_channels", "def set_channels_of_interest(self, channels):\n if not (type(channels) in (tuple, list) or channels is 'all'):\n message = \"channels of interest may be a list or tuple of integers\" \\\n \" corresponding to the index of the channels,\" \\\n \"or the string 'all' to specify that all channels should be loaded \" \\\n \"(the default behavior is all)\"\n\n message += '\\nGot Type: %r' % type(channels)\n raise TypeError(message)\n\n self.channels_of_interest = channels", "def channel_count_test(self, channel_count_test):\n\n self._channel_count_test = channel_count_test", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def _load_channel_changes(self, chnl):\n\n self.controller.send_command(\"U{}00\\n\".format(chnl))", "def start_channels(self, channels=(1,)):\n assert isinstance(channels, tuple), \"Argument must 
be a tuple\"\n\n # iterate through all the channels\n for i in range(1, 9):\n # select channel\n self.inst.write(\"CH {}\".format(i))\n # turn on/off selected channel\n self.inst.write(\"OUT {}\".format(int(i in channels)))", "def update_channel(self, channel):", "def setThrottleChannel(self, channel: int):\n self.axes[self.Axis.kThrottle] = channel", "def __init__(self, num_channels, mask):\n super(channel_selection, self).__init__()\n self.indexes = nn.Parameter(torch.ones(num_channels))\n assert len(mask) == num_channels\n mask = torch.from_numpy(mask)\n self.indexes.data.mul_(mask)", "def setInputChannel(self, inputNum, chl):\n\t\tVisualizationModule.setInputChannel(self, inputNum, chl)\n\t\tif self.dataUnit:\n\t\t\tinputDataUnit = self.getInputDataUnit(1)\n\t\t\tif not inputDataUnit:\n\t\t\t\tinputDataUnit = self.dataUnit\n\t\t\tself.colorTransferFunction = inputDataUnit.getColorTransferFunction()\n\t\t\tlib.messenger.send(self, \"set_Palette_ctf\", self.colorTransferFunction)\n\t\t\t\n\t\t\tself.volumeProperty.SetColor(self.colorTransferFunction)", "def set_channels(self, chan_list):\r\n on_chars = '!@#$'\r\n off_chars = '1234'\r\n out_string = ''\r\n for indx, chan in enumerate(chan_list):\r\n if chan == 1:\r\n out_string += on_chars[indx]\r\n elif chan == 0:\r\n out_string += off_chars[indx]\r\n else:\r\n print(\"Invalid channel list. The format should be: [1, 1, 1, 1] and it should only have 0 or 1\")\r\n self.send_board_command(out_string)", "def __channels_set(self, value):\r\n for val in (v.dbobj for v in make_iter(value) if v):\r\n self.db_receivers_channels.add(val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the operation of this ModifyLeaderboardEvent.
def operation(self): return self._operation
[ "def get_op(self):\n if self.op is None:\n raise ValueError(\"%s: Operation undefined. Call compute_op before\"\n % self.get_label())\n return self.op", "def op_code(self):\n return self.__op_code", "def get_cellOperator(self):\n return self._oper", "def currentOperator(self):\n return self.currentOp", "def current_operation(self):\n return EVO_STATE_TO_HA.get(self._status['systemModeStatus']['mode'])", "def operation_description(self):\n ret = self._get_attr(\"operationDescription\")\n return ret", "def op_type(self):\n return self._op_type", "def get_training_op(self):\n _verif(self._training_op, \"training_op\")\n return self._training_op", "def get_command(self):\n return self.code[-1]", "def last_operation_message(self) -> Optional[str]:\n return pulumi.get(self, \"last_operation_message\")", "def tf_op(self):\n return self._tf_op", "def device_op(self):\n return self.args[0].device_op", "def op_info(self) -> OperatorInfo:\n return self._op_info", "def get_operation(self, idx):\n # get the index of the parameter in the script\n t_idx = self.trainable_params[idx]\n\n # get the info for the parameter\n info = self._par_info[t_idx]\n return info[\"op\"], info[\"op_idx\"], info[\"p_idx\"]", "def _get_event_operation(cls, message):\n event_type_message = message.mbf_find_object('TYPE', 'MBFE_BEGINNING')\n event_type = event_type_message.mbf_get_value()\n index_of_underscore = event_type.index('_')\n return event_type[0: index_of_underscore]", "def get_action(self):\r\n return self.player_action", "def op(self, op_id):\n return self._ops[op_id]", "def current_operation(self):\n if self.device.mode == 'cool':\n return STATE_COOL\n elif self.device.mode == 'heat':\n return STATE_HEAT\n elif self.device.mode == 'range':\n return STATE_AUTO\n elif self.device.mode == 'off':\n return STATE_OFF\n else:\n return STATE_UNKNOWN", "def __priority(self, operation):\n return self._operations.priority(operation)", "def get_op(self, op_name):\n graph = self.session.graph\n return graph.get_tensor_by_name(op_name + ':0')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the sorted_by of this ModifyLeaderboardEvent.
def sorted_by(self): return self._sorted_by
[ "def sort_leaderboard(self):\n return self.order_by(\"-score\")", "def sorted_by(self, sorted_by):\n \n self._sorted_by = sorted_by", "def sort_leaderboard(self):\n return self.get_queryset().sort_leaderboard()", "def sort_order_by_key(self, sort_by):\n\n if self.current_sort_by == sort_by:\n return self.current_sort_order\n return 'unsorted'", "def sort_key(self) -> \"Attribute\":\n return self._values.get(\"sort_key\")", "def edited_by(self):\n return self._edited_by", "def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name", "def history_sort_key(history_item_dict):\n second_order = 0\n if \"prop_changed\" in history_item_dict:\n changed_property = history_item_dict[\"prop_changed\"]\n if changed_property == \"name\" or changed_property == \"what\":\n second_order = 1\n\n return history_item_dict[\"time\"], second_order", "def _sort_key(self, author):\n if (\n self.config(\"show_line_count\")\n or self.config(\"show_contribution\")\n or self.config(\"sort_authors_by\") == \"contribution\"\n ):\n key = \"contribution\"\n else:\n key = \"name\"\n\n func = getattr(author, key)\n return func()", "def sorted_keys(self):\r\n return canonsort_keys(self.keys(), self.canonical_order)", "def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n\n peer = self.peers[0]\n try:\n sort_status = self.status_key[peer.status]\n except KeyError:\n sort_status = len(self.status_key) + 1\n sort_name = peer.alias\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name", "def sorted_jobs(self):\n return sorted(self.jobs.items(), key=lambda item: getattr(item[1], self.priority), reverse=self.descending)", "def _getSortInfo(self):\n req = self.REQUEST\n sort_by = req.get('sort_by', 'anon')\n sort_reverse = int(req.get('sort_reverse', 1))\n return sort_by, sort_reverse", "def get_sorted_variables(self):\n return self.sorted_variables", "def sortByKeys(self):\n\t\tself._dKeys = sorted(self._dKeys, key=lambda tupl: tupl[0])\n\t\treturn self", "def order_by(self, *args, **kwargs):\n return self.list().order_by(*args, **kwargs)", "def sort(self):\n self.entries.sort(key=lambda x: -x.priority)", "def get_ordering(self):\n if not check_empty_dict(self.params):\n return super(TreeChangeList, self).get_ordering()\n return None, 'asc'", "def last_modified_by(self) -> \"str\":\n return self._attrs.get(\"last_modified_by\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the sorted_by of this ModifyLeaderboardEvent.
def sorted_by(self, sorted_by): self._sorted_by = sorted_by
[ "def sorted_by(self):\n return self._sorted_by", "def edited_by(self, edited_by):\n\n self._edited_by = edited_by", "def set_SortOn(self, value):\n super(GetSeasonGroupsInputSet, self)._set_input('SortOn', value)", "def reviewed_by(self, reviewed_by):\n\n self._reviewed_by = reviewed_by", "def sort_order_by_key(self, sort_by):\n\n if self.current_sort_by == sort_by:\n return self.current_sort_order\n return 'unsorted'", "def setSortedObjectOrderStrategy(self, *args):\n return _coin.SoGLRenderAction_setSortedObjectOrderStrategy(self, *args)", "def set_sort_as(self, value):\n self.sort_as = value", "def sort_leaderboard(self):\n return self.get_queryset().sort_leaderboard()", "def apply_order_bys_for_primary_model(self) -> None: # noqa: CCR001\n if self.order_columns:\n for clause in self.order_columns:\n if \"__\" not in clause:\n text_clause = (\n text(f\"{self.table.name}.{self.alias(clause[1:])} desc\")\n if clause.startswith(\"-\")\n else text(f\"{self.table.name}.{self.alias(clause)}\")\n )\n self.sorted_orders[clause] = text_clause\n else:\n order = text(self.prefixed_pk_name)\n self.sorted_orders[self.prefixed_pk_name] = order", "def sort_leaderboard(self):\n return self.order_by(\"-score\")", "def set_commented_by(self, commented_by):\n\n\t\tif commented_by is not None and not isinstance(commented_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: commented_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__commented_by = commented_by\n\t\tself.__key_modified['commented_by'] = 1", "def SetNewManualOrder(self):\n self.sortMode = \"manual\"\n self.isManuallySorted = True\n self.lastManuallySortedEntries = self.entries\n \n self.ManualSortingEnabled.emit()", "def _sort_by(self, criteria):\n log.info('Sorting kernels by {}')\n assert self._select_drop_down('sort', criteria)", "def set_order(self, order_key: str) -> None:\n if order_key not in self.orders:\n raise exceptions.CommandError(\n \"Unknown flow order: %s\" % order_key\n )\n order_key = self.orders[order_key]\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview", "def sortByKeys(self):\n\t\tself._dKeys = sorted(self._dKeys, key=lambda tupl: tupl[0])\n\t\treturn self", "def _sort_prep(self):\n self._sort_outdated = True\n self._last_node_id = self.nodes[self.clineno]", "def _sort_key(self, author):\n if (\n self.config(\"show_line_count\")\n or self.config(\"show_contribution\")\n or self.config(\"sort_authors_by\") == \"contribution\"\n ):\n key = \"contribution\"\n else:\n key = \"name\"\n\n func = getattr(author, key)\n return func()", "def clickon(self, event):\n self._sort_by(self.columns.index(event.widget['text']))", "def sort_by_storm(self):\n self.vitals=sorted(self.vitals,cmp=tcutil.storminfo.vit_cmp_by_storm)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the system_columns of this ModifyLeaderboardEvent.
def system_columns(self): return self._system_columns
[ "def system_columns(self, system_columns):\n \n self._system_columns = system_columns", "def getAllColumns (self):\n\n return self.columns", "def columns(self):\n return self.c", "def get_columns(self):\n return list(zip(*self.get_board()))", "def get_columns(self) -> dict:\n\n return self.source.columns", "def columns(self):\n\t\treturn super().columns+[\"subsystems\"]", "def columns(self):\n return self.cs", "def get_feature_columns(self):\n return self.feature_columns", "def get_level0200_column_headers(self):\n try: self.level0200_column_headers\n except:\n self.set_level0200_standards()\n return self.level0200_column_headers", "def get_column_names(self):\n columns = list(self.table_content.keys())\n return columns", "def host_columns(self):\n columns = ('*',)\n limit = 1\n record = self.get_host(columns=columns, limit=limit)[0]\n return self.get_columns(record)", "def columns(self):\n return self.properties.get('columns',\n ColumnDefinitionCollection(self.context,\n ResourcePath(\"columns\", self.resource_path), self))", "def data_columns(self):\n return list(self.data.keys())", "def columns(self):\n return list(self.values())", "def host_snmp_cache_columns(self):\n columns = ('*',)\n limit = 1\n record = self.get_snmp_cache(columns=columns, limit=limit)[0]\n return self.get_columns(record)", "def get_well_columns(self) -> List[List[str]]:\n return self._definition.ordering", "def getColumns(self):\r\n # Save list of main columns\r\n for row in self.table:\r\n for entry in row:\r\n if str(row[0]).startswith('!') and not str(row[0]).startswith('!!'):\r\n delimiter = misc.getDelimiter(row)\r\n column_names = list(row)\r\n break\r\n\r\n # Insert mandatory first column if not existent\r\n inserted_column = False\r\n #if not column_names[0].title() == '!' + self.table_type.title():\r\n # column_names.insert(0, '!' + self.table_type.title())\r\n # inserted_column = True\r\n\r\n # Get column positions\r\n columns = {}\r\n for i, column in enumerate(column_names):\r\n columns[column] = i\r\n\r\n return column_names, columns, inserted_column, delimiter", "def get_column_names(self):\r\n return [column.key for column in self.table.columns]", "def get_display_columns(self):\n\n # Find the first input field in the dynamic data area after 'Interval Date =>' which is unique and appears\n # across all displays\n try:\n field_found = self.get_first_field(text_before_input_field='Time =>')\n except ValueError:\n field_found = self.get_first_field(text_before_input_field='Time ==>')\n\n # Set initial line, pos, and length for both column names and dash rows on the display\n self.col_name_line = field_found.row - 2\n col_dash_line = field_found.row - 1\n col_pos = field_found.col\n # adjusted_screen_length = self.screenLen - field_found.col\n adjusted_screen_length = self.screenLen - 1\n\n # Get the page of column names and dashes.\n col_name_str = self.ptg2_em.string_get(self.col_name_line, col_pos, adjusted_screen_length)\n col_len_str = self.ptg2_em.string_get(col_dash_line, col_pos, adjusted_screen_length)\n\n return col_name_str, col_len_str" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the system_columns of this ModifyLeaderboardEvent.
def system_columns(self, system_columns): self._system_columns = system_columns
[ "def system_columns(self):\n return self._system_columns", "def columns(self, columns):\n \n self._columns = columns", "def setColumns( self, names ):\n self.columns = names", "def _update_columns(self, new_columns):\n for name, column in new_columns.items():\n self.columns[name] = column\n # Make sure the underlying dataframe is in sync in case series data has changed\n self._dataframe[name] = column._series", "def SetColumnOrder(self, columns):\n oldColsCount = self.__table.GetNumberCols()\n self.__table.SetColumnOrder(columns)\n newColsCount = self.__table.GetNumberCols()\n \n self.BeginBatch()\n \n if (oldColsCount > newColsCount): # deleted\n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED, newColsCount, \n oldColsCount - newColsCount)\n self.ProcessTableMessage(msg)\n elif (oldColsCount < newColsCount): # added\n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED, \n newColsCount - oldColsCount)\n self.ProcessTableMessage(msg)\n \n for i in range(newColsCount):\n wx.grid.Grid.SetColSize(self, i, \n self.__table.GetColSize(self.__table.GetColKey(i)))\n \n msg = wx.grid.GridTableMessage(self.__table, \n wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n self.ProcessTableMessage(msg)\n \n self.EndBatch()", "def system_permission_descriptions(self, system_permission_descriptions):\n\n self._system_permission_descriptions = system_permission_descriptions", "def system_health_score(self, system_health_score):\n\n self._system_health_score = system_health_score", "def setSavedColumnsOnReset(self, columns):\n self._savedColumnsOnReset = columns", "def _toggle_columns(self, view: View, defx: Defx,\n context: Context) -> None:\n columns = (context.args[0] if context.args else '').split(':')\n if not columns:\n return\n current_columns = [x.name for x in view._columns]\n if columns == current_columns:\n # Use default columns\n columns = context.columns.split(':')\n view._init_columns(columns)", "def updateColumns(self):\r\n if self._table_name:\r\n Base.cursor.execute(f\"describe {self._table_name}\")\r\n results = Base.cursor.fetchall()\r\n if Base.cursor.rowcount>0:\r\n self._column_list = []\r\n for column in results:\r\n self._column_list.append(column[0])\r\n if column[3] == \"PRI\":\r\n self.pk = column[0]\r\n setattr(self,column[0],None)\r\n else:\r\n raise Exception(f\"Table {self._table_name} has no columns\")", "def setup_column_prefs( self ):\n\n\t\tpass", "def setColumns(self, *args):\n if not args:\n self._column_to_role = {col: role for col, role in enumerate(itertools.chain(self._role_to_prop.keys(),\n self._ref_role_to_prop.keys()))}\n self._column_names = [prop for prop in itertools.chain(self._role_to_prop.values(),\n self._ref_role_to_prop.values())]\n return\n\n names = args[0].toVariant() if isinstance(args[0], qtc.QVariant) else list(map(lambda a: str(a), args))\n self._column_names = names\n\n self._column_to_role = {}\n for col, name in enumerate(names):\n try:\n role = next(filter(lambda rn: rn[1] == name, itertools.chain(self._role_to_prop.items(),\n self._ref_role_to_prop.items())))[0]\n except:\n continue\n\n self._column_to_role[col] = role", "def update_cols(self):\n self.cols = []\n\n # Iterate through the list of lists and append the element to the appropriate list.\n for x in range(self.row_num):\n i = 0\n for y in self.rows[x]:\n if x == 0:\n self.cols.append([])\n self.cols[i].append(y)\n i += 1\n self.col_num = len(self.cols)", "def columns(self):\n\t\treturn 
super().columns+[\"subsystems\"]", "def SetGridCols(self, grid_name, no_of_cols):\r\n grid_name.ClearGrid() #clear all data first\r\n change_cols = no_of_cols - grid_name.GetNumberCols()\r\n if change_cols > 0:\r\n grid_name.AppendCols(change_cols) #always to end\r\n elif change_cols < 0:\r\n grid_name.DeleteRows(0, -change_cols) #from posn 0\r\n self.other_self.m_scrolledWindow3.SendSizeEvent() # make sure new size is fitted\r", "def systems(self, systems):\n allowed_values = [\"ORIGINAL\", \"UPDATED\", \"NONE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and systems not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `systems` ({0}), must be one of {1}\" # noqa: E501\n .format(systems, allowed_values)\n )\n\n self._systems = systems", "def set_col(self, system, icol, column):\n if self._colnames is None:\n self._setup_index_maps(system)\n\n wrt = self._colnames[self._col2name_ind[icol]]\n _, offset, _, _, _ = self._col_var_info[wrt]\n loc_idx = icol - offset # local col index into subjacs\n\n scratch = np.zeros(column.shape)\n\n for of, start, end, _ in system._jac_of_iter():\n key = (of, wrt)\n if key in self._subjacs_info:\n subjac = self._subjacs_info[key]\n if subjac['cols'] is None:\n subjac['value'][:, loc_idx] = column[start:end]\n else:\n match_inds = np.nonzero(subjac['cols'] == loc_idx)[0]\n if match_inds.size > 0:\n row_inds = subjac['rows'][match_inds]\n subjac['value'][match_inds] = column[start:end][row_inds]\n else:\n row_inds = np.zeros(0, dtype=INT_DTYPE)\n arr = scratch[start:end]\n arr[:] = column[start:end]\n arr[row_inds] = 0.\n nzs = np.nonzero(arr)\n if nzs[0].size > 0:\n raise ValueError(f\"{system.msginfo}: User specified sparsity (rows/cols) \"\n f\"for subjac '{of}' wrt '{wrt}' is incorrect. There are \"\n f\"non-covered nonzeros in column {loc_idx} at \"\n f\"row(s) {nzs[0]}.\")", "def make_cols(self):\n column_types = self.config.column_types\n table = self.make_new_table()\n #update current table\n self.curr_table = table\n\n cols_to_add = []\n count = 0\n for column_type in column_types:\n num = int(self.MAX_COLS_TABLE * .8)\n cols_to_add += [(table.name+\"__\"+str(c), column_type) for c in range(count, count+num)]\n count += num\n\n values=[]\n for (name, col_type) in cols_to_add:\n values.append(\"ADD COLUMN `%s` %s\" % (name, col_type))\n\n values = \", \".join(values)\n qry = \"\"\"\n ALTER TABLE `{table}`\n {cols_to_add}\n \"\"\".format(table=table.name, cols_to_add=values)\n self.engine.execute(qry)\n\n \n #reflect table again to have update columns\n table = Table(table.name, MetaData(bind=self.engine), autoload=True, autoload_with=self.engine)\n self.tables[table.name] = table\n self.free_cols[table.name] = {}\n #for new column in the database, add it to free columns\n for (name, col_type) in cols_to_add:\n if col_type not in self.free_cols[table.name]:\n self.free_cols[table.name][col_type] = set([])\n\n col = DSMColumn(getattr(table.c, name), dsm_table=self)\n self.free_cols[table.name][col_type].add(col)", "def delete_columns(self):\n self.focus()\n self.dispatch('DeleteColumns')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the text_channels of this ModifyLeaderboardEvent.
def text_channels(self): return self._text_channels
[ "def get_channels(self):\n if self._channels is None:\n log.warn(\"get_channels called before check_for_update succeeded!\")\n return self._channels", "def getChannels(self):\r\n\t\tchannels = []\r\n\t\tfor row in self.db(self.db.user_channels.owner_id==self.user).select():\r\n\t\t\tchannels.append(row.channel_title)\r\n\t\treturn channels", "def get_channel_dict(self):\n return self.channels", "def text_channels(self, text_channels):\n \n self._text_channels = text_channels", "def list_channels(self):\n path = self.build_url(\"/channels\")\n return self.request('get', path)", "def get_channels():", "def getChannel(self):\n for evt in self.__events:\n if isinstance(evt, ChannelEvent):\n return evt.channel\n return None", "def open_channels(self):\n return self.channels.keys()", "def channels(self):\n return self.get_property('channels',\n ChannelCollection(self.context, ResourcePath(\"channels\", self.resource_path)))", "def group_channel(self) -> discord.TextChannel:\n assert self.player.role.grouped\n return self.game.role_chats[self.player.role]", "def list_channels(self,c):\n\t\tkeys = yield self.channels_by_id.keys()\n\t\treturnValue([ [str(key),self.channels_by_id[key].name] for key in keys])", "def fetch_all_channels(self):\n try:\n response = self.client.conversations_list()\n channels = response['channels']\n except SlackApiError as error:\n self.logger.warning(\n f\"slack {self.fetch_all_channels.__name__} request failed and raised error: {error.response['error']}\")\n return channels", "def get_all_channels(self):\r\n return self.all()", "def get_channels_record(self):\n return self.channels_rcrds", "def get_channels(self) -> List[str]:\n channels = set()\n for series in self.series:\n channels.update(series.get_channels())\n return sorted(channels)", "def _create_text_channel(ctx_data, bot: Bot):\n return bot.get_channel(int(ctx_data[\"channel_id\"]))", "def get_channels(bot, trigger):\n m_chans = manager(bot).channels\n parts = trigger.lower().split()\n if parts:\n channels = [p for p in parts if p in m_chans]\n if channels:\n return channels\n return m_chans.keys()", "def list_channels(self):\r\n with self.channel_lock:\r\n return [{'status': self.is_channel_busy(i.channel_name),\r\n 'name': i.channel_name} for i in self.channel_list]", "def numeric_channels(self):\n return self._numeric_channels" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the text_channels of this ModifyLeaderboardEvent.
def text_channels(self, text_channels): self._text_channels = text_channels
[ "def text_channels(self):\n return self._text_channels", "def set_channels(self, chan_list):\r\n on_chars = '!@#$'\r\n off_chars = '1234'\r\n out_string = ''\r\n for indx, chan in enumerate(chan_list):\r\n if chan == 1:\r\n out_string += on_chars[indx]\r\n elif chan == 0:\r\n out_string += off_chars[indx]\r\n else:\r\n print(\"Invalid channel list. The format should be: [1, 1, 1, 1] and it should only have 0 or 1\")\r\n self.send_board_command(out_string)", "async def set_text_only(self, channel: discord.TextChannel) -> None:\n await self.db.channel(channel).is_text_only.set(True)\n await self.db.channel(channel).is_image_only.set(False)", "def update_channels(self):\n client = slack.WebClient(token=self.access_token)\n response = client.conversations_list(\n exclude_archived=\"true\",\n limit=500,\n )\n\n channels = [SlackChannel(\n workspace=self,\n name=c.get(\"name\"),\n channel_id=c.get(\"id\")\n ) for c in sorted(response.get('channels', []), key=itemgetter('name'))]\n \n SlackChannel.objects.bulk_create(channels, ignore_conflicts=True)", "async def set_target_channels(self, ctx, target_channels: str):\n self.set_target_channels_inner(target_channels)\n await self.log_and_discord_print(ctx, message=f\"Set channel whitelist to {self.channel_whitelist}\")", "def numeric_channels(self, numeric_channels):\n \n self._numeric_channels = numeric_channels", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', 
is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _load_channel_changes(self, chnl):\n\n self.controller.send_command(\"U{}00\\n\".format(chnl))", "def set_channels_of_interest(self, channels):\n if not (type(channels) in (tuple, list) or channels is 'all'):\n message = \"channels of interest may be a list or tuple of integers\" \\\n \" corresponding to the index of the channels,\" \\\n \"or the string 'all' to specify that all channels should be loaded \" \\\n \"(the default behavior is all)\"\n\n message += '\\nGot Type: %r' % type(channels)\n raise TypeError(message)\n\n self.channels_of_interest = channels", "def set_counter_channels(self, channel_list=None):\n\n if channel_list is None:\n return self.get_counter_channels()\n\n # Sanity check:\n all_channels = self._get_all_channels()\n if not set(channel_list).issubset(set(all_channels)):\n # self.log.error('set_counter_channels(): requested list of channels is invalid: '\n # 'some channels are not present on the device.'\n # 'requested list: {0} \\n'\n # 'available channels: {1}'\n # ''.format(channel_list, all_channels))\n return self.get_counter_channels()\n\n # Apply changes to internal variable self._channel_list\n self._channel_list = channel_list\n # Sort channel numbers, such that channel order does not depend\n # on order of numbers in the config file\n self._channel_list.sort()\n\n return self.get_counter_channels()", "def set_channels_for_hdf5(self, channels=range(1,9)):\n # JOSH: proposed changes for new IOC\n self.hdf5.num_extra_dims.put(0)\n # does the next line mess up the new IOC?\n # yes\n # self.cam.num_channels.put(self.get_channel_count())\n\n # # The number of channel\n # for n in channels:\n # getattr(self, f'channel{n}').rois.read_attrs = ['roi{:02}'.format(j) for j in range(1,17)]\n # self.hdf5.num_extra_dims.put(0)\n # self.settings.num_channels.put(len(channels))\n # #self.settings.num_channels.put(8)", "def start_channels(self, channels=(1,)):\n assert isinstance(channels, tuple), \"Argument must be a tuple\"\n\n # iterate through all the channels\n for i in range(1, 9):\n # select channel\n self.inst.write(\"CH {}\".format(i))\n # turn on/off selected channel\n self.inst.write(\"OUT {}\".format(int(i in channels)))", "def _create_text_channel(ctx_data, bot: Bot):\n return bot.get_channel(int(ctx_data[\"channel_id\"]))", "def SetLcmText(self, row, message):\n row_number = Lcm2004._LCM_ROW[row]\n\n self._servo.whale_lcm_row = row_number\n self._servo.whale_lcm_text = message", "async def on_channel_update(self, before, after):", "def update_channel(self, channel):", "def credits_unique_texts(self, credits_unique_texts):\n\n self._credits_unique_texts = credits_unique_texts", "def update_channel(self, _ch):\n self.update(DB_CHANNELS_TABLE+'_editable', (\n _ch['enabled'],\n _ch['display_number'],\n _ch['display_name'],\n _ch['group_tag'],\n _ch['thumbnail'],\n str(_ch['thumbnail_size']),\n _ch['namespace'],\n _ch['instance'],\n _ch['uid']\n ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses command line tools to filter trio VCF file and add PS tag
def filter_trio_vcf(trio_vcf, workdir, sample_name):
    trio_vcf_basename = os.path.basename(trio_vcf)
    if trio_vcf_basename.endswith('.vcf'):
        offset = -4
    elif trio_vcf_basename.endswith('.vcf.gz'):
        offset = -7
    else:
        return
    tmp_header = workdir + '/tmp_header.vcf'
    tmp_variants = workdir + '/tmp_variants.vcf'
    tmp_reheadered = workdir + '/tmp_reheadered.vcf'
    trio_filtered_het_phased_vcf = workdir + '/' + trio_vcf_basename[:offset] + '.filtered.het.phased.pstag.vcf'
    trio_filtered_het_phased_zipped_vcf = trio_filtered_het_phased_vcf + '.gz'

    command_get_header = ['bcftools', 'view', '-h', trio_vcf, '>', tmp_header]
    command_modify_header = 'sed -i \'5i##FORMAT=<ID=PS,Number=1,Type=Integer,Description=\"ID of Phase Set for Variant\">\' ' + str(tmp_header)
    command_get_variants = ['bcftools', 'view', '-H', trio_vcf, '>', tmp_variants]
    command_reheader = ['cat', tmp_header, tmp_variants, '>', tmp_reheadered]
    command_zip = ['bgzip', trio_filtered_het_phased_vcf]
    command_index = ['tabix', trio_filtered_het_phased_zipped_vcf]
    command_clean = ['rm', workdir + '/tmp*']

    logging.info(' -> Adding PS FORMAT to header')
    run(' '.join(command_get_header), shell=True, check=True, executable='/bin/bash')
    run(command_modify_header, shell=True, check=True, executable='/bin/bash')
    run(' '.join(command_get_variants), shell=True, check=True, executable='/bin/bash')
    run(' '.join(command_reheader), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Write filtered, phased and heterozygous variants to {0}'.format(trio_filtered_het_phased_vcf))
    get_filtered_phased_het_trio_variants(tmp_reheadered, trio_filtered_het_phased_vcf, sample_name)

    logging.info(' -> Compress VCF file')
    run(' '.join(command_zip), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Index VCF file')
    run(' '.join(command_index), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Clean temporary files')
    run(' '.join(command_clean), shell=True, check=True, executable='/bin/bash')

    return trio_filtered_het_phased_zipped_vcf
[ "def filter_pfcp(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif(len(x)>0):\r\n\t\t\ttmp_list.append( 'pfcp.seqno == ' + x )\r\n\r\n\tif(len(tmp_list)<=0):\r\n\t\tprint(\"imsi %s not found in pfcp\" %imsi);\r\n\t\treturn \"\"\r\n\r\n\t\"\"\"\r\n\t2. search pfcp.seid by pfcp.seqno\r\n\t\"\"\"\t\r\n\tfilter_pfcp = \"||\".join(tmp_list)\r\n\t#print(\"filter_pfcp= \",filter_pfcp)\r\n\r\n\tfilter_patten = '\\\"' + filter_pfcp + '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seid'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\t#print(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_set = set(subprocess.getoutput( cmd ).replace('\\n',',').split(\",\"))\r\n\ttmp_set.discard('0x0000000000000000')\r\n\ttmp_set.discard('')\r\n\t\r\n\tset_pfcp_seid = set()\r\n\tfor x in tmp_set:\r\n\t\tset_pfcp_seid = set_pfcp_seid | { 'pfcp.seid==' + x }\r\n\r\n\treturn \"||\".join( set_pfcp_seid )", "def main(argv): \n \n # Set defaults for the arguments\n config_xml = source = source_type = vo_name = \"\"\n skip_disabled = 'yes'\n \n try:\n opts, args = getopt.getopt(argv, \"hx:s:t:v:d:\", [\"help\"])\n except getopt.GetoptError:\n print(\"Unrecognized or incomplete input arguments.\")\n print(USAGE)\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print(USAGE)\n sys.exit()\n else:\n if opt == '-x':\n config_xml = arg\n elif opt == '-s': \n source = arg\n elif opt == '-t':\n source_type = arg\n elif opt == '-v':\n vo_name = arg #TODO do we want to accept a list of VOs?\n elif opt == '-d':\n skip_disabled = arg\n else:\n print(\"Unrecognized input arguments.\")\n print(USAGE)\n sys.exit(2)\n \n # Validate args\n err_msg = \"\"\n if config_xml == \"\":\n err_msg += \"No configuration file was provided.\\n\"\n else: \n if not os.path.isfile(config_xml):\n err_msg += \"Config file '%s' does not exist.\\n\" % config_xml\n if source == '' or source_type == '':\n err_msg += \"Source and source type must be defined.\\n\" \n if err_msg:\n print(err_msg)\n print(USAGE)\n sys.exit(2)\n\n if skip_disabled.lower() != 'yes' and skip_disabled.lower() != 'no':\n print(\"Skip disabled argument must be 'yes' or 'no'.\")\n print(USAGE)\n sys.exit(2) \n if skip_disabled == 'yes':\n skip_disabled = True\n else:\n skip_disabled = False\n \n # Find new entries \n new_entries = find_new_entries_in_infosys(config_xml, source, source_type, skip_disabled, vo_name)\n \n # Format output\n datestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %M:%S\")\n output = \"\\nThis file contains all new entries published in the information system that are not identifiable in \" \\\n \"the config file. 
They are formatted to be pasted directly into the config file.\\n\"\n output += \"Script run on : %s \\n\" % datestamp\n output += \"Number of new entries : %i\\n\\n\" % len(new_entries)\n \n # Create formatted xml output\n if len(new_entries) > 0:\n \n # Get list of schedd\n try:\n # Find all config entries not disabled\n config_dom = minidom.parse(config_xml)\n schedds = infosys_lib.parse_factory_schedds(config_dom)\n except: \n print(\"Error parsing the config file '%s' for the schedds, exiting the tool.\" % config_xml)\n sys.exit(2) \n \n for entry in new_entries:\n # Pick a random schedd to assign to this entry TODO - need to be able to assign to a specific schedd?\n random.shuffle(schedds)\n output += infosys_lib.generate_entry_xml(entry, schedds[0]) \n else:\n output = \"No new entries were found.\\n\" \n\n # Output results\n print(output)", "def runETQL(vcfFile, pheFile, outputName = None):\n if outputName is not None:\n os.system(\"fastQTL --vcf {vcfFile} --bed phenotypes.bed.gz \\\n --region 22:17000000-18000000 --out {outName}\".format(\n vcfFile = vcfFile, pheFile = pheFile, outName = outputName)\n\n\ndef getVCF(id):\n \"\"\"Gets the name of the vcf file\n corresponding to the \n > getVCF(\"SRR1202488\")\n \"SRR1202488.cleaned_1.vcf\"\n \"\"\"\n return id + \".cleaned_1.vcf\"", "def main():\r\n #We check if the file ends in .vm else its an error\r\n if len(sys.argv) != 2 or sys.argv[1][-3:] != \".vm\":\r\n badusage()\r\n #We get the filename\r\n filename = str(sys.argv[1])\r\n #Create the parser\r\n parser = Parser(filename)\r\n #We create the codewriter without the .vm extention\r\n codewriter = Codewriter(filename[0:-3])\r\n #Start reading checking if it has more commands advancing and seeing the command\r\n while parser.hasMoreCommands():\r\n parser.advance()\r\n #If it is a arithmetic command we write it or a push or pop command\r\n if parser.command_type() == \"C_ARITHMETIC\":\r\n command = parser.arg1()\r\n codewriter.writeArithmetic(command)\r\n elif parser.command_type() == \"C_PUSH\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_PUSH\", m_segment, index)\r\n elif parser.command_type() == \"C_POP\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_POP\", m_segment, index)\r\n #Then we close the codewriter and parser\r\n del codewriter\r\n del parser", "def filter_pfcp_ngap(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif(len(x)>0):\r\n\t\t\ttmp_list.append( 'pfcp.seqno == ' + x )\r\n\r\n\tif(len(tmp_list)<=0):\r\n\t\tprint(\"imsi %s not found in pfcp\" %imsi);\r\n\t\treturn \"\"\r\n\r\n\t\"\"\"\r\n\t2. 
search pfcp.teid used in ngap by pfcp.seqno\r\n\t\"\"\"\t\r\n\tfilter_pfcp = \"||\".join(tmp_list)\r\n\t#print(\"filter_pfcp= \",filter_pfcp)\r\n\r\n\tfilter_patten = '\\\"' + filter_pfcp + '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.f_teid.teid'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\t#print(\"\\n\",cmd,\"\\n\")\r\n\r\n\ttmp_list = []\r\n\tfor x in set(subprocess.getoutput( cmd ).split(\"\\n\")):\r\n\t\tif len(x) > 0:\r\n\t\t\ttmp_list.append( 'ngap.gTP_TEID == ' + teid2str(x) )\r\n\r\n\t\"\"\"\r\n\t3. search ngap id by teid\r\n\t\"\"\"\t\r\n\tif( len(tmp_list)<1 ):\r\n\t\tprint(\"no gtp teid found in pfcp.\");\r\n\t\treturn filter_pfcp\r\n\t\r\n\tprint(\"Searching in ngap...\");\r\n\tfilter_ngap = '\\\"' + \" || \".join(tmp_list) + '\\\"'\r\n\t#print(filter_ngap)\r\n\r\n\tfilter_patten = filter_ngap\r\n\tTfield = ' -Tfields -e ngap.RAN_UE_NGAP_ID -e ngap.AMF_UE_NGAP_ID'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\tset_ranid = set()\r\n\tset_amfid = set()\r\n\ttmp_set = set(subprocess.getoutput( cmd ).split('\\n'))\r\n\ttmp_set.discard('')\r\n\t\r\n\tif(len(tmp_set)==0):\r\n\t\treturn \"\"\r\n\t\r\n\tfor x in tmp_set:\r\n\t\ty = x.split('\\t')\r\n\t\tset_ranid = set_ranid | {y[0]}\r\n\t\tset_amfid = set_amfid | {y[1]}\r\n\r\n\tset_ranid.discard('')\r\n\tset_amfid.discard('')\r\n\t\r\n\tif( len(set_ranid)>0 ):\r\n\t\ttmp_set = set()\r\n\t\tfor x in set_ranid:\r\n\t\t\ttmp_set = tmp_set | { 'ngap.RAN_UE_NGAP_ID=='+x }\r\n\t\tset_ranid = tmp_set\r\n\r\n\tif( len(set_amfid)>0 ):\r\n\t\ttmp_set = set()\r\n\t\tfor x in set_amfid:\r\n\t\t\ttmp_set = tmp_set | { 'ngap.AMF_UE_NGAP_ID=='+x }\r\n\t\tset_amfid = tmp_set\r\n\t\r\n\ttmp_set = set_ranid | set_amfid\r\n\ttmp_set.discard('')\r\n\treturn \"||\".join( tmp_set ) +\"||\"+filter_pfcp", "def main():\n parser = OptionParser()\n parser.add_option('-p', '--population', action='append',\n dest=\"populations\", help='population_files')\n parser.add_option('-a', '--arguments-selection-pipelines',\n dest=\"extra_args\", help=('Arguments to the selection'\n 'pipeline script'))\n parser.add_option('-l', '--log-file', dest=\"log_file\", help=\"Log file\")\n parser.add_option('-i', '--vcf-input-file', dest=\"vcf_input\",\n help=\"VCF Input File\")\n parser.add_option('-c', '--chromosome', dest=\"chromosome\",\n help=(\"Chromosome label doesn't actually have to\"\n \"correspond to the real chromosome but is required\"\n \" to determine what output files to make\"))\n parser.add_option('--config-file', dest='config_file',\n help='Configuration File')\n parser.add_option('--fst-window-size', dest=\"fst_window_size\",\n help=\"FST window size (kb)\")\n parser.add_option('--fst-window-step', dest=\"fst_window_step\",\n help=\"FST window step size (kb)\")\n parser.add_option('--no-clean-up', dest=\"no_clean_up\",\n action=\"store_true\",\n help=\"Do not clean up intermediate datafiles\")\n parser.add_option('--cores', dest=\"cores\", help=(\"Overrides number of \"\n \"cores avaliable as provided in the config file\"))\n parser.add_option('--no-rsb',dest=\"no_rsb\", action=\"store_true\",\n help=\"Do not calculate RSB\")\n (options, args) = parser.parse_args()\n print(options.extra_args)\n assert options.vcf_input is not None, \\\n \"no VCF file has been specified as input\"\n assert os.path.isfile(options.vcf_input), \\\n \"Cannot 
locate vcf file at path = {0)\".format(options.vcf_input)\n assert options.chromosome is not None, \\\n \"no chromosome has been specified to the script\"\n assert options.populations is not None and \\\n len(options.populations) >= 2, \\\n \"At least two population files are required\"\n if options.config_file is None:\n options.config_file = 'defaults.cfg'\n if not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n elif not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n config = parse_config(options)\n if options.log_file is None:\n options.log_file = 'multi_population.log'\n logging.basicConfig(format='%(asctime)s %(message)s',\n filename=options.log_file, filemode='w',\n level=logging.INFO)\n if not (check_executables_and_scripts_exist(options, config)):\n sys.exit(CANNOT_FIND_EXECUTABLE)\n if options.no_clean_up is None:\n options.clean_up_files = False\n if options.fst_window_step is None:\n options.fst_window_step = str(1000)\n else:\n options.fst_window_step = str(\n float(options.fst_window_step) * 1e3)\n if options.fst_window_size is None:\n options.fst_window_size = str(1000)\n else:\n options.fst_window_size = str(\n float(options.fst_window_size) * 1e3)\n if options.no_rsb is None:\n options.no_rsb = False\n if options.cores is not None:\n config['system']['cores_avaliable'] = options.cores\n set_environment(config['environment'])\n options.vcf_input = os.path.abspath(options.vcf_input)\n populations = get_populations(options.populations)\n populations = OrderedDict(sorted(populations.items(), key=lambda t: t[0]))\n fst_vcf(options.vcf_input, config, options, populations)\n output_vcfs = subset_vcf(options.vcf_input, config, populations)\n run_selection_pipeline(output_vcfs, options, populations, config)\n # TODO move FST to here on filtered dataset\n if not (options.no_rsb):\n rsb(config, options, populations)\n if not os.path.exists('logs'):\n os.mkdir('logs')\n os.rename(options.log_file, 'logs/' + options.log_file)\n if not options.no_clean_up:\n keep = [os.path.basename(options.vcf_input),os.path.basename(options.config_file)]\n keep.extend(options.populations)\n clean_folder('.', keep=keep)\n logger.info(\"Multi_population Complete\")\n logger.info(\"Goodbye :\")\n print(\"Multi-population selection pipeline completed successfully !:)\")", "def parse_haplotype_to_vcf(haplotype_to_vcf): \n haplotype_to_vcf.add_argument(\n \"-haplotypeFormat\", default = 'iupac',\n help = \"report which format (numeric vs. 
iupac) the haplotype file is in.\\n\" \n \"Default = iupac\", \n metavar = '')", "def writeVCFFromBedpe(inputFile, outputFile):\n with open(inputFile, 'r') as inpt, open(outputFile,'w') as otpt:\n counter = -1\n printVCFHeader(otpt)\n for line in inpt:\n counter+=1\n if counter == 0:\n #header\n continue\n tokens = line.split()\n precise=tokens[11].find(\"SR\")\n support=\"SUPPORT=\" + tokens[16] + \";PE=\" + tokens[19] + \";SR=\" + tokens[20] + \";\"\n chr1 = tokens[0]\n chr1Start = tokens[1]\n chr1End = tokens[2]\n chr2Start = tokens[4]\n chr2End = tokens[5]\n name = tokens[10]\n bnd = tokens[17]\n CM = tokens[18]\n cl_support = tokens[21]\n cipos = str(int(chr1End)-int(chr1Start))\n svlen = str(abs(int(chr2End) - int(chr1Start)))\n covInfo = float(tokens[25])\n\n if precise == -1:\n precise = \"IMPRECISE\"\n else:\n precise=\"PRECISE\"\n\n chr2=\"\"\n if chr1 != chr2:\n chr2=\"CHR2=\"+ tokens[3] + \";\"\n covRejInfo = \"\"\n if covInfo > 0 and CM == \"INS_halfRF\":\n covRejInfo= \";CR=TD_rejected_due_to_relative_coverage_\" + str(covInfo)\n elif covInfo > 0 and CM == \"INS_halfFR\":\n covRejInfo= \";CR=DEL_rejected_due_to_relative_coverage_\" + str(covInfo)\n elif covInfo > 0:\n covRejInfo= \";CINFO=\" + str(covInfo)\n\n if name == \"BND\":\n GROUPID = \"GROUPID=\" + tokens[24] + \";\"\n if CM.startswith(\"INS_C\"):\n CM = \"Translocation\"\n elif CM.startswith(\"INS_half\"):\n CM = \"TranslocationOrDuplication\"\n elif CM.startswith(\"INS\") or CM.startswith(\"TD\"):\n CM = \"Duplication\"\n elif CM.startswith(\"INV\"):\n CM = \"Inversion\"\n elif CM.startswith(\"DN_INS\"):\n CM = \"DeNovoInsertion\"\n\n if tokens[22] != \".\" and tokens[23] != \".\":\n BNDAlt1, BNDAlt2 = tokens[22].replace(\"p\", tokens[3] + \":\" + chr2End),\\\n tokens[23].replace(\"p\", chr1 + \":\" + chr1Start)\n else:\n BNDAlt1, BNDAlt2 = \".\", \".\"\n \n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start, counter, \"N\", BNDAlt1, \".\",\"PASS\", \"SVTYPE=BND;CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;PROBTYPE=\" + CM + \";MATEID=\" + str(counter + 1) + \";\" + GROUPID + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2End, counter + 1, \"N\", BNDAlt2, \".\",\"PASS\", \"SVTYPE=BND;CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;PROBTYPE=\" + CM + \";MATEID=\" + str(counter) + \";\" + GROUPID + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n counter+= 1\n elif name == \"DN_INS\":\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<INS>\",\".\",\"PASS\", \"SVTYPE=INS;CIPOS=0,\" + cipos + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"DEL\":\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<DEL>\",\".\",\"PASS\", \"SVTYPE=DEL;END=\" + chr2End + \";SVLEN=-\" + svlen + \";CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;\" + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"TD\" or name == \"TD_INV\":\n isinv=\"\"\n svlen = str(abs(int(chr2Start) - int(chr1End)))\n if name==\"TD_INV\":\n isinv=\"ISINV;\"\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1End,counter,\"N\", 
\"<DUP:TANDEM>\",\".\",\"PASS\", \"SVTYPE=DUP;END=\" + chr2Start + \";SVLEN=\" + svlen + \";CIPOS=-\" + cipos + \",0;CIEND=0,\" + cipos + \";\" + isinv + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"INV\":\n ciend = int(chr2End) - int(chr2Start)\n pos = int((int(chr1Start) + int(chr1End))/2.0)\n end = int((int(chr2Start) + int(chr2End))/2.0)\n svlen = str(abs(end - pos))\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, pos, counter,\"N\", \"<INV>\",\".\",\"PASS\", \"SVTYPE=INV;END=\" + str(end) + \";SVLEN=\" + svlen + \";CIPOS=-\" + str(int(int(cipos)/2.0)) +\",\" + str(int(int(cipos)/2.0)) + \";CIEND=-\" + str(int(int(ciend)/2.0)) +\",\" + str(int(int(ciend)/2.0)) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name in [\"INS\",\"INS_I\",\"INS_C_P\",\"INS_C_I_P\"]:\n GROUPID= \"GROUPID=\" + tokens[24] + \";\"\n if name in [\"INS\",\"INS_I\"]:\n field1 = \"DUP\"\n svlen = str(abs(int(chr1End)-int(chr1Start)))\n CM = \"CopyPasteInsertion\"\n else:\n field1 = \"DEL\"\n CM = \"CutPasteInsertion\"\n svlen = \"-\" + str(abs(int(chr1End)-int(chr1Start)))\n cipos = int(chr2End)-int(chr2Start)\n isinv=\"\"\n if name==\"INS_I\":\n isinv=\"ISINV;\"\n \n BNDAlt1, BNDAlt2 = \"N[\" + chr1 + \":\" + chr1Start + \"[\", \"]\" + tokens[3] + \":\" + chr2Start + \"]N\"\n BNDAlt3, BNDAlt4 = \"]\" + tokens[3] + \":\" + chr2Start + \"]N\", \"N[\" + chr1 + \":\" + chr1End + \"[\"\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<\" + field1 + \">\", \".\",\"PASS\", \"SVTYPE=\" + field1 + \";CM=\" + CM + \";END=\" + chr1End + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=-\" + str(cipos) +\",0;\" + GROUPID + isinv + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2Start, counter + 1,\"N\", BNDAlt1,\".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 2) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start, counter + 2,\"N\", BNDAlt2, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 1) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1End, counter + 3,\"N\", BNDAlt3, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 4) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2Start, counter + 4,\"N\", BNDAlt4, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 3) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + 
\":\" + tokens[19] + \":\" + tokens[20])\n counter+= 4\n else:\n print>>stderr, \"Unrecognized SV type\"\n exit(1)", "def annotateVCF(self):\n cwd = os.getcwd()\n if self.__finalVCF:\n self.__ifVerbose(\"Annotating final VCF.\")\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.__finalVCF])\n self.__annotation = self.fOut + \"/\" + self.name +'_annotation.txt'\n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Final_annotation.txt'],\n [self.__parser, self.__annotation, self.name, self.mutationloci])\n if os.path.isfile(self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n elif os.path.isfile(self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n else:\n self.__ifVerbose(\"Use SamTools, GATK, or Freebayes to annotate the final VCF.\")\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_genes.txt\"])\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_summary.html\"])", "def main(method, keyword, source, output):\n # click.echo(\"Replace this message by putting your code into textfilter.__main__.main\")\n # click.echo(\"See click documentation at https://click.palletsprojects.com/\")\n\n print(method, source, keyword, output)\n\n if keyword:\n file = keyword if os.path.isfile(keyword) else None\n else:\n file = None\n\n if os.path.isfile(source):\n with open(source, 'r') as fs:\n source = fs.read()\n\n f = TextFilter(method=method, file=file)\n\n if not file:\n f.add(keyword)\n\n o = f.filter(source, '*')\n print(o)\n\n if output:\n with open(output, 'w') as fp:\n fp.write(o)\n\n return 0", "def cli(input, output, pdk, pdk_options):\n output.write(f\"{80 * '*'}\\n\")\n output.write(\"* Converted using tanner_to_eldo.\\n\")\n output.write(f\"{80 * '*'}\\n\")\n\n if pdk:\n output.write(f\".lib \\\"{pdk}\\\" {pdk_options or ''}\\n\")\n\n # Short 0 node with gnd node\n output.write(\"v_gnd 0 gnd 0\\n\")\n params = []\n\n for line in input.readlines():\n if line[0] == \"*\":\n # Don't process commented lines\n pass\n\n elif \"$\" in line:\n # remove unsupported comments\n line = f\"{line.split('$')[0]}\\n\"\n\n elif \".probe\" in line:\n line = \".option probe\\n.option post\\n\"\n\n elif \".option probe\" in line:\n line = \"\"\n\n elif \".param\" in line:\n params.append(line.split()[1])\n\n # elif line.startswith(\".dc\"):\n # _, sweep_object, 
start, end, n_points = line.split()\n # if sweep_object in params:\n # sweep_object = f\"param {sweep_object}\"\n # step = (float(end) - float(start)) / (float(n_points) - 1)\n # line = f\".dc {sweep_object} {start} {end} {step}\"\n\n output.write(line)", "def generate_repp_command(self, inputfilename):\n ...", "def OnMenuFileVerboseMenu(self, event):\r\n\r\n Terminal.Feed(\"verbose\")\r\n # event.Skip()\r", "def cli(raw_args: Optional[list[str]] = None) -> None:\n if not raw_args:\n raw_args = sys.argv[1:]\n\n parser = configure_argument_parser()\n args = parser.parse_args(raw_args)\n VerbosityConfiguration.set(args)\n CLIAnnotationContext.register(args)\n\n context = get_genomic_context()\n pipeline = CLIAnnotationContext.get_pipeline(context)\n grr = CLIAnnotationContext.get_genomic_resources_repository(context)\n\n if args.output:\n output = args.output\n else:\n output = os.path.basename(args.input).split(\".\")[0] + \"_annotated.vcf\"\n\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n\n\n task_graph = TaskGraph()\n\n task_graph.input_files.append(args.input)\n task_graph.input_files.append(args.pipeline)\n if args.reannotate:\n task_graph.input_files.append(args.reannotate)\n\n if not tabix_index_filename(args.input):\n # annotate(args.input, None, pipeline.get_info(),\n # grr.definition, output, args.reannotate)\n assert grr is not None\n task_graph.create_task(\n \"all_variants_annotate\",\n annotate,\n [args.input, None, pipeline.get_info(),\n grr.definition, output, args.reannotate],\n []\n )\n else:\n with closing(TabixFile(args.input)) as pysam_file:\n regions = produce_regions(pysam_file, args.region_size)\n file_paths = produce_partfile_paths(args.input, regions, args.work_dir)\n region_tasks = []\n for index, (region, file_path) in enumerate(zip(regions, file_paths)):\n assert grr is not None\n region_tasks.append(task_graph.create_task(\n f\"part-{index}\",\n annotate,\n [args.input, region,\n pipeline.get_info(), grr.definition,\n file_path, args.reannotate],\n []\n ))\n\n assert grr is not None\n task_graph.create_task(\n \"combine\",\n combine,\n [args.input, pipeline.get_info(),\n grr.definition, file_paths, output],\n region_tasks\n )\n\n args.task_status_dir = os.path.join(args.work_dir, \".tasks-status\")\n args.log_dir = os.path.join(args.work_dir, \".tasks-log\")\n\n TaskGraphCli.process_graph(task_graph, **vars(args))", "def embedded_pipeline():\n return \"\\n\".join(args['--cmd'])", "def commandEcho(state=bool, filter=\"string\", lineNumbers=bool, addFilter=\"string\"):\n pass", "def createVTKOutput(self, pcfile, outType, prefix):\n import os\n current_env = os.environ.copy()\n pvpythonCMD = current_env[\"pvpythonCMD\"]\n# #running in appImage (isolate PV environment from HEAT's)\n# try:\n# pvpythonCMD = current_env[\"pvpythonCMD\"]\n# #running on dev machine\n# #(it is expected that you have set up env externally, perhaps in dashGUI.py)\n# except:\n# pvpythonCMD = 'pvpython'\n print(\"Spawning PVpython subprocess\")\n log.info(\"Spawning PVpython subprocess\")\n args = [pvpythonCMD, self.rootDir + '/GUIscripts/csv2vtk.py', pcfile, outType, prefix]\n from subprocess import run\n run(args, env=current_env)\n print(\"PVpython subprocess complete\")\n log.info(\"PVpython subprocess complete\")\n return", "def FilterAvcLogs(WS):\n AVC_PATTERN = patt.pattern_avc\n DENIED_PATTERN = patt.pattern_denied\n COMM_PATTERN = patt.pattern_comm\n NAME_PATTERN = patt.pattern_name\n\n comm_list = []\n name_list = []\n scontext_list = []\n temp_avc_file = 
'/tmp/temp_avc_file.txt'\n TAG = 'dump_avc.py'\n\n def AvcLogs(WS):\n \"\"\"FilterAvcLogs :\n filter avc denied\n \"\"\"\n try:\n sys_log_buf = open(WS.file_event_logs, 'rU')\n except IOError as err:\n err_string = 'failed to read : ' + WS.file_system_logs + \\\n 'error : ' + err\n util.PLOGE(TAG,err_string)\n return False\n\n try:\n f_tmp_avc = open(temp_avc_file,'w+')\n except IOError:\n util.PLOGE(TAG,'failed to create : ' + temp_avc_file)\n return False\n\n for line in sys_log_buf:\n if not AVC_PATTERN in line:\n continue\n if not DENIED_PATTERN in line:\n continue\n if COMM_PATTERN in line or NAME_PATTERN in line:\n f_tmp_avc.write(line)\n split_list = line.split()\n for word in split_list:\n data = []\n if 'comm=' in word or 'name=' in word:\n data = word.split('=')\n data[1] = data[1].strip('\"')\n if data[0] == 'comm':\n if data[1] not in comm_list:\n comm_list.append(data[1])\n elif data[0] == 'name':\n if data[1] not in name_list:\n name_list.append(data[1])\n\n if 'scontext=' in word:\n data = word.split(':')\n if data[2] not in scontext_list:\n scontext_list.append(data[2])\n\n sys_log_buf.close()\n f_tmp_avc.close()\n return comm_list, scontext_list\n\n\n def WriteToFile(WS):\n \"\"\"FilterAvcLogs\n Dump filtered avc logs in to file\n \"\"\"\n dash_line = '-' * 90 + '\\n'\n if os.path.isfile(WS.file_avc_logs):\n os.remove(WS.file_avc_logs)\n f = open(WS.file_avc_logs, 'a+')\n\n f.write(dash_line)\n f.write('\\t' + 'comm' + '\\n')\n f.write(dash_line)\n for cmd in comm_list:\n f.write(cmd + '\\n')\n f.write('\\n')\n\n f.write(dash_line)\n f.write('\\t' + 'name' + '\\n')\n f.write(dash_line)\n for cmd in name_list:\n f.write(cmd + '\\n')\n f.write('\\n')\n\n\n f.write(dash_line)\n f.write('\\t' + 'scontext' + '\\n')\n f.write(dash_line)\n for scontext in scontext_list:\n f.write(scontext + '\\n')\n f.write('\\n')\n\n f.write(dash_line)\n f.write('\\t' + 'Logs' + '\\n')\n f.write(dash_line)\n\n avc_count = 1\n for cmd in comm_list:\n f.write(' \\n')\n f.write(cmd + ' \\n')\n f.write(dash_line)\n\n logcat_buf = open(temp_avc_file, 'rU')\n\n for line in logcat_buf:\n if AVC_PATTERN in line and DENIED_PATTERN in line and COMM_PATTERN in line:\n if cmd in line:\n f.write(line)\n\n f.write(' \\n')\n logcat_buf.close()\n avc_count += 1\n\n f.close()\n avc_summary = str(avc_count) + ' : type of avc logs found'\n util.PLOGV(TAG,avc_summary)\n\n # Start avc filter\n AvcLogs(WS)\n WriteToFile(WS)", "def cli(output_path, normal_bam,tumor_bam, comparison, genome_reference_fasta, excludable_regions):\n tumor_id = comparison.split(\"_vs_\")[0]\n normal_id = comparison.split(\"_vs_\")[1]\n sample_tsv = pd.DataFrame.from_dict({tumor_id: 'tumor',\n normal_id: 'control'}, orient='index')\n sample_tsv.to_csv(os.path.join(output_path, 'sample.tsv'), sep='\\t',header=False)\n\n sv_list = ['bnd', 'del', 'dup', 'ins', 'inv']\n\n for sv in sv_list:\n call_file = os.path.join(output_path,comparison+\"_\"+sv+\".bcf\")\n subprocess.run(\"source activate sv_delly_calling && delly call -t \"+sv.upper()+\" -q 20 -x \" + excludable_regions\n +' -o ' + call_file + \" -g \" + genome_reference_fasta + ' ' + tumor_bam + ' ' + normal_bam,\n shell=True, executable='/bin/bash')\n filter_file = os.path.join(output_path, comparison + \"_\" + sv + \"_filtered.bcf\")\n sample_tsv = os.path.join(output_path, 'sample.tsv')\n if sv == 'bnd':\n subprocess.run(\"source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o \"+\n filter_file+\" -s \"+sample_tsv+\" \"+call_file, shell=True, 
executable='/bin/bash')\n elif (sv == 'del') or (sv == 'ins'):\n subprocess.run(\"source activate sv_delly_calling && delly filter -p -f somatic -m 50 -r 0.75 -a 0.1 -o \"+\n filter_file+\" -s \"+sample_tsv+\" \"+call_file, shell=True, executable='/bin/bash')\n elif sv == 'inv':\n subprocess.run(\"source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o \"+\n filter_file+\" -s \"+sample_tsv+\" \"+call_file, shell=True, executable='/bin/bash')\n elif sv == 'dup':\n subprocess.run(\"source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o \"+\n filter_file+\" -s \"+sample_tsv+\" \"+call_file, shell=True, executable='/bin/bash')\n else:\n \"wrong sv, write: bnd, inv, del, ins, dup\"\n call_vcf = os.path.join(output_path,comparison+\"_\"+sv+\"_delly.vcf\")\n subprocess.run(\"source activate sv_delly_calling && bcftools view \" +filter_file+\" > \"+call_vcf,\n shell=True, executable='/bin/bash')\n bedpe = os.path.join(output_path,comparison+\"_\"+sv+\"_delly.bedpe\")\n subprocess.run(\"source activate sv_delly_calling && svtools vcftobedpe -i \"+call_vcf+\" -o \"+bedpe,\n shell=True, executable='/bin/bash')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge filtered trio VCF and rephased 10x VCF
def merge_trio_10X_vcf(tenx_rephased, trio_filtered, workdir):
    tenx_trio_merged_vcf = workdir + '/10X_and_trio_merged.vcf'
    tenx_trio_merged_sorted_vcf = tenx_trio_merged_vcf[:-4] + '.sorted.vcf'
    tenx_trio_merged_sorted_zipped_vcf = tenx_trio_merged_sorted_vcf + '.gz'

    command_merge = ['bcftools', 'concat', '-a', '-d', 'all', tenx_rephased, trio_filtered, '>', tenx_trio_merged_vcf]
    command_sort = ['bcftools', 'sort', tenx_trio_merged_vcf, '>', tenx_trio_merged_sorted_vcf]
    command_zip = ['bgzip', tenx_trio_merged_sorted_vcf]
    command_index = ['tabix', tenx_trio_merged_sorted_zipped_vcf]
    command_rm = ['rm', tenx_trio_merged_vcf]

    logging.info(' -> Merge 10X and trio VCF files to {0}'.format(tenx_trio_merged_vcf))
    run(' '.join(command_merge), shell=True, check=False, executable='/bin/bash')

    logging.info(' -> Sort merged VCF file')
    run(' '.join(command_sort), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Compress VCF file')
    run(' '.join(command_zip), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Index VCF file')
    run(' '.join(command_index), shell=True, check=True, executable='/bin/bash')

    logging.info(' -> Remove intermediate VCF file')
    run(' '.join(command_rm), shell=True, check=True, executable='/bin/bash')
[ "def filter_trio_vcf(trio_vcf, workdir, sample_name):\n trio_vcf_basename = os.path.basename(trio_vcf)\n if trio_vcf_basename.endswith('.vcf'):\n offset = -4\n elif trio_vcf_basename.endswith('.vcf.gz'):\n offset = -7\n else:\n return\n tmp_header = workdir + '/tmp_header.vcf'\n tmp_variants = workdir + '/tmp_variants.vcf'\n tmp_reheadered = workdir + '/tmp_reheadered.vcf'\n trio_filtered_het_phased_vcf = workdir + '/' + trio_vcf_basename[:offset] + '.filtered.het.phased.pstag.vcf'\n trio_filtered_het_phased_zipped_vcf = trio_filtered_het_phased_vcf + '.gz'\n \n command_get_header = ['bcftools', 'view', '-h', trio_vcf, '>', tmp_header]\n command_modify_header = 'sed -i \\'5i##FORMAT=<ID=PS,Number=1,Type=Integer,Description=\\\"ID of Phase Set for Variant\\\">\\' ' + str(tmp_header)\n command_get_variants = ['bcftools', 'view', '-H', trio_vcf, '>', tmp_variants]\n command_reheader = ['cat', tmp_header, tmp_variants, '>', tmp_reheadered]\n command_zip = ['bgzip', trio_filtered_het_phased_vcf]\n command_index = ['tabix', trio_filtered_het_phased_zipped_vcf]\n command_clean = ['rm', workdir + '/tmp*']\n \n logging.info(' -> Adding PS FORMAT to header')\n run(' '.join(command_get_header), shell=True, check=True, executable='/bin/bash')\n run(command_modify_header, shell=True, check=True, executable='/bin/bash')\n run(' '.join(command_get_variants), shell=True, check=True, executable='/bin/bash')\n run(' '.join(command_reheader), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Write filtered, phased and heterozygous variants to {0}'.format(trio_filtered_het_phased_vcf))\n get_filtered_phased_het_trio_variants(tmp_reheadered, trio_filtered_het_phased_vcf, sample_name)\n \n logging.info(' -> Compress VCF file')\n run(' '.join(command_zip), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Index VCF file')\n run(' '.join(command_index), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Clean temporary files')\n run(' '.join(command_clean), shell=True, check=True, executable='/bin/bash')\n \n return trio_filtered_het_phased_zipped_vcf", "def merge_tmp_vcfs():\n start = time.time()\n header = False\n # Loop through all chromomsomes\n for contig in contig_list:\n if not header:\n os.system('cat SMuRF_tmp/{}_SMuRF_reannotate.vcf > {}_reannotate.vcf'.format(contig, vcf_name))\n header = True\n else:\n os.system('grep -v \\'^#\\' SMuRF_tmp/{}_SMuRF_reannotate.vcf >> {}_reannotate.vcf'.format(contig, vcf_name))\n os.system(\"grep -P '^#|\\s+PASS\\s+' \"+vcf_name+\"_reannotate.vcf > \"+vcf_name+\"_reannotate_filtered.vcf\")\n time.sleep(5)\n os.system(\"rm -rf SMuRF_tmp\")", "def collectfeature(self):\n\n vread = [[] for _ in range(len(self.__vcindel))]\n\n # Only reads within the affected region will be associated with complex indel.\n for i, cindel in enumerate(self.__vcindel):\n brkpntl = cindel.brkpntl\n brkpntr = cindel.brkpntr\n marginl = max(brkpntl - self.__threshold, 0)\n marginr = min(brkpntr + self.__threshold, 1000000)\n\n for read in self.__vread:\n pos1 = read.pos\n pos2 = read.pnext\n if marginl <= pos1 and pos2 <= marginr:\n vread[i].append(read)\n\n label = [0 for _ in range(len(self.__vcindel))]\n feature = [[0, 0, 0, 0] for _ in range(len(self.__vcindel))]\n\n # Collect features and label for each complex indel.\n for i, cindel in enumerate(self.__vcindel):\n label[i] = cindel.label\n brkpntl = cindel.brkpntl\n brkpntr = cindel.brkpntr\n marginl = max(brkpntl - self.__threshold, 0)\n marginr = min(brkpntr + 
self.__threshold, 1000000)\n\n for read in vread[i]:\n pos1 = read.pos\n pos2 = read.pnext\n\n # Abnormal insert size\n if self.__insertsize-3*self.__stdvar < abs(read.pos - read.pnext) < self.__insertsize+3*self.__stdvar:\n feature[i][0] += 1\n\n if brkpntl < pos1 and brkpntl < pos1 + self.__readlength < brkpntr:\n # Read depth\n feature[i][1] += 1\n\n # Fully mapped reads\n if read.mapq == 60 and read.mate.mapq == 60:\n feature[i][2] += 1\n\n # Incomplete mapped reads\n if (brkpntl < pos1 < brkpntr < pos1 + self.__readlength or\n pos1 < brkpntl < pos1 + self.__readlength < brkpntr < pos2) and read.mate.mapq == 60:\n feature[i][3] += 1\n if (pos2 < brkpntl < pos2 + self.__readlength < brkpntr or\n pos1 < brkpntl < pos2 < brkpntr < pos2 + self.__readlength) and read.mapq == 60:\n feature[i][3] += 1\n\n # TODO(heyongzhang@outlook.com):Affected reads\n\n # ReadDepth is affected significantly by the region length, so calculate the density instead.\n feature[i][1] = int(feature[i][1] / (marginr - marginl) * 10000)\n\n return [label, feature]", "def project_from_tsvd_patches(collection, shape, with_f0=False, baseline_smoothness=_baseline_smoothness_):\n out_data = np.zeros(shape,dtype=_dtype_)\n if with_f0:\n out_f0 = np.zeros_like(out_data)\n #counts = np.zeros(shape[1:], np.int)\n counts = np.zeros(shape,_dtype_) # candidate for crossfade\n\n #tslice = (slice(None),)\n i = 0\n #print('Doing inverse transform', flush=True)\n tqdm_desc = 'Doing inverse transform ' + ('with baseline' if with_f0 else '')\n for signals,filters,center,sq, w_sh in tqdm(collection, desc=tqdm_desc):\n L = w_sh[0]\n crossfade_coefs = tanh_step(arange(L), L).astype(_dtype_)[:,None,None]\n #crossfade_coefs = np.ones(L)[:,None,None]\n counts[sq] += crossfade_coefs\n\n rec = (signals.T@filters).reshape(w_sh)\n out_data[tuple(sq)] += (rec + center.reshape(w_sh[1:]))*crossfade_coefs\n\n if with_f0:\n bs = np.array([simple_baseline(v,plow=50,smooth=baseline_smoothness,ns=mad_std(v)) for v in signals])\n if any(isnan(bs)):\n print('Nan in ', sq)\n #return (signals, filters, center,sq,w_sh)\n rec_b = (bs.T@filters).reshape(w_sh)\n out_f0[tuple(sq)] += (rec_b + center.reshape(w_sh[1:]))*crossfade_coefs\n\n out_data /= (1e-12 + counts)\n out_data *= (counts > 1e-12)\n if with_f0:\n out_f0 /= (1e-12 + counts)\n out_f0 *= (counts > 1e-12)\n return out_data, out_f0\n return out_data", "def rewrite_trk_file_with_ED_vs_FL_scalars(trk_file_orig,trk_file_new, scalar_type):\t\n\timport nibabel as nib\n\timport numpy as np\n\tfrom nipype.interfaces.cmtk.cmtk import length as fib_length\n\tfibres_orig, hdr_orig = nib.trackvis.read(trk_file_orig, False)\n\thdr_new = hdr_orig.copy()\n\toutstreams = []\n\tfor f in fibres_orig:\n\t\t# Calculate fiber lengths\t\n\t\tFL = fib_length(f[0]) \n\t\t# Calculate Euclidean distance between fibre start and endpoints\n\t\tED = np.sqrt(np.square(f[0][0][0]-f[0][-1][0])+np.square(f[0][0][1]-f[0][-1][1])+np.square(f[0][0][2]-f[0][-1][2]))\n\t\t# Fiber length minus Euclidean distance:\n\t\tFLsubED = np.subtract(FL, ED)\n\t\tED_as_percent_of_FL = np.divide(100,FL)*ED\n\t\tif scalar_type == 'FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FL\n\t\t\tproperty_array = np.array([FL], dtype='float32')\n\t\tif scalar_type == 'ED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED\n\t\t\tproperty_array = np.array([ED], dtype='float32')\n\t\tif scalar_type == 'FLsubED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FLsubED\n\t\t\tproperty_array = np.array([FLsubED], 
dtype='float32')\n\t\tif scalar_type == 'ED_as_percent_of_FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED_as_percent_of_FL\n\t\t\tproperty_array = np.array([ED_as_percent_of_FL], dtype='float32')\n\t\tnew_tuple=tuple([f[0], scalar_array,property_array])\t\t\t\t\n\t\toutstreams.append(new_tuple)\n\tn_fib_out = len(outstreams)\n\thdr_new['n_count'] = n_fib_out\t\n\thdr_new['n_scalars'] = np.array(1, dtype='int16')\t\t\t\t#hdr_new['scalar_name'] = np.array(['JG_COLOURS', '', '', '', '', '', '', '', '', ''],dtype='|S20')\t\t\n\thdr_new['scalar_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['n_properties'] = np.array(1, dtype='int16')\n#\thdr_new['property_name'] = np.array(['JG_PROPERTY', '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['property_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\tnib.trackvis.write(trk_file_new, outstreams, hdr_new)", "def writeVCFFromBedpe(inputFile, outputFile):\n with open(inputFile, 'r') as inpt, open(outputFile,'w') as otpt:\n counter = -1\n printVCFHeader(otpt)\n for line in inpt:\n counter+=1\n if counter == 0:\n #header\n continue\n tokens = line.split()\n precise=tokens[11].find(\"SR\")\n support=\"SUPPORT=\" + tokens[16] + \";PE=\" + tokens[19] + \";SR=\" + tokens[20] + \";\"\n chr1 = tokens[0]\n chr1Start = tokens[1]\n chr1End = tokens[2]\n chr2Start = tokens[4]\n chr2End = tokens[5]\n name = tokens[10]\n bnd = tokens[17]\n CM = tokens[18]\n cl_support = tokens[21]\n cipos = str(int(chr1End)-int(chr1Start))\n svlen = str(abs(int(chr2End) - int(chr1Start)))\n covInfo = float(tokens[25])\n\n if precise == -1:\n precise = \"IMPRECISE\"\n else:\n precise=\"PRECISE\"\n\n chr2=\"\"\n if chr1 != chr2:\n chr2=\"CHR2=\"+ tokens[3] + \";\"\n covRejInfo = \"\"\n if covInfo > 0 and CM == \"INS_halfRF\":\n covRejInfo= \";CR=TD_rejected_due_to_relative_coverage_\" + str(covInfo)\n elif covInfo > 0 and CM == \"INS_halfFR\":\n covRejInfo= \";CR=DEL_rejected_due_to_relative_coverage_\" + str(covInfo)\n elif covInfo > 0:\n covRejInfo= \";CINFO=\" + str(covInfo)\n\n if name == \"BND\":\n GROUPID = \"GROUPID=\" + tokens[24] + \";\"\n if CM.startswith(\"INS_C\"):\n CM = \"Translocation\"\n elif CM.startswith(\"INS_half\"):\n CM = \"TranslocationOrDuplication\"\n elif CM.startswith(\"INS\") or CM.startswith(\"TD\"):\n CM = \"Duplication\"\n elif CM.startswith(\"INV\"):\n CM = \"Inversion\"\n elif CM.startswith(\"DN_INS\"):\n CM = \"DeNovoInsertion\"\n\n if tokens[22] != \".\" and tokens[23] != \".\":\n BNDAlt1, BNDAlt2 = tokens[22].replace(\"p\", tokens[3] + \":\" + chr2End),\\\n tokens[23].replace(\"p\", chr1 + \":\" + chr1Start)\n else:\n BNDAlt1, BNDAlt2 = \".\", \".\"\n \n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start, counter, \"N\", BNDAlt1, \".\",\"PASS\", \"SVTYPE=BND;CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;PROBTYPE=\" + CM + \";MATEID=\" + str(counter + 1) + \";\" + GROUPID + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2End, counter + 1, \"N\", BNDAlt2, \".\",\"PASS\", \"SVTYPE=BND;CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;PROBTYPE=\" + CM + \";MATEID=\" + str(counter) + \";\" + GROUPID + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n counter+= 1\n elif name == \"DN_INS\":\n 
print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<INS>\",\".\",\"PASS\", \"SVTYPE=INS;CIPOS=0,\" + cipos + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"DEL\":\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<DEL>\",\".\",\"PASS\", \"SVTYPE=DEL;END=\" + chr2End + \";SVLEN=-\" + svlen + \";CIPOS=0,\" + cipos + \";CIEND=-\" + cipos + \",0;\" + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"TD\" or name == \"TD_INV\":\n isinv=\"\"\n svlen = str(abs(int(chr2Start) - int(chr1End)))\n if name==\"TD_INV\":\n isinv=\"ISINV;\"\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1End,counter,\"N\", \"<DUP:TANDEM>\",\".\",\"PASS\", \"SVTYPE=DUP;END=\" + chr2Start + \";SVLEN=\" + svlen + \";CIPOS=-\" + cipos + \",0;CIEND=0,\" + cipos + \";\" + isinv + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name == \"INV\":\n ciend = int(chr2End) - int(chr2Start)\n pos = int((int(chr1Start) + int(chr1End))/2.0)\n end = int((int(chr2Start) + int(chr2End))/2.0)\n svlen = str(abs(end - pos))\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, pos, counter,\"N\", \"<INV>\",\".\",\"PASS\", \"SVTYPE=INV;END=\" + str(end) + \";SVLEN=\" + svlen + \";CIPOS=-\" + str(int(int(cipos)/2.0)) +\",\" + str(int(int(cipos)/2.0)) + \";CIEND=-\" + str(int(int(ciend)/2.0)) +\",\" + str(int(int(ciend)/2.0)) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n elif name in [\"INS\",\"INS_I\",\"INS_C_P\",\"INS_C_I_P\"]:\n GROUPID= \"GROUPID=\" + tokens[24] + \";\"\n if name in [\"INS\",\"INS_I\"]:\n field1 = \"DUP\"\n svlen = str(abs(int(chr1End)-int(chr1Start)))\n CM = \"CopyPasteInsertion\"\n else:\n field1 = \"DEL\"\n CM = \"CutPasteInsertion\"\n svlen = \"-\" + str(abs(int(chr1End)-int(chr1Start)))\n cipos = int(chr2End)-int(chr2Start)\n isinv=\"\"\n if name==\"INS_I\":\n isinv=\"ISINV;\"\n \n BNDAlt1, BNDAlt2 = \"N[\" + chr1 + \":\" + chr1Start + \"[\", \"]\" + tokens[3] + \":\" + chr2Start + \"]N\"\n BNDAlt3, BNDAlt4 = \"]\" + tokens[3] + \":\" + chr2Start + \"]N\", \"N[\" + chr1 + \":\" + chr1End + \"[\"\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start,counter,\"N\", \"<\" + field1 + \">\", \".\",\"PASS\", \"SVTYPE=\" + field1 + \";CM=\" + CM + \";END=\" + chr1End + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=-\" + str(cipos) +\",0;\" + GROUPID + isinv + support + precise + covRejInfo, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2Start, counter + 1,\"N\", BNDAlt1,\".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 2) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1Start, counter + 2,\"N\", BNDAlt2, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + 
str(counter + 1) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (chr1, chr1End, counter + 3,\"N\", BNDAlt3, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 4) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n print >> otpt, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (tokens[3], chr2Start, counter + 4,\"N\", BNDAlt4, \".\",\"PASS\", \"SVTYPE=BND;CM=\" + CM + \";SVLEN=\" + svlen + \";CIPOS=0,\" + str(cipos) + \";CIEND=0,\" + str(cipos) + \";\" + GROUPID + \"MATEID=\" + str(counter + 3) + \";\" + support + precise, \"GT:SU:PE:SR\", \"./.:\" + tokens[16] + \":\" + tokens[19] + \":\" + tokens[20])\n counter+= 4\n else:\n print>>stderr, \"Unrecognized SV type\"\n exit(1)", "def STRESS_rvs( self, filetable, rvars, seasonid, vardict ):\n if filetable is None:\n return [],[]\n reduced_vars = []\n needed_derivedvars = []\n \n for var in rvars:\n if var in ['TAUX','TAUY'] and filetable.filefmt.find('CAM')>=0:\n # We'll cheat a bit and change the sign as well as reducing dimensionality.\n # The issue is that sign conventions differ in CAM output and the obs files.\n\n if filetable.has_variables(['OCNFRAC']):\n # Applying the ocean mask will get a derived variable with variableid=var.\n reduced_vars.append( reduced_variable(\n variableid=var, filetable=filetable, season=self.season,\n #reduction_function=(lambda x,vid=var+'_nomask':\n reduction_function=(lambda x,vid=None:\n minusb(reduce2latlon_seasonal( x, self.season, self.region, vid)) ) ))\n needed_derivedvars.append(var)\n reduced_vars.append( reduced_variable(\n variableid='OCNFRAC', filetable=filetable, season=self.season,\n reduction_function=(lambda x,vid=None:\n reduce2latlon_seasonal( x, self.season, self.region, vid) ) ))\n elif filetable.has_variables(['ORO']):\n # Applying the ocean mask will get a derived variable with variableid=var.\n reduced_vars.append( reduced_variable(\n variableid=var, filetable=filetable, season=self.season,\n reduction_function=(lambda x,vid=var+'_nomask':\n minusb(reduce2latlon_seasonal( x, self.season, self.region, vid)) ) ))\n needed_derivedvars.append(var)\n reduced_vars.append( reduced_variable(\n variableid='ORO', filetable=filetable, season=self.season,\n reduction_function=(lambda x,vid=None:\n reduce2latlon_seasonal( x, self.season, self.region, vid) ) ))\n else:\n # No ocean mask available. Go on without applying one. But still apply minusb\n # because this is a CAM file.\n reduced_vars.append( reduced_variable(\n variableid=var, filetable=filetable, season=self.season,\n reduction_function=(lambda x,vid=None:\n minusb(reduce2latlon_seasonal( x, self.season,\n self.region, vid)) ) ))\n else:\n # No ocean mask available and it's not a CAM file; just do an ordinary reduction.\n reduced_vars.append( reduced_variable(\n variableid=var, filetable=filetable, season=self.season,\n reduction_function=(lambda x,vid=None:\n reduce2latlon_seasonal( x, self.season, self.region, vid ) ) ))\n vardict[var] = rv.dict_id( var, seasonid, filetable )\n return reduced_vars, needed_derivedvars", "def algorithm(self, chain):\n # the HLSVD results were calculated aligned with the original raw time\n # data, no frequency shift applied. 
As we apply any frequency shift to\n # the raw data, we must also shift the HLSVD fids. However, if we use\n # the Spectral tab's cutoff to determine which HLSVD fids to remove,\n # then we need to apply the threshold to HLSVD frequencies that have\n # had the frequency shift added to them. And in the end, the HLSVD fids\n # need to have a phase roll applied to line them up with the raw data.\n\n chain.data = chain.data - chain.svd_fids_checked", "def sep_fir_filtering(x, S, ht, hv, hmimo, b, kernel=\"naive\"):\n\n B, N, T, C = x.get_shape() # B: number of samples in batch, N: number of nodes, T: temporal length, C: channels\n K, F = hv.get_shape() # K: Length vertex filter, F: Number of filters\n M, F = ht.get_shape() # M: Length time filter, F: Number of filters\n C, F = hmimo.get_shape() # M: Length time filter, F: Number of filters\n\n x = tf.transpose(x, perm=[0, 1, 3, 2]) # BxNxCxT\n x = tf.expand_dims(x, axis=4) # BxNxCxTx1\n x = tf.reshape(x, shape=[-1, T, 1]) # BNCxTx1\n\n x_convt = tf.nn.conv1d(x, tf.expand_dims(ht, axis=1), stride=1, padding=\"SAME\", data_format=\"NHWC\") # BNCxTxF\n x_convt = tf.reshape(x_convt, shape=[-1, N, C, T, F]) # BxNxCxTxF\n x_convt = tf.transpose(x_convt, perm=[0, 1, 3, 2, 4])\n\n with tf.name_scope(\"kernel_creation\"):\n if kernel == \"naive\":\n SK = _vertex_fir_kernel(S, K) # KxNxN\n elif kernel == \"chebyshev\":\n SK = _chebyshev_kernel(S, K)\n else:\n raise ValueError(\"Specified kernel type {} is not valid.\" % kernel)\n\n # KxNxN, BxNxTxCxF -> BxKxNxTxCxF\n # a b c d c e f g -> d a b e f g\n SKx = tf.einsum(\"abc,dcefg->dabefg\", SK, x_convt) # BxKxNxTxCxF\n print(SKx.shape)\n # KxF BxKxNxTxCxF -> BxNxTxCxF\n # a b c a e f g b -> c e f g b\n Yunmixed = tf.einsum(\"ab,caefgb->cefgb\", hv, SKx) # BxNxTxCxF\n print(Yunmixed.shape)\n # CxF BxNxTxCxF -> BxNxTxF\n # a b c d e a b -> c d e b\n Ymimo = tf.einsum(\"ab,cdeab->cdeb\", hmimo, Yunmixed)\n print(Ymimo.shape)\n\n if b is not None:\n Ymimo += b\n return Ymimo", "def pairwise_cnvs(vcfs,temp_dir, output_directory, in_file, cluster_merge_slop=0):\n\n # Quality is lierally the sum of the previous VCF files.\n\n vcf_pairs = []\n for i in range(len(vcfs)):\n for j in range(i+1,len(vcfs)):\n vcf_pairs.append((vcfs[i],vcfs[j]))\n for p1, p2 in vcf_pairs: \n print(p1)\n print(p2)\n __bedtools_all__= \"\"\"awk '{{if ($0 !~ /^#/ && $6 > 10 ){{ split($8,a,\";\"); split(a[2],b,\"=\");print $1,$2,$2+b[2],$3,FILENAME}}}}' {0} |tr ' ' '\\t' | sort -k 1,1 -k 2,2g | bedtools cluster -i - -d {1} > tmp_clusters.txt\"\"\" \n bedtools_cmd = __bedtools_all__.format(\" \".join([p1,p2]), cluster_merge_slop)\n subprocess.check_call(bedtools_cmd, shell=True)\n __vcf_sort__ =\"vcf-sort {0} | bgzip -c > {0}.gz && tabix -p vcf {0}.gz\" \n vcf_sort_command = __vcf_sort__.format(p1)\n subprocess.check_call(vcf_sort_command, shell=True)\n __vcf_sort__ =\"vcf-sort {0} | bgzip -c > {0}.gz && tabix -p vcf {0}.gz\" \n vcf_sort_command = __vcf_sort__.format(p2)\n subprocess.check_call(vcf_sort_command, shell=True)\n p1 = p1 + \".gz\"\n p2 = p2 + \".gz\"\n all_vcfs = {}\n all_vcfs[p1] = vcf.VCFSimple(p1)\n all_vcfs[p2] = vcf.VCFSimple(p2)\n try:\n os.mkdir(os.path.join(output_directory, \"paired_vcfs\"))\n except OSError:\n pass\n output_file = os.path.join(output_directory,\"paired_vcfs\", os.path.basename(p1) + os.path.basename(p2) + \".vcf\")\n _process_clustering_pairs(\"tmp_clusters.txt\",all_vcfs, output_file,in_file, output_directory, temp_dir)", "def write_integrated_smallVariantsTable_as_vcf(df, filename, ploidy):\n\n 
print_if_verbose(\"getting vcf intersection\")\n \n # get a df that has unique vars\n df = cp.deepcopy(df)\n df = df.drop_duplicates(subset=\"#Uploaded_variation\")\n \n # get the vcf df with info\n df[\"#CHROM\"] = df.chromosome\n df[\"POS\"] = df.position\n df[\"REF\"] = df.ref\n df[\"ALT\"] = df.alt\n\n # get the filter as the number of programs that pass the calling\n df[\"FILTER\"] = df.number_PASS_programs.apply(str) + \"xPASS\"\n\n # define the programs\n programs = [\"freebayes\", \"HaplotypeCaller\", \"bcftools\"]\n\n # add the PASS programs\n print_if_verbose(\"getting PASS programs\")\n df[\"PASS_programs\"] = df.apply(lambda r: [p for p in programs if r[\"%s_PASS\"%p]], axis=1)\n df[\"PASS_programs_str\"] = df.PASS_programs.apply(lambda x: \"|\".join(x))\n \n # get the AF as the mean of the pass programs\n print_if_verbose(\"getting allele frequency\")\n\n df[\"AF\"] = df.apply(lambda r: \"%.4f\"%convert_NaN_to0(np.mean([r[\"%s_fractionReadsCoveringThisVariant\"%p] for p in r[\"PASS_programs\"]])), axis=1)\n \n # get the AF for each program\n df[\"AF_programs\"] = df.apply(lambda r: [\"%s_AF=%.4f\"%(p, r[\"%s_fractionReadsCoveringThisVariant\"%p]) for p in programs], axis=1)\n \n # define the vcffields\n vcf_fields = [\"#CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\", \"SAMPLE\"]\n \n # if it is haploid, avoid any issues with the genotyping\n if ploidy==1:\n \n # define the vcf fields\n df[\"ID\"] = df[\"#Uploaded_variation\"]\n df[\"QUAL\"] = \".\"\n df[\"FORMAT\"] = \"GT:AF\" \n \n # add genotyping fields\n df[\"GT\"] = \".\"\n \n # add to the info the agorithms that PASS the str\n df[\"AF_programs_str\"] = df.AF_programs.apply(lambda x: \";\".join(x))\n df[\"INFO\"] = (\"PASSALGS=\" + df.PASS_programs_str + \";\" + df.AF_programs_str).apply(get_corrected_INFO)\n \n # add the sample\n df[\"SAMPLE\"] = df.GT + \":\" + df.AF\n \n # the df_vcf is equivalent to the df\n df_vcf = df[vcf_fields].sort_values(by=[\"#CHROM\", \"POS\", \"REF\"])\n \n else:\n \n # add the number of chromosomes with this variant according to each genotype\n for p in programs: \n \n # change vars\n df[\"%s_GT\"%p] = df[\"%s_GT\"%p].apply(lambda x: \"/\".join(re.split(\"/|\\|\", x)))\n df[\"%s_GTset\"%p] = df[\"%s_GT\"%p].apply(lambda x: set(x.split(\"/\")))\n df[\"%s_GT_index\"%p] = df[\"%s_GT_index\"%p].apply(str)\n \n # add n chromosomes\n df[\"%s_numberChromosomes_withVar\"%p] = df.apply(lambda r: get_nChroms_with_var(r, p), axis=1)\n \n # test \n if sum(df[\"%s_numberChromosomes_withVar\"%p]>0)!=sum(df[\"%s_called\"%p]): \n \n # check that there are no rows with GT and not called\n df_notCalled_withGT = df[(df[\"%s_numberChromosomes_withVar\"%p]>0) & ~(df[\"%s_called\"%p])] \n if len(df_notCalled_withGT)>0: raise ValueError(\"There are some uncalled vars with GT\")\n \n # add the numberChromosomes_withVar considering only PASS vars\n df[\"numberChromosomes_withVar\"] = df.apply(get_numberChromosomes_withVar, axis=1) \n \n # report if there is any POS \n nvars_consideringREF = len(df.drop_duplicates(subset=[\"#CHROM\", \"POS\", \"REF\"]))\n nvars_not_consideringREF = len(df.drop_duplicates(subset=[\"#CHROM\", \"POS\"]))\n if nvars_consideringREF!=nvars_not_consideringREF: print_if_verbose(\"Warning there are some positions with >1 REF\") \n \n # get the grouped df per chromosome and position \n print_if_verbose(\"getting vcf lines\")\n df_vcf = df.groupby([\"#CHROM\", \"POS\", \"REF\"], as_index=False).apply(lambda df_pos: get_row_vcf(df_pos, ploidy))\n 
df_vcf.index = list(range(len(df_vcf)))\n \n # get the vcf content\n vcf_lines = df_vcf[vcf_fields].sort_values(by=[\"#CHROM\", \"POS\", \"REF\"]).to_csv(sep=\"\\t\", header=True, index=False)\n\n # get the header\n header_lines = [\"##fileformat=VCFv4.2\",\n \"##perSVade small variant calling pipeline. This is the merged output of freebayes (fb), GATK Haplotype Caller (HC) and bcftools (bt) for variants that PASS the filters in at least %i algorithms.\"%min(df.number_PASS_programs),\n \"##FILTER indicates the number of algorithms were this variant was called and PASSed the filters\",\n \"##FORMAT includes the GT (genotype) and AF (allele frequency).\",\n \"##GT includes the genotype in the case that all the PASS algorithms called the same GT, and the one that implies least varying positions otherwise.\",\n \"##AF includes the mean fraction of reads calling this variant across PASS alorithms\",\n \"##INFO includes the name of the algorithms that called this variant (PASSALGS) and the AF of each of the programs. Note that for multiallelic positions the ',' indicates each of the alleles in the order of 'ALT'\"\n ]\n \n print_if_verbose(\"writing %s\"%(filename))\n filename_tmp = \"%s.tmp\"%filename\n open(filename_tmp, \"w\").write(\"\\n\".join(header_lines) + \"\\n\" + vcf_lines)\n os.rename(filename_tmp, filename)", "def convertVCF2FLATJSON(self):\n if self.input_type not in ['vcf','vcf.gz'] or self.output_type != 'json':\n msg = \"Error: vcf files (possibly gzipped) must be given as input files, and a json file should be given as output file.\"\n status = \"failed\"\n raise generalException(msg)\n\n f = open(self.input_file)\n o = open(self.output_file,'w')\n vcf_reader = vcf.Reader(f)\n #cc = 1\n for record in vcf_reader:\n #for i in [1]:\n record = vcf_reader.next()\n for s in record.samples:\n if hasattr(s.data,'DP'):\n call_DP = s.data.DP\n else:\n call_DP = \"NA\"\n if len(uniqueInList(s.data.GT.split('|'))) > 1:\n call_het = \"Heterozygous\"\n else:\n call_het = \"Homozygous\"\n if isinstance(record.ALT, list):\n ALT = '|'.join([str(a) for a in record.ALT])\n else:\n ALT = record.ALT\n if isinstance(record.FILTER, list):\n FILTER = '|'.join([str(a) for a in record.FILTER])\n else:\n FILTER = str(record.FILTER)\n \n linedic = {\n \"variants_info_num_genes\" : \"NA\", \n \"variants_quality\" : str(record.QUAL),\n \"variants_info_allele_num\": \"NA\",\n \"variants_calls_info_zygosity\": call_het,\n \"variants_info_short_tandem_repeat\": \"NA\",\n \"readGroupSets_readGroups_experiment_sequencingCenter\": \"NA\",\n \"readGroupSets_readGroups_info_patient\": s.sample,\n \"variants_info_change_type\": record.var_type,\n \"variants_calls_info_read_depth\": str(call_DP),\n \"variants_info_other_effects\": \"NA\",\n \"variants_referenceBases\": record.REF,\n \"variants_info_is_scSNV_Ensembl\": \"NA\",\n \"readGroupSets_readGroups_experiment_libraryId\": \"NA\",\n \"variants_info_dbsnp_id_137\": \"NA\",\n \"variants_info_lof_tolerant_or_recessive_gene\": \"NA\",\n \"variants_info_is_scSNV_RefSeq\": \"NA\",\n \"variants_filters\": FILTER,\n \"readGroupSets_readGroups_sampleID\": s.sample,\n \"variants_start\": str(record.POS),\n \"variants_info_downsampled\": \"NA\",\n \"variants_referenceName\": record.CHROM,\n \"variants_alternateBases\": ALT,\n \"variants_calls_genotype\" : s.data.GT\n }\n o.write(json.dumps(linedic, ensure_ascii=False) + \"\\n\")\n\n o.close()\n f.close()\n\n status = \"succeeded\"\n return(status)\n # #sampleIdList = \n # varDic = {{\"Callset\": {\"id\" : , \"sampleId\" : 
, \"variantSetIds\" : [] }},\n # # {\"ReadGroupSets\" :\n # # {\"ReadGroups\" : {\"sampleId\" : }, {\"sampleId\" : }}\n # # },\n # {\"Variants\" :\n # {\"variantSetId\" : \"\",\n # \"referenceName\" : \"\",\n # \"start\" : \"\",\n # \"end\" : \"\",\n # \"referenceBases\" :\n # \"alternateBases\" :\n # \"quality\" :\n # \"filter\" :\n # },\n # \"calls\" :\n # { \"callSetId\": ,\n # \"genotype\" : []\n # }\n # },\n # { \"Variantsets\" { \"id\" : }}\n \n \n \n # jsonline = json.dumps(varDic, ensure_ascii=False)\n # cc += 1", "def dvc(v1, v2, windowShape, roiShape, op, strideShape, fitWindowSize,\n mapDisplayement=True, verbose=False, stdvThreshold=0):\n # Handle some errors\n if v1.shape != v2.shape:\n raise ValueError(\n \"DVC error: Voxel volumes V1 and V2 must have the same shape.\")\n\n # DVC step one: Preprocessing\n #############################\n\n py_spy_path = os.environ.get('DVC_PYSPY_PATH', None)\n import subprocess\n pid = os.getpid()\n\n filename = 'profile_dvc_old.svg'\n\n subprocess.Popen([py_spy_path, 'record',\n '--output', filename,\n '--pid', str(pid),\n '--rate', '100'])\n\n # Split v1\n subvolumeListV1 = splitVolume(v1, windowShape, strideShape)\n\n # DVC step two: Fourier based correlation\n ########################################\n\n # Calculate ROI, normalize values and correlate each subvolume of v1 with ROI of v2.\n # Returns the resulting displacement vectors it in a numpy 3d array.\n try:\n localDisplacementArrayAndSplitIndices = correlate(\n subvolumeListV1, v2, roiShape, op, stdvThreshold, fitWindowSize)\n except Exception as e:\n print(\"Unable to calculate correlation: \", e)\n return None\n\n # Displaying results: Print displacementArray\n if verbose:\n for p in range(0, subvolumeListV1.shape[0]):\n for q in range(0, subvolumeListV1.shape[1]):\n for v in range(0, subvolumeListV1.shape[2]):\n print(\"Undervolume index within V1: \",\n localDisplacementArrayAndSplitIndices[p, q, v, 4:])\n print(\"Associated displacement vector: \",\n localDisplacementArrayAndSplitIndices[p, q, v, :3])\n print(\"Correlation's Max Value: \",\n localDisplacementArrayAndSplitIndices[p, q, v, 3])\n print(\"---\")\n\n # DVC step three: Mapping of displacementArray\n ###########################################\n # map displacement vectors on volume v1 as result of the algorithm\n if mapDisplayement:\n result = mapDisplacement(\n localDisplacementArrayAndSplitIndices, v1, windowShape)\n else:\n result = localDisplacementArrayAndSplitIndices[:, :, :, :4]\n\n return result", "def union(self, other, temporal_iou_threshold=0.5, spatial_iou_threshold=0.6, strict=True, overlap='average', percentilecover=0.8, percentilesamples=100, activity=True, track=True):\n assert overlap in ['average', 'replace', 'keep'], \"Invalid input - 'overlap' must be in [average, replace, keep]\"\n assert spatial_iou_threshold >= 0 and spatial_iou_threshold <= 1, \"invalid spatial_iou_threshold, must be between [0,1]\"\n assert temporal_iou_threshold >= 0 and temporal_iou_threshold <= 1, \"invalid temporal_iou_threshold, must be between [0,1]\" \n assert percentilesamples >= 1, \"invalid samples, must be >= 1\"\n if not activity and not track:\n return self # nothing to do\n\n sc = self.clone() # do not change self yet, make a copy then merge at the end\n for o in tolist(other):\n assert isinstance(o, Scene), \"Invalid input - must be vipy.video.Scene() object and not type=%s\" % str(type(o))\n\n if strict:\n assert sc.filename() == o.filename(), \"Invalid input - Scenes must have the same underlying video. 
Disable this with strict=False.\"\n oc = o.clone() # do not change other, make a copy\n\n # Key collision?\n if len(set(sc.tracks().keys()).intersection(set(oc.tracks().keys()))) > 0:\n print('[vipy.video.union]: track key collision - Rekeying other... Use other.rekey() to suppress this warning.')\n oc.rekey()\n if len(set(sc.activities().keys()).intersection(set(oc.activities().keys()))) > 0:\n print('[vipy.video.union]: activity key collision - Rekeying other... Use other.rekey() to suppress this warning.') \n oc.rekey()\n\n # Similarity transform? Other may differ from self by a temporal scale (framerate), temporal translation (clip) or spatial isotropic scale (rescale)\n assert np.isclose(sc.aspect_ratio(), oc.aspect_ratio(), atol=1E-2), \"Invalid input - Scenes must have the same aspect ratio\"\n if sc.width() != oc.width():\n oc = oc.rescale(sc.width() / oc.width()) # match spatial scale\n if not np.isclose(sc.framerate(), oc.framerate(), atol=1E-3):\n oc = oc.framerate(sc.framerate()) # match temporal scale (video in oc will not match, only annotations)\n if sc.startframe() != oc.startframe():\n dt = (oc.startframe() if oc.startframe() is not None else 0) - (sc.startframe() if sc.startframe() is not None else 0)\n oc = oc.trackmap(lambda t: t.offset(dt=dt)).activitymap(lambda a: a.offset(dt=dt)) # match temporal translation of tracks and activities\n oc = oc.trackfilter(lambda t: ((not t.isdegenerate()) and len(t)>0), activitytrack=False) \n\n # Merge other tracks into selfclone: one-to-many mapping from self to other\n merged = {} # dictionary mapping trackid in other to the trackid in self, each track in other can be merged at most once\n for ti in sorted(sc.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest\n for tj in sorted(oc.tracklist(), key=lambda t: len(t), reverse=True): \n if ti.category() == tj.category() and (tj.id() not in merged) and tj.segment_percentilecover(sc.track(ti.id()), percentile=percentilecover, samples=percentilesamples) > spatial_iou_threshold: # mean framewise overlap during overlapping segment of two tracks\n sc.tracks()[ti.id()] = sc.track(ti.id()).union(tj, overlap=overlap) # merge duplicate/fragmented tracks from other into self, union() returns clone\n merged[tj.id()] = ti.id() \n print('[vipy.video.union]: merging track \"%s\"(id=%s) + \"%s\"(id=%s) for scene \"%s\"' % (str(ti), str(ti.id()), str(tj), str(tj.id()), str(sc))) \n oc.trackfilter(lambda t: t.id() not in merged, activitytrack=False) # remove duplicate other track for final union\n\n # Merge other activities into selfclone: one-to-one mapping\n for (i,j) in merged.items(): # i=id of other, j=id of self\n oc.activitymap(lambda a: a.replaceid(i, j) if a.hastrack(i) else a) # update track IDs referenced in activities for merged tracks\n for (i,ai) in sc.activities().items():\n for (j,aj) in oc.activities().items():\n if ai.category() == aj.category() and set(ai.trackids()) == set(aj.trackids()) and ai.temporal_iou(aj) > temporal_iou_threshold:\n oc.activityfilter(lambda a: a.id() != j) # remove duplicate activity from final union\n oc.activityfilter(lambda a: len(a.tracks())>0) # remove empty activities not merged\n\n # Union\n sc.tracks().update(oc.tracks())\n sc.activities().update(oc.activities())\n\n # Final union of unique tracks/activities\n if track:\n self.tracks(sc.tracklist()) # union of tracks only\n if activity:\n self.activities(sc.activitylist()) # union of activities only: may reference tracks not in self of track=False\n return self", "def 
load_fluctuations_2D_all(self):\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.nane_bar = np.zeros((len(self.time_steps)))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.dni_bar = np.zeros((len(self.time_steps)))\n\n self.phi = np.zeros((self.n_cross_section,len(self.time_steps),len(self.mesh['R'])))\n self.phi_bar = np.zeros((len(self.time_steps)))\n for i in range(len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n if (i == 0):\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.planes = np.arange(self.n_cross_section) * dn\n\n self.phi_bar[i] = np.mean(fluc_mesh['dpot'][...])\n if(self.HaveElectron):\n self.nane_bar[i] = np.mean(fluc_mesh['eden'][...])\n if(self.load_ions):\n self.dni_bar[i] = np.mean(fluc_mesh['iden'][...])\n for j in range(self.n_cross_section):\n self.phi[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,self.planes[j]],0,1)\n self.phi[j,i] -= self.phi_bar[i]\n\n if(self.HaveElectron):\n self.nane[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,self.planes[j]],0,1)\n self.nane[j,i] -= self.nane_bar[i]\n if(self.load_ions):\n self.dni[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,self.planes[j]],0,1)\n self.dni[j,i] -= self.dni_bar[i]\n fluc_mesh.close()\n\n\n\n\n return 0", "def test_filter_region():\n # Create two filters\n fs = [LowpassFilter(5, False, 0.1),\n NoneFilter(3, False)]\n\n # Create the filter region\n fr = FilterRegion(fs, dt=0.001)\n\n # The size should be correct (count of words + header 1 + data 1 + header 2\n # + data 2)\n assert fr.sizeof() == 4 + 16 + 8 + 16 + 0\n\n # Check that the data is written out correctly\n fp = tempfile.TemporaryFile()\n fr.write_subregion_to_file(fp)\n fp.seek(0)\n\n length, = struct.unpack(\"<I\", fp.read(4))\n assert length == len(fs)\n\n expected_data = bytearray(fr.sizeof() - 4)\n fs[0].pack_into(fr.dt, expected_data)\n fs[1].pack_into(fr.dt, expected_data, (fs[0].size_words() + 4)*4)\n assert fp.read() == expected_data", "def process_vcf_post_l_merge(l_merge_output_vcf_path, processed_vcf_path):\n with open(l_merge_output_vcf_path) as l_merge_output_fh:\n with open(processed_vcf_path, 'w') as processed_vcf_fh:\n vcf_reader = vcf.Reader(l_merge_output_fh)\n\n # Fix info strings.\n _update_info_string_number(vcf_reader, 'SVTYPE', -1)\n _update_info_string_number(vcf_reader, 'SVLEN', -1)\n\n # Fix format header.\n orig = vcf_reader.formats['SU']\n vcf_reader.formats['DP'] = vcf.parser._Format(\n 'DP', orig.num, orig.type, orig.desc)\n del vcf_reader.formats['SU']\n\n # Make column headers match what's expected by vcf_parser.\n # l_merge output is missing FORMAT column header, and columns\n # for each sample.\n if not 'FORMAT' in vcf_reader._column_headers:\n vcf_reader._column_headers.append('FORMAT')\n vcf_reader.samples = [\n x['ID'] for x in vcf_reader.metadata['SAMPLE']]\n\n # Writer object using Reader as template.\n vcf_writer = vcf.Writer(processed_vcf_fh, vcf_reader)\n\n # Format each record with correct setting.\n for record in vcf_reader:\n # Filter when insufficient support.\n if int(record.INFO['SU'][0]) < 10:\n continue\n\n # Per-sample values.\n record.FORMAT = 'GT:DP'\n\n # vcf.model._Call requires data as a hashable type so follow\n # vcf internal code pattern of making a tuple.\n calldata_tuple_type = vcf.model.make_calldata_tuple(\n 
record.FORMAT.split(':'))\n\n samples_with_sv = [\n x.split(':')[0] for x in record.INFO['SNAME']]\n\n if 'SULIST' in record.INFO:\n dp_list = [x.split(':')[0] for x in record.INFO['SULIST']]\n else:\n dp_list = record.INFO['SU']\n\n # Parse the record\n record_samples = []\n for sample_id in vcf_reader.samples:\n try:\n sample_idx = samples_with_sv.index(sample_id)\n\n sample_data = calldata_tuple_type(\n GT='1/1',\n DP=dp_list[sample_idx])\n except ValueError:\n sample_data = calldata_tuple_type(GT='./.', DP=0)\n record_samples.append(\n vcf.model._Call(record, sample_id, sample_data))\n record.samples = record_samples\n\n # update METHOD field\n record.__dict__['INFO']['METHOD'] = 'LUMPY'\n\n vcf_writer.write_record(record)", "def load_fluctuations_3D_fluc_only(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n\n #initialize the arrays\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n nane_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n dni_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n phi_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n\n #load all the rest of the files\n for i in range(1,len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n for j in range(self.n_plane):\n phi_all[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,j],0,1)\n if(self.HaveElectron):\n nane_all[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,j],0,1)\n if(self.load_ions):\n dni_all[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,j],0,1)\n fluc_mesh.close()\n\n\n #similar to the 2D case, we take care of the equilibrium relaxation contribution. See details in the comments in 2D loading function.\n\n phi_avg_tor = np.average(phi_all,axis = 0)\n if self.HaveElectron:\n nane_avg_tor = np.average(nane_all,axis=0)\n if self.load_ions:\n dni_avg_tor = np.average(dni_all,axis=0)\n\n for j in range(self.n_cross_section):\n self.phi[j,...] = np.swapaxes(phi_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - phi_avg_tor[:,np.newaxis,:]\n if self.HaveElectron:\n self.nane[j,...] = np.swapaxes(nane_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - nane_avg_tor[:,np.newaxis,:]\n if self.load_ions:\n self.dni[j,...] 
= np.swapaxes(dni_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - dni_avg_tor[:,np.newaxis,:]\n\n self.ne0[:] += np.average(phi_avg_tor,axis=0)\n if self.HaveElectron:\n self.ne0[:] += np.average(nane_avg_tor,axis=0)\n self.ni0[:] += np.average(phi_avg_tor,axis=0)\n if self.load_ions:\n self.ni0[:] += np.average(dni_avg_tor,axis=0)\n\n return 0", "def tune():\n if ir_config.FILTER == 'conf':\n tune_range = np.arange(0.05, 1.05, 0.05)\n else:\n interval = 10\n tune_range = range(interval, 500 + interval, interval)\n\n ir_tune_dp = join(path_parser.summary_rank, ir_config.IR_TUNE_DIR_NAME_TF)\n ir_tune_result_fp = join(path_parser.tune, ir_config.IR_TUNE_DIR_NAME_TF)\n with open(ir_tune_result_fp, mode='a', encoding='utf-8') as out_f:\n headline = 'Filter\\tRecall\\tF1\\n'\n out_f.write(headline)\n\n cids = tools.get_test_cc_ids()\n for filter_var in tune_range:\n if exists(ir_tune_dp): # remove previous output\n shutil.rmtree(ir_tune_dp)\n os.mkdir(ir_tune_dp)\n\n for cid in tqdm(cids):\n retrieval_params = {\n 'model_name': ir_config.IR_MODEL_NAME_TF,\n 'cid': cid,\n 'filter_var': filter_var,\n 'filter': ir_config.FILTER,\n 'deduplicate': ir_config.DEDUPLICATE,\n 'prune': True,\n }\n\n retrieved_items = ir_tools.retrieve(**retrieval_params) # pid, score\n\n passage_ids = [item[0] for item in retrieved_items]\n original_passages, _, _ = load_retrieved_passages(cid=cid,\n get_sents=True,\n passage_ids=passage_ids)\n passages = ['\\n'.join(sents) for sents in original_passages]\n summary = '\\n'.join(passages)\n print(summary)\n # print(summary)\n with open(join(ir_tune_dp, cid), mode='a', encoding='utf-8') as out_f:\n out_f.write(summary)\n\n performance = rouge.compute_rouge_for_dev(ir_tune_dp, tune_centrality=False)\n with open(ir_tune_result_fp, mode='a', encoding='utf-8') as out_f:\n if ir_config.FILTER == 'conf':\n rec = '{0:.2f}\\t{1}\\n'.format(filter_var, performance)\n else:\n rec = '{0}\\t{1}\\n'.format(filter_var, performance)\n\n out_f.write(rec)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
method for initialising ontologyOb from database
def initFromDatabase(self, identifier, connection): # first init base class - this will get obid ob.initFromDatabase(self, identifier, "ontologyOb", connection) # now get the complete object self.databaseFields = getObjectRecord(connection, "ontologyOb", self.databaseFields['obid']) self.obState.update({'ERROR' : 0 , 'NEW' : 0, 'MESSAGE' : "initialised from database OK"})
[ "def initFromDatabase(self, identifier, connection):\n\n # first init base class - this will get obid\n ob.initFromDatabase(self, identifier, \"ontologyTermFact\", connection)\n\n\n # now get the complete object\n self.databaseFields = getObjectRecord(connection, \"ontologyTermFact\", self.databaseFields['obid'])\n self.obState.update({'ERROR' : 0 , 'NEW' : 0, 'MESSAGE' : \"initialised from database OK\"})", "def initialize(self):\n raise NotImplementedError('The initialize method needs to be '\n 'implemented when subclassing '\n 'IndraOntology')", "def addontology(ontofilename,ontoname,dbserver='http://127.0.0.1:5000',ontoprefix=''):\n\n\turl=dbserver+'/ontology/add'\n\tidname=getidname(ontofilename)\n\tparser=oboparse.Parser(open(ontofilename))\n\tfor citem in parser:\n\t\ttags=citem.tags\n\t\tcid=tags[\"id\"][0]\n\t\tif len(ontoprefix)==0:\n\t\t\ttt=cid.split(':')\n\t\t\tif len(tt)>1:\n\t\t\t\tontoprefix=tt[0]\n\t\t# do no add obsolete terms\n\t\tif \"is_obsolete\" in tags:\n\t\t\tif tags[\"is_obsolete\"][0].lower()=='true':\n\t\t\t\tcontinue\n\t\tif \"name\" in tags:\n\t\t\torigname=tags[\"name\"][0]\n\t\telse:\n\t\t\tprint(\"ontology item id %s does not have a name\" % cid)\n\t\t\tcontinue\n\t\tif \"synonym\" in tags:\n\t\t\tsynonyms=tags[\"synonym\"]\n\t\telse:\n\t\t\tsynonyms=None\n\t\tparent='NA'\n\t\tparentid=None\n\t\tif \"is_a\" in tags:\n\t\t\tparentid=tags[\"is_a\"][0]\n\t\telif \"relationship\" in tags:\n\t\t\trela=tags[\"relationship\"][0]\n\t\t\trela=rela.split(' ',1)\n\t\t\tif rela[0] in ['derives_from','located_in','part_of','develops_from','participates_in']:\n\t\t\t\tparentid=rela[1]\n\t\tif parentid is not None:\n\t\t\tif parentid in idname:\n\t\t\t\tparent=idname[parentid]\n\t\t\telse:\n\t\t\t\tprint(\"parentid %s not found\" % parentid)\n\t\tdata={'term':origname,'synonyms':synonyms,'parent':parent,'ontologyname':ontoname}\n\t\tres=requests.post(url,json=data)\n\tprint('done')", "def load_ontology(self):\n multipart_data = MultipartEncoder(fields={'file': ('FinancialNewsOntology_beta3.owl',\n open(\n '../../resources/ontologies/FinancialNewsOntology_beta3.owl',\n 'rb'), 'text/plain')\n })\n response = requests.put('http://localhost:3030/' + self.__dataset + '/data', data=multipart_data,\n # auth=('admin', 'admin'),\n headers={'Content-Type': multipart_data.content_type})\n response.raise_for_status()", "def populate_ontologies(self):\n raise NotImplementedError", "def __init__(self) -> None:\r\n self.db = Db()\r\n self.init_db()", "def createontologytree(db,ontologies=[],outname=None):\n\tif outname is None:\n\t\toutname=os.path.join(getheatsequerdir(),'db/ontologygraph.pickle')\n\n\tif not ontologies:\n\t\tontologies=db.ontologyfiles\n\n\tif not db.ontologyfromid:\n\t\tdb=loaddbonto(db)\n\n\tontodict={}\n\tfor conto in ontologies:\n\t\tDebug(6,'Processing ontology %s' % conto)\n\t\tg=ontologytotree(conto)\n\t\tg=ontotreetonames(g,db.ontologyfromid)\n\t\tontodict[conto]=g\n\tDebug(6,'ontologies loaded. 
saving to pickel %s' % outname)\n\tfl=open(outname,'wb')\n\tpickle.dump(ontodict,fl,protocol=2)\n\tDebug(6,'ontologies pickled')\n\tdb.ontodict=ontodict\n\treturn db", "def __init__(self, sobj, context, term):\n self.sobj = sobj\n self.context = context\n self.term = term\n\n # get stored idiom objects\n fp = create_idiom_file()\n self.all_idioms = None\n with open(fp, 'r') as f:\n try:\n self.all_idioms = json.load(f)\n except json.JSONDecodeError as e:\n self.all_idioms = {}\n except Exception as e:\n raise e\n self.fp = fp", "def load_ontology_to_neo4jdb(db, ontology):\n nodeMap = {}\n\n for term in ontology:\n log.info('Loading term %s...' % term.name)\n node = db.node()\n \n if term.obsolete:\n log.info(' ** Skipping node %s because it is obsolete **' % term.name)\n continue\n \n for (attr, value) in term.__dict__.iteritems():\n if value and attr not in [\"relationships\", \"synonyms\"]:\n node.set(attr, value)\n elif value and attr == \"synonyms\":\n # Synonyms need to be converted from a list of tuples to a list\n # of strings\n synonymStrList = [\" \".join(x) for x in value]\n node.set(attr, synonymStrList)\n \n nodeMap.setdefault(term.id, {})\n nodeMap[term.id]['node_id'] = node.id\n nodeMap[term.id]['relationships'] = term.relationships\n\n index_neo4j_node(db, node, term.id)\n \n return nodeMap", "def __init__(self):\n self.db = self._read_db()\n self._setup_dirs()", "def __init__(self, word, synset_relations=...) -> None:\n ...", "def ontology():\n url = \"http://purl.obolibrary.org/obo/mondo.obo\"\n ontology_file = \"mondo.obo\"\n if not os.path.exists (ontology_file):\n r = requests.get(url, stream=True)\n with open(ontology_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n return GenericOntology(ServiceContext.create_context(),\n ontology_file)", "def _declare_as_ontology(self, version_info=None):\n model = Model(self.graph)\n model.addOntologyDeclaration(self.summary_level_curie)\n model.addOWLVersionIRI(self.summary_level_curie,\n self.version_level_curie)\n if version_info is not None:\n model.addOWLVersionInfo(self.distribution_level_turtle_curie,\n version_info)", "def __setKnowledgeBaseAttributes():\n\n debugMsg = \"initializing the knowledge base\"\n logger.debug(debugMsg)\n\n kb.absFilePaths = set()\n kb.bannerFp = advancedDict()\n kb.data = advancedDict()\n\n # Basic back-end DBMS fingerprint\n kb.dbms = None\n kb.dbmsDetected = False\n\n # Active (extensive) back-end DBMS fingerprint\n kb.dbmsVersion = [ \"Unknown\" ]\n\n kb.dep = None\n kb.docRoot = None\n kb.headersCount = 0\n kb.headersFp = {}\n kb.htmlFp = []\n kb.injParameter = None\n kb.injPlace = None\n kb.injType = None\n\n # Back-end DBMS underlying operating system fingerprint via banner (-b)\n # parsing\n kb.os = None\n kb.osVersion = None\n kb.osSP = None\n\n kb.parenthesis = None\n kb.resumedQueries = {}\n kb.stackedTest = None\n kb.targetUrls = set()\n kb.timeTest = None\n kb.unionComment = \"\"\n kb.unionCount = None\n kb.unionPosition = None", "def loadDOFile(filename):\n\n\tscriptDir = os.path.dirname(__file__)\n\tabsFilename = os.path.join(os.path.dirname(scriptDir),'src','ontology', filename)\n\treturn pronto.Ontology(absFilename)", "def __init__(self, py_dict=None):\n super(NetVdl2Schema, self).__init__()\n self.table = [Vdl2EntrySchema()]\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def __init__(self):\n # use networkX to create a directed graph\n # of words\n self.__graph = 
nx.DiGraph()\n # # map graph nodes to positions\n # self.__layout = {}\n # # map words to the synsets they belong to\n # self.__words_to_synsets = {}\n # # reverse of above\n # self.__synsets_to_words = {}\n # # map words to tense, definition, and id\n # self.__info_dict = {}\n # create w/ all synsets\n self.__create_graph_all_words()", "def __init__(self):\n try:\n self.__curSqlManager = SqlConnector()\n self.__rooms = self.__curSqlManager.getRooms()\n self.__bookings = __curSqlManager.getBookings()\n self.__customers = __curSqlManager.getCustomers()\n self.__problems = list()\n except RuntimeError:\n print(\"Couldn't load sql Connector. A empyt data set will be created\")\n self.__rooms = list()\n self.__bookings = list()\n self.__customers = list()\n self.__problems = list()", "def createData(self):\n self.data = self.dbroot['data'] = OOBTree()\n self.meta = self.dbroot['meta'] = Meta('PEAT-DB database')\n self.meta.staticfields = { 'name': 'text', 'Mutations':'text',\n 'Structure':'PDB'}\n self.blobs = self.dbroot['blobs'] = OOBTree() \n #use meta.info for any misc settings specific to the DB\n self.meta.info = PersistentMapping(self.defaultinfo)\n self.commit()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this method can be used to add a term to an ontology. The method will check that the term does not already exist, and will only add it if it does not exist, if the checkexisting parameter is True (set False if importing and sure there is no existing data, as this will speed up the transaction)
def addTerm(self,connection,termname, checkexisting = True, termdescription = None, unitname=None,termcode=None): termDict = { 'ontologyob' : self.databaseFields['obid'], 'xreflsid' : "%s.%s"%(self.databaseFields['xreflsid'],termname), 'termname' : termname, 'termdescription' : termdescription, 'unitname': unitname, 'termcode' : termcode } insertCursor = connection.cursor() if checkexisting: # if required check if this term is already in the db - if it is do not duplicate sql = """ select obid from ontologytermfact where ontologyob = %(ontologyob)s and termname = %(termname)s """ ontologymodulelogger.info("checking for term using %s"%(sql%termDict)) insertCursor.execute(sql,termDict) row = insertCursor.fetchone() ontologymodulelogger.info("rowcount = %s"%insertCursor.rowcount) if insertCursor.rowcount > 0: insertCursor.close() return (row[0],False) # do the insert termDict.update ({ 'obid' : getNewObid(connection) }) sql = """ insert into ontologytermfact(obid,ontologyob,xreflsid,termname,termdescription, unitname,termcode) values(%(obid)s,%(ontologyob)s,%(xreflsid)s,%(termname)s, %(termdescription)s,%(unitname)s,%(termcode)s) """ ontologymodulelogger.info("executing %s"%(sql%termDict)) insertCursor.execute(sql,termDict) connection.commit() insertCursor.close() self.obState.update({'NEW' : 0 , 'DB_PENDING' : 0, 'ERROR' : 0, 'MESSAGE' : "database insert OK"}) return (termDict['obid'],True)
[ "def add_term(term):\n # See if we already added it.\n # Search in reverse\n with open(CORPUSFILE) as ofile:\n for line in reversed(ofile.readlines()):\n if ' ' + term + ' ' in line.strip():\n term = None\n break\n # Add it if we never encountered it.\n if term:\n with open(CORPUSFILE, 'a') as afile:\n afile.write(term)\n afile.write('\\n')\n # print(\"[ term '%s' added ]\" % colorize(term, \"green\", True))\n print(\"[ term '\" + Fore.GREEN + term + \"' added ]\")\n # with colorama_text():\n # print(Fore.GREEN + term, end=\"\")\n # print(\"' added ]\")", "async def pglossary_add(self, ctx, term, *, definition):\n await self._pglossary_add(ctx, term, definition, True)", "def add_term(mytrie, word, weight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(weight, int), \"The weight of the word should be an integer\"\r\n mytrie.insertWord(weight, word)", "async def blacklist_add_term(self, ctx, term):\n term = term.lower()\n\n try:\n self.bot.databasehandler.sqlquery(\n \"INSERT INTO Blacklist(term) VALUES (?)\",\n term,\n return_type='commit'\n )\n await ctx.send(f\"'{term}' added to blacklist!\")\n except:\n await ctx.send(f\"'{term}' failed to add to blacklist!\")", "async def pglossary_edit(self, ctx, term, *, definition):\n await self._pglossary_add(ctx, term, definition, False)", "def add_terms(self, new_terms):\n\n self.overall_terms.extend([term for term in new_terms \\\n if term not in self.overall_terms])", "def _create_term_definition(self, active_ctx, local_ctx, term, defined):\n if term in defined:\n # term already defined\n if defined[term]:\n return\n # cycle detected\n raise JsonLdError(\n 'Cyclical context definition detected.',\n 'jsonld.CyclicalContext', {\n 'context': local_ctx,\n 'term': term\n }, code='cyclic IRI mapping')\n\n # now defining term\n defined[term] = False\n\n if _is_keyword(term):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; keywords cannot be overridden.',\n 'jsonld.SyntaxError', {'context': local_ctx, 'term': term},\n code='keyword redefinition')\n\n if term == '':\n raise JsonLdError(\n 'Invalid JSON-LD syntax; a term cannot be an empty string.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid term definition')\n\n # remove old mapping\n if term in active_ctx['mappings']:\n del active_ctx['mappings'][term]\n\n # get context term value\n value = local_ctx[term]\n\n # clear context entry\n if (value is None or (\n _is_object(value) and '@id' in value and\n value['@id'] is None)):\n active_ctx['mappings'][term] = None\n defined[term] = True\n return\n\n # convert short-hand value to object w/@id\n _simple_term = False\n if _is_string(value):\n _simple_term = True\n value = {'@id': value}\n\n if not _is_object(value):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context property values must be '\n 'strings or objects.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n\n # create new mapping\n mapping = active_ctx['mappings'][term] = {'reverse': False}\n\n # make sure term definition only has expected keywords\n valid_keys = ['@container', '@id', '@language', '@reverse', '@type']\n if self._processing_mode(active_ctx, 1.1):\n valid_keys.extend(['@context', '@nest', '@prefix'])\n for kw in value:\n if kw not in valid_keys:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; a term definition must not contain ' + kw,\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n\n # always compute whether term has a colon as an 
optimization for _compact_iri\n _term_has_colon = ':' in term\n\n if '@reverse' in value:\n if '@id' in value:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @reverse term definition must '\n 'not contain @id.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid reverse property')\n if '@nest' in value:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @reverse term definition must '\n 'not contain @nest.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid reverse property')\n reverse = value['@reverse']\n if not _is_string(reverse):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @reverse value must be '\n 'a string.', 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid IRI mapping')\n\n # expand and add @id mapping\n id_ = self._expand_iri(\n active_ctx, reverse, vocab=True, base=False,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @reverse value must be '\n 'an absolute IRI or a blank node identifier.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid IRI mapping')\n mapping['@id'] = id_\n mapping['reverse'] = True\n elif '@id' in value:\n id_ = value['@id']\n if not _is_string(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @id value must be a '\n 'string.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid IRI mapping')\n if id_ != term:\n # add @id to mapping\n id_ = self._expand_iri(\n active_ctx, id_, vocab=True, base=False,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(id_) and not _is_keyword(id_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @id value must be '\n 'an absolute IRI, a blank node identifier, or a '\n 'keyword.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid IRI mapping')\n mapping['@id'] = id_\n mapping['_prefix'] = (\n not _term_has_colon\n and re.match('.*[:/\\?#\\[\\]@]$', id_)\n and (_simple_term or self._processing_mode(active_ctx, 1.0)))\n if '@id' not in mapping:\n # see if the term has a prefix\n colon = term.find(':')\n if colon != -1:\n prefix = term[0:colon]\n if prefix in local_ctx:\n # define parent prefix\n self._create_term_definition(\n active_ctx, local_ctx, prefix, defined)\n\n # set @id based on prefix parent\n if active_ctx['mappings'].get(prefix) is not None:\n suffix = term[colon + 1:]\n mapping['@id'] = (\n active_ctx['mappings'][prefix]['@id'] + suffix)\n # term is an absolute IRI\n else:\n mapping['@id'] = term\n else:\n # non-IRIs MUST define @ids if @vocab not available\n if '@vocab' not in active_ctx:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context terms must define '\n 'an @id.', 'jsonld.SyntaxError', {\n 'context': local_ctx,\n 'term': term\n }, code='invalid IRI mapping')\n # prepend vocab to term\n mapping['@id'] = active_ctx['@vocab'] + term\n\n # IRI mapping now defined\n defined[term] = True\n\n if '@type' in value:\n type_ = value['@type']\n if not _is_string(type_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @type value must be '\n 'a string.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid type mapping')\n if type_ != '@id' and type_ != '@vocab':\n # expand @type to full IRI\n type_ = self._expand_iri(\n active_ctx, type_, vocab=True,\n local_ctx=local_ctx, defined=defined)\n if not _is_absolute_iri(type_):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @context @type value must '\n 'be an absolute IRI.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid type 
mapping')\n if type_.startswith('_:'):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; an @context @type values '\n 'must be an IRI, not a blank node identifier.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid type mapping')\n # add @type to mapping\n mapping['@type'] = type_\n\n if '@container' in value:\n container = JsonLdProcessor.arrayify(value['@container'])\n valid_containers = ['@list', '@set', '@index', '@language']\n is_valid = True\n has_set = '@set' in container\n\n if self._processing_mode(active_ctx, 1.1):\n valid_containers.extend(['@graph', '@id', '@type'])\n\n # check container length\n if '@list' in container:\n if len(container) != 1:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container with @list must have no other values.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n elif '@graph' in container:\n _extra_keys = [kw for kw in container if kw not in ['@graph', '@id', '@index', '@set']]\n if _extra_keys:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container with @graph must have no other values ' +\n 'other than @id, @index, and @set',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n else:\n is_valid = is_valid and (len(container) <= (2 if has_set else 1))\n else: # json-ld-1.0\n is_valid = is_valid and _is_string(value['@container'])\n\n # check against valid containers\n is_valid = is_valid and not [kw for kw in container if kw not in valid_containers]\n\n # @set not allowed with @list\n is_valid = is_valid and not (has_set and '@list' in container)\n\n if not is_valid:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container value '\n 'must be one of the following: ' + ', '.join(valid_containers) + '.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid container mapping')\n _extra_reverse_keys = [kw for kw in container if kw not in ['@index', '@set']]\n if (mapping['reverse'] and _extra_reverse_keys):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @container value for '\n 'an @reverse type definition must be @index or @set.',\n 'jsonld.SyntaxError', {'context': local_ctx},\n code='invalid reverse property')\n\n # add @container to mapping\n mapping['@container'] = container\n\n # scoped contexts\n if '@context' in value:\n mapping['@context'] = value['@context']\n\n if '@language' in value and '@type' not in value:\n language = value['@language']\n if not (language is None or _is_string(language)):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @language value must be '\n 'a string or null.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid language mapping')\n # add @language to mapping\n if language is not None:\n language = language.lower()\n mapping['@language'] = language\n\n # term may be used as prefix\n if '@prefix' in value:\n if _term_has_colon:\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @prefix used on a compact IRI term.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid term definition')\n if not _is_bool(value['@prefix']):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context value for @prefix must be boolean.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid @prefix value')\n mapping['_prefix'] = value['@prefix']\n\n # nesting\n if '@nest' in value:\n nest = value['@nest']\n if not _is_string(nest) or (nest != '@nest' and nest[0] == '@'):\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context @nest value must be ' +\n 'a 
string which is not a keyword other than @nest.',\n 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid @nest value')\n mapping['@nest'] = nest\n\n # disallow aliasing @context and @preserve\n id_ = mapping['@id']\n if id_ == '@context' or id_ == '@preserve':\n raise JsonLdError(\n 'Invalid JSON-LD syntax; @context and @preserve '\n 'cannot be aliased.', 'jsonld.SyntaxError',\n {'context': local_ctx}, code='invalid keyword alias')", "def test_insert_word_exists(self):\n testword = (\"testword\", 4.87)\n self.trie.insert_word(testword[0], testword[1])\n try:\n self.trie.find_word(testword[0])\n except SearchMiss:\n self.fail(\"SearchMiss raised\")", "def test_glossary_term_create(self):\n pass", "def from_terms(cls, terms, name, description, refgen):\n self = cls.create(name, description, refgen)\n self.log(\"Adding {} terms to the database.\", len(terms))\n self.add_terms(terms, overwrite=False)\n # Build the indices\n self.log(\"Building the indices.\")\n self._build_indices()\n\n self.log(\"Your gene ontology is built.\")\n return self", "def addontology(ontofilename,ontoname,dbserver='http://127.0.0.1:5000',ontoprefix=''):\n\n\turl=dbserver+'/ontology/add'\n\tidname=getidname(ontofilename)\n\tparser=oboparse.Parser(open(ontofilename))\n\tfor citem in parser:\n\t\ttags=citem.tags\n\t\tcid=tags[\"id\"][0]\n\t\tif len(ontoprefix)==0:\n\t\t\ttt=cid.split(':')\n\t\t\tif len(tt)>1:\n\t\t\t\tontoprefix=tt[0]\n\t\t# do no add obsolete terms\n\t\tif \"is_obsolete\" in tags:\n\t\t\tif tags[\"is_obsolete\"][0].lower()=='true':\n\t\t\t\tcontinue\n\t\tif \"name\" in tags:\n\t\t\torigname=tags[\"name\"][0]\n\t\telse:\n\t\t\tprint(\"ontology item id %s does not have a name\" % cid)\n\t\t\tcontinue\n\t\tif \"synonym\" in tags:\n\t\t\tsynonyms=tags[\"synonym\"]\n\t\telse:\n\t\t\tsynonyms=None\n\t\tparent='NA'\n\t\tparentid=None\n\t\tif \"is_a\" in tags:\n\t\t\tparentid=tags[\"is_a\"][0]\n\t\telif \"relationship\" in tags:\n\t\t\trela=tags[\"relationship\"][0]\n\t\t\trela=rela.split(' ',1)\n\t\t\tif rela[0] in ['derives_from','located_in','part_of','develops_from','participates_in']:\n\t\t\t\tparentid=rela[1]\n\t\tif parentid is not None:\n\t\t\tif parentid in idname:\n\t\t\t\tparent=idname[parentid]\n\t\t\telse:\n\t\t\t\tprint(\"parentid %s not found\" % parentid)\n\t\tdata={'term':origname,'synonyms':synonyms,'parent':parent,'ontologyname':ontoname}\n\t\tres=requests.post(url,json=data)\n\tprint('done')", "async def spellbook_add(self, ctx, *, spell_name):\n spell = await select_spell_full(ctx, spell_name)\n character: Character = await Character.from_ctx(ctx)\n character.add_known_spell(spell)\n await character.commit(ctx)\n await ctx.send(f\"{spell.name} added to known spell list!\")", "def add_word(self, word):\n # if it's a new word\n if word not in self.word2index:\n # add it into word2index\n self.index2word[self.num_words] = word\n self.word2index[word] = self.num_words\n self.word_counts[word] = 1\n self.num_words += 1\n else: # it has been in the vocabulary\n self.word_counts[word] += 1", "def test_glossary_term_update(self):\n pass", "def add_doc_if_not_exists(self, doc, unique_property_name):\n doc_type = doc['type']\n property_value = doc[unique_property_name]\n existing_doc = self.find_doc(\n doc_type, unique_property_name, property_value)\n if existing_doc is not None:\n LOG.debug('Existing {} doc where {}={}:\\n{}'.format(\n doc_type, unique_property_name, property_value, existing_doc))\n else:\n LOG.debug('Creating {} doc where {}={}'.format(\n doc_type, unique_property_name, 
property_value))\n try:\n self.client.connect()\n db = self.client[self.db_name]\n db.create_document(doc)\n except Exception:\n LOG.exception(\"Cloudant DB exception:\")\n finally:\n self.client.disconnect()", "def add():\n updateDB()\n node = getSelectedNode()\n if node != None:\n if node.type().definition() is None:\n hou.ui.displayMessage(\"Not a Digital Asset.\")\n else:\n libraryPath = node.type().definition().libraryFilePath()\n filename = os.path.basename(libraryPath)\n info = getFileInfo(filename)\n if info == None:\n saveOTL(node)\n moveToOtlDir(node, filename)\n addOTL(filename)\n hou.ui.displayMessage(\"Add Successful!\")\n else:\n hou.ui.displayMessage(\"Already Added\")\n else:\n hou.ui.displayMessage(\"Select EXACTLY one node.\")", "def add_to_index(self, term_, doc_id_, total_tokens):\n if term_ in self.inverted_index.keys():\n postings_list = self.inverted_index[term_]\n postings_list.insert_at_end(doc_id_, total_tokens)\n self.inverted_index[term_] = postings_list\n else:\n postings_list = LinkedList()\n postings_list.insert_at_end(doc_id_, total_tokens)\n self.inverted_index[term_] = postings_list", "def load_disease_term(self, disease_obj):\n try:\n self.disease_term_collection.insert_one(disease_obj)\n except DuplicateKeyError as err:\n raise IntegrityError(\n \"Disease term %s already exists in database\".format(disease_obj[\"_id\"])\n )", "def add_freetext_constraint(self, term):\n raise NotImplemented()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
method for initialising ontologyTermFact from database
def initFromDatabase(self, identifier, connection): # first init base class - this will get obid ob.initFromDatabase(self, identifier, "ontologyTermFact", connection) # now get the complete object self.databaseFields = getObjectRecord(connection, "ontologyTermFact", self.databaseFields['obid']) self.obState.update({'ERROR' : 0 , 'NEW' : 0, 'MESSAGE' : "initialised from database OK"})
[ "def initFromDatabase(self, identifier, connection):\n\n # first init base class - this will get obid\n ob.initFromDatabase(self, identifier, \"ontologyOb\", connection)\n\n\n # now get the complete object\n self.databaseFields = getObjectRecord(connection, \"ontologyOb\", self.databaseFields['obid'])\n self.obState.update({'ERROR' : 0 , 'NEW' : 0, 'MESSAGE' : \"initialised from database OK\"})", "def initialize(self):\n raise NotImplementedError('The initialize method needs to be '\n 'implemented when subclassing '\n 'IndraOntology')", "def from_terms(cls, terms, name, description, refgen):\n self = cls.create(name, description, refgen)\n self.log(\"Adding {} terms to the database.\", len(terms))\n self.add_terms(terms, overwrite=False)\n # Build the indices\n self.log(\"Building the indices.\")\n self._build_indices()\n\n self.log(\"Your gene ontology is built.\")\n return self", "def __init(self, terms, termset, varmap, mixed):\n\n self.__terms = terms\n self.__termset = termset\n self.__varmap = varmap\n self.__mixed = mixed\n\n if termset != None:\n assert len(termset) == len(terms)", "def addTerm(self,connection,termname, checkexisting = True, termdescription = None, unitname=None,termcode=None):\n termDict = {\n 'ontologyob' : self.databaseFields['obid'],\n 'xreflsid' : \"%s.%s\"%(self.databaseFields['xreflsid'],termname),\n 'termname' : termname,\n 'termdescription' : termdescription,\n 'unitname': unitname,\n 'termcode' : termcode\n }\n\n insertCursor = connection.cursor()\n if checkexisting:\n # if required check if this term is already in the db - if it is do not duplicate\n sql = \"\"\"\n select obid from ontologytermfact where\n ontologyob = %(ontologyob)s and\n termname = %(termname)s \"\"\"\n ontologymodulelogger.info(\"checking for term using %s\"%(sql%termDict))\n insertCursor.execute(sql,termDict)\n row = insertCursor.fetchone()\n ontologymodulelogger.info(\"rowcount = %s\"%insertCursor.rowcount)\n if insertCursor.rowcount > 0:\n insertCursor.close()\n return (row[0],False) \n\n # do the insert\n termDict.update ({\n 'obid' : getNewObid(connection)\n }) \n sql = \"\"\"\n insert into ontologytermfact(obid,ontologyob,xreflsid,termname,termdescription,\n unitname,termcode)\n values(%(obid)s,%(ontologyob)s,%(xreflsid)s,%(termname)s,\n %(termdescription)s,%(unitname)s,%(termcode)s)\n \"\"\"\n ontologymodulelogger.info(\"executing %s\"%(sql%termDict))\n insertCursor.execute(sql,termDict)\n connection.commit()\n insertCursor.close()\n self.obState.update({'NEW' : 0 , 'DB_PENDING' : 0, 'ERROR' : 0, 'MESSAGE' : \"database insert OK\"})\n return (termDict['obid'],True)", "def __init__(self):\n self.tfidf = TfIdf()\n self.data_ids = {}", "def load_ontology_to_neo4jdb(db, ontology):\n nodeMap = {}\n\n for term in ontology:\n log.info('Loading term %s...' 
% term.name)\n node = db.node()\n \n if term.obsolete:\n log.info(' ** Skipping node %s because it is obsolete **' % term.name)\n continue\n \n for (attr, value) in term.__dict__.iteritems():\n if value and attr not in [\"relationships\", \"synonyms\"]:\n node.set(attr, value)\n elif value and attr == \"synonyms\":\n # Synonyms need to be converted from a list of tuples to a list\n # of strings\n synonymStrList = [\" \".join(x) for x in value]\n node.set(attr, synonymStrList)\n \n nodeMap.setdefault(term.id, {})\n nodeMap[term.id]['node_id'] = node.id\n nodeMap[term.id]['relationships'] = term.relationships\n\n index_neo4j_node(db, node, term.id)\n \n return nodeMap", "def __init__(self):\n try:\n self.conn = sqlite3.connect('db/budget.db')\n self.c = self.conn.cursor()\n except sqlite3.Error as e:\n logging.error(\"Error connecting to database!\")\n raise\n\n self.c.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='budget'\")\n if (self.c.fetchone() == None):\n self.c.execute(\"\"\"CREATE TABLE budget (rule_id integer primary key, budget_name text, budget_value float, account_id integer, budget_balance float)\"\"\")\n self.conn.commit()", "def __init__(self, word, synset_relations=...) -> None:\n ...", "def __init__(self):\r\n nltk.download('punkt')\r\n self.data = None\r\n self.codes = []\r\n self.tfidf_vect = None\r\n self.multinomial_nb = None\r\n self.model = None", "def __init__(self, sobj, context, term):\n self.sobj = sobj\n self.context = context\n self.term = term\n\n # get stored idiom objects\n fp = create_idiom_file()\n self.all_idioms = None\n with open(fp, 'r') as f:\n try:\n self.all_idioms = json.load(f)\n except json.JSONDecodeError as e:\n self.all_idioms = {}\n except Exception as e:\n raise e\n self.fp = fp", "def __init__(self):\n self.indices_idf = {}", "def create_term(name, code, start):\n return Term.objects.create(name=name, code=code, start=start, end=start + datetime.timedelta(7*20-1))", "def __setKnowledgeBaseAttributes():\n\n debugMsg = \"initializing the knowledge base\"\n logger.debug(debugMsg)\n\n kb.absFilePaths = set()\n kb.bannerFp = advancedDict()\n kb.data = advancedDict()\n\n # Basic back-end DBMS fingerprint\n kb.dbms = None\n kb.dbmsDetected = False\n\n # Active (extensive) back-end DBMS fingerprint\n kb.dbmsVersion = [ \"Unknown\" ]\n\n kb.dep = None\n kb.docRoot = None\n kb.headersCount = 0\n kb.headersFp = {}\n kb.htmlFp = []\n kb.injParameter = None\n kb.injPlace = None\n kb.injType = None\n\n # Back-end DBMS underlying operating system fingerprint via banner (-b)\n # parsing\n kb.os = None\n kb.osVersion = None\n kb.osSP = None\n\n kb.parenthesis = None\n kb.resumedQueries = {}\n kb.stackedTest = None\n kb.targetUrls = set()\n kb.timeTest = None\n kb.unionComment = \"\"\n kb.unionCount = None\n kb.unionPosition = None", "def __init__(self):\n # use networkX to create a directed graph\n # of words\n self.__graph = nx.DiGraph()\n # # map graph nodes to positions\n # self.__layout = {}\n # # map words to the synsets they belong to\n # self.__words_to_synsets = {}\n # # reverse of above\n # self.__synsets_to_words = {}\n # # map words to tense, definition, and id\n # self.__info_dict = {}\n # create w/ all synsets\n self.__create_graph_all_words()", "def __init__(self, mongo_loc='clutch', mongo_port=27017, nclasses = 2, stopwords = None, burn=1.0, words_file=None, fit_prior=False, alpha=1.0):\n\n self._conn = pymongo.Connection(mongo_loc, port=mongo_port)\n \n if words_file is None:\n self.dictionary = self.getWords() #the 
mapping word -> vector index\n else:\n f = open(words_file,'r')\n self.dictionary = json.loads(f.read())\n f.close()\n self.fit_prior = fit_prior\n self.M = len(self.dictionary.keys())\n #temporary data structures used to build sparse matrix\n #number of columns\n #kill after sparse matrix created\n self.num_classes = nclasses\n #storage for the labelled data\n self.ldata = []\n self.lrow = []\n self.lcol = []\n self.stopwords = stopwords\n self.burn =burn\n self.alpha = alpha\n self.bayes_model = None", "def __init__(self, filename, ngram_count):\n\n word_table = {}\n phrase_counts = {}\n totalwords = 0\n # deque automatically evicts the first entry when over maxlen\n tempwords = collections.deque(maxlen=ngram_count)\n\n with open(filename) as f:\n for line in f:\n for word in line.strip().split(\" \"):\n word = word.lower()\n # if this is the first word in the corpus\n if not len(tempwords) == ngram_count:\n tempwords.append(word)\n continue\n # otherwise we use the previous words to generate the next\n tempword = \" \".join(tempwords)\n if tempword not in word_table:\n word_table[tempword] = {}\n phrase_counts[tempword] = 1\n else:\n phrase_counts[tempword] += 1\n # add counts to the word table\n if word not in word_table[tempword]:\n word_table[tempword][word] = 1\n else:\n word_table[tempword][word] += 1\n # add the current word to the previous words and evict one\n tempwords.append(word)\n # add to the total count of words\n totalwords += 1\n\n self.total_words = totalwords\n self.word_table = word_table\n self.ngram_size = ngram_count\n self.Session = db.get_session()", "def __init__(self):\n self.graph = Graph()\n self._configure_namespaces()\n self.dcat_vocabularies = URIRef(dcat_config['vocabularies'])\n self.language_map = dcat_config['language_map']\n self.dcat_spec = dcat_config['rdf']\n self.exclusion = self.dcat_spec['_exclusions']", "def get_term(self, id, attrs=None):\n cur = self.db.cursor()\n # look to see if this is an alt id first\n main_id = cur.execute(\"SELECT main FROM alts WHERE alt = ?\", (id,)).fetchone()\n if main_id:\n (id,) = main_id\n try:\n # Get the term information\n (id, desc, name) = cur.execute(\n \"SELECT * from terms WHERE id = ?\", (id,)\n ).fetchone()\n except TypeError: # Not in database\n raise KeyError(\"This term is not in the database.\")\n\n # Get the loci associated with the term\n term_loci = self.refgen.from_ids(\n [\n gene_id[0]\n for gene_id in cur.execute(\n \"SELECT id FROM term_loci WHERE term = ?\", (id,)\n ).fetchall()\n ]\n )\n\n # Get the isa relationships\n is_a = set(\n chain(\n *cur.execute(\n \"SELECT parent FROM rels WHERE child = ?\", (id,)\n ).fetchall()\n )\n )\n\n # Get the alternate ids of the term\n alts = set(\n chain(*cur.execute(\"SELECT alt FROM alts WHERE main = ?\", (id,)).fetchall())\n )\n # retrieve the stored term attrs\n term_attrs = {\n k: v\n for k, v in self.db.cursor().execute(\n \"\"\" SELECT key,val FROM term_attrs WHERE term = ?\"\"\", (id,)\n )\n }\n return GOTerm(\n id,\n name=name,\n desc=desc,\n alt_id=alts,\n is_a=is_a,\n loci=term_loci,\n **term_attrs,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Permutate this block with the specified table
def __permutate(self, table, block): return list(map(lambda x: block[x], table))
[ "def apply_to_table( self, table ):\n table.add_key( self.key )", "def _fill_table(self, table, gen) -> None:\n seq_table = self._table_map[table]\n seq_table.table.put_value(next(gen))", "def visit_table(self, table):\n pass", "def apply_to_table( self, table ):\n table.change_constraint( self.constraint )", "def fill_table(table, proba_preds, preds, ids, tests):\n \"\"\"\n \"\"\"\n pi_scores = []\n pi_preds = []\n pi_tests = []\n def is_in_set(x):\n if x['ID'] in ids:\n return True\n else: \n return False\n table['take'] = table.apply(is_in_set, axis=1)\n table = table[table['take']]\n for i in table['ID'].values:\n index = ids.index(i)\n pi_scores.append(proba_preds[index]) #pi = repermuté dans le sens de table\n pi_preds.append(preds[index])\n pi_tests.append(tests[index])\n table['proba_preds'] = pi_scores\n table['prediction'] = pi_preds\n table['test'] = pi_tests\n return table", "def permute(self, row=None):\n import abjad\n if self._expression:\n return self._update_expression(inspect.currentframe())\n row = abjad.TwelveToneRow(items=row)\n items = row(self)\n return type(self)(items=items)", "def reflecttable(self, connection, table):\n raise NotImplementedError()", "def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):\n pass", "def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None", "def updateVpcTable(tableName,data,paGroupName):\n try:\n #VpcCidr is the primary key for VpcTable\n table=dynamodb.Table(tableName)\n item={\n 'VpcId': data['VpcId'],\n 'VpcCidr': data['VpcCidr'],\n 'Region': data['Region'],\n 'SubscriberSnsArn': data['SubscriberSnsArn'],\n 'SubscriberAssumeRoleArn': data['SubscriberAssumeRoleArn'],\n 'PaGroupName': paGroupName,\n 'CurrentStatus': 'Inprogress'\n }\n response=table.put_item(Item=item)\n except Exception as e:\n logger.error(\"Updating Transit VpcTalbe is Failed, Error: {}\".format(str(e)))", "def untablify(self, obj_table):\n pass", "def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):\n self.send_mutateRowTs(tableName, row, mutations, timestamp, attributes)\n self.recv_mutateRowTs()", "def change_table(self, table_name):\n self._random_statement = \"\"\"SELECT * FROM {} ORDER BY RANDOM() LIMIT 1;\"\"\".format(table_name)", "def _transform_table(self, method, **kwargs):\n for table in method(**kwargs):\n yield EncryptedTable(\n table=table,\n materials_provider=self._materials_provider,\n table_info=self._table_info_cache.table_info(table.name),\n attribute_actions=self._attribute_actions,\n )", "def normalize_table(self):\n pass", "def _balance(self):\r\n\r\n # we make all modifications on a working copy of the\r\n # actual table. 
This allows us to add columns/rows\r\n # and re-balance over and over without issue.\r\n self.worktable = deepcopy(self.table)\r\n options = copy(self.options)\r\n\r\n # balance number of rows\r\n ncols = len(self.worktable)\r\n nrows = [len(col) for col in self.worktable]\r\n nrowmax = max(nrows) if nrows else 0\r\n for icol, nrow in enumerate(nrows):\r\n if nrow < nrowmax:\r\n # add more rows\r\n self.worktable[icol].extend([Cell(\"\", **self.options) for i in range(nrowmax-nrow)])\r\n\r\n self.ncols = ncols\r\n self.nrows = nrowmax\r\n\r\n # add borders - these add to the width/height, so we must do this before calculating width/height\r\n self._borders()\r\n\r\n # equalize widths within each column\r\n cwidths = [max(cell.get_width() for cell in col) for col in self.worktable]\r\n\r\n if self.width or self.maxwidth and self.maxwidth < sum(cwidths):\r\n # we set a table width. Horizontal cells will be evenly distributed and\r\n # expand vertically as needed (unless self.height is set, see below)\r\n\r\n # use fixed width, or set to maxwidth\r\n width = self.width if self.width else self.maxwidth\r\n\r\n if ncols:\r\n # get minimum possible cell widths for each row\r\n cwidths_min = [max(cell.get_min_width() for cell in col) for col in self.worktable]\r\n cwmin = sum(cwidths_min)\r\n\r\n if cwmin > width:\r\n # we cannot shrink any more\r\n raise Exception(\"Cannot shrink table width to %s. Minimum size is %s.\" % (self.width, cwmin))\r\n\r\n excess = width - cwmin\r\n if self.evenwidth:\r\n # make each collumn of equal width\r\n for i in range(excess):\r\n # flood-fill the minimum table starting with the smallest collumns\r\n ci = cwidths_min.index(min(cwidths_min))\r\n cwidths_min[ci] += 1\r\n cwidths = cwidths_min\r\n else:\r\n # make each collumn expand more proportional to their data size\r\n for i in range(excess):\r\n # fill wider collumns first\r\n ci = cwidths.index(max(cwidths))\r\n cwidths_min[ci] += 1\r\n cwidths[ci] -= 3\r\n cwidths = cwidths_min\r\n\r\n # reformat worktable (for width align)\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n try:\r\n cell.reformat(width=cwidths[ix], **options)\r\n except Exception, e:\r\n msg = \"ix=%s, iy=%s, width=%s: %s\" % (ix, iy, cwidths[ix], e.message)\r\n raise Exception (\"Error in horizontal allign:\\n %s\" % msg)\r\n\r\n # equalize heights for each row (we must do this here, since it may have changed to fit new widths)\r\n cheights = [max(cell.get_height() for cell in (col[iy] for col in self.worktable)) for iy in range(nrowmax)]\r\n\r\n if self.height:\r\n # if we are fixing the table height, it means cells must crop text instead of resizing.\r\n if nrowmax:\r\n\r\n # get minimum possible cell heights for each collumn\r\n cheights_min = [max(cell.get_min_height() for cell in (col[iy] for col in self.worktable)) for iy in range(nrowmax)]\r\n chmin = sum(cheights_min)\r\n #print \"cheights_min:\", cheights_min\r\n\r\n if chmin > self.height:\r\n # we cannot shrink any more\r\n raise Exception(\"Cannot shrink table height to %s. 
Minimum size is %s.\" % (self.height, chmin))\r\n\r\n # now we add all the extra height up to the desired table-height.\r\n # We do this so that the tallest cells gets expanded first (and\r\n # thus avoid getting cropped)\r\n\r\n excess = self.height - chmin\r\n even = self.height % 2 == 0\r\n for i in range(excess):\r\n # expand the cells with the most rows first\r\n if 0 <= i < nrowmax and nrowmax > 1:\r\n # avoid adding to header first round (looks bad on very small tables)\r\n ci = cheights[1:].index(max(cheights[1:])) + 1\r\n else:\r\n ci = cheights.index(max(cheights))\r\n cheights_min[ci] += 1\r\n if ci == 0 and self.header:\r\n # it doesn't look very good if header expands too fast\r\n cheights[ci] -= 2 if even else 3\r\n cheights[ci] -= 2 if even else 1\r\n cheights = cheights_min\r\n\r\n # we must tell cells to crop instead of expanding\r\n options[\"enforce_size\"] = True\r\n #print \"cheights2:\", cheights\r\n\r\n # reformat table (for vertical align)\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n try:\r\n cell.reformat(height=cheights[iy], **options)\r\n except Exception, e:\r\n msg = \"ix=%s, iy=%s, height=%s: %s\" % (ix, iy, cheights[iy], e.message)\r\n raise Exception (\"Error in vertical allign:\\n %s\" % msg)\r\n\r\n # calculate actual table width/height in characters\r\n self.cwidth = sum(cwidths)\r\n self.cheight = sum(cheights)\r\n #print \"actual table width, height:\", self.cwidth, self.cheight, self.width, self.height\r", "def setTable( self, table ):\n self._table = table\n self.setEnabled(table is not None)\n self.clear()", "def permute_rows(self, e, write=False):\n e.insert(0, 0)\n self.permute(e, write)", "def rehash(self) -> None:\n # Copy the previous table\n prev_table = self.table[:]\n\n # Create a new array of slots\n self.table = [self.EMPTY_SINCE_START] * self.capacity\n\n for entry in prev_table:\n # Ensure we do not try to unpack `EmptySlot`s\n if isinstance(entry, tuple):\n self.set(entry[0], entry[1])", "def update_visit_table(self):\n pid = self.disp_model[\"pid\"]\n self.visit_table_data = self.sql.query.visit_by_pid(pid=pid)\n print(\"update_visit_table table data\\n\\t%s\" % self.visit_table_data)\n generic_fill_table(self.visit_table, self.visit_table_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn the string data into a list of bits (1s and 0s)
def __String_to_BitList(self, data): if 2.7 < 3: # Turn the strings into integers. Python 3 uses a bytes # class, which already has this behaviour. data = [ord(c) for c in data] l = len(data) * 8 result = [0] * l pos = 0 for ch in data: i = 7 while i >= 0: if ch & (1 << i) != 0: result[pos] = 1 else: result[pos] = 0 pos += 1 i -= 1 return result
[ "def string_to_bitlist(data: ByteString) -> List[int]:\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n\n return result", "def convert_to_bits(data):\n\tresult = []\n\tfor c in data:\n\t\tbits = bin(c)[2:]\n\t\tbits = '00000000'[len(bits):] + bits\n\t\tresult.extend(bits)\n\t\t# result.extend([int(b) for b in bits])\n\treturn ''.join([i for i in result])", "def __to_bits(data_byte):\n return [(int(data_byte) >> i) & 1 for i in range(0, 8)]", "def str_to_bits(text: str) -> np.ndarray:\n msg_bytes = text.encode('utf-8')\n bits = []\n for byte in msg_bytes:\n bits.extend([(byte >> i) & 3 for i in range(6, -1, -2)])\n bits.extend([3, 3, 3, 3])\n return np.array(bits)", "def to_bool_list(bytes_array):\n ba = []\n index = 1\n for byte in bytes_array:\n for bit in range(7):\n if byte & 1 << bit:\n ba.append(index)\n index += 1\n return ba", "def bits(data):\n for d in data:\n for i in [5, 4, 3, 2, 1, 0]:\n yield (d >> i) & 1", "def _mk_bits(self,data):\n if isinstance(data, bytes):\n return data[data.index(b\"\\xfc\") :]\n # handles int and unquoted hex\n if isinstance(data, int):\n length = data.bit_length() >> 3\n bites = int.to_bytes(data, length, byteorder=\"big\")\n return bites\n try:\n # Handles hex byte strings\n i = int(data, 16)\n i_len = i.bit_length() >> 3\n bites = int.to_bytes(i, i_len, byteorder=\"big\")\n return bites\n except (LookupError, TypeError, ValueError):\n if data[:2].lower() == \"0x\":\n data = data[2:]\n if data[:2].lower() == \"fc\":\n return bytes.fromhex(data)\n try:\n return b64decode(self.fix_bad_b64(data))\n except (LookupError, TypeError, ValueError):\n return data", "def fromhex(s: str) -> bitlist:\n return bitlist(bytes.fromhex(s))", "def convert_intList_to_bit(l):\n result = []\n for a in l:\n result += [int(n) for n in bin(a)[2:].zfill(8)]\n return result", "def convertBinData(binDataString):\r\n data = []\r\n for i in range(len(binDataString)/4):\r\n data.append(struct.unpack('>h', binDataString[i*4:i*4+2])[0])\r\n\r\n return data", "def convert_bytes_to_bit_field(input_bytes):\n byte_list = list(input_bytes)\n byte_list.reverse()\n result = []\n for byte in byte_list:\n bin_string = bin(ord(byte))[2:].rjust(8, '0')\n result.extend([int(x) for x in list(bin_string)])\n log.trace(\"Returning a bitfield of %s for input string: [%s]\", result, input_bytes)\n return result", "def to_braille(binary: str) -> list:\n output = []\n for count, value in enumerate(binary):\n if value == '1':\n output.append(count+1)\n return output", "def __get_bin_list(string):\n return [1 if str(c).isupper() else 0 for c in string]", "def byte_to_bit_array(byte: int) -> [int]:\n return [int(i) for i in \"{0:08b}\".format(byte)]", "def byte_to_bits(byte):\n return \"\".join([str(get_bit(byte, bit_num)) for bit_num in range(7, -1, -1)])", "def pattern_to_bit_string(pattern):\n \n bits = []\n for line in pattern.splitlines():\n if line.startswith('~'):\n bits.append(0)\n else:\n bits.append(1)\n\n return tuple(bits)", "def bitlist_to_string(data: List[int]) -> ByteString:\n result = []\n pos = 0\n c = 0\n while pos < len(data):\n c += data[pos] << (7 - (pos % 8))\n if (pos % 8) == 7:\n result.append(c)\n c = 0\n pos += 1\n\n return bytes(result)", "def get_set_bits_list(x):\n idx = 0\n \"\"\"idx represents position from end\n hence bitboard can be prepared by simply shifting 1\n by the idx\"\"\"\n l = []\n while(x):\n if(x & 1):\n l.append(idx)\n x = x 
>> 1\n idx += 1\n return l", "def convert_intbitset(s):\n ibs = intbitset()\n ibs.fastload(s)\n return ibs", "def hex2bin(data):\n data = re.findall(r'[0-9a-fA-F]',''.join(data))\n return map(lambda x: '{0:04b}'.format(int(x,16)) , data )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the 6 bytes of expansion in hexadecimal
def expand(self, fbits): bitlist = self.__String_to_BitList(fbits) expansion = self.__permutate(self.__expansion_table, bitlist) expansion_str = self.__BitList_to_String(expansion) return self.__String_to_hex(expansion_str)
[ "def get_hex(self):\n pass", "def to_hex(self):\n if self.size not in VAR_PREFIXES:\n return \"0\" * int((self.size - len(bin(self.value)[2:]))/4) + hex(int(bin(self.value)[2:], 2))[2:]", "def form_hex(dense_hash):\n return ''.join([format(number, '02x') for number in dense_hash])", "def modhex_to_hex(data):\n t_map = str.maketrans(\"cbdefghijklnrtuv\", \"0123456789abcdef\")\n return data.translate(t_map)", "def genFullBitField(self):\n # meta['pieces'] contains a sha1-sum for each piece. \n # thus, pieces_amount = len()/20\n pieces = self.numPieces\n num = int(math.ceil(pieces / 8.0))\n result = list('\\xFF'*num)\n if pieces % 8:\n result[-1] = chr(ord(result[-1]) ^ (2**(8 - (pieces % 8))) - 1)\n return ''.join(result)", "def hex_in_string(bytes_to_show):\n return ''.join('0x{:02x} '.format(letter) for letter in bytes_to_show)", "def shellcode_to_hex(msf_payload, host, port):\r\n proc = Popen(\"msfvenom -p {0} LHOST={1} LPORT={2} EXITFUNC=thread -f raw -b '\\\\x00\\\\x20\\\\x0d\\\\x0a'\".format(\r\n msf_payload, host, port), shell=True, stdout=PIPE, stderr=PIPE\r\n )\r\n stdout, _ = proc.communicate()\r\n return hexlify(stdout)", "def longhex(amqp_value):\n hex_str = hex(int(amqp_value))\n if len(hex_str) == 19 and hex_str[-1] == 'L':\n return hex_str[:-1] # strip trailing 'L' if present on some ulongs\n return hex_str", "def printHex(content):\n out = \"\"\n while len(content) > 0:\n out += str(content[:8]) + \" \"\n content = content[8:]\n print(out)", "def hex_view(self):\n \n # Remove all whitespace.\n return b''.join(self.load_hex_view().split())", "def bytes_to_hex(data):\n\n #from binascii import hexlify\n #return hex_string\n #hex_string = hexlify(data)\n return ''.join([\"%02X \" % ord(x) for x in data]).strip()", "def bytes_hex(data):\n res = \"0x\"\n for byte in data:\n res += \"%02X\" % byte\n\n return res", "def to_hex(v: gdb.Value) -> str:\n return f\"{int(v):#0{18}x}\"", "def escape_bytes(value7_uint64be):\n x = value7_uint64be\n x0 = x & 0x000000000FFFFFFF\n x1 = x & 0x00FFFFFFF0000000\n x = x0 | (x1 << 4)\n x0 = x & 0x00003FFF00003FFF\n x1 = x & 0x0FFFC0000FFFC000\n x = x0 | (x1 << 2)\n x0 = x & 0x007F007F007F007F\n x1 = x & 0x3F803F803F803F80\n x = x0 | (x1 << 1) | 0x8080808080808080\n return x", "def format_bytes_as_hex(_bytes):\n out = \"\"\n for _int in _bytes:\n out = out + f\"{_int:02x} \"\n return out.upper().strip()", "def hex(self) -> str:\r\n return self.string.encode().hex()", "def get_hex(self):\n\n if not self.data_available:\n return None\n\n ljust_len = 0\n str = ''\n if self.log_has_timestamps and not self.skip_timestamps:\n if self.abs_timestamps:\n str = '[{:.6f}] '.format(self.ts)\n else:\n str = '[{:.6f}] '.format(self.ts_diff)\n ljust_len = len(str)\n\n if self.include_dump_desc_in_output and self.cur_dump_desc:\n str = '{}{} '.format(str, self.cur_dump_desc)\n ljust_len = len(str)\n\n str = '{}{}: {}'.format(str, self.dump_addr, self.dump_data)\n\n if not self.remove_ascii_part and self.dump_data_ascii is not None:\n ljust_len += len(self.dump_addr) + 1 + self.max_num_hex_dump_values * 3 + 2\n str = str.ljust(ljust_len)\n str = '{}{}'.format(str, self.dump_data_ascii)\n else:\n str = str.rstrip(' ')\n\n self.data_available = False\n return str", "def digest_converter(self, digest):\r\n binary = bin(int(digest, 16))[2:].zfill(len(digest * 4))\r\n return binary", "def _bytes_to_hex_compatible(bytes_in):\n if sys.version_info[0] == 3 and sys.version_info[1] >= 5: # pragma: no cover\n return bytes_in.hex()\n else:\n import binascii\n return 
binascii.hexlify(bytes_in)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the 8-byte permutation result in hexadecimal
def ipermutation(self, fbits): bitlist = self.__String_to_BitList(fbits) ipermutacion = self.__permutate(self.__ip, bitlist) permut_str = self.__BitList_to_String(ipermutacion) return self.__String_to_hex(permut_str)
[ "def finalPermutation(code):\n return_list = ''\n for i in range(16):\n list = ''\n for j in range(4):\n list += code[DS.ip_1[i * 4 + j] - 1]\n return_list += \"%x\" % int(list, 2)\n return return_list", "def random_cipher():\n return np.random.permutation(26)", "def form_hex(dense_hash):\n return ''.join([format(number, '02x') for number in dense_hash])", "def form_dense_hash(numbers):\n return [reduce(xor, chunk) for chunk in list(form_chunks(numbers, 16))]", "def bin2hex(data):\n data = re.findall(r'[0-1]{4}',''.join(data))\n return map(lambda x: '{0:X}'.format(int(x,2)) , data )", "def genFullBitField(self):\n # meta['pieces'] contains a sha1-sum for each piece. \n # thus, pieces_amount = len()/20\n pieces = self.numPieces\n num = int(math.ceil(pieces / 8.0))\n result = list('\\xFF'*num)\n if pieces % 8:\n result[-1] = chr(ord(result[-1]) ^ (2**(8 - (pieces % 8))) - 1)\n return ''.join(result)", "def gen_permutations(n):\n max_int = '0b' + '1' * n\n for i in range(0, int(max_int, 2)+1):\n yield str(format(i, 'b').zfill(n))", "def as_hex(self):\n return \"\".join(format(b, \"0>2x\") for b in six.iterbytes(self.key))", "def lexicographic_permutations():\n ans = list()\n x = copy.copy(MILLIONTH)\n nums = copy.copy(NUMS)\n while nums:\n a = x // fac(len(nums) - 1)\n x = x % fac(len(nums) - 1)\n # 刚好整除 要退一位 不进位\n a = a - 1 if x == 0 else a\n ans.append(nums[a])\n nums.remove(nums[a])\n return ''.join(str(x) for x in ans)", "def polyRollHash(string):\n p = 100\n m = 10**9 + 9\n\n result = 0\n\n for i, ch in enumerate(string):\n result += ord(ch) * p**i % m\n\n return result", "def pibble32(data: bytes) -> str:\n table: bytes = bytes.maketrans(\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n )\n encoded: bytes = base64.b32encode(data)\n return str(encoded.translate(table), \"ascii\")", "def toHexArray(self):\n return ''.join([\"0x%02x,%s\" % (b, \"\\n\"[:(i&15)==15])\n for i, b in enumerate(self.compressRLE())])", "def tweak(a, n):\n bs = [b for b in a]\n bs[n] = (bs[n] + 1) % 256\n return bytearray(bs)", "def u_reps(n):\n assert n >= 0 and type(n) is int, \"only unsigned (nonnegative) integer arguments.\"\n print(\"DEC:\", n)\n b = padded_dtob(n)\n x = \"0x\" + format(n, 'X')\n print(\"BIN:\", b)#dec -> bin\n print(\"HEX:\", x)#dec -> hex", "def bech32_create_checksum( hrp, data ):\n values = bech32_hrp_expand( hrp ) + data\n polymod = bech32_polymod( values + [ 0, 0, 0, 0, 0, 0 ] ) ^ 1\n return [ ( polymod >> 5 * ( 5 - i ) ) & 31 for i in range( 6 ) ]", "def pandigitals(N, base=1):\n\tNUMBERS = list(range(base,N+base))\n\tpandigits = []\n\tfor i in list(itertools.permutations(NUMBERS)):\n\t\tif i[0] != 0:\n\t\t\ttmp = \"\"\n\t\t\tfor j in i:\n\t\t\t\ttmp = tmp + str(j)\n\t\t\tpandigits.append(int(tmp))\n\treturn sorted(pandigits)", "def Hash2FingerPrint(hash_series):\n bits = hash2bits_pd(hash_series).replace({True:1, False:0})\n \n return bits", "def main(self):\n result = []\n rawCodes = self.randomize()\n for code in rawCodes:\n code36 = self.base36_encode(code)\n #Be sure to have X characters in the code [ugly check]\n nbCharLeft = self.nbChar - len(code36)\n while nbCharLeft > 0:\n code36 = '0'+code36\n nbCharLeft = nbCharLeft - 1\n \n result.append(self.prefix+code36)\n print \"Number of code to generate: %d\" % self.nbCode\n print \"Number of Character: %d\" % self.nbChar\n if self.prefix != '':\n print \"Prefix to use: %s\" % self.prefix\n else:\n print \"No prefix\"\n \n return result", "def bytes_hex(data):\n res = \"0x\"\n for byte in data:\n 
res += \"%02X\" % byte\n\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read crypt file method
def _readcrypt(self): if self.dbg: print(bgre(self._readcrypt)) __dct = {} try: __dct, err = self.decrypt(self.crypt) except DecryptError as err: error(err) exit(1) __dct = dict(load(str(__dct), Loader=FullLoader)) if err: if err == 'SIGERR': if self.gui: yesno = xyesno('reencrypt, even though ' \ 'the passcryt signature could not be verified?') else: print(grn('reencrypt, even though ' \ 'the passcryt signature could not be verified?'), '[Y/n]') yesno = input() yesno = True if yesno in ('', 'y') else False if yesno and __dct: self._writecrypt(__dct) return __dct
[ "def read_password():\n with open(passwordfile,'r') as handle:\n read = handle.read()\n return read", "def read_from_file(filename: str, key: bytes) -> bytes:\n with pyscrypt.ScryptFile(filename, key) as file:\n return file.read()", "def read_file(self, group, name, ext='yaml'):\n with open(self.encrypted_file_path(group, name, ext=ext), mode='rb') as encrypted:\n ciphertext = encrypted.read()\n return self.keypair.decrypt(ciphertext)", "def read(path=None) -> str:\n if path is None:\n path = _get_login_path_file()\n with open(path, \"rb\") as fp:\n return _read_encrypted_file(fp).decode()", "def read():\n global accounts, users\n try:\n f = open(\"accounts.dat\", \"rb\")\n except FileNotFoundError:\n return None\n rd = f.read()\n users, accounts = json.loads(base64.b64decode(zlib.decompress(rd[65:])).decode())\n f.close()", "def readCredential(name):\n try:\n file=open(name, \"r\")\n user=file.readline().strip()\n passw=file.readline().strip()\n file.close()\n return user,passw\n except:\n print(\"Invalid credentials\\nCheck your txt file.\")\n print(\"The format of passGit.txt must be:\\n\\tusername\\npassword\")", "def load_crypt(fname):\n with open(fname, 'r') as file:\n data = file.read()\n return re.sub('[^A-Z]+', '', data.upper())", "def part4b(filename, password=None):\n f = open(filename, \"r\")\n lines = f.readlines()\n read_user = lines[0][:-1]\n read_pass = lines[1][:-1]\n\n if password == None: \n print(\"Username: \" + base64.b64decode(bytes(read_user)))\n print(\"Password: \" + base64.b64decode(bytes(read_pass)))\n else:\n username_encrypted = read_user\n password_encrypted = base64.b64encode(bytes(password))\n\n print(\"Username: \" + base64.b64decode(bytes(read_user)))\n print(\"Password: \" + password)", "def _read_pw_file(self):\n import codecs\n\n with open(self.password_file, \"r\") as f:\n pwstring = codecs.decode(f.read(), \"rot_13\")\n (username, password) = pwstring.split(\",\", 2)\n return (username, password)", "def advapi32_ReadEncryptedFileRaw(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pfExportCallback\", \"pvCallbackContext\", \"pvContext\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def read_passFile(passFile):\n try:\n with open(passFile, 'r') as inFile:\n passwd = inFile.read()\n except:\n return ''\n return passwd[:-1]", "def load(self):\n try:\n with open(self.filename, 'rb') as ciphertext:\n self.__accounts = self.__decoder.decode(self.__gpg.decrypt(\n ciphertext,\n verify=False,\n passphrase=getpass(\"Password: \")\n )[0].decode())\n print(\"Credentials loaded.\", file=sys.stderr)\n except FileNotFoundError:\n pass", "def read_user_credentials(file_path):\n return read_file(file_path)", "def _read_passwd(self):\n with open('/etc/passwd', 'r') as handle:\n self._passwd = handle.read()", "def test_bin_file(self):\n cypher = CypherAES('password')\n\n filepath = '../tests/'\n filename = 'robot-clipart.png'\n\n cypher.encrypt_file(filepath + filename)\n cypher.decrypt_file(filepath + '(encrypted) ' + filename)\n with open(filepath + filename, 'rb') as original:\n with open(filepath + '(decrypted) ' + filename, 'rb') as decrypted:\n orig = original.read()\n dec = decrypted.read()\n self.assertEqual(orig, dec, 'Input text doesn''t correspond to output text')\n os.remove(filepath + '(decrypted) ' + filename)\n os.remove(filepath + '(encrypted) ' + filename)", "def secure_read_file(path):\n contents = None\n if is_secure_path(path):\n with open(path) as fid:\n if is_secure_file(path, fid):\n contents 
= fid.read()\n return contents", "def test_text_file(self):\n cypher = CypherAES('password')\n\n filepath = '../tests/'\n filename = 'dark_chronicle'\n\n cypher.encrypt_file(filepath + filename)\n cypher.decrypt_file(filepath + '(encrypted) ' + filename)\n with open(filepath + filename, 'rb') as original:\n with open(filepath + '(decrypted) ' + filename, 'rb') as decrypted:\n orig = original.read()\n dec = decrypted.read()\n self.assertEqual(orig, dec, 'Input text doesn''t correspond to output text')\n os.remove(filepath + '(decrypted) ' + filename)\n os.remove(filepath + '(encrypted) ' + filename)", "def __readfile(self):\n raise NotImplementedError", "def retrieve_password(username):\r\n return open('passfile').readlines()[find_password_line(username)].strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Immediately stop the PostgreSQL cluster.
def pg_stop(pg_bin, pg_port, pg_socket_dir, pg_data): cmd = '%s/pg_ctl stop -m immediate -D %s -o "-p%s -k%s"' % ( pg_bin, pg_data, pg_port, pg_socket_dir ) ret_code, out, err = exec_command(cmd, shell=True) assert 0 == ret_code, out + err
[ "def stopDB(self):\n pass", "def stop() -> None:\n config = load_config_file()\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. Exiting.')\n try:\n execute_commands_on_linux_instances(\n config,\n [\n COMMAND_KILL\n ],\n instance_ips\n )\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n logging.info('Done!')", "def teardown_postgres_container():\n if environment.upper() != 'INTEGRATION':\n print('Tearing Down Docker PostgreSQL Container...')\n config = ConfigurationFactory.get_config(environment.upper())\n docker_client = docker.from_env()\n try:\n container = docker_client.containers.get(config.CONTAINER_NAME)\n container.stop()\n except Exception:\n print('Unable to stop container {}...'.format(config.CONTAINER_NAME))", "async def shutdown() -> None:\n await self.database.disconnect()", "def restart_server():\n caput('13XRM:SCANDB:Shutdown', 1)", "def test_stop_running_job(self, db_session: Session) -> None:\n pass # TODO implement", "def stop_cluster(self):\n for worker in self.workers:\n worker.stop_worker()\n self._stop_master()", "def stop(self):\n\n self.active = False\n self.join()", "def stop(self):\n if self.send('/stop', 'post') is None:\n self.delete()", "def __stopRestoreInstance(self, dbInst):\n pgCmd = \"gs_ctl stop -Z restoremode -D %s\" % dbInst.datadir \n self.logger.debug(\"stop local instance in restore mode cmd is %s\" % pgCmd)\n (status, output) = commands.getstatusoutput(pgCmd)\n if (status != 0):\n self.logger.debug(\"Stop instance failed!Output: %s\" % output)", "def stop(self):\n c = Controller()\n instance_id = c.instance.id\n c.terminate_instance()\n\n print('Successfully shut down instance: ' + instance_id)", "def stop(target):\n print('\\033[93m'+\" Stopping scripts on {}..\".format(target)+'\\033[0m')\n execute_remote(target, \"pkill -f remote_launch\")\n\n return True", "def terminate(self) -> None:\n self._database_connection.close()", "def stop(status=\"\"):\n raise StopScript(status)", "def stop(self, aws_tags: List[Dict]) -> None:\n for cluster_arn in self.tag_api.get_resources(\"rds:cluster\", aws_tags):\n cluster_id = cluster_arn.split(\":\")[-1]\n try:\n self.documentdb.stop_db_cluster(DBClusterIdentifier=cluster_id)\n print(f\"Stop documentdb cluster {cluster_id}\")\n except ClientError as exc:\n documentdb_exception(\"documentdb cluster\", cluster_id, exc)", "def stop_replica(self, allow_restart=True):\n\t\texit=open(self.exit_file, 'w')\n\t\texit.close()\n\t\tself.wait_for_replica_end()\n\t\tif allow_restart:\n\t\t\tos.remove(self.exit_file)", "async def disconnect_from_postgres(_app, _loop):\n logger.info(\"Disconnecting from postgres...\")\n await RDB.pool.close()", "def close_cluster(self):\n pass", "def stop(self):\n for thread in self.threads:\n thread.stop()\n self.topology = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove PostgreSQL data directory.
def pg_drop(pg_data): # /!\ WARNING: This is VERY dangerous /!\ # TODO: Find a safer way to drop the data directory. (ret_code, stdout, stderr) = exec_command(["rm", "-rf", pg_data]) if ret_code != 0: raise Exception(str(stderr))
[ "def wipe_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n os.system(\"rm -f {0}\".format(dbpath))", "def cleanup_data_dir():\n print \"cleaning up data directory...\"\n file_list = [ f for f in os.listdir(DATA_DIRECTORY) ]\n for f in file_list:\n os.remove(DATA_DIRECTORY + \"/\" + f)", "def purge_database() -> None:\n _confirm_intent('purge cardbuilder\\'s entire local database')\n\n with InDataDir():\n os.remove(DATABASE_NAME)", "def clean_data(uid, data_path):\n if not data_sources.get(uid):\n print(f\"Data clean failed, no datasource named {uid}\")\n return\n link_path = os.path.join(data_path, data_sources[uid][\"link\"])\n version_tag = data_sources[uid][\"version\"]\n version_dir = os.path.join(data_path, \"versioned_data/\" + uid + \"_\" + version_tag)\n print(\n f\"Cleaning datasource ({uid}). Directory: '{version_dir}'. Symlink: '{link_path}'.\"\n )\n try:\n shutil.rmtree(version_dir)\n os.unlink(link_path)\n except OSError:\n print(\"Removal error:\")\n traceback.print_exc(file=sys.stdout)\n print(\"--------------------\")", "def remove_data_file():\n try:\n os.remove(manage_config.package_data_file)\n except OSError:\n pass", "def _remove_local_grype_db(self, grype_db_dir):\n if os.path.exists(grype_db_dir):\n logger.info(\"Removing old grype_db at %s\", grype_db_dir)\n shutil.rmtree(grype_db_dir)\n else:\n logger.warning(\n \"Failed to remove grype db at %s as it cannot be found.\", grype_db_dir\n )\n return", "def _DeleteHostData(self):\n shutil.rmtree(self._host_profile_dir, ignore_errors=True)", "def del_database():\n path = os.path.join(os.getcwd(), \"WorkTimer.db\")\n database.connection.close()\n os.system(f\"del /f {path}\")", "def clean(self):\n shutil.rmtree(self.parameters['dbpath'])\n shutil.rmtree(self.parameters['logpath'])\n r = Shell.mkdir(self.parameters['dbpath'])\n Console.msg(r)", "def removeGraphDB (self):\n self.graph_db.delete_all()", "def purge_all_data() -> None:\n _confirm_intent('purge cardbuilder\\'s database and all downloaded data')\n with InDataDir():\n for file in glob.glob('*'):\n os.remove(file)", "def clear_data():\n directory_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n print(\"Cleaning up Magento 2 static content...\\n\")\n\n for directory in directory_list:\n print(f\"Cleaning files from {directory} path.\")\n subprocess.run([\"rm\", \"-rf\", directory])", "def clean(ctx):\n run('/opt/gremlin/bin/gremlin-server.sh stop && rm -rf /temp/gremlin_databases/ && rm -rf /opt/gremlin/ && cd graphdbtest && python3 cleanMonitoringDB.py')", "def remove_sample_dir(self):\n data_dir = self.create_data_dir()\n self.clear_sample_data()\n if isinstance(data_dir, list):\n for d_dir in data_dir:\n os.rmdir(d_dir)\n else:\n os.rmdir(data_dir)", "def reset_database():\n if os.path.exists(testinit.database_file):\n os.remove(testinit.database_file)\n shutil.copy(testinit.clean_db, testinit.database_file)", "def destroy_db():\n run('mysqladmin --defaults-file={home}/.my-admin.cnf --force drop {dbname}'.format(\n home=env.HOME, dbname=env.DB_NAME))\n run(\"mysql --defaults-file={home}/.my-admin.cnf -e 'DROP USER \\\"{user}\\\"@\\\"%\\\"'\".format(\n home=env.HOME, user=env.DB_USER))", "def drop_test_database():\n CONNECTION.get_connection().drop_database(TEST_DATABASE_NAME)", "def drop_database():\n drop_service_db()", "def rm_hgrps_dir(self, dirname):\n self.host_group_manager.rm_object_dir(dirname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the variable updated_at is an instance of datetime
def test_updated_at_instance_of(self): self.assertTrue(isinstance(self.base.updated_at, datetime))
[ "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_updated_at_type(self):\n self.assertEqual(type(self.city.updated_at), datetime)", "def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)", "def test_updated_at(self):\n new = self.value()\n self.assertEqual(type(new.updated_at), datetime.datetime)\n n = new.to_dict()\n new = BaseModel(**n)\n self.assertFalse(new.created_at == new.updated_at)", "def test_for_datetime(self):\n my_object = BaseModel()\n date = datetime.now()\n time_diff = my_object.updated_at - my_object.created_at\n self.assertTrue(abs(time_diff.total_seconds()) < 0.01)", "def test_created_at_type(self):\n self.assertEqual(type(self.user.created_at), datetime)", "def test_updated_at(self):\n self.base.save()\n self.assertTrue(self.base.created_at != self.base.updated_at)", "def _check_datetime(self, node):\n try:\n inferred = next(node.infer())\n except astroid.InferenceError:\n return\n if isinstance(inferred, Instance) and inferred.qname() == \"datetime.time\":\n self.add_message(\"boolean-datetime\", node=node)", "def test_that_created_at_equals_updated_at_initially(self):\n b = BaseModel()\n self.assertEqual(b.created_at, b.updated_at)", "def test_creation_time(self):\n dummy = self.dummy\n self.assertIsInstance(dummy.created_at, datetime)\n self.assertIsInstance(dummy.updated_at, datetime)\n self.assertEqual(dummy.updated_at, dummy.created_at)", "def test_dict_to_updated_at_attr_type(self):\n c = City()\n c_dictionary = c.to_dict()\n c2 = City(**c_dictionary)\n self.assertEqual(type(datetime.now()), type(c2.updated_at))", "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.dtype.datetime64", "def test_validate_datetime(dt):\n assert isinstance(validate_datetime(dt), pd.Timestamp)", "def is_valid_datetime(obj):\n if isinstance(obj, datetime):\n return True\n else:\n attrs = (\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"tzinfo\",\n )\n return all(hasattr(obj, attr) for attr in attrs)", "def is_valid_datetime(json_post):\n try:\n if not strict_rfc3339.validate_rfc3339(json_post[\"datetime\"]):\n return False\n else:\n return True\n except KeyError as e:\n print(e)\n return False", "def outdated(obj):\n if not hasattr(obj, '__updated__'): return True\n return (time.time() - obj.__updated__) > 300 # we update every 5 minutes", "def is_datetime(self) -> \"bool\":\n return self._value.getType() == Value.DTVAL", "def test_updated_at_keeps_latest(age_check_one_day, now, one_day_ago, two_days_ago):\n age_check_one_day.updated_at(one_day_ago)\n age_check_one_day.updated_at(two_days_ago)\n assert age_check_one_day.age_at(now) == 86400", "def test_to_dict_updated_at(self):\n test_dict = self.base.to_dict()\n self.assertEqual(type(test_dict['updated_at']), str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the variable updated_at
def test_updated_at(self): self.base.save() self.assertTrue(self.base.created_at != self.base.updated_at)
[ "def test_updated_at(self):\n new = self.value()\n self.assertEqual(type(new.updated_at), datetime.datetime)\n n = new.to_dict()\n new = BaseModel(**n)\n self.assertFalse(new.created_at == new.updated_at)", "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_updated_at_type(self):\n self.assertEqual(type(self.city.updated_at), datetime)", "def test_modify_value_updated_at_field(self):\n sleep(1)\n self.scraper.value = 100\n self.scraper.save()\n self.assertNotEqual(\n self.scraper.created_at.strftime(self.time_format), self.scraper.value_updated_at.strftime(self.time_format)\n )", "def test_that_save_func_update_update_at_attr(self):\n b = BaseModel()\n b.save()\n self.assertNotEqual(b.created_at, b.updated_at)\n self.assertGreater(b.updated_at.microsecond,\n b.created_at.microsecond)", "def test_timestampedmodel_update(self):\n course = self.get_test_course() # Course is a subclass of TimeStampedModel\n initial_updated_on = course.updated_on\n sleep(0.1)\n course.update()\n course = Course.objects.get(id=course.id)\n self.assertLess(initial_updated_on, course.updated_on)", "def test_updated_at_instance_of(self):\n self.assertTrue(isinstance(self.base.updated_at, datetime))", "def test_that_created_at_equals_updated_at_initially(self):\n b = BaseModel()\n self.assertEqual(b.created_at, b.updated_at)", "def test_update_time_tracking_entry(self):\n pass", "def outdated(obj):\n if not hasattr(obj, '__updated__'): return True\n return (time.time() - obj.__updated__) > 300 # we update every 5 minutes", "def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)", "def updated_recently(self):\n now = datetime.datetime.now(pytz.utc)\n one_day_ago = now - datetime.timedelta(1)\n return self.updated_at > one_day_ago", "def set_updated_at(self):\n self.record['updated_at'] = datetime.utcnow()", "def test_for_datetime(self):\n my_object = BaseModel()\n date = datetime.now()\n time_diff = my_object.updated_at - my_object.created_at\n self.assertTrue(abs(time_diff.total_seconds()) < 0.01)", "def test_place_update_at(self):\n old1 = self.place1.updated_at\n old2 = self.place2.updated_at\n\n self.place1.save()\n self.place2.save()\n\n self.assertNotEqual(old1, self.place1.updated_at)\n self.assertNotEqual(old2, self.place2.updated_at)", "def test_save(self):\n instance1 = BaseModel()\n attr_updated_before_save = instance1.updated_at\n instance1.save()\n attr_updated_after_save = instance1.updated_at\n self.assertNotEqual(attr_updated_before_save, attr_updated_after_save)", "def test_updated_at_keeps_latest(age_check_one_day, now, one_day_ago, two_days_ago):\n age_check_one_day.updated_at(one_day_ago)\n age_check_one_day.updated_at(two_days_ago)\n assert age_check_one_day.age_at(now) == 86400", "def test_no_update_fresh_data_single(self):\n w = Weather.objects.get(pk=6)\n w.last_modified = self.CURRENT_TIME\n w.save()\n weather = Weather.objects.retrieve_weather_object(city='Azusa', state='CA')\n self.assertEqual(w.last_modified, weather.last_modified)", "def HasChangedSince(self, someTime):\n return self.lastUpdate > someTime" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that updated_at is a string inside the dictionary
def test_to_dict_updated_at(self): test_dict = self.base.to_dict() self.assertEqual(type(test_dict['updated_at']), str)
[ "def test_to_dict_updated_at_str(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertEqual(str, type(c_dictionary['updated_at']))", "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_updated_at_type(self):\n self.assertEqual(type(self.city.updated_at), datetime)", "def test_updated_at(self):\n new = self.value()\n self.assertEqual(type(new.updated_at), datetime.datetime)\n n = new.to_dict()\n new = BaseModel(**n)\n self.assertFalse(new.created_at == new.updated_at)", "def test_to_dict_updated_at(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertIn('updated_at', c_dictionary)", "def is_valid_datetime(json_post):\n try:\n if not strict_rfc3339.validate_rfc3339(json_post[\"datetime\"]):\n return False\n else:\n return True\n except KeyError as e:\n print(e)\n return False", "def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)", "def test_updated_at_instance_of(self):\n self.assertTrue(isinstance(self.base.updated_at, datetime))", "def test_that_updated_at_returned_by_to_dict_is_an_iso_string(self):\n b = BaseModel()\n self.assertEqual(b.to_dict()[\"updated_at\"], b.updated_at.isoformat())", "def test_dict_to_updated_at_attr_type(self):\n c = City()\n c_dictionary = c.to_dict()\n c2 = City(**c_dictionary)\n self.assertEqual(type(datetime.now()), type(c2.updated_at))", "def _config_is_in_new_format(self, config):\r\n return any([profile_data for profile_data in config.values() \\\r\n if \"date_modified\" in profile_data])", "def check_last_updated():\n date_today = date.today()\n with (open(LAST_UPDATED, 'r')) as f:\n return f.readline() == str(date_today)", "def test_dict_to_updated_at_attr(self):\n c = City()\n c_dictionary = c.to_dict()\n c2 = City(**c_dictionary)\n self.assertEqual(c.updated_at, c2.updated_at)", "def test_to_dict_created_at(self):\n test_dict = self.base.to_dict()\n self.assertEqual(type(test_dict['created_at']), str)", "def outdated(obj):\n if not hasattr(obj, '__updated__'): return True\n return (time.time() - obj.__updated__) > 300 # we update every 5 minutes", "def updated_recently(self):\n now = datetime.datetime.now(pytz.utc)\n one_day_ago = now - datetime.timedelta(1)\n return self.updated_at > one_day_ago", "def test_prepare_datetime_format(datetime_format, expected):\n formatted_datetime = Elasticsearch_v2.prepare_datetime_format(datetime_format)\n assert formatted_datetime == expected\n assert not any(c.replace('T', '').isalpha() for c in arrow.get(datetime.now()).format(formatted_datetime))", "def requires_update(self, item):\n for k, v in ATTRIBUTES.iteritems():\n if v is None:\n continue\n if item.get(v, None) is None:\n return True\n log.info(u'echonest: no update required')\n return False", "def _is_datetime(self, name):\n return self._arg_tree[name]['type'] in ['iso-8601', 'rfc-1123']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that created_at is a string inside the dictionary
def test_to_dict_created_at(self): test_dict = self.base.to_dict() self.assertEqual(type(test_dict['created_at']), str)
[ "def is_valid_datetime(json_post):\n try:\n if not strict_rfc3339.validate_rfc3339(json_post[\"datetime\"]):\n return False\n else:\n return True\n except KeyError as e:\n print(e)\n return False", "def test_created_at_type(self):\n self.assertEqual(type(self.user.created_at), datetime)", "def test_that_created_at_returned_by_to_dict_is_an_iso_string(self):\n b = BaseModel()\n self.assertEqual(b.to_dict()[\"created_at\"], b.created_at.isoformat())", "def test_to_dict_created_at(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertIn('created_at', c_dictionary)", "def _is_datetime(self, name):\n return self._arg_tree[name]['type'] in ['iso-8601', 'rfc-1123']", "def test_to_dict_updated_at(self):\n test_dict = self.base.to_dict()\n self.assertEqual(type(test_dict['updated_at']), str)", "def test_date_format_for_created_at(self):\n bm = BaseModel()\n c_at = bm.created_at.__str__()\n c_at_pattern = '\\d{4}\\-\\d{2}\\-\\d{2}\\ \\d{2}\\:\\d{2}\\:\\d{2}\\.\\d{6}'\n self.assertRegex(c_at, c_at_pattern)", "def test_to_dict_updated_at_str(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertEqual(str, type(c_dictionary['updated_at']))", "def test_dict_to_created_at_attr_type(self):\n c = City()\n c_dictionary = c.to_dict()\n c2 = City(**c_dictionary)\n self.assertEqual(type(datetime.now()), type(c2.created_at))", "def _add_iso_created_at(self, tweet_dict):\n if tweet_dict.get('created_at'):\n tweet_dict['traptor']['created_at_iso'] = self._tweet_time_to_iso(\n tweet_dict['created_at']\n )\n\n return tweet_dict", "def test_prepare_datetime_format(datetime_format, expected):\n formatted_datetime = Elasticsearch_v2.prepare_datetime_format(datetime_format)\n assert formatted_datetime == expected\n assert not any(c.replace('T', '').isalpha() for c in arrow.get(datetime.now()).format(formatted_datetime))", "def test_that_updated_at_returned_by_to_dict_is_an_iso_string(self):\n b = BaseModel()\n self.assertEqual(b.to_dict()[\"updated_at\"], b.updated_at.isoformat())", "def test_dict_to_created_at_attr(self):\n c = City()\n c_dictionary = c.to_dict()\n c2 = City(**c_dictionary)\n self.assertEqual(c.created_at, c2.created_at)", "def test_updated_at_type(self):\n self.assertEqual(type(self.city.updated_at), datetime)", "def test_iso8601_string_in_dict():\n date = '2016-01-15'\n params = {\n 'time': date,\n 'tzid': 'Etc/UTC',\n }\n assert format_event_time(params) == {'time': '2016-01-15', 'tzid': 'Etc/UTC'}", "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_to_dict_updated_at(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertIn('updated_at', c_dictionary)", "def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is datetime)", "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.dtype.datetime64" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the egress of this V1NetworkPolicySpec.
def egress(self, egress): self._egress = egress
[ "def egress(self) -> 'outputs.EgressResponse':\n return pulumi.get(self, \"egress\")", "def egress_configuration(self) -> Optional['outputs.ServiceNetworkConfigurationEgressConfiguration']:\n return pulumi.get(self, \"egress_configuration\")", "def egress_setting(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"egress_setting\")", "def AddEgressSettingsFlag(parser):\n parser.add_argument(\n '--vpc-egress',\n help=(\n 'The outbound traffic to send through the VPC connector'\n ' for this resource. This resource must have a VPC connector to set'\n ' VPC egress.'\n ),\n choices={\n container_resource.EGRESS_SETTINGS_PRIVATE_RANGES_ONLY: (\n 'Default option. Sends outbound traffic to private IP addresses '\n 'defined by RFC1918 through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL_TRAFFIC: (\n 'Sends all outbound traffic through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL: (\n '(DEPRECATED) Sends all outbound traffic through the VPC '\n \"connector. Provides the same functionality as '{all_traffic}'.\"\n \" Prefer to use '{all_traffic}' instead.\".format(\n all_traffic=container_resource.EGRESS_SETTINGS_ALL_TRAFFIC\n )\n ),\n },\n )", "def patch_namespaced_egress_network_policy(self, name, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.patch_namespaced_egress_network_policy_with_http_info(name, namespace, body, **kwargs)\n else:\n (data) = self.patch_namespaced_egress_network_policy_with_http_info(name, namespace, body, **kwargs)\n return data", "def list_namespaced_egress_network_policy_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_egress_network_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `list_namespaced_egress_network_policy`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/namespaces/{namespace}/egressnetworkpolicies'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 
'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1EgressNetworkPolicyList',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def read_namespaced_egress_network_policy_with_http_info(self, name, namespace, **kwargs):\n\n all_params = ['name', 'namespace', 'pretty', 'exact', 'export']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_egress_network_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_egress_network_policy`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_egress_network_policy`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/namespaces/{namespace}/egressnetworkpolicies/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'exact' in params:\n query_params['exact'] = params['exact']\n if 'export' in params:\n query_params['export'] = params['export']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1EgressNetworkPolicy',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None):\n pass", "def add_egress_rule(self, rule):\n self.egress_rules.append(rule)", "def replace_namespaced_egress_network_policy(self, name, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = 
True\n if kwargs.get('callback'):\n return self.replace_namespaced_egress_network_policy_with_http_info(name, namespace, body, **kwargs)\n else:\n (data) = self.replace_namespaced_egress_network_policy_with_http_info(name, namespace, body, **kwargs)\n return data", "def create_namespaced_egress_network_policy_with_http_info(self, namespace, body, **kwargs):\n\n all_params = ['namespace', 'body', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_egress_network_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_egress_network_policy`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_egress_network_policy`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/namespaces/{namespace}/egressnetworkpolicies'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1EgressNetworkPolicy',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def create_namespaced_egress_network_policy(self, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_egress_network_policy_with_http_info(namespace, body, **kwargs)\n else:\n (data) = self.create_namespaced_egress_network_policy_with_http_info(namespace, body, **kwargs)\n return data", "def monthly_cost_network_egress(self) -> 'outputs.MoneyResponse':\n return pulumi.get(self, \"monthly_cost_network_egress\")", "def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None):\n pass", "def exponentialDecay(self):\n\n lr = self._lr * pow(self._decay_rate, self._step / self._decay_steps)\n for param_group in self._optimizer.param_groups:\n param_group[\"lr\"] = lr", "def egress_acl_analysis(self, egress_acl_analysis):\n\n self._egress_acl_analysis = egress_acl_analysis", 
"def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def negatives(self, negatives):\n if negatives is None:\n raise ValueError(\"Invalid value for `negatives`, must not be `None`\") # noqa: E501\n\n self._negatives = negatives", "def egress_rule_containing(self, other_policy, other_egress_rule_index):\n return self.rule_containing(other_policy, other_policy.egress_rules[other_egress_rule_index - 1],\n other_egress_rule_index, self.egress_rules)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the ingress of this V1NetworkPolicySpec.
def ingress(self, ingress): self._ingress = ingress
[ "def _ovs_set_interface_ingress(self, interface_uuid, inbound_limit):\n if inbound_limit < 0:\n raise ValueError(\"inbound_limit is negative.\")\n \n burst = 0.1 * inbound_limit * 1024\n if burst < CONF.network_device_mtu:\n burst = CONF.network_device_mtu\n \n ingress = int(inbound_limit * 1024.0 * 8.0 / 1000.0)\n ingress_burst = int(burst * 8.0 / 1000.0)\n \n args = ['ovs-vsctl', '--timeout=2',\n 'set', 'Interface', interface_uuid,\n 'ingress_policing_rate=%d' % ingress]\n utils.execute(args, root_helper=self.root_helper)\n \n args = ['ovs-vsctl', '--timeout=2',\n 'set', 'Interface', interface_uuid,\n 'ingress_policing_burst=%d' % ingress_burst]\n utils.execute(args, root_helper=self.root_helper)", "def ingress(\n self,\n value: typing.Union[typing.List[\"NetworkPolicyIngressRule\"], typing.List[dict]],\n ):\n cleaned: typing.List[NetworkPolicyIngressRule] = []\n for item in value:\n if isinstance(item, dict):\n item = typing.cast(\n NetworkPolicyIngressRule,\n NetworkPolicyIngressRule().from_dict(item),\n )\n cleaned.append(typing.cast(NetworkPolicyIngressRule, item))\n self._properties[\"ingress\"] = cleaned", "def ingress(self) -> 'outputs.IngressResponse':\n return pulumi.get(self, \"ingress\")", "def ingress_configuration(self) -> Optional['outputs.ServiceNetworkConfigurationIngressConfiguration']:\n return pulumi.get(self, \"ingress_configuration\")", "def ingress_acl_analysis(self, ingress_acl_analysis):\n\n self._ingress_acl_analysis = ingress_acl_analysis", "def vpp_enable_input_acl_interface(\n node, interface, ip_version, table_index):\n cmd = u\"input_acl_set_interface\"\n args = dict(\n sw_if_index=InterfaceUtil.get_interface_index(node, interface),\n ip4_table_index=table_index if ip_version == u\"ip4\"\n else Constants.BITWISE_NON_ZERO,\n ip6_table_index=table_index if ip_version == u\"ip6\"\n else Constants.BITWISE_NON_ZERO,\n l2_table_index=table_index if ip_version == u\"l2\"\n else Constants.BITWISE_NON_ZERO,\n is_add=1)\n err_msg = f\"Failed to enable input acl on interface {interface}\"\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)", "def AddIngressFlag(parser):\n parser.add_argument(\n '--ingress',\n choices=_INGRESS_MODES,\n help=(\n 'Set the ingress traffic sources allowed to call the service. 
For '\n 'Cloud Run (fully managed) the `--[no-]allow-unauthenticated` flag '\n 'separately controls the identities allowed to call the service.'\n ),\n default='all',\n )", "def network_in(self, network_in):\n\n self._network_in = network_in", "def replace_ingress_rule(ns, name, host, service_name):\n ing = Ingress(namespace=ns, config=config['apiserver'])\n for item in (\n ('templates', 'ingress-rule.yaml.j2'),\n ):\n with open(os.path.join(*item), 'r') as f:\n yaml_data = Template(f.read()).render({\n 'name': name,\n 'host': host,\n 'service_name': service_name\n })\n ing.replace('{}-ingress'.format(name), yaml.load(yaml_data))", "def patch_resource(\n self, namespace: typing.Optional[\"str\"] = None\n ) -> \"IngressStatus\":\n names = [\"patch_namespaced_ingress\", \"patch_ingress\"]\n\n response = _kube_api.execute(\n action=\"patch\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict(), \"name\": self.metadata.name},\n )\n\n output = IngressStatus()\n if response is not None:\n output.from_dict(_kube_api.to_kuber_dict(response.status))\n return output", "def ingress_url(self) -> str | None:\n if not self.with_ingress:\n return None\n\n url = f\"/api/hassio_ingress/{self.ingress_token}/\"\n if ATTR_INGRESS_ENTRY in self.data:\n return f\"{url}{self.data[ATTR_INGRESS_ENTRY]}\"\n return url", "def ingress(self) -> typing.List[\"IngressLoadBalancerIngress\"]:\n return typing.cast(\n typing.List[\"IngressLoadBalancerIngress\"],\n self._properties.get(\"ingress\"),\n )", "def select_ingress(self, ingress_point):\n\t\ttry:\n\t\t\tingress_index = self.ingress_points.index(ingress_point)\n\n\t\t\tif not self.is_constrained('direction'):\n\t\t\t\tdirection = [ingress_index, (ingress_index+1)%2]\n\n\t\t\t\tself.constrain_parameter('direction', direction)\n\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\tprint(f\"Error: specified ingress_point {ingress_point} not found in constraint ingress_points\")\n\t\t\treturn False\n\t\texcept:\n\t\t\tprint('Error: an unknown error occurred while trying to select ingress')\n\t\t\treturn False", "def add_ingress_rule(self, rule):\n self.ingress_rules.append(rule)", "def set_net_in_z(self, z, net_in):\n assert(0 < z <= self.hidden)\n self.net_in_z[z-1,0] = net_in", "def setNet(self, net) -> retval:\n ...", "def register_ingress():\n for c in ['PREROUTING', 'OUTPUT']:\n chain = iptc.Chain(iptc.Table(iptc.Table.MANGLE), c)\n for rule in chain.rules:\n if rule.target.name == FILTER_CHAIN:\n # Already registered\n break\n else:\n rule = iptc.Rule()\n t = rule.create_target(FILTER_CHAIN)\n chain.insert_rule(rule)", "def spec(self, value: typing.Union[\"NetworkPolicySpec\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n NetworkPolicySpec,\n NetworkPolicySpec().from_dict(value),\n )\n self._properties[\"spec\"] = value", "def ingress_traffic_allowed(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ingress_traffic_allowed\")", "def update(name, spec, **_):\n if \"ingress\" in spec:\n utils.create_or_update('placement/ingress.yml.j2',\n name=name, spec=spec)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the pod_selector of this V1NetworkPolicySpec.
def pod_selector(self, pod_selector):
    if self.local_vars_configuration.client_side_validation and pod_selector is None:  # noqa: E501
        raise ValueError("Invalid value for `pod_selector`, must not be `None`")  # noqa: E501
    self._pod_selector = pod_selector
[ "def pod_selector(self, pod_selector):\n if pod_selector is None:\n raise ValueError('Invalid value for `pod_selector`, must not be `None`')\n\n self._pod_selector = pod_selector", "def setSelector(self, selector: cern.japc.core.Selector) -> 'LsaSelectorBuilder':\n ...", "def patch_core_v1_namespaced_pod(self, name, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.patch_core_v1_namespaced_pod_with_http_info(name, namespace, body, **kwargs)\n else:\n (data) = self.patch_core_v1_namespaced_pod_with_http_info(name, namespace, body, **kwargs)\n return data", "def node_selector(self, node_selector):\n self._node_selector = node_selector", "def patchPod(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'body', 'ca_cert', 'cert', 'key']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method patchPod\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/namespaces/{namespaces}/pods/{name}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'PATCH'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json-patch+json,application/merge-patch+json,application/strategic-merge-patch+json,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n if ('body' in params):\n bodyParam = params['body']\n \n\n postData = (formParams if formParams else bodyParam)\n\n ca = params.get('ca_cert')\n cert = params.get('cert')\n key = params.get('key')\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, ca, cert, key, headerParams, files=files)", "def service_selector(self, service_selector):\n\n self._service_selector = service_selector", "def add_node_selector_constraint(\n self, label_name: Union[str, _pipeline_param.PipelineParam],\n value: Union[str, _pipeline_param.PipelineParam]):\n\n self.node_selector[label_name] = value\n return self", "def pod_monitor_selector(self) -> Optional[pulumi.Input['PrometheusSpecPodMonitorSelectorArgs']]:\n return pulumi.get(self, \"pod_monitor_selector\")", "def _setSelectorList(self, selectorList):\r\n self._checkReadonly()\r\n self._selectorList = selectorList", "def spec(self, value: typing.Union[\"NetworkPolicySpec\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n NetworkPolicySpec,\n NetworkPolicySpec().from_dict(value),\n )\n self._properties[\"spec\"] = value", "def replacePod(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'body', 'ca_cert', 'cert', 'key']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method replacePod\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/namespaces/{namespaces}/pods/{name}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'PUT'\n\n queryParams = {}\n headerParams 
= {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = '*/*,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n if ('body' in params):\n bodyParam = params['body']\n \n\n postData = (formParams if formParams else bodyParam)\n\n ca = params.get('ca_cert')\n cert = params.get('cert')\n key = params.get('key')\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, ca, cert, key, headerParams, files=files)\n\n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'V1beta3_Pod')\n return responseObject", "def replace_core_v1_namespaced_pod(self, name, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_core_v1_namespaced_pod_with_http_info(name, namespace, body, **kwargs)\n else:\n (data) = self.replace_core_v1_namespaced_pod_with_http_info(name, namespace, body, **kwargs)\n return data", "def rule_namespace_selector(self) -> Optional[pulumi.Input['PrometheusSpecRuleNamespaceSelectorArgs']]:\n return pulumi.get(self, \"rule_namespace_selector\")", "def send_selector(selector, host, port = 0):\r\n import socket\r\n if not port:\r\n i = host.find(':')\r\n if i >= 0:\r\n host, port = host[:i], int(host[i+1:])\r\n if not port:\r\n port = DEF_PORT\r\n elif type(port) == type(''):\r\n port = int(port)\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((host, port))\r\n s.sendall(selector + CRLF)\r\n s.shutdown(1)\r\n return s.makefile('rb')", "def rule_selector(self) -> Optional[pulumi.Input['PrometheusSpecRuleSelectorArgs']]:\n return pulumi.get(self, \"rule_selector\")", "def node_topology(self, value: typing.Union[\"LabelSelector\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n LabelSelector,\n LabelSelector().from_dict(value),\n )\n self._properties[\"nodeTopology\"] = value", "def add_pod_label(self, name: str, value: str):\n\n self.pod_labels[name] = value\n return self", "def set_network_policy(\n self,\n project_id,\n zone,\n cluster_id,\n network_policy,\n name=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"set_network_policy\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"set_network_policy\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.set_network_policy,\n default_retry=self._method_configs[\"SetNetworkPolicy\"].retry,\n default_timeout=self._method_configs[\"SetNetworkPolicy\"].timeout,\n client_info=self._client_info,\n )\n\n request = cluster_service_pb2.SetNetworkPolicyRequest(\n project_id=project_id,\n zone=zone,\n cluster_id=cluster_id,\n network_policy=network_policy,\n name=name,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n 
metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"set_network_policy\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def _spec(self) -> k8s.PodSpec:\n return k8s.PodSpec(\n containers=self.containers,\n volumes=self._volumes(),\n affinity=self._affinity(),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the policy_types of this V1NetworkPolicySpec.
def policy_types(self, policy_types):
    self._policy_types = policy_types
[ "def policy_types(self, policy_types):\n\n self._policy_types = policy_types", "def pool_types(self, pool_types):\n\n self._pool_types = pool_types", "def hybridization_types(self, hybridization_types: List[Chem.HybridizationType]) -> None:\r\n self._hybridization_types = hybridization_types", "def set_policylabeltype(self, policylabeltype):\n valid_types = ('HTTP', 'OTHERTCP')\n if policylabeltype and policylabeltype not in valid_types:\n raise ValueError(\"policylabeltype must be one of %s\" %\n \",\".join(valid_types))\n self.options['policylabeltype'] = policylabeltype", "def connection_types(self, connection_types: List[str]):\n\n self._connection_types = connection_types", "def webhook_types(self, webhook_types):\n\n self._webhook_types = webhook_types", "def allowed_attachment_types(self, allowed_attachment_types):\n\n self._allowed_attachment_types = allowed_attachment_types", "def betting_period_types(self, betting_period_types):\n\n self._betting_period_types = betting_period_types", "def assessment_types(self, assessment_types):\n\n self._assessment_types = assessment_types", "def stdtypes(self, stdtypes):\n\n self._stdtypes = stdtypes", "def spec(self, value: typing.Union[\"NetworkPolicySpec\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n NetworkPolicySpec,\n NetworkPolicySpec().from_dict(value),\n )\n self._properties[\"spec\"] = value", "def _set_policies(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_policies_vnf_bd__scenario_policies, yang_name=\"policies\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"policies\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"policies must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_policies_vnf_bd__scenario_policies, yang_name=\"policies\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"policies\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__policies = t\n if hasattr(self, '_set'):\n self._set()", "def _set_network_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=RestrictedClassType(\n base_type=six.text_type,\n restriction_type=\"dict_key\",\n restriction_arg={\n \"POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospf-types:POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n 
\"oc-ospf-types:BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospf-types:NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n },\n ),\n is_leaf=True,\n yang_name=\"network-type\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"identityref\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"network_type must be of a type compatible with identityref\"\"\",\n \"defined-type\": \"openconfig-network-instance:identityref\",\n \"generated-type\": \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}},), is_leaf=True, yang_name=\"network-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=True)\"\"\",\n }\n )\n\n self.__network_type = t\n if hasattr(self, \"_set\"):\n self._set()", "def set_network_policy(\n self,\n project_id,\n zone,\n cluster_id,\n network_policy,\n name=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"set_network_policy\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"set_network_policy\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.set_network_policy,\n default_retry=self._method_configs[\"SetNetworkPolicy\"].retry,\n default_timeout=self._method_configs[\"SetNetworkPolicy\"].timeout,\n client_info=self._client_info,\n 
)\n\n request = cluster_service_pb2.SetNetworkPolicyRequest(\n project_id=project_id,\n zone=zone,\n cluster_id=cluster_id,\n network_policy=network_policy,\n name=name,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"set_network_policy\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def set_policy(self, policy):\n self._policy = 'custom'\n self._P = policy", "def create(self, certTypes=None):\r\n self.certTypes = certTypes\r\n return self", "def betting_bet_types(self, betting_bet_types):\n\n self._betting_bet_types = betting_bet_types", "def ip_config_type(self, ip_config_type):\n allowed_values = [\"static\", \"DHCP\"]\n if ip_config_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `ip_config_type` ({0}), must be one of {1}\"\n .format(ip_config_type, allowed_values)\n )\n\n self._ip_config_type = ip_config_type", "def setNattype(self, nattype):\n self.nattype = nattype", "def chi_types(self, chi_types: List[rdchem.ChiralType]) -> None:\r\n self._chi_types = chi_types" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Face value of the bond.
def face_value(self) -> float:
    return self.__face_value
[ "def getFace(self):\n return self._face", "def getFaceIndex(self) -> \"int\":\n return _coin.SoFaceDetail_getFaceIndex(self)", "def face_callback(self,value):", "def faces(self):\n return self.face.values()", "def GetValence(self) -> \"int\":\n return _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF2GQEULLULLBBT_GetValence(self)", "def value(self):\n value = 0.0\n for bond in self._bonds:\n value += bond.value()\n return value", "def GetValence(self) -> \"int\":\n return _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF3GQEULLULLBBT_GetValence(self)", "def getAdjFaceColor(self, faceColor, index):\n\n if index == EDGERIGHT: return self.cubeDict[faceColor]['right']\n elif index == EDGETOP: return self.cubeDict[faceColor]['up']\n elif index == EDGELEFT: return self.cubeDict[faceColor]['left']\n elif index == EDGEDOWN: return self.cubeDict[faceColor]['down']\n elif index == CORNERTOPLEFT: return self.cubeDict[faceColor]['up'], self.cubeDict[faceColor]['left']\n elif index == CORNERDOWNLEFT: return self.cubeDict[faceColor]['left'], self.cubeDict[faceColor]['down']\n elif index == CORNERDOWNRIGHT: return self.cubeDict[faceColor]['down'], self.cubeDict[faceColor]['right']\n elif index == CORNERTOPRIGHT : return self.cubeDict[faceColor]['right'], self.cubeDict[faceColor]['up']\n else: raise Exception", "def _calculateFaceBins(self):\n vc = np.cross(\n self._V[self._F[:, 0], :] - self._V[self._F[:, 2], :],\n self._V[self._F[:, 1], :] - self._V[self._F[:, 2], :])\n\n A = np.sqrt(np.sum(vc ** 2, 1))\n FA = A / np.sum(A)\n self._faceBins = np.concatenate(([0],np.cumsum(FA)))", "def get_faces(self):\n\n return self.faces", "def faces(self):\n upper = [self.face(i,True) for i in range(self.dimension())]\n lower = [self.face(i,False) for i in range(self.dimension())]\n return upper + lower", "def GetForegroundValue(self) -> \"float\":\n return _itkBinaryMorphologicalOpeningImageFilterPython.itkBinaryMorphologicalOpeningImageFilterIF3IF3SE3_GetForegroundValue(self)", "def faceDiv(self):\n if getattr(self, '_faceDiv', None) is None:\n # Get the stencil of +1, -1's\n D = self._faceDivStencil\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.vol\n self._faceDiv = sdiag(1/V)*D*sdiag(S)\n return self._faceDiv", "def GetForegroundValue(self) -> \"float\":\n return _itkBinaryMorphologicalOpeningImageFilterPython.itkBinaryMorphologicalOpeningImageFilterIF2IF2SE2_GetForegroundValue(self)", "def GetForegroundValue(self) -> \"unsigned char\":\n return _itkBinaryMorphologicalOpeningImageFilterPython.itkBinaryMorphologicalOpeningImageFilterIUC3IUC3SE3_GetForegroundValue(self)", "def fan_value(self, fan):\r\n fan = self._get_fan(fan)\r\n if fan is not None:\r\n return fan[\"value\"]", "def getFaceIndices(self) -> \"int const *\":\n return _coin.SoGlyph_getFaceIndices(self)", "def GetForegroundValue(self) -> \"unsigned short\":\n return _itkBinaryMorphologicalOpeningImageFilterPython.itkBinaryMorphologicalOpeningImageFilterIUS3IUS3SE3_GetForegroundValue(self)", "def getValue(self) -> \"int\":\n return _coin.SoSFEnum_getValue(self)", "def getValue(self) -> \"float const *\":\n return _coin.SbVec4f_getValue(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Liquidity score assigned to buying/selling the bond.
def liquidity_score(self) -> RangeFilter:
    return self.__liquidity_score
[ "def bid_liquidity_used(self, bid_liquidity_used):\n\n self._bid_liquidity_used = bid_liquidity_used", "def income(self):\r\n if self.blockaded:\r\n return 0 # Blockaded planets have no income.\r\n income = round(self.realisedValue / float(100) * math.sqrt(self.owner.tech[\"Production Technology\"]),2)\r\n for level in self.improvementLevels:\r\n if level <= self.realisedImprovement:\r\n income += 1\r\n return income", "def constraint(self, x) -> float:\n portfolio_capacity = self.portfolio_capacity\n for i in range(len(x)):\n portfolio_capacity -= x[i] * self.share_price[i]\n return portfolio_capacity", "def insurance_price(self, items: Iterable[Item]) -> int:\n total_price: float = sum(\n self.__templates_repository.get_template(item).props.CreditsPrice\n for item in items\n )\n total_price *= self.__insurance_price_multiplier\n total_price -= total_price * min(self.standing.current_standing, 0.5)\n return int(total_price)", "def earned_value(self): \n \n return self.apc * self.budget", "def how_many_I_am_holding(self, security, style='shares'):\n for ct in self.context.portfolio.positions:\n if same_security(ct,security):\n if style=='shares':\n return self.context.portfolio.positions[ct].amount\n if style=='value': \n return self.context.portfolio.positions[ct].amount*self.context.portfolio.positions[ct].last_sale_price\n if style=='portfolio_percentage':\n if self.context.portfolio.portfolio_value > 0.00001:\n# self.throwError('how_many_I_am_holding','Zero portfolio value')\n return self.context.portfolio.positions[ct].last_sale_price / self.context.portfolio.portfolio_value\n return 0", "def discount_available():", "def stealability(self):\r\n stealable = self.price / self.weight\r\n if stealable < 0.5:\r\n return \"Not so stealable...\"\r\n elif 1 > stealable >= 0.5:\r\n return \"Kinda stealable.\"\r\n else:\r\n return \"Very stealable!\"", "def compute_score(self):\n return (self.priority, self.queued)", "def value(self):\n return self.shares() * self.price()", "def get_buy_amount(self):\r\n return self.balance / 3", "def planned_value(self):\n \n return self.ppc * self.budget", "def get_profit_percent(self) -> float:\n buy_price = self.get_buy_price()\n sell_price = self.get_sell_price()\n if sell_price is None:\n sell_price = self.get_asset().get_price()\n if sell_price is None:\n return None\n return ((sell_price-buy_price)/buy_price)*100", "def get_buy_bonus_rule(self):\n return self.__buy_bonus_rule", "def calculate_fair_value(self):\n pass", "def backstage_quality(item_: Item) -> None:\n if item_.quality >= 50:\n return\n if item_.sell_in <= 0:\n item_.quality = 0\n elif item_.sell_in <= 5:\n item_.quality += 3\n elif item_.sell_in <= 10:\n item_.quality += 2\n else:\n item_.quality += 1", "def equity(self):\n return self.assets - self.liabilities", "def calculate_purity(rating):\n if rating >= 1:\n return 1\n elif rating <= -1:\n return -1\n else:\n return 0", "def rigidity(self):\n try:\n from sage.libs.braiding import rigidity\n except ImportError:\n raise PackageNotFoundError(\"libbraiding\")\n return Integer(rigidity(self))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goldman Sachs' indicative charge of the bond (bps).
def gs_charge_bps(self) -> RangeFilter:
    return self.__gs_charge_bps
[ "def charge(self):\n return self.__charge", "def calculate_gdp_per_capita():\n pass", "def rate(self):\n rate, unused_value = self._get_bond_info()\n return rate", "def bond_B(k):\n return (4-k) * 300000", "def price_per_gb(self):\n return self.price / self.gb", "def price_per_gb(self):\n return self.price / self.capacity", "def test_get_bonds(self):\n r_bonds, p_bonds = self.rxn1.get_bonds()\n self.assertEqual(r_bonds, [(0, 1), (0, 2), (0, 3), (0, 4), (5, 6)]) # CH4 + OH\n self.assertEqual(p_bonds, [(0, 1), (0, 2), (0, 3), (4, 5), (4, 6)]) # CH3 + H2O", "def price_per_gb(self):\n return self.price / self.ram", "def get_charge(symbol):\n atom = as_atom(symbol)\n return atom.GetFormalCharge()", "def calculateBs(self):\n if self.verbose:\n print(self.name + \" calculating probability of bs\")\n if self.log is not None:\n self.log.write(self.name + ' calculating probability of bs')\n if self.world.getCurrentPlayer().gethandlength() <= 4:\n self.world.checkBs(self)\n if self.world.getCurrentPlayer().getnumplayed() + self.countCardsByNum(self.world.getTurnNum()) > 4:\n # if the number of cards played and the number of honest cards self has add up to more than 4, then\n # the current player must have been lying\n if self.verbose:\n print(self.name + \" knows the player must be lying.\")\n if self.log is not None:\n self.log.write(self.name + \" knows the player must be lying.\")\n self.world.checkBs(self)\n return\n else:\n print(self.name + \" can't be certain that the player is lying.\")\n if self.risk * 2 >= self.world.getDeckLen():\n if 2 <= self.difficulty <= 5: # MAKE THE BLUFFING SMARTER FOR HIGHER DIFFICULTIES\n if self.verbose:\n print(\"Checking the estimate dictionary: \" + str(self.estimate_dict))\n if self.log is not None:\n self.log.write(\"Checking the estimate dictionary: \" + str(self.estimate_dict))\n if self.world.getTurnNum() not in self.estimate_dict[self.world.getCurrentPlayer()]:\n if self.verbose:\n print(self.name + \" did not find the given number in the player's estimate dictionary\")\n if self.log is not None:\n self.log.write(self.name + \" did not find the given number in the player's estimate dictionary\\n\")\n self.world.checkBs(self)\n else: # assume the player played honestly and remove the honest numbers from the player's estimate\n if self.verbose:\n print(self.name + \" assuming the player played honestly\")\n if self.log is not None:\n self.log.write(self.name + \" assuming the player played honestly\")\n entry = self.estimate_dict[self.world.getCurrentPlayer()] # stored in a variable for better readability\n for n in range(entry.count(self.world.getTurnNum())):\n entry.remove(self.world.getTurnNum())\n self.world.askBs(self.world.getNextPlayer(self)) # MOVE THIS LINE LATER? 
WHY WOULD I MOVE IT LATER?", "def price_mg():\n return Gold.price_oz() / Gold.GRAM_PER_OZ / 1000.0", "def charge(posJ,boss):\n d = math.sqrt((posJ[1] - boss.position[1])**2 + (posJ[2] - boss.position[2])**2)\n boss.directionCharge = [(posJ[1]-0.5 - boss.position[1])/d*1/16,(posJ[2]-0.5 - boss.position[2])/d*1/16] \n #definit la direction de la charge\n boss.aura = \"charge\"\n boss.auratimer = 0", "def gs_charge_dollars(self) -> RangeFilter:\n return self.__gs_charge_dollars", "def additional_charge(self):\n self._balance=self._balance+1", "def bell():\n qc = QuantumCircuit(2)\n qc.h(0)\n qc.cx(0, 1)\n qc.measure_all()\n return CircuitProbsPair(qc, {0: 0.5, 3: 0.5, 1: 0, 2: 0})", "def charge_2(dists, charges):\n d6 = dists <= 6.0\n d8 = dists <= 8.0\n d6_8 = logical_and(logical_not(d6), d8)\n epsilons = (d6*4.0) + \\\n d6_8*(38.0*dists-224.0) + \\\n logical_not(d8)*80.0\n charge = (charges / ( epsilons * dists ))\n return sum(charge)", "def gate_drive_charge(self, V_d: float, V_gs: float) -> float:\n C_gd = self.C_rss\n C_gs = self.C_iss - self.C_rss\n C_equiv = C_gs + C_gd * (1 + V_d / V_gs)\n Q_gs = V_gs * C_equiv\n return Q_gs", "def calculate_breeding_cost(self, parent_breed_counts: List[int]) -> Decimal:\n prices = [SLP_BREEDING_COST[i] for i in parent_breed_counts]\n return (\n self.price_converter.slp_to_usd(sum(prices))\n + self.price_converter.axs_to_usd(\n AXS_BREEDING_COST * len(parent_breed_counts)\n )\n ).quantize(Decimal(\"0.01\"))", "async def get_chance(msg):\n if msg.channel.id not in data.get_currency_channels(msg.guild.id):\n return await embeds.desc_only(msg.channel, 'Currency Generation is **disabled** in this Channel. '\n 'Ask an Administrator to enable it.')\n return await embeds.desc_only(msg.channel, f'Currency Generation for this Server is set to '\n f'**{data.get_currency_chance(msg.guild.id)} %**.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }