query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | negatives (sequencelengths 19-20) | metadata (dict) |
---|---|---|---|
Return node's language tag. Look iteratively in self and parents for a class argument starting with ``language-`` and return the remainder of it (which should be a `BCP47` language tag) or the `fallback`. | def get_language_code(self, fallback=''):
    for cls in self.get('classes', []):
        if cls.startswith('language-'):
            return cls[9:]
    try:
        return self.parent.get_language_code(fallback)
    except AttributeError:
        return fallback | [
"def language(element: ET.Element) -> Optional[str]:\n classes = element.get('class', '').split()\n # Return the first one that matches.\n for css_class in classes:\n match = re.match(r'(lang|language)-(.*)$', css_class)\n if match is not None:\n prefix, language = match.groups()\n return language",
"def scanLanguageDirectives(self, p: Position) -> str:\n c = self.c\n language = g.getLanguageFromAncestorAtFileNode(p)\n return language or c.target_language",
"def _getTree(self, language):\n\n try:\n return self._words[language]\n except KeyError:\n raise LexiconError('Unsupported language: %s' % language)",
"def getLanguage(cls, code):\n cls.initialize()\n return None if code is None else cls.languageIndex.get(code, None)",
"def _language(self, item):\n\n response = item['spider_response'].body\n try:\n root = html.fromstring(response)\n except ValueError:\n root = html.fromstring(response.encode(\"utf-8\"))\n\n # Check for lang-attributes\n lang = root.get('lang')\n\n if lang is None:\n lang = root.get('xml:lang')\n\n # Check for general meta tags\n if lang is None:\n meta = root.cssselect('meta[name=\"language\"]')\n if len(meta) > 0:\n lang = meta[0].get('content')\n\n # Check for open graph tags\n if lang is None:\n meta = root.cssselect('meta[property=\"og:locale\"]')\n if len(meta) > 0:\n lang = meta[0].get('content')\n\n # Look for <article> elements and inspect the one with the largest payload with langdetect\n if lang is None:\n article_list = []\n for article in root.xpath('//article'):\n article_list.append(re.sub(r'\\s+', ' ', article.text_content().strip()))\n longest_articles = sorted(article_list, key=lambda article: len(article), reverse=True)\n for article in longest_articles:\n try:\n lang = detect(article)\n except LangDetectException:\n continue\n else:\n break\n\n # Analyze the whole body with langdetect\n if lang is None:\n try:\n lang = detect(root.text_content().strip())\n except LangDetectException:\n pass\n\n # Try to normalize output\n if lang is not None:\n # First search for suitable locale in the original output\n matches = self.langcode_pattern.search(lang)\n if matches is not None:\n lang = matches.group(0)\n else:\n # If no match was found, normalize the original output and search again\n normalized = locale.normalize(re.split(r'\\s|;|,', lang.strip())[0])\n matches = self.langcode_pattern.search(normalized)\n if matches is not None:\n lang = matches.group(0)\n\n return lang",
"def get_language_code(tag_008=None):\n try:\n return tag_008[35:38]\n except TypeError:\n return None\n except IndexError:\n return None",
"def detect_language(self, language=None):\n log.info('Detecting language for %s', self.fname)\n\n if language:\n self.lang = language\n\n else:\n ext = os.path.splitext(self.fname)[1]\n self.lang = languages.get_by_ext(ext)\n\n self.ms = self.lang['multistart']\n self.me = self.lang['multiend']\n self.multi_re = re.compile('%s.*?%s' % (self.me, self.ms))\n log.debug('Detected %s for %s', self.lang['name'], self.fname)",
"def language(self):\n pass",
"def scanColorDirectives(self, p: Position) -> str:\n c = self.c\n root = p.copy()\n for p in root.self_and_parents(copy=False):\n language = g.findFirstValidAtLanguageDirective(p.b)\n if language:\n return language\n # Get the language from the nearest ancestor @<file> node.\n language = g.getLanguageFromAncestorAtFileNode(root) or c.target_language\n return language",
"def get_language(self):\n return self._get_option('language')",
"def language_code(self):\n return self._book_dict[\"language_code\"]",
"def volume_get_language(self, volume):\n return self.request( \"volume-get-language\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'language-code': [ basestring, False ],\n 'nfs-character-set': [ basestring, False ],\n 'oem-character-set': [ basestring, False ],\n 'language': [ basestring, False ],\n } )",
"def get_language(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'language', identifier, category_details)",
"def get_language(entry):\n index_url = entry.url.replace(\"robots.txt\", \"\")\n\n # hack around some issues here,\n if entry.domain in KNOWN_LANGUAGES:\n language = KNOWN_LANGUAGES.get(entry.domain)\n\n else:\n try:\n page = requests.get(index_url)\n try:\n languages = cld2.detect(page.content, isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n except:\n languages = cld2.detect(page.text.encode(\"utf8\"), isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n\n # ignoring 'is_reliable' flag here, set on baidu.com etc (even though detects\n # language appropiately\n language = languages.details[0].language_name if languages.details else 'Unknown'\n index_url = page.url\n\n except Exception as e:\n log.exception(\"Failed to analyze language for '%s'\", entry.domain)\n language = 'Failed'\n\n language = language.title()\n # traditional chinese -> chinese\n if language == 'Chineset':\n language = 'Chinese'\n return language, not urlparse(index_url).netloc.endswith(entry.domain)",
"def get_source_language(resources):\r\n return resources[0].source_language",
"def language_name(language_code):\n return get_language_name(language_code)",
"def get(self, language: str) -> str:\n value = None\n\n try:\n # Get specified language\n value = self[language]\n\n # Default to english\n if value is None:\n value = self['en']\n except KeyError:\n # Default to the first property\n for language in self.keys():\n if language in self:\n value = self[language]\n break\n\n return value",
"def language(self):\n if self.service:\n if self.service.supports_single_language:\n self._language = self.service.supported_languages.all()[0]\n elif self.user and self.user.language in self.service.supported_languages.all():\n self._language = self.user.language\n elif self._language and not self._language in self.service.supported_languages.all():\n self._language = None\n else:\n self._language = None\n\n self.save()\n return self._language",
"def getLanguageName(self, language):\n\n return (language['name']\n if ((len(language) > 0) and ('name' in language.keys()) and (language['name'] is not None))\n else \"\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') from node or dictionary `dict_`. | def update_basic_atts(self, dict_):
    if isinstance(dict_, Node):
        dict_ = dict_.attributes
    for att in self.basic_attributes:
        self.append_attr_list(att, dict_.get(att, [])) | [
"def update_all_atts(self, dict_, update_fun = copy_attr_consistent,\r\n replace = True, and_source = False):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n\r\n # Include the source attribute when copying?\r\n if and_source:\r\n filter_fun = self.is_not_list_attribute\r\n else:\r\n filter_fun = self.is_not_known_attribute\r\n\r\n # Copy the basic attributes\r\n self.update_basic_atts(dict_)\r\n\r\n # Grab other attributes in dict_ not in self except the\r\n # (All basic attributes should be copied already)\r\n for att in filter(filter_fun, dict_):\r\n update_fun(self, att, dict_[att], replace)",
"def update(self, dict):\n self.attr.update(dict)\n return self",
"def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr",
"def update(self, given_dict):\n self.__dict__.update(given_dict)",
"def update(self, dictionary):\n for key, value in dictionary.items():\n if is_stringlike(key):\n setattr(self, key, value)\n else:\n self[Tag(key)] = value",
"def update(self, data):\n for field in self.ATTR_FIELDS:\n if field in data:\n setattr(self, field, data[field])",
"def update_attributes(self, override: Dict):\n self.additional_attributes.update(override)",
"def set_node_attributes(self, node_attributes: dict) -> None:\n\n node_attribute_map = self._create_node_attributes(node_attributes)\n\n for col in node_attribute_map.keys():\n\n if col in self.node_columns:\n\n for node in node_attribute_map[col]:\n\n if node in self.node_map.map.keys():\n\n self.node_map.map[node]['attributes'].update(node_attribute_map[col][node])",
"def update_svg_from_dict():\n doc = minidom.parse(TOPOLOGY_FILENAME)\n with open(RELATIONS_FILENAME) as fp:\n content = json.load(fp)\n relations, data = content[\"relations\"], content[\"data\"]\n\n for node in doc.getElementsByTagName(\"tspan\"):\n if not node.hasAttribute(RELATIONS_KEY):\n continue\n\n b_id = str(get_block_id(node))\n node.setAttribute(\"type\", data[b_id][\"type\"])\n node.setAttribute(RELATIONS_KEY, str(list(map(int, relations[b_id]))))\n node.setAttribute(ADDITIONAL_DATA_KEY, str({\n k: v for k, v in data[b_id].items() if k != \"type\"\n }))\n\n with open(TOPOLOGY_FILENAME, \"w\") as fp:\n doc.writexml(fp)",
"def update_with_attributes(obj, attributes):\n for key, val in attributes.items():\n setattr(obj, key, val)",
"def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n dynamic_attr.clear_overloads()\n \n self.update_children()\n \n for modifier in self.modifiers:\n self.apply_modifier(modifier)",
"def update_attributes_instability(attrs_inst: Dict[Attribute, float]):\n for attribute, attribute_instability in attrs_inst.items():\n attributes_instability[attribute] = attribute_instability",
"def _copy_attr(o, attr, adict, key=None):\n if hasattr(o, attr):\n adict[key or attr] = getattr(o, attr)",
"def update_from_dict(instance, attrs, commit):\n\n field_names = list(map(lambda f: f.name, instance._meta.get_fields()))\n for attr, val in attrs.items():\n if attr in field_names:\n setattr(instance, attr, val)\n\n if commit:\n instance.save()",
"def _dict_to_hdf5_attrs(hdf5_dataset_object, dictionary, base_path = ''):\n\n\tfor key, value in dictionary.items():\n\n\t\thdf5_dataset_object.attrs[os.path.join(base_path,key)] = value\n\n\treturn",
"def set_attributes_randomly(self) -> None:\n for f in self.attributes:\n self.data[f.name] = f.random_value()",
"def change(self, new_dict):\n self.dict = new_dict",
"def _set_netcdf_attributes(root, attrs):\n for key, val in attrs.items():\n setattr(root, key, val)",
"def add_additional_attributes(self, attribs: dict):\n for k, v in attribs.items():\n if k not in self.__dict__:\n setattr(self, k, v)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each element in values, if it does not exist in self[attr], append it. | def append_attr_list(self, attr, values):
    # List Concatenation
    for value in values:
        if not value in self[attr]:
            self[attr].append(value) | [
"def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr, value, replace)",
"def coerce_append_attr_list(self, attr, value):\r\n # List Concatenation\r\n if not isinstance(self.get(attr), list):\r\n self[attr] = [self[attr]]\r\n if not isinstance(value, list):\r\n value = [value]\r\n self.append_attr_list(attr, value)",
"def __or__(self, attrs):\r\n remove = set([an for an, av in attrs if av is None])\r\n replace = dict([(an, av) for an, av in attrs\r\n if an in self and av is not None])\r\n return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self\r\n if sn not in remove] +\r\n [(an, av) for an, av in attrs\r\n if an not in self and an not in remove])",
"def merge(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value, set value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n if value not in current_value:\n _set(environment, attr, current_value + \" \" + value)\n # do nothing if value in current value\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(\n environment,\n attr,\n CompleteExpression(merge_sublist(list(current_value), list(value))),\n )\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, merge_sublist(current_value, value))\n else:\n raise TypeError(\n \"current value is of unsupported type\"\n f\"'{type(current_value)}' for the 'append' action\"\n )",
"def addVals( self, vals ):\n for x in vals: self.add( x )",
"def addVals( self, vals ):\n for x in vals: self.add( x )",
"def append(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n _set(environment, attr, current_value + \" \" + value)\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(environment, attr, (current_value + value).complete())\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, current_value + value)\n else:\n raise TypeError(\n \"current value and new value are of unsupported types\"\n f\"'{type(current_value)}' and '{type(value)}' for the 'append' action\"\n )",
"def add_to_set(value, values):\n if value:\n values.add(value)\n return values",
"def applyAttrVals(self, obj, vals):\n [setattr(obj, key, val) for key, val in vals.iteritems()]",
"def add_attr(self, attr_name, samplet_id, attr_value):\n\n if attr_name is not None:\n\n if attr_name not in self._attr:\n self._attr[attr_name] = dict()\n self._attr_dtype[attr_name] = None\n\n if is_iterable_but_not_str(samplet_id):\n if not isinstance(attr_value, (Sequence, np.ndarray, np.generic)):\n raise TypeError('When samplet_id is a list, attr_value must '\n 'also be a list')\n if len(samplet_id) != len(attr_value):\n raise ValueError('Number of attribute values provided do not '\n 'match the number of samplet IDs')\n\n for sid, val in zip(samplet_id, attr_value):\n self.__add_single_attr(attr_name, sid, val)\n\n else:\n if is_iterable_but_not_str(attr_value):\n raise TypeError('When samplet_id is not a list, attr_value also '\n 'must not be a list')\n\n self.__add_single_attr(attr_name, samplet_id, attr_value)\n\n else:\n raise ValueError('Attribute name can not be None!')",
"def by_attribute(self, schema_field, att_value, is_lookup=False):\n\n clone = self.prepare_attribute_qs()\n real_name = str(schema_field.real_name)\n if not isinstance(att_value, (list, tuple)):\n att_value = [att_value]\n if is_lookup:\n att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)\n if not att_value:\n # If the lookup values don't exist, then there aren't any\n # NewsItems with this attribute value. Note that we aren't\n # using QuerySet.none() here, because we want the result to\n # be a NewsItemQuerySet, and none() returns a normal QuerySet.\n clone = clone.extra(where=('1=0',))\n return clone\n att_value = [val.id for val in att_value]\n if schema_field.is_many_to_many_lookup():\n # We have to use a regular expression search to look for all rows\n # with the given att_value *somewhere* in the column. The [[:<:]]\n # thing is a word boundary.\n for value in att_value:\n if not str(value).isdigit():\n raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')\n clone = clone.extra(where=(\"db_attribute.%s ~ '[[:<:]]%s[[:>:]]'\" % (real_name, '|'.join([str(val) for val in att_value])),))\n elif None in att_value:\n if att_value != [None]:\n raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')\n clone = clone.extra(where=(\"db_attribute.%s IS NULL\" % real_name,))\n else:\n clone = clone.extra(where=(\"db_attribute.%s IN (%s)\" % (real_name, ','.join(['%s' for val in att_value])),),\n params=tuple(att_value))\n return clone",
"def append_all(x, val):\n for el in x:\n el.append(val)",
"def attr_jar_append(cls, value):\n cls.attr_jar.append(value.encode('ascii', 'replace'))",
"def append_val(self, key, val, extra_data):\n raise NotImplementedError",
"def get_values(data, attribute):\n return data[attribute].unique()",
"def get_attribute_values(self, object_dn, key, vals):\n\n r = re.compile(\"^([^;]+);range=(\\d+)-(\\d+|\\*)$\")\n\n m = r.match(key)\n if m is None:\n # no range, just return the values\n return vals\n\n attr = m.group(1)\n hi = int(m.group(3))\n\n # get additional values in a loop\n # until we get a response with '*' at the end\n while True:\n\n n = \"%s;range=%d-*\" % (attr, hi + 1)\n res = self.ldb.search(base=object_dn, scope=SCOPE_BASE, attrs=[n])\n assert len(res) == 1\n res = dict(res[0])\n del res[\"dn\"]\n\n fm = None\n fvals = None\n\n for key in res.keys():\n m = r.match(key)\n\n if m is None:\n continue\n\n if m.group(1) != attr:\n continue\n\n fm = m\n fvals = list(res[key])\n break\n\n if fm is None:\n break\n\n vals.extend(fvals)\n if fm.group(3) == \"*\":\n # if we got \"*\" we're done\n break\n\n assert int(fm.group(2)) == hi + 1\n hi = int(fm.group(3))\n\n return vals",
"def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml",
"def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v",
"def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
First, convert both self[attr] and value to a non-string sequence type; if either is not already a sequence, convert it to a list of one element. Then call append_attr_list. | def coerce_append_attr_list(self, attr, value):
    # List Concatenation
    if not isinstance(self.get(attr), list):
        self[attr] = [self[attr]]
    if not isinstance(value, list):
        value = [value]
    self.append_attr_list(attr, value) | [
"def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr, value, replace)",
"def append_attr_list(self, attr, values):\r\n # List Concatenation\r\n for value in values:\r\n if not value in self[attr]:\r\n self[attr].append(value)",
"def _set_log_attrs(\n self,\n attr_map: dict,\n attr_type: str,\n value: t.Union[str, t.List[str]],\n ):\n if not hasattr(self, \"_LOG_ATTRS\"):\n self._LOG_ATTRS = {\"response\": [], \"request\": []}\n\n value = [x.lower().strip() for x in listify(value) if isinstance(x, str)]\n\n if not value:\n self._LOG_ATTRS[attr_type] = []\n return\n\n log_attrs = self._LOG_ATTRS[attr_type]\n\n if \"all\" in value:\n for k, v in attr_map.items():\n entry = f\"{k}={v}\"\n if entry not in log_attrs:\n log_attrs.append(entry)\n return\n\n for item in value:\n if item in attr_map:\n value = attr_map[item]\n entry = f\"{item}={value}\"\n if entry not in log_attrs:\n log_attrs.append(entry)",
"def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]",
"def __set_list_value(self, prop, val):\n\t\tif isinstance(val, str):\n\t\t\tif val != \"\":\n\t\t\t\tprop.append(val)\n\t\telif isinstance(val, list):\n\t\t\tif val:\n\t\t\t\tprop.extend([x.strip() for x in val])\n\t\telse:\n\t\t\traise TypeError(\"Expected string, got %r instead\" % type(val))",
"def _convert_param_attr_to_list(param_attr, n):\n if isinstance(param_attr, (list, tuple)):\n assert len(param_attr) == n, (\n \"length of param_attr should be %d when it is a list/tuple\" % n\n )\n param_attrs = []\n for attr in param_attr:\n if isinstance(attr, bool):\n if attr:\n param_attrs.append(ParamAttr._to_attr(None))\n else:\n param_attrs.append(False)\n else:\n param_attrs.append(ParamAttr._to_attr(attr))\n # param_attrs = [ParamAttr._to_attr(attr) for attr in param_attr]\n elif isinstance(param_attr, bool):\n param_attrs = []\n if param_attr:\n param_attrs = [ParamAttr._to_attr(None) for i in range(n)]\n else:\n param_attrs = [False] * n\n else:\n param_attrs = []\n attr = ParamAttr._to_attr(param_attr)\n for i in range(n):\n attr_i = copy.deepcopy(attr)\n if attr.name:\n attr_i.name = attr_i.name + \"_\" + str(i)\n param_attrs.append(attr_i)\n return param_attrs",
"def _parse_attr_proto(self, attributes, node):\n for attr in attributes:\n if sys.getsizeof(attr['value']) > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES:\n message = f\"The attribute value of node({node.name}) \" \\\n f\"is over {self.MAX_NODE_ATTRIBUTE_VALUE_BYTES} Bytes, will ignore.\"\n logger.warning(message)\n continue\n\n if attr['name'] == 'gen_strategy':\n # The gen_strategy value is equal in_strategy value, so we only need to show one strategy value in attr\n continue\n\n value = self._parse_value_proto(attr['value'])\n node.add_attr({attr['name']: str(value)})",
"def get_sequentialAttrDict(self,attr = None):\n\t#log.debug(\">>> %s.get_sequentialAttrDict(attr = '%s') >> \"%(self.p_nameShort,attr) + \"=\"*75) \t\t\n\tuserAttrs = self.getUserAttrsAsDict()\n\td_attrList = {}\n\tfor key in userAttrs.keys():\n\t if '_' in key:\n\t\t_split = key.split('_')\n\t\t_int_ = _split[-1]\n\t\t_str_ = ('_').join(_split[:-1])\n\t\tif \"%s\"%attr == _str_:\n\t\t try:\n\t\t\td_attrList[int(_int_)] = key\n\t\t\t#log.debug(\"match: '%s'\"%key)\n\t\t except:log.warning(\"%s failed to int | int: %s\"%(key,_int_))\n\t\t \n\t#log.debug(\"-\"*100) \t \t\n\treturn d_attrList",
"def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml",
"def uuids_as_list(attrname):\n return (lambda self, value: [operator.attrgetter('id')(obj)\n for obj in operator.attrgetter(\n attrname)(self)])",
"def attr_value_proto(value):\n value_type = str(type(value))\n op_attr_str = str(value)\n value_attr = AttrValue(s=op_attr_str.encode(encoding='utf_8'))\n\n if value_type == \"<class 'int'>\" or value_type == \"<type 'int'>\":\n value_attr = AttrValue(i=value)\n elif value_type == \"<class 'float'>\" or value_type == \"<type 'float'>\":\n value_attr = AttrValue(f=value)\n elif value_type == \"<class 'bool'>\" or value_type == \"<type 'bool'>\":\n value_attr = AttrValue(b=value)\n elif value_type == \"<class 'list'>\" or value_type == \"<type 'list'>\":\n if len(value) > 0:\n value_list_dtype = str(type(value[0]))\n if value_list_dtype == \"<class 'int'>\" or value_list_dtype == \"<type 'int'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(i=value))\n elif value_list_dtype == \"<class 'float'>\" or value_list_dtype == \"<type 'float'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(f=value))\n elif value_list_dtype == \"<class 'bool'>\" or value_list_dtype == \"<type 'bool'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(b=value))\n\n return value_attr",
"def add_attr(self, attr_name, samplet_id, attr_value):\n\n if attr_name is not None:\n\n if attr_name not in self._attr:\n self._attr[attr_name] = dict()\n self._attr_dtype[attr_name] = None\n\n if is_iterable_but_not_str(samplet_id):\n if not isinstance(attr_value, (Sequence, np.ndarray, np.generic)):\n raise TypeError('When samplet_id is a list, attr_value must '\n 'also be a list')\n if len(samplet_id) != len(attr_value):\n raise ValueError('Number of attribute values provided do not '\n 'match the number of samplet IDs')\n\n for sid, val in zip(samplet_id, attr_value):\n self.__add_single_attr(attr_name, sid, val)\n\n else:\n if is_iterable_but_not_str(attr_value):\n raise TypeError('When samplet_id is not a list, attr_value also '\n 'must not be a list')\n\n self.__add_single_attr(attr_name, samplet_id, attr_value)\n\n else:\n raise ValueError('Attribute name can not be None!')",
"def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)",
"def attr_jar_append(cls, value):\n cls.attr_jar.append(value.encode('ascii', 'replace'))",
"def get_all_attr(self, attribute: str): # -> list[tuple[SimpleConstraint, typing.Any]]:\n value = getattr(self, attribute)\n if value is not None:\n return [(self, value)]\n return []",
"def _get_log_attrs(self, attr_type: str) -> t.List[str]:\n return getattr(self, \"_LOG_ATTRS\", {}).get(attr_type, [])",
"def by_attribute(self, schema_field, att_value, is_lookup=False):\n\n clone = self.prepare_attribute_qs()\n real_name = str(schema_field.real_name)\n if not isinstance(att_value, (list, tuple)):\n att_value = [att_value]\n if is_lookup:\n att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)\n if not att_value:\n # If the lookup values don't exist, then there aren't any\n # NewsItems with this attribute value. Note that we aren't\n # using QuerySet.none() here, because we want the result to\n # be a NewsItemQuerySet, and none() returns a normal QuerySet.\n clone = clone.extra(where=('1=0',))\n return clone\n att_value = [val.id for val in att_value]\n if schema_field.is_many_to_many_lookup():\n # We have to use a regular expression search to look for all rows\n # with the given att_value *somewhere* in the column. The [[:<:]]\n # thing is a word boundary.\n for value in att_value:\n if not str(value).isdigit():\n raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')\n clone = clone.extra(where=(\"db_attribute.%s ~ '[[:<:]]%s[[:>:]]'\" % (real_name, '|'.join([str(val) for val in att_value])),))\n elif None in att_value:\n if att_value != [None]:\n raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')\n clone = clone.extra(where=(\"db_attribute.%s IS NULL\" % real_name,))\n else:\n clone = clone.extra(where=(\"db_attribute.%s IN (%s)\" % (real_name, ','.join(['%s' for val in att_value])),),\n params=tuple(att_value))\n return clone",
"def _attr_from_documents(self, attr):\n elements = [getattr(doc, attr) for doc in self.documents]\n return elements",
"def get_valid_attribute_values(self, attr, buf, pos):\n node = buf.xml_node_at_pos(pos)\n if node is None: return None\n handlerclass = buf.xml_tree_handler(node)\n values = handlerclass.values(attr, buf.xml_tree, node)\n if not values:\n return None\n values.sort()\n return values"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If self[attr] does not exist or force is True or omitted, set self[attr] to value, otherwise do nothing. | def replace_attr(self, attr, value, force = True):
    # One or the other
    if force or self.get(attr) is None:
        self[attr] = value | [
"def set(self, attr, val):\n if not hasattr(self, attr):\n logger.error('model: set: The attribute \"{0}\" is undefined'.format(attr))\n sys.exit(1)\n setattr(self, attr, val)",
"def set_attribute(self,att,val):\r\n self.attributes[att] = val",
"def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)",
"def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)",
"def set_attr(self,n,attr,val=None):\n\t\tself.realopen()\n\t\ta=loads(self.bdb.get(dumps(n,-1),txn=self.txn))\n\t\tif isinstance(attr,dict) :\n\t\t\ta.update(attr)\n\t\telse: a[attr]=val\n\t\tself[n]=a",
"def setOptionalAttribute(self, name, value):\n if value is not None:\n self.setAttribute(name, value)",
"def _apply_value(self, value):\n\n setattr(self._obj, self._attr, value)",
"def onSetAttr(self, attr, vals, opts):\n pass",
"def set_attribute(self,attr,value,add = None):\n\t\tif (add is None):\n\t\t\tadd = False \n\t\tif (attr is None):\n\t\t\traise ValueError(\"You must specify an attribute\")\n\t\tif (value is None):\n\t\t\traise ValueError(\"You must specify a value\")\n\t\tif ((not add) and (attr not in self._Attributes)):\n\t\t\traise ValueError(\"Attribute \" + attr + \" unrecognized\")\n\t\tself._Attributes[attr] = value",
"def set(value,force=False):",
"def SetAttr(self, attr, value):\n self.__article[attr] = value",
"def set(self, prop, val):\n if prop == 'num_released':\n raise AttributeError(\"cannot set attribute\")\n\n # we don't want to add an attribute that doesn't already exist\n # first check to see that the attribute exists, then change it else\n # raise error\n if hasattr(self.release, prop):\n setattr(self.release, prop, val)\n elif hasattr(self.element_type, prop):\n setattr(self.element_type, prop, val)\n else:\n for init in self.element_type.initializers.values():\n if hasattr(init, prop):\n setattr(init, prop, val)\n break\n else:\n raise AttributeError('{0} attribute does not exist '\n 'in element_type '\n 'or release object'.format(prop))",
"def set_attr(self, node: str, value: dict):\n\n if node in list(self.graph.keys()):\n self.graph[node][self._ATTR] = value\n else:\n raise NodeDoesNotExist(node)",
"def update_attribute(self, instance, name, field, value):\n field_setter = getattr(self, f\"set_{name}\", None)\n if field_setter:\n field_setter(instance, name, field, value)\n else:\n setattr(instance, name, value)",
"def set_attr(self, user, key, value):\n query1 = \"\"\"SELECT attr_value FROM attributes WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n query2 = \"INSERT INTO attributes VALUES (?, ?, ?)\"\n query3 = \"\"\"UPDATE attributes SET attr_value = ? WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n with self._db_access_lock, sqlite.connect(self._dbfile) as conn:\n if conn.execute(query1, (user, key)).fetchone():\n conn.execute(query3, (value, user, key))\n else:\n conn.execute(query2, (user, key, value))\n try:\n self._attributes[user][key] = value\n except KeyError:\n self.attributes[user] = {key: value}",
"def set(self, attr_name, attr_value, overwrite=False):\n aname = 'lgt.' + attr_name\n if attr_name in self.lgtattrs:\n if not overwrite:\n log.warn('LGT attribute \"%s\" exists but overwrite is False.' % aname)\n return \n self.lgtattrs.union(set(attr_name))\n self._obj.attrs[aname] = attr_value",
"def _attribute_inverter(obj, name, value):\n setattr(obj, name, value)\n return True",
"def SetAttribute(self, name, value):\n aMap = self._AMap()\n if name in aMap:\n attrName, decode, vType = aMap[name]\n if vType is ListType:\n if value is None:\n value = []\n else:\n value = value.split()\n setattr(self, attrName, map(decode, value))\n elif vType is DictType:\n if value is None:\n value = []\n else:\n value = value.split()\n dValue = {}\n for iv in map(decode, value):\n dValue[iv] = dValue.get(iv, 0) + 1\n setattr(self, attrName, dValue)\n else:\n x = getattr(self, attrName, None)\n if type(x) in (ListType, DictType):\n print \"Problem setting %s in %s: single value will overwrite List or Dict\" % (repr(name), repr(self.__class__.__name__))\n # print self.GetDocument()\n if value is None:\n setattr(self, attrName, None)\n else:\n setattr(self, attrName, decode(value))\n elif hasattr(self.__class__, 'ID') and name == self.__class__.ID:\n self.SetID(value)\n else:\n if value is None:\n if name in self._attrs:\n del self._attrs[name]\n else:\n self._attrs[name] = value",
"def update_attr(field, attr, value):\n\n field.widget.attrs.update({\n attr: value\n })"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If attr is an attribute of self and both self[attr] and value are lists, concatenate the two sequences, setting the result to self[attr]. If either self[attr] or value are non-sequences and replace is True or self[attr] is None, replace self[attr] with value. Otherwise, do nothing. | def copy_attr_concatenate(self, attr, value, replace):
    if self.get(attr) is not value:
        if isinstance(self.get(attr), list) and \
           isinstance(value, list):
            self.append_attr_list(attr, value)
        else:
            self.replace_attr(attr, value, replace) | [
"def replace_attr(self, attr, value, force = True):\r\n # One or the other\r\n if force or self.get(attr) is None:\r\n self[attr] = value",
"def append_attr_list(self, attr, values):\r\n # List Concatenation\r\n for value in values:\r\n if not value in self[attr]:\r\n self[attr].append(value)",
"def __or__(self, attrs):\r\n remove = set([an for an, av in attrs if av is None])\r\n replace = dict([(an, av) for an, av in attrs\r\n if an in self and av is not None])\r\n return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self\r\n if sn not in remove] +\r\n [(an, av) for an, av in attrs\r\n if an not in self and an not in remove])",
"def merge(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value, set value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n if value not in current_value:\n _set(environment, attr, current_value + \" \" + value)\n # do nothing if value in current value\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(\n environment,\n attr,\n CompleteExpression(merge_sublist(list(current_value), list(value))),\n )\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, merge_sublist(current_value, value))\n else:\n raise TypeError(\n \"current value is of unsupported type\"\n f\"'{type(current_value)}' for the 'append' action\"\n )",
"def append(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n _set(environment, attr, current_value + \" \" + value)\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(environment, attr, (current_value + value).complete())\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, current_value + value)\n else:\n raise TypeError(\n \"current value and new value are of unsupported types\"\n f\"'{type(current_value)}' and '{type(value)}' for the 'append' action\"\n )",
"def coerce_append_attr_list(self, attr, value):\r\n # List Concatenation\r\n if not isinstance(self.get(attr), list):\r\n self[attr] = [self[attr]]\r\n if not isinstance(value, list):\r\n value = [value]\r\n self.append_attr_list(attr, value)",
"def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)",
"def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v",
"def _combine_attribute(attribute, other_attribute, separator=', '):\n if (other_attribute and attribute != other_attribute and\n attribute not in other_attribute and other_attribute not in attribute):\n return '{}{}{}'.format(attribute, separator, other_attribute)\n return attribute",
"def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)",
"def concat_attribute_values(l, r, delimiter):\n if not l:\n return r\n if not r:\n return l\n return l + delimiter + r",
"def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml",
"def set_attributes(self, model_1, obj_1, obj_2, overwrite=True):\n for (\n attr\n ) in (\n obj_2.traits()\n ): # Iterate through all attributes in obj_2. These should be the same traits as obj_1 assuming the precondition\n class_name = str(type(obj_2.traits()[attr])).strip(\"<>'\").split(\".\")[-1]\n # TODO: check for reactance tuples: str(obj_2.traits()[attr]._trait.klass).strip(\"<>'\").split('.')[-1] != (Int,Int,Int):\n\n if class_name == \"List\":\n phase_order = {\n \"A\": 0,\n \"B\": 1,\n \"C\": 2,\n \"N\": 3,\n } # Should only have to deal with 3 phases.\n #\n # BUG WARNING: The order of objects in the list is important and is used to determine the changes that are made\n # Try to ensure that phases are specified to avoid this problem\n # If number of elements in obj_1 is 0, all elements of obj_2 are added\n # If number of elements is the same, they are modified with a 1-1 comparison\n # If number of elements in obj_2 is < obj_1, set the first values of obj_1 as obj_2\n # If number of elements in obj_2 is > obj_1, set the all the values in obj_1 in the order they'r in obj_2 and append the extras\n # This will fail if obj_1 is (A, B, C) and obj_2 is (A, C), as it'll assign phase C to phase B.\n # This will also fail if obj_1 is (C) and obj_2 is (A,B,C) as C will have A assigned to it.\n # This will also fail if obj_1 is (A,B) and obj_2 is (A,C) as B will have C assigned to it.\n list_1 = getattr(obj_1, attr)\n list_2 = getattr(obj_2, attr)\n if list_1 is None or len(list_1) == 0:\n result_list = []\n for element in list_2:\n result_list.append(self.copy(model_1, element))\n setattr(obj_1, attr, result_list)\n continue\n elif list_2 is None or len(list_2) == 0:\n continue\n\n # Almost all Lists are of objects which have phases. Exceptions being windings, reactances and positions\n # Require the phases to be specified in both systems to modify based on phase\n has_phases = True\n for i in range(len(list_1)):\n if not (\n hasattr(list_1[0], \"phase\") and list_1[0].phase is not None\n ):\n has_phases = False\n for i in range(len(list_2)):\n if not (\n hasattr(list_2[0], \"phase\") and list_2[0].phase is not None\n ):\n has_phases = False\n if has_phases and len(list_1) > 0 and len(list_2) > 0:\n # Firstly sort the lists so they're in correct order by phase.\n list_1.sort(key=lambda x: phase_order[x.phase])\n list_2.sort(key=lambda x: phase_order[x.phase])\n list_1_phase = phase_order[list_1[0].phase]\n list_2_phase = phase_order[list_2[0].phase]\n list_1_idx = 0\n list_2_idx = 0\n while list_1_idx < len(list_1) and list_2_idx < len(list_2):\n if list_1_idx < len(list_1):\n list_1_phase = phase_order[list_1[list_1_idx].phase]\n else:\n list_1_phase = 1000000\n if list_2_idx < len(list_2):\n list_2_phase = phase_order[list_2[list_2_idx].phase]\n else:\n list_2_phase = 1000001\n\n # i.e. recurse\n if list_1_phase == list_2_phase:\n self.set_attributes(\n model_1,\n list_1[list_1_idx],\n list_2[list_2_idx],\n overwrite,\n )\n list_1_idx = list_1_idx + 1\n list_2_idx = list_2_idx + 1\n elif list_1_phase < list_2_phase:\n list_1_idx = (\n list_1_idx + 1\n ) # e.g. obj_1 = (A, B, C) and obj_2 = (B). We don't update this phase\n\n else:\n getattr(obj_1, attr).append(list_2[list_2_idx])\n list_2_idx = list_2_idx + 1\n\n elif len(list_1) == len(list_2):\n for i in range(len(list_1)):\n self.set_attributes(model_1, list_1[i], list_2[i], overwrite)\n\n elif len(list_1) > len(list_2):\n for i in range(len(list_2)):\n self.set_attributes(model_1, list_1[i], list_2[i], overwrite)\n\n else: # i.e. 
len(list_1) < len(list_2):\n for i in range(len(list_2)):\n if i < len(list_1):\n self.set_attributes(\n model_1, list_1[i], list_2[i], overwrite\n )\n else:\n getattr(obj_1, attr).append(list_2[i])\n\n else:\n value = getattr(obj_2, attr)\n if value is not None:\n if getattr(obj_1, attr) is not None and overwrite == False:\n continue\n setattr(obj_1, attr, value)",
"def merge_attribute(self, attribute, overwrite=False):\n\n assert(attribute.user == self.user)\n for k, v in attribute.other_data.iteritems():\n if not overwrite and k in self.data:\n print \"WARNING! OVEWRITITNG \", k\n self.data[k] = v",
"def normalizeAttributeValue (\n\n self,\n attribute = None,\n value = None\n ) :\n \n if ( ( utilities.isEmpty( attribute ) ) or ( utilities.isEmpty( value ) ) ) : return None, None\n\n attribute = utilities.string( attribute, format = \"identifier\" )\n\n if attribute == \"reference\" : pass\n\n elif attribute == \"bibtex\" : pass\n\n elif attribute in self.aliasDictionary : attribute = self.aliasDictionary[ attribute ]\n\n elif attribute in self.fieldList : pass\n\n else : return None, None\n\n # first normalization of value: removes external {}, quotes, and strips spaces\n\n value = value.strip( \";,: /\\\\\" )\n\n size = len( value )\n\n while True : \n\n if value.startswith( \"{\" ) and value.endswith( \"}\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"(\" ) and value.endswith( \")\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"[\" ) and value.endswith( \"]\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( '\"' ) and value.endswith( '\"' ) : value = value[ 1 : -1 ]\n\n if value.startswith( \"'\" ) and value.endswith( \"'\" ) : value = value[ 1 : -1 ]\n\n value = value.strip( \";,: /\\\\\" )\n\n if len( value ) == size : break\n\n size = len( value )\n\n # normalizes fields\n \n if attribute == \"author\" :\n\n value = self.normalizeAuthor( value )\n\n self.author = value\n\n elif ( ( attribute == \"reference\" ) or ( attribute == \"bibtex\" ) ) :\n\n attribute = \"bibtex\"\n\n value = utilities.string( value, format = \"identifier\" )\n \n self.bibtex = value\n\n elif attribute == \"booktitle\" : value = self.normalizeBookTitle( value )\n\n elif attribute == \"description\" :\n\n value = self.normalizeDescription( value )\n\n self.description = value\n\n elif attribute == \"editor\" : value = self.normalizeEditor( value )\n\n elif attribute == \"journal\" : value = self.normalizeJournal( value )\n\n elif attribute == \"month\" : value = self.normalizeMonth( value )\n\n elif attribute == \"pages\" : value = self.normalizePages( value )\n\n elif attribute == \"title\" :\n\n value = self.normalizeTitle( value )\n\n self.title = value\n\n elif attribute == \"year\" :\n\n value = self.normalizeYear( value )\n\n self.year = value\n\n## elif attribute == \"bib\" :\n##\n## value = self.normalizePath( value )\n##\n## self.bibPath = value\n\n elif attribute == \"file\" :\n\n value = self.normalizePath( value )\n\n self.filePath = value\n \n elif attribute == \"owner\" :\n\n value = utilities.string( value, format = \"title\" )\n\n self.owner = value\n\n # other values: strips delimiters\n \n else : value = str( value ).strip( \" ()[].;:,/\\\\{}-_\" )\n\n\n\n # cleans value\n\n## print \"normalize\", str( attribute), str( value )\n\n value = value.strip().replace( \"{\", \"\" ).replace( \"}\", \"\" )\n\n## # recodes attribute: reference becomes bibtex and the remainder has a prefix reference **RF\n##\n## if ( ( not attribute == \"bibtex\" ) and ( not attribute.startswith( \"reference\" ) ) ) :\n##\n## attribute = \"reference\" + utilities.string( attribute, format = \"class\" )\n\n return attribute, value",
"def onSetAttr(self, attr, vals, opts):\n pass",
"def mutated_sequence(self):\n for i in range(len(self.seq)):\n for alt in self.vocab:\n if i in self.fixed_positions or alt == self.seq[i]:\n continue\n yield SeqNode(self.seq[:i] + alt + self.seq[i + 1:],\n fixed_positions=self.fixed_positions + [i])",
"def __setattr__(self, attr, value):\n raise AttributeError(\"%s object is immutable\" % (type(self).__name__,))",
"def replace_attributes(soup: BeautifulSoup, attribute: str, value: str, new_value: str) -> None:\n for target in soup.find_all(attrs={attribute: value}):\n target: Tag\n target.attrs[attribute] = new_value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates all attributes from node or dictionary `dict_`. Appends the basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') and then, for all other attributes in dict_, updates the same attribute in self. When attributes with the same identifier appear in both self and dict_, the two values are merged based on the value of update_fun. Generally, when replace is True, the values in self are replaced or merged with the values in dict_; otherwise, the values in self may be preserved or merged. When and_source is True, the 'source' attribute is included in the copy. | def update_all_atts(self, dict_, update_fun = copy_attr_consistent,
                    replace = True, and_source = False):
    if isinstance(dict_, Node):
        dict_ = dict_.attributes
    # Include the source attribute when copying?
    if and_source:
        filter_fun = self.is_not_list_attribute
    else:
        filter_fun = self.is_not_known_attribute
    # Copy the basic attributes
    self.update_basic_atts(dict_)
    # Grab other attributes in dict_ not in self except the
    # (All basic attributes should be copied already)
    for att in filter(filter_fun, dict_):
        update_fun(self, att, dict_[att], replace) | [
"def update_basic_atts(self, dict_):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n for att in self.basic_attributes:\r\n self.append_attr_list(att, dict_.get(att, []))",
"def update(self, dict):\n self.attr.update(dict)\n return self",
"def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr",
"def update(self, given_dict):\n self.__dict__.update(given_dict)",
"def update_attributes(self, override: Dict):\n self.additional_attributes.update(override)",
"def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n dynamic_attr.clear_overloads()\n \n self.update_children()\n \n for modifier in self.modifiers:\n self.apply_modifier(modifier)",
"def update_with_attributes(obj, attributes):\n for key, val in attributes.items():\n setattr(obj, key, val)",
"def update(self, data):\n for field in self.ATTR_FIELDS:\n if field in data:\n setattr(self, field, data[field])",
"def _update_dict(self, doc_dict, update_doc_dict):\n\n for key, value in update_doc_dict.items():\n if isinstance(value, dict) and key in doc_dict:\n self._update_dict(doc_dict[key], update_doc_dict[key])\n elif isinstance(value, list) and key in doc_dict and\\\n self._is_list_descriptor(value):\n self._update_list(doc_dict[key], update_doc_dict[key])\n else:\n doc_dict[key]=value",
"def update(self, dictionary):\n for key, value in dictionary.items():\n if is_stringlike(key):\n setattr(self, key, value)\n else:\n self[Tag(key)] = value",
"def load_updated(self, grfn_dict):\n for container in self.function_argument_map:\n if container in self.update_functions:\n for container_grfn in grfn_dict[0][\"containers\"]:\n for body_function in container_grfn[\"body\"]:\n function_name = body_function[\"function\"][\"name\"]\n if (\n function_name.startswith(\"@container\")\n and function_name.split(\"::\")[-1] == container\n ):\n updated_variable = [\n body_function[\"input\"][i]\n for i in self.function_argument_map[container][\n \"updated_indices\"\n ]\n ]\n for i in range(len(updated_variable)):\n old_index = int(\n updated_variable[i].split(\"::\")[-1]\n )\n new_index = old_index + 1\n updated_var_list = updated_variable[i].split(\n \"::\"\n )[:-1]\n updated_var_list.append(str(new_index))\n updated_variable[i] = \"::\".join(\n updated_var_list\n )\n self.current_scope = self.update_functions[\n container\n ][\"scope\"]\n variable_name = updated_var_list[1]\n variable_spec = self.generate_variable_definition(\n variable_name,\n None,\n False,\n self.update_functions[container][\"state\"],\n )\n variable_name_list = variable_spec[\n \"name\"\n ].split(\"::\")[:-1]\n variable_name_list.append(str(new_index))\n variable_spec[\"name\"] = \"::\".join(\n variable_name_list\n )\n grfn_dict[0][\"variables\"].append(variable_spec)\n body_function[\"updated\"] = updated_variable\n return grfn_dict",
"def copy_attrs(self, src, overwrite=False):\n # check src.tile.lgtattrs\n if len(src.tile.lgtattrs) == 0:\n src.tile._get_lgt_attrs()\n for attr_name in src.tile.lgtattrs:\n self.set(attr_name, src.tile.get(attr_name), overwrite=overwrite)",
"def update(self, d, o=None):\n if isinstance(d, abc.Mapping):\n for k, v in shallow_items(d):\n if k in vars(self):\n v_ = vars(self)[k]\n if isinstance(v_, Tdict):\n if isinstance(v, abc.Mapping) or o is not None:\n vars(self)[k].update(v, o)\n else:\n vars(self)[k] = v\n elif o is None:\n vars(self)[k] = ensure_tdict(v)\n else:\n vars(self)[k] = o(v_, v)\n else:\n vars(self)[k] = ensure_tdict(v)\n else:\n for k, v in vars(self).items():\n if isinstance(v, Tdict):\n v.update(d, o)\n elif o is None:\n vars(self)[k] = d\n else:\n vars(self)[k] = o(v, d)\n return self",
"def clone_attributes(self, source_cell, target_cell, no_clone_key_dict_list=None):\n if no_clone_key_dict_list is None:\n no_clone_key_dict_list = []\n\n # clone \"C++\" attributes\n for attrName in self.clonable_attribute_names:\n setattr(target_cell, attrName, getattr(source_cell, attrName))\n\n # clone dictionary\n for key, val in source_cell.dict.items():\n\n if key in no_clone_key_dict_list:\n continue\n elif key == '__sbml_fetcher':\n # we are skipping copying of SWIG-added attribute\n # SBMLFetcher - this is added by default during cell creation\n # co no need to copy\n continue\n elif key == 'SBMLSolver':\n self.copy_sbml_simulators(from_cell=source_cell, to_cell=target_cell)\n elif key == CompuCell.CellG.__maboss__:\n # skipping MaBoSS models; need a reliable copy constructor\n continue\n else:\n # copying the rest of dictionary entries\n target_cell.dict[key] = deepcopy(source_cell.dict[key])\n\n # now copy data associated with plugins\n # AdhesionFlex\n if self.adhesionFlexPlugin:\n source_adhesion_vector = self.adhesionFlexPlugin.getAdhesionMoleculeDensityVector(source_cell)\n self.adhesionFlexPlugin.assignNewAdhesionMoleculeDensityVector(target_cell, source_adhesion_vector)\n\n # PolarizationVector\n if self.polarizationVectorPlugin:\n source_polarization_vector = self.polarizationVectorPlugin.getPolarizationVector(source_cell)\n self.polarizationVectorPlugin.setPolarizationVector(target_cell, source_polarization_vector[0],\n source_polarization_vector[1],\n source_polarization_vector[2])\n\n # polarization23Plugin\n if self.polarization23Plugin:\n pol_vec = self.polarization23Plugin.getPolarizationVector(source_cell)\n self.polarization23Plugin.setPolarizationVector(target_cell, pol_vec)\n pol_mark = self.polarization23Plugin.getPolarizationMarkers(source_cell)\n self.polarization23Plugin.setPolarizationMarkers(target_cell, pol_mark[0], pol_mark[1])\n lam = self.polarization23Plugin.getLambdaPolarization(source_cell)\n self.polarization23Plugin.setLambdaPolarization(target_cell, lam)\n\n # CellOrientationPlugin\n if self.cellOrientationPlugin:\n lam = self.cellOrientationPlugin.getLambdaCellOrientation(source_cell)\n self.cellOrientationPlugin.setLambdaCellOrientation(target_cell, lam)\n\n # ContactOrientationPlugin\n if self.contactOrientationPlugin:\n o_vec = self.contactOrientationPlugin.getOriantationVector(source_cell)\n self.contactOrientationPlugin.setOriantationVector(target_cell, o_vec.x, o_vec.y, o_vec.z)\n self.contactOrientationPlugin.setAlpha(target_cell, self.contactOrientationPlugin.getAlpha(source_cell))\n\n # ContactLocalProductPlugin\n if self.contactLocalProductPlugin:\n c_vec = self.contactLocalProductPlugin.getCadherinConcentrationVec(source_cell)\n self.contactLocalProductPlugin.setCadherinConcentrationVec(target_cell, c_vec)\n\n # LengthConstraintPlugin\n if self.lengthConstraintPlugin:\n lam = self.lengthConstraintPlugin.getLambdaLength(source_cell)\n tl = self.lengthConstraintPlugin.getTargetLength(source_cell)\n mtl = self.lengthConstraintPlugin.getMinorTargetLength(source_cell)\n self.lengthConstraintPlugin.setLengthConstraintData(target_cell, lam, tl, mtl)\n\n # ConnectivityGlobalPlugin\n if self.connectivityGlobalPlugin:\n cs = self.connectivityGlobalPlugin.getConnectivityStrength(source_cell)\n self.connectivityGlobalPlugin.setConnectivityStrength(target_cell, cs)\n\n # ConnectivityLocalFlexPlugin\n if self.connectivityLocalFlexPlugin:\n cs = self.connectivityLocalFlexPlugin.getConnectivityStrength(source_cell)\n 
self.connectivityLocalFlexPlugin.setConnectivityStrength(target_cell, cs)\n\n # Chemotaxis\n if self.chemotaxisPlugin:\n field_names = self.chemotaxisPlugin.getFieldNamesWithChemotaxisData(source_cell)\n\n for fieldName in field_names:\n source_chd = self.chemotaxisPlugin.getChemotaxisData(source_cell, fieldName)\n target_chd = self.chemotaxisPlugin.addChemotaxisData(target_cell, fieldName)\n\n target_chd.setLambda(source_chd.getLambda())\n target_chd.saturationCoef = source_chd.saturationCoef\n target_chd.setChemotaxisFormulaByName(source_chd.formulaName)\n target_chd.assignChemotactTowardsVectorTypes(source_chd.getChemotactTowardsVectorTypes())\n\n # FocalPointPLasticityPlugin - this plugin has to be handled manually -\n # there is no good way to figure out which links shuold be copied from parent to child cell",
"def _copy_attr(o, attr, adict, key=None):\n if hasattr(o, attr):\n adict[key or attr] = getattr(o, attr)",
"def _update_attrs(cls, args, das, attrs, var_id=None, names=None):\n out = cls._format(attrs, args)\n for locale in OPTIONS[METADATA_LOCALES]:\n out.update(\n cls._format(\n cls._get_translated_metadata(\n locale, var_id=var_id, names=names or list(attrs.keys())\n ),\n args=args,\n formatter=get_local_formatter(locale),\n )\n )\n\n # Generate a signature string for the history attribute\n # We remove annotations, replace default float/int/str by values\n # and replace others by type\n callstr = []\n for (k, v) in das.items():\n callstr.append(f\"{k}=<array>\")\n for (k, v) in args.items():\n if isinstance(v, (float, int, str)):\n callstr.append(f\"{k}={v!r}\") # repr so strings have ' '\n else:\n callstr.append(\n f\"{k}={type(v)}\"\n ) # don't take chance of having unprintable values\n\n # Get history and cell method attributes from source data\n attrs = defaultdict(str)\n if names is None or \"cell_methods\" in names:\n attrs[\"cell_methods\"] = merge_attributes(\n \"cell_methods\", new_line=\" \", missing_str=None, **das\n )\n if \"cell_methods\" in out:\n attrs[\"cell_methods\"] += \" \" + out.pop(\"cell_methods\")\n\n attrs[\"xclim_history\"] = update_history(\n f\"{var_id or cls._registry_id}({', '.join(callstr)})\",\n new_name=out.get(\"var_name\"),\n **das,\n )\n\n attrs.update(out)\n return attrs",
"def update_attributes_instability(attrs_inst: Dict[Attribute, float]):\n for attribute, attribute_instability in attrs_inst.items():\n attributes_instability[attribute] = attribute_instability",
"def replicate_attributes(self):\n changed = False\n if getattr(self, 'phonology', None):\n changed = self.set_attr('word_boundary_symbol', self.phonology.word_boundary_symbol, changed)\n changed = self.set_attr('morpheme_delimiters', self.morphology.morpheme_delimiters, changed)\n changed = self.set_attr('morphology_rare_delimiter', self.morphology.rare_delimiter, changed)\n changed = self.set_attr('morphology_rich_upper', self.morphology.rich_upper, changed)\n changed = self.set_attr('morphology_rich_lower', self.morphology.rich_lower, changed)\n changed = self.set_attr('morphology_rules_generated', self.morphology.rules_generated, changed)\n changed = self.set_attr('language_model_start_symbol', self.language_model.start_symbol, changed)\n changed = self.set_attr('language_model_end_symbol', self.language_model.end_symbol, changed)\n changed = self.set_attr('language_model_categorial', self.language_model.categorial, changed)\n self.changed = changed",
"def update_from_dict(instance, attrs, commit):\n\n field_names = list(map(lambda f: f.name, instance._meta.get_fields()))\n for attr, val in attrs.items():\n if attr in field_names:\n setattr(instance, attr, val)\n\n if commit:\n instance.save()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace one child `Node` with another child or children. | def replace(self, old, new):
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new | [
"def _replace_child(self, node, old, new):\n if node is None:\n self.tree = new\n elif node._left == old:\n node._left = new\n node._left._parent = node\n elif node._right == old:\n node._right = new\n node._left._parent = node\n else:\n assert (False) # May need to change",
"def replace_with(self, other):\n self.parent.children[self.parent.children.index(self)] = other\n other.parent = self.parent",
"def replaceChild(self, *args) -> \"void\":\n return _coin.SoGroup_replaceChild(self, *args)",
"def replace_with_node(self,node):\n\n self.set_for_parents(node) # connect new to parent on proper locations\n node.parent= self.parent # set node paent correctly\n self.parent = None # disconnect self from the parent\n return node.find_root() # find root again",
"def replaceChildren(self, newChildren):\n self.content.replaceChild(newChildren)",
"def replaceChild(self, *args) -> \"void\":\n return _coin.SoNodeKitListPart_replaceChild(self, *args)",
"def replace_child(parent, node, replace_with):\n # TODO(soupytwist): Don't refer to the formatting dict directly\n if hasattr(node, PASTA_DICT):\n setprop(replace_with, 'prefix', prop(node, 'prefix'))\n setprop(replace_with, 'suffix', prop(node, 'suffix'))\n for field in parent._fields:\n field_val = getattr(parent, field, None)\n if field_val == node:\n setattr(parent, field, replace_with)\n return\n elif isinstance(field_val, list):\n try:\n field_val[field_val.index(node)] = replace_with\n return\n except ValueError:\n pass\n raise errors.InvalidAstError('Node %r is not a child of %r' % (node, parent))",
"def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLLOD_replaceChild(self, *args)",
"def _relink(self, parent, child, is_child_left):\n if is_child_left:\n parent._left = child\n else:\n parent._right = child\n if child is not None:\n child._parent = parent",
"def replace_child(parent, node, replace_with):\n # TODO(soupytwist): Don't refer to the formatting dict directly\n if hasattr(node, fmt.PASTA_DICT):\n fmt.set(replace_with, 'prefix', fmt.get(node, 'prefix'))\n fmt.set(replace_with, 'suffix', fmt.get(node, 'suffix'))\n for field in parent._fields:\n field_val = getattr(parent, field, None)\n if field_val == node:\n setattr(parent, field, replace_with)\n return\n elif isinstance(field_val, list):\n try:\n field_val[field_val.index(node)] = replace_with\n return\n except ValueError:\n pass\n raise errors.InvalidAstError('Node %r is not a child of %r' % (node, parent))",
"def put(self, node, child):\n\n node.add_child(child)",
"def move_children(self, element1, element2):\n for child in element1.getchildren():\n element2.append(child)\n # reversed is needed to safely remove items while iterating\n for child in reversed(element1.getchildren()):\n element1.remove(child)",
"def replaceNode(self, *args) -> \"void\":\n return _coin.SoMFNode_replaceNode(self, *args)",
"def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLParent_replaceChild(self, *args)",
"def process_children(self, node):\n if node == None: return\n \n self.parent_stack.append(node)\n for childacc in node.get_child_accessors():\n child = childacc.get()\n if isinstance(child, list):\n newchild = self.process_list(child, childacc.name())\n if not isinstance(newchild, list): raise Exception(\"Cannot replace list with non-list!\")\n else:\n newchild = self.process_node(child, childacc.name())\n if newchild is not None and not isinstance(newchild, Nodes.Node):\n raise Exception(\"Cannot replace Node with non-Node!\")\n childacc.set(newchild)\n self.parent_stack.pop()",
"def _replace_element_by_own_content(self, element):\n # pylint: disable=no-self-use\n\n if element.has_children_elements():\n children = element.get_children_elements()\n for child in children:\n element.insert_before(child)\n element.remove_node()\n elif element.has_children():\n element.replace_node(element.get_first_node_child())",
"def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLSwitch_replaceChild(self, *args)",
"def replaceWith(self, newChild):\n self.parentNode.replaceChild(newChild, self)\n return self",
"def replace_node(graph, node, new_node):\n graph.add_node(new_node)\n graph.add_edges_from([(new_node, n) for n in graph.neighbors(node)])\n graph.remove_node(node)",
"def _replace(self, node1:int, node2: int):\n for key in self.graph:\n for index, value in enumerate(self.graph[key]):\n if value == node2:\n self.graph[key][index] = node1\n \n '''\n while node2 in self.graph[key]:\n # instead of reassigning the value of the list where node2 resides, I am deleting the node2 from the list and adding the node1 value\n\n # self.graph[key] = [value for value in self.graph if != node2] \n self.graph[key].remove(node2) # deletes the node2 value from the row\n self.graph[key].append(node1) # adds the node1 value \n '''"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
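Usage sketch for the `replace` document above, on a hand-built docutils tree (a minimal sketch assuming a recent docutils install; `nodes.paragraph` and `nodes.Text` are standard docutils classes, not taken from this row):

    from docutils import nodes

    para = nodes.paragraph()
    old = nodes.Text("old text")
    para += old                          # __iadd__ appends and sets up the child

    # Replace with a single Node: the new child takes the old child's slot.
    para.replace(old, nodes.Text("new text"))
    assert para[0].astext() == "new text"

    # Replace with a list: the new children are spliced in at the same position.
    para.replace(para[0], [nodes.Text("a"), nodes.Text("b")])
    assert len(para) == 2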
Return the index of the first child whose class exactly matches. | def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
return index
return None | [
"def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0",
"def _get_class_index(prediction: np.ndarray, order_number_minus_one: int) -> int:\n return np.where(\n prediction\n == np.partition(prediction.flatten(), -2)[-order_number_minus_one - 1]\n )[1][0]",
"def getMinChildIndex(self, index):\n\t\tleftChild = self.data[self.leftChildIndexOf(index)]\n\t\t\n\t\tif self.rightChildIndexOf(index) < self.numElements:\n\t\t\trightChild = self.data[self.rightChildIndexOf(index)]\n\t\t\tif rightChild < leftChild:\n\t\t\t\treturn self.rightChildIndexOf(index)\n\n\t\treturn self.leftChildIndexOf(index)",
"def position(self):\n if self.parent:\n return self.parent.children.index(self)",
"def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None",
"def get_index_in_parent_list(self):\n\t\tif self.parent:\n\t\t\treturn super(Heading, self).get_index_in_parent_list()\n\t\telif self.document:\n\t\t\tl = self.get_parent_list()\n\t\t\tif l:\n\t\t\t\treturn l.index(self)",
"def first_child_oftype(self, typename):\n for child in self._kids:\n if child.typename == typename:\n return child",
"def _smaller_child(self, idx):\n left = 2 * idx + 1\n # case 1: no child\n if left >= len(self):\n return None\n\n right = left + 1\n # case 2: only left child\n if right == len(self):\n return left\n\n # case 3: two children\n if self._entries[left][1] < self._entries[right][1]:\n return left\n else:\n return right",
"def get_class_index(classes, class_list):\n\tfilter_index = [np.where(class_list == i)[0][0] for i in classes]\n\treturn filter_index",
"def parent_index(self, vaddr):\n return self.parent.child_index(vaddr)",
"def min_child(self, i):\n # Check if the given node has two children\n if i*2*2 > self.size:\n return i*2+1\n else:\n if self.heap_list[i*2+1] < self.heap_list[i*2+2]:\n return i*2+1\n else:\n return i*2+2",
"def find_node(self, node):\n for i in range(self.count):\n if node.name == self.nodes[i].name:\n return i\n\n return -1",
"def child(self, i):\r\n return self.children[i]",
"def smallest_child(self, k):\n\n # 2*k == self.counter: check that k only has one child, if yes,\n # then just return this child's index\n # self.array[2*k][0] < self.array[2*k + 1]: the left child is smaller\n # than the right child\n if 2*k == self.counter \\\n or self.array[2*k].distance < self.array[2*k + 1].distance:\n return 2*k\n else:\n # return the index of the right child if right child is less than\n # the left child\n return 2*k + 1",
"def findChild(self, child: 'SoNode') -> \"int\":\n return _coin.SoVRMLParent_findChild(self, child)",
"def findChild(self, node: 'SoNode') -> \"int\":\n return _coin.SoGroup_findChild(self, node)",
"def indexOfCurrentElement(self):\r\n return self.tableOfContact.indexOfTopLevelItem(self.tableOfContact.currentItem())",
"def _getLeftChild(self, index):\r\n return index * 2 + 1",
"def child_index_a(self):\r\n return self._index_a",
"def get_parent_index(self, index):\n return int((index - 1 ) / 2) if index != 0 else None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
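A sketch of how the index lookup above behaves (standard docutils node classes assumed, as in the previous sketch):

    from docutils import nodes

    sect = nodes.section()
    sect += nodes.title(text="Heading")
    sect += nodes.paragraph(text="Body")

    assert sect.first_child_matching_class(nodes.paragraph) == 1
    # A tuple of classes matches any of its members; start/end narrow the search range.
    assert sect.first_child_matching_class((nodes.title, nodes.paragraph)) == 0
    assert sect.first_child_matching_class(nodes.title, start=1) is None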
Return the index of the first child whose class does not match. | def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxint):
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self.children[index], c):
break
else:
return index
return None | [
"def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0",
"def getMinChildIndex(self, index):\n\t\tleftChild = self.data[self.leftChildIndexOf(index)]\n\t\t\n\t\tif self.rightChildIndexOf(index) < self.numElements:\n\t\t\trightChild = self.data[self.rightChildIndexOf(index)]\n\t\t\tif rightChild < leftChild:\n\t\t\t\treturn self.rightChildIndexOf(index)\n\n\t\treturn self.leftChildIndexOf(index)",
"def _get_class_index(prediction: np.ndarray, order_number_minus_one: int) -> int:\n return np.where(\n prediction\n == np.partition(prediction.flatten(), -2)[-order_number_minus_one - 1]\n )[1][0]",
"def position(self):\n if self.parent:\n return self.parent.children.index(self)",
"def get_index_in_parent_list(self):\n\t\tif self.parent:\n\t\t\treturn super(Heading, self).get_index_in_parent_list()\n\t\telif self.document:\n\t\t\tl = self.get_parent_list()\n\t\t\tif l:\n\t\t\t\treturn l.index(self)",
"def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None",
"def _smaller_child(self, idx):\n left = 2 * idx + 1\n # case 1: no child\n if left >= len(self):\n return None\n\n right = left + 1\n # case 2: only left child\n if right == len(self):\n return left\n\n # case 3: two children\n if self._entries[left][1] < self._entries[right][1]:\n return left\n else:\n return right",
"def get_parent_index(self, index):\n return int((index - 1 ) / 2) if index != 0 else None",
"def parent_index(self, vaddr):\n return self.parent.child_index(vaddr)",
"def indexOfCurrentElement(self):\r\n return self.tableOfContact.indexOfTopLevelItem(self.tableOfContact.currentItem())",
"def find_node(self, node):\n for i in range(self.count):\n if node.name == self.nodes[i].name:\n return i\n\n return -1",
"def get_class_index(classes, class_list):\n\tfilter_index = [np.where(class_list == i)[0][0] for i in classes]\n\treturn filter_index",
"def first_child_oftype(self, typename):\n for child in self._kids:\n if child.typename == typename:\n return child",
"def _getLeftChild(self, index):\r\n return index * 2 + 1",
"def min_child(self, i):\n # Check if the given node has two children\n if i*2*2 > self.size:\n return i*2+1\n else:\n if self.heap_list[i*2+1] < self.heap_list[i*2+2]:\n return i*2+1\n else:\n return i*2+2",
"def smallest_child(self, k):\n\n # 2*k == self.counter: check that k only has one child, if yes,\n # then just return this child's index\n # self.array[2*k][0] < self.array[2*k + 1]: the left child is smaller\n # than the right child\n if 2*k == self.counter \\\n or self.array[2*k].distance < self.array[2*k + 1].distance:\n return 2*k\n else:\n # return the index of the right child if right child is less than\n # the left child\n return 2*k + 1",
"def child(self, i):\r\n return self.children[i]",
"def _get_index(self) -> \"int\" :\n return _core.ListItem__get_index(self)",
"def child_insert_index(self, parent: Node, new_child: Node) -> int:\n try:\n new_child_index = self._rule_children_names.index(new_child.name)\n except ValueError as e:\n msg = f\"Child '{new_child.name}' not allowed in parent '{parent.name}'\"\n raise ChildNotAllowedError(msg)\n for index, child in enumerate(parent.children):\n parent_child_index = self._rule_children_names.index(child.name)\n if parent_child_index > new_child_index:\n return index\n index = len(parent.children)\n return index",
"def child_index_a(self):\r\n return self._index_a"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
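The complementary lookup, sketched on the same kind of tree (same assumptions as the sketch above):

    from docutils import nodes

    sect = nodes.section()
    sect += nodes.title(text="Heading")
    sect += nodes.paragraph(text="Body")

    # Skip the leading title to find where the body starts.
    assert sect.first_child_not_matching_class(nodes.title) == 1
    # Returns None when every child matches one of the given classes.
    assert sect.first_child_not_matching_class((nodes.title, nodes.paragraph)) is None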
Note that this Element has been referenced by its name `name` or id `id`. | def note_referenced_by(self, name=None, id=None):
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target propagation.
by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
if by_name:
assert name is not None
by_name.referenced = 1
if by_id:
assert id is not None
by_id.referenced = 1 | [
"def element_name(self, element_name: str):\n\n self._element_name = element_name",
"def element_name(self) -> str:\n return self._element_name",
"def add_name(self):\n self.curr_iden = self.curr_word\n self.curr_obj.insert_attr_name(self.curr_word)",
"def _reference(self):\n\t\tpass",
"def nodesByIdref(self, QXmlName): # real signature unknown; restored from __doc__\n return []",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def setName(self, name):\n self.content = name",
"def startElement(self, name: unicode) -> None:\n ...",
"def update_name(self, name):\n if name != self.name:\n self.parent.types[name] = self\n del self.parent.types[self.name]\n self.name = name",
"def set_name(self, name):\n if self.__id == -1:\n self.__shadow_expr = self.__expr\n self.__shadow_dependencies = self.__dependencies\n self.__id = evar.__dic_id\n self.__dependencies = {self.__id}\n evar.__dic_id += 1\n evar.__var_dic[self.__id] = weakref.ref(self)\n self.symbol = sympy.symbols(\"v\" + str(self.__id) + \"v\")\n self.g_symbol = sympy.symbols(\"g\" + str(self.__id) + \"g\")\n self.m_symbol = sympy.symbols(\"m\" + str(self.__id) + \"m\")\n self.__expr = self.symbol\n self.name = name\n self.__finish_operation()\n else:\n self.name = name",
"def element(self, name):\n try:\n return self.elements[self.elements.index(name.lower())]\n except ValueError:\n return None",
"def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name",
"def SetID(self, id):\n if not self.IsValidName(id):\n raise XMLIDValueError(id)\n doc = self.GetDocument()\n if doc:\n doc.UnregisterElement(self)\n self.id = id\n doc.RegisterElement(self)\n else:\n self.id = id",
"def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name",
"def __repr__(self):\n\n return self.tagname",
"def setNameAttribute(self, name: 'char const *') -> \"void\":\n return _coin.ScXMLScxmlElt_setNameAttribute(self, name)",
"def name(self, decl_name):\n self._name = decl_name",
"def name(self):\n # set the default name value\n name = Node.NAME_DEFAULT\n\n # try to get the name of this object from the attributes\n attributes = self.attribute\n if Node.NAME_KEY in attributes:\n name = attributes[Node.NAME_KEY]\n return name",
"def setNameAttribute(self, name: 'char const *') -> \"void\":\n return _coin.ScXMLEventElt_setNameAttribute(self, name)",
"def test_anchorName(self):\r\n listing = Element('a')\r\n listing.setAttribute('name', 'foo')\r\n self.spitter.visitNode(listing)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\label{%sHASHfoo}\" % (\r\n os.path.abspath(self.filename).replace('\\\\', '/'),))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
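A minimal sketch of the target-propagation behaviour documented above; the `expect_referenced_by_name` mapping is normally filled in by docutils transforms, here it is set by hand for illustration:

    from docutils import nodes

    outer = nodes.target()
    chained = nodes.target()
    # Ask that `chained` be marked referenced as soon as `outer` is
    # referenced under the name "spam".
    outer.expect_referenced_by_name = {"spam": chained}

    outer.note_referenced_by(name="spam")
    assert outer.referenced == 1 and chained.referenced == 1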
Returns True if and only if the given attribute is NOT one of the basic list attributes defined for all Elements. | def is_not_list_attribute(cls, attr):
return attr not in cls.list_attributes | [
"def is_not_known_attribute(cls, attr):\r\n return attr not in cls.known_attributes",
"def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False",
"def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append(lambda elem: elem.attrib[name] != value)",
"def _check_rule_has_not_attribute(self, data_sources, conditions):\n return not self._check_rule_has_attribute(data_sources, conditions)",
"def has_attribute(self, atributo):\r\n return atributo in self.__atributos",
"def is_valid_svg_attribute(self, elementname, attributename):\n element = self._get_element(elementname)\n return attributename in element.valid_attributes",
"def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])",
"def has_comm_attr(self, attr):\n\n for comm_attr in self._attr_list:\n if comm_attr == attr:\n return True\n\n return False",
"def empty(self, exc=[]):\n attrs = self.get_own_attrs()\n return not set(attrs.keys()).difference(set(exc))",
"def check_attribute(array):\n if array[0] == array[1] and array[1] == array[2]:\n return True\n elif array[0] != array[1] and array[1] != array[2] and array[0] != array[2]:\n return True\n else:\n return False",
"def verify_export_attrs_removed(attributes):\n self.assertNotIn('index_in_children_list', attributes)\n self.assertNotIn('parent_sequential_url', attributes)\n self.assertNotIn('parent_url', attributes)",
"def check_attr_unit(self, element, attr, unit_list):\n if attr in element.attrib:\n unit = self.parse_length(element.get(attr), percent=True)[1]\n return unit in unit_list",
"def is_lock_attribute(element, attribute):\n\n return cmds.getAttr(\"{}.{}\".format(element, attribute), lock=True)",
"def ignore(self):\n return \"ignore\" in self.attributes and self.attributes[\"ignore\"] == \"true\"",
"def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n # Return True if attribute validation is disabled\n return False == self.attribute_validation\n\n return True",
"def confirm_attribute(item, attribute):\n if type(item) == dict:\n if item.__contains__(attribute) is True:\n pass\n else:\n raise KeyError('Attribute {} does not exist.'.format(attribute))\n else:\n if hasattr(item, attribute) is True:\n pass\n else:\n raise AttributeError('Attribute {} does not exist.'.format(attribute))\n\n return",
"def has_attribute(self, attributeType):\n return attributeType in self._node._attribute_map",
"def is_attr_protected(attrname: str) -> bool:\n return (\n attrname[0] == \"_\"\n and attrname != \"_\"\n and not (attrname.startswith(\"__\") and attrname.endswith(\"__\"))\n )",
"def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
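Sketch: `classes` is one of the basic list attributes every Element carries, while an element-specific attribute such as `refuri` is not (attribute names assumed from standard docutils):

    from docutils import nodes

    assert nodes.Element.is_not_list_attribute("classes") is False
    assert nodes.Element.is_not_list_attribute("refuri") is True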
Returns True if and only if the given attribute is NOT recognized by this class. | def is_not_known_attribute(cls, attr):
return attr not in cls.known_attributes | [
"def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n # Return True if attribute validation is disabled\n return False == self.attribute_validation\n\n return True",
"def is_not_list_attribute(cls, attr):\r\n return attr not in cls.list_attributes",
"def _check_rule_has_not_attribute(self, data_sources, conditions):\n return not self._check_rule_has_attribute(data_sources, conditions)",
"def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])",
"def __bool__(self):\n return not hasattr(self, 'missing')",
"def has_attribute(self, atributo):\r\n return atributo in self.__atributos",
"def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False",
"def hasAttribute(*args, **kwargs):\n \n pass",
"def is_unidentified(self):\n try:\n if 'compound' not in self.fields.keys():\n return False\n if not self.compound.contextual_fulfilled:\n return self.compound.is_unidentified\n except AttributeError:\n return True",
"def ignore(self):\n return \"ignore\" in self.attributes and self.attributes[\"ignore\"] == \"true\"",
"def has_attribute(self, key):\n assert isinstance(key, str)\n return key in self._attributes.keys()",
"def has_attribute(self, attributeType):\n return attributeType in self._node._attribute_map",
"def is_attr_protected(attrname: str) -> bool:\n return (\n attrname[0] == \"_\"\n and attrname != \"_\"\n and not (attrname.startswith(\"__\") and attrname.endswith(\"__\"))\n )",
"def is_missing(obj):\n return getattr(obj, \"moya_missing\", False)",
"def confirm_attribute(item, attribute):\n if type(item) == dict:\n if item.__contains__(attribute) is True:\n pass\n else:\n raise KeyError('Attribute {} does not exist.'.format(attribute))\n else:\n if hasattr(item, attribute) is True:\n pass\n else:\n raise AttributeError('Attribute {} does not exist.'.format(attribute))\n\n return",
"def _isprop(self, attr: str) -> bool:\n\n return isinstance(attr, property)",
"def attribute_has(nodeName, attributeName):\r\n\r\n # valider si le noeud possède l'attribut\r\n if maya.cmds.objExists(\"%s.%s\" % (nodeName, attributeName)):\r\n return True\r\n else:\r\n return False",
"def hasattribute(self, k):\n return isinstance(self.attributes, dict) and k in self.attributes",
"def _check_rule_has_attribute(self, data_sources, conditions):\n return hasattr(data_sources['asset'], conditions['attribute']) and \\\n getattr(data_sources['asset'], conditions['attribute']) is not None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
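Sketch for the companion check; the assumption here is that `source` appears in `Element.known_attributes` (as in current docutils) while `refuri` remains element-specific:

    from docutils import nodes

    assert nodes.Element.is_not_known_attribute("source") is False
    assert nodes.Element.is_not_known_attribute("refuri") is True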
Return a DOM representation of this document. | def asdom(self, dom=None):
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
domroot.appendChild(self._dom_node(domroot))
return domroot | [
"def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')",
"def getImplementation(self):\n return DOMImplementation()",
"def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n if self.description is not None:\n domElement.setAttribute('description', self.description)\n e = dom.createTextNode(self.filename)\n domElement.appendChild(e)\n\n return domElement",
"def dom_element(self):\n data = self.data\n dom_element = data.dom_element\n return dom_element",
"def html(self):\n doc = dominate.document(title=self.title)\n\n # Add favicon\n if self.favicon is not None:\n with doc.head:\n link(rel='icon', href=self.favicon)\n\n # Add external files (Skin)\n if self.skin is not None:\n with doc.head:\n for ref in self.skin.libs: # Libs\n link(rel='stylesheet', crossorigin='anonymous', href=ref)\n\n for ref in self.skin.fonts: # Fonts\n link(rel='stylesheet', type='text/css', href=ref)\n\n if self.skin.rules != \"\":\n style(raw(self.skin.rules))\n\n # Add Widgets HTML to the page\n main_div = div(cls=\"container\")\n for w in self.widgets:\n main_div.add(w.html())\n main_div.add(br())\n doc.add(main_div)\n\n # Add Javascript code to the page\n js_str = \"\\n\\n\".join([a.js() for a in self.ajax()])\n if js_str != '':\n doc.add(script(src=JQUERY_CDN))\n doc.add(script(raw(js_str + \"\\n\\n\" + COMMON_JS)))\n\n return doc",
"def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n\n if self.privilege is not None:\n domElement.setAttribute('privilege', self.privilege)\n\n addChildNode(dom=dom, parentNode=domElement, name='UserID',\n value=self.userId, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='FirstName',\n value=self.firstName, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='LastName',\n value=self.lastName, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='Email',\n value=self.email, nodeType=Node.TEXT_NODE,\n attrDict={})\n\n return domElement",
"def get_root_node(self):\n\n return self.dom.documentElement",
"def get_soup(self):\n if self._soup is None:\n self._soup = BeautifulSoup(self.get_data(), \"lxml\", from_encoding='utf8')\n return self._soup",
"def serialize(self):\n # Serialize fields to a dict\n elements = []\n for element in self.elements:\n elements.append(element.serialize())\n data = {'type': 'document', 'elements': elements}\n return data",
"def dump(self):\n return etree.tostring(self.root)",
"def get_DOM(self, xmlfile):\n\t\tdom = None\n\t\ttry: dom = xml.dom.minidom.parseString(xmlfile)\n\t\texcept Exception, e:\n\t \t\tprint \"Error getting dom:\", str(e)\n\t \t\treturn None\n\t\treturn dom",
"def internet_document(self):\n return _InternetDocument(self, 'InternetDocument')",
"def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)",
"def makeNewDocument(self):\n\n document = textlayout.Document(\n width=self._propertyToPoints(\"width\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n )\n\n return document",
"def GetDocument(self):\n if self.parent:\n if isinstance(self.parent, Document):\n return self.parent\n else:\n return self.parent.GetDocument()\n else:\n return None",
"def get_xml(self):\n return etree.tostring(self.xml_tree, pretty_print=True, encoding=\"utf-8\").decode(\"utf-8\")",
"def read(self):\n # Get result data from debugger engine and verify length of response\n data = self.read_data()\n # Show debug output\n debug('[Response data] %s' % data)\n # Create XML document object\n document = parseString(data)\n return document",
"def loading_xml(self):\n\n dom = minidom.parse(self.filepath)\n return dom",
"def html(self):\n if not self._html:\n self._html = parse(self.input_doc, self.options.get('url'))\n\n return self._html",
"def main_document(self) -> SpdxDocument:\n self._generate_documents()\n return cast(SpdxDocument, self._main_document)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
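Sketch of round-tripping a small document through `asdom`; building the document uses `docutils.frontend.get_default_settings()` and `docutils.utils.new_document`, which are assumptions about the surrounding docutils API (older releases use `frontend.OptionParser(...).get_default_values()` instead):

    from docutils import frontend, nodes, utils

    settings = frontend.get_default_settings()          # assumption: docutils >= 0.18
    doc = utils.new_document("<asdom example>", settings)
    doc += nodes.paragraph(text="hello")

    dom = doc.asdom()                                    # xml.dom.minidom Document
    print(dom.toprettyxml(indent="  "))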
Call self."``visit_`` + node class name" with `node` as parameter. If the ``visit_...`` method does not exist, call self.unknown_visit. | def dispatch_visit(self, node):
node_name = node.__class__.__name__
method = getattr(self, 'visit_' + node_name, self.unknown_visit)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
% (method.__name__, node_name))
return method(node) | [
"def Visit(self, node):\n mapping = self._mapping\n\n # Build a visitor that performs the old_class -> new_class mapping:\n class Visitor(visitors.Visitor):\n visits_all_node_types = True\n name_to_class = mapping\n for name, new_cls in mapping.iteritems():\n\n def Visit(self, node):\n # Python doesn't allow us to build this as a closure, so we have to\n # use the clunky way of retrieving the replacement class.\n cls = self.name_to_class.get(node.__class__.__name__)\n if cls is not None:\n return cls(*node)\n else:\n return node\n locals()[\"Visit\" + name] = Visit\n return node.Visit(Visitor())",
"def generic_visit(self, node):\n raise NotImplementedError('Unsupported AST node %s' % node)",
"def visit(self, node, args=()):\n if not isinstance(node, broom.Node):\n node = node.node(*args)\n self.visitGraph(node)",
"def generic_visit(self, node, offset=0):\n lead = ' ' * offset\n\n output = f\"{lead}{node.lineno} {node.__class__.__name__}: \"\n\n if node.attr_names:\n vlist = [(n, getattr(node, n)) for n in node.attr_names]\n output += ', '.join('%s = %s' % v for v in vlist)\n\n print(output)\n\n for (_, child) in node.children():\n self.visit(child, offset=offset + 2)",
"def traverse(self, node, branch=None, **kw):\n\n parent = node\n if branch:\n node = node[branch]\n\n if node is None:\n return\n\n if branch and 'type' in parent:\n self.debug('TRAVERSE {parent[type]} -> {branch}:{node[type]}',\n node=node, parent=parent, branch=branch)\n else:\n self.debug('TRAVERSE {node[type]}', node=node)\n\n assert '__traversed' not in node\n node['__traversed'] = True\n\n self.set_location(node)\n\n try:\n handler = self.node_handlers[node['type']]\n except KeyError:\n if self.err.debug_level:\n self.error(err_id=('traverser', 'traverse', 'unknown_node'),\n error='Unknown node type: {[type]}'.format(node))\n\n log.exception('Unknown node type: {[type]}'.format(node))\n key = 'unknown_node_types'\n self.err.metadata.setdefault(key, defaultdict(int))\n self.err.metadata[key][node['type']] += 1\n else:\n with self._debug_level:\n result = handler(node, **kw)\n if isinstance(result, (JSWrapper, JSValue)):\n result.parse_node = node\n return result",
"def _hx_visit_generic(self, visitor_class, *args):\n v = visitor_class(*args)\n v.apply_to(self._cfunc.body, None)",
"def apply_visitor(visitor, decl_inst):\n\n fname = 'visit_' + \\\n decl_inst.__class__.__name__[:-2] # removing '_t' from class name\n if not hasattr(visitor, fname):\n raise visit_function_has_not_been_found_t(visitor, decl_inst)\n return getattr(visitor, fname)()",
"def parse(self, node):\n parseMethod = getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)",
"def generic_visit(self, node):\n\n # let the super class visit this node first\n super().generic_visit(node)\n\n # only trace statements\n if not isinstance(node, ast.stmt):\n return node\n\n # a unique identifier and initial data for this node\n node_id = len(self.nodes)\n self.nodes.append({\n 'node': node,\n 'counter': 0,\n 'time': 0,\n })\n\n # tracing is done by calling \"execute_node\" of this class\n func1 = ast.Attribute(\n value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),\n attr='execute_node1',\n ctx=ast.Load()\n )\n func2 = ast.Attribute(\n value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),\n attr='execute_node2',\n ctx=ast.Load()\n )\n\n # the argument to the tracing function is the unique node identifier\n args = [ast.Num(n=node_id)]\n\n # the tracer will be executed whenever the statement is executed\n tracer1 = ast.Expr(value=ast.Call(func=func1, args=args, keywords=[]))\n tracer2 = ast.Expr(value=ast.Call(func=func2, args=args, keywords=[]))\n\n # spoof location information for the generated node\n ast.copy_location(tracer1, node)\n ast.copy_location(tracer2, node)\n\n # inject tracers in a try-finally construct around this node\n wrapper = ast.Try(body=[node], handlers=[], orelse=[], finalbody=[tracer2])\n return [tracer1, wrapper]",
"def define_visitor(self, file_ref, baseclass):\n self.write_ln(file_ref, \"class Visitor:\\n\")\n self.write_newline(file_ref)\n for entry in self.types:\n details = entry.split(\":\")\n classname = details[0].strip()\n self.write_ln(file_ref,\n \"def visit_\" + classname.lower() + \"_\" + baseclass.lower() + \"(self, \" + classname.lower() +\n \"_\" + baseclass.lower() + \": '\" + classname + \"'): pass\\n\",\n indent=4)",
"def visitClass(self, testClass):",
"def parse(self, node):\n pm = getattr(self, \"parse_%s\"%node.__class__.__name__)\n pm(node)",
"def get_visitor(self, node):\r\n visitor = self._visitor_cache.get(node.__class__)\r\n if visitor is None:\r\n method = 'visit_' + node.__class__.__name__\r\n visitor = getattr(self, method, None)\r\n self._visitor_cache[node.__class__] = visitor\r\n return visitor",
"def generic_visit(self, node):\n if (\n not self.replaced\n and hasattr(node, \"_location\")\n and node._location == self.search\n ):\n self.replaced = True\n return self.replacement_node\n else:\n return NodeTransformer.generic_visit(self, node)",
"def build_cases(node: ASTNodeType) -> None:\n # Don't bother processing classes unless they actually have\n # concrete subclasses, otherwise we would be producing dead code.\n if not node.concrete_subclasses:\n return\n\n to_pop = False\n\n if node == root_node:\n # As a special case, emit actions for the root node outside of\n # the top-level CASE block as we don't need to dispatch on\n # anything for them: they always must be applied.\n actions = actions_for_node(node, node_var)\n if actions:\n result.append(actions)\n\n else:\n # If there are actions for this node, add a matcher for them\n # and process the subclasses in a nested CASE block.\n actions = actions_for_node(node, Matcher.new_node_var(node))\n if actions:\n m = Matcher(node, actions)\n case_stack[-1].matchers.append(m)\n case_stack.append(m.inner_case)\n to_pop = True\n\n for subcls in node.subclasses:\n build_cases(subcls)\n\n if to_pop:\n case_stack.pop()",
"def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.kind,\n ' '.join(\n t.spelling for t in node.get_tokens())\n ),\n file=sys.stderr\n )",
"def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname",
"def EnterClassType(self, node):\n nodes = [node]\n seen = set()\n while nodes:\n cur_node = nodes.pop(0)\n if cur_node in seen:\n continue\n seen.add(cur_node)\n for prefix, cls in self._Lookup(cur_node):\n if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):\n if cls.type.cls:\n cls = cls.type.cls\n else:\n nodes.append(cls.type)\n if isinstance(cls, pytd.Class):\n node.cls = cls\n return\n else:\n logging.warning(\"Couldn't resolve %s: Not a class: %s\",\n prefix + node.name, type(cls))",
"def call_visitor(fort_node):\n v = ASR2PyVisitor()\n v.visit(fort_node)\n res_ast = v.ret_ast()\n return res_ast",
"def _(self, node: AnnCastRecordDef):\n # TODO: Where should bases field be used?\n funcs = []\n fields = []\n if len(node.funcs) > 0:\n funcs = self.visit_list(node.funcs)\n if len(node.fields) > 0:\n fields = self.visit_list(node.fields)\n node_uid = uuid.uuid4()\n self.G.add_node(node_uid, label=\"Class: \" + node.name)\n\n # Add attributes to the graph\n attr_uid = uuid.uuid4()\n self.G.add_node(attr_uid, label=\"Attributes\")\n self.G.add_edge(node_uid, attr_uid)\n for n in fields:\n self.G.add_edge(attr_uid, n)\n\n # Add functions to the graph\n funcs_uid = uuid.uuid4()\n self.G.add_node(funcs_uid, label=\"Functions\")\n self.G.add_edge(node_uid, funcs_uid)\n for n in funcs:\n self.G.add_edge(funcs_uid, n)\n\n return node_uid"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
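A sketch of a visitor that relies on `dispatch_visit`: `SparseNodeVisitor` supplies no-op methods for every node type, so only the interesting `visit_...` method needs defining. Document construction follows the `asdom` sketch above and carries the same assumptions:

    from docutils import frontend, nodes, utils

    class TitleCollector(nodes.SparseNodeVisitor):
        def __init__(self, document):
            super().__init__(document)
            self.titles = []

        def visit_title(self, node):
            self.titles.append(node.astext())

    settings = frontend.get_default_settings()           # assumption: docutils >= 0.18
    doc = utils.new_document("<visitor example>", settings)
    section = nodes.section()
    section += nodes.title(text="Overview")
    doc += section

    collector = TitleCollector(doc)
    doc.walk(collector)            # walk() calls dispatch_visit for every node
    assert collector.titles == ["Overview"]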
Call self."``depart_`` + node class name" with `node` as parameter. If the ``depart_...`` method does not exist, call self.unknown_departure. | def dispatch_departure(self, node):
node_name = node.__class__.__name__
method = getattr(self, 'depart_' + node_name, self.unknown_departure)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
% (method.__name__, node_name))
return method(node) | [
"def connect_directive_node(self, name, f_visit, f_depart):\r\n self.builder._function_node.append((name, f_visit, f_depart))",
"def _depart(self, data, sock, forward=True):\n if forward:\n self.send_replicas_forward()\n time.sleep(1)\n self.send_data_forward()\n # Let the previous node know who its new next node is, after I depart\n self.neighbors.send_back('next:{}:{}:{}'.format(self.neighbors.front_ip,self.neighbors.front_port, self.neighbors.front_hash))\n # Let the next node know who its new previous node is, after I depart\n self.neighbors.send_front('prev:{}:{}:{}'.format(self.neighbors.back_ip,self.neighbors.back_port, self.neighbors.back_hash))\n # forward = False means all have to exit and not pass their values\n \n # Else let the master know that you departed\n if forward == True :\n send_request(self.m_host, self.m_port, 'depart:'+self.id)\n self.close = True\n self.message_queues[sock].put('Done...Bye Bye')",
"def Node2Method(self, node): \n ##TODO(GuoChenkai) Nodef to Encodedmethod\n ## convert through the method_name\n #res = [] \n #methods = self.d.get_method(gvm_node.method_name)\n #for i in methods:\n #if i.get_name() == gvm_node.method_name:\n #res.append(i)\n #return res\n \n #print start_method.XREFfrom.items\n \n ## convert through the id (id does not match) \n #method = self.d.get_method_by_idx(gvm_node.id)\n #return method \n \n ## convert through the map_nodemethod {} within this class\n return self.d.get_method_descriptor(node.class_name,node.method_name,node.descriptor)\n #if not gvm_node.id in self.map_nodemethod:\n #return None \n #elif self.map_nodemethod[gvm_node.id] != None:\n #method = self.map_nodemethod[gvm_node.id]\n #return method\n #else: return None",
"def departure(self, departureTime):\n self._departure = departureTime",
"def departed(self, airport, howMany=MAX_RECORD_LENGTH, filter=TrafficFilter.ALL, offset=0):\n data = {\"airport\": airport, \"howMany\": howMany, \"filter\": filter, \"offset\": offset}\n return self._request(\"Departed\", data)",
"def add_departure(self, departure_date, departure_time):\r\n self.departure_date = departure_date\r\n self.departure_time = departure_time",
"def depart_ucomment_node(self, node):\n pass",
"def parse(self, node):\n parseMethod = getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)",
"def addInstanceRemovedDagPathCallback(*args, **kwargs):\n \n pass",
"def next_nearby_departures_of_station(\n self, station_id: int, time: str, lang: str = \"en\"\n ) -> Optional[PublicTransitResponse]:\n\n data = {\n \"lang\": lang,\n \"stnId\": station_id,\n \"time\": time,\n \"apikey\": self._api_key,\n }\n return self.__get(data, \"board.json\", \"NextDepartures\")",
"def departure_time(self, departure_time: int):\n\n self._departure_time = departure_time",
"def get_next_departure(self, t):\n if t > self.next_departure:\n raise Exception(\"current time is after departure!\")\n return self.next_departure - t",
"def generate_cold_departure(self, t):\n self.next_departure = t + self.cold_service_process.generate_trace()",
"def process_deceased_field(deceased_field):\n # Try to parse the deceased fields when the fields are comma separated.\n try:\n return parse_comma_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields when the fields are pipe separated.\n try:\n return parse_pipe_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields when the fields are space separated.\n try:\n return parse_space_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields assuming it contains an age.\n try:\n return parse_age_deceased_field(deceased_field)\n except Exception:\n pass\n\n raise ValueError(f'Cannot parse {Fields.DECEASED}: {deceased_field}')",
"def dd_run_static_method(dd_node):\n return execute_static_method_dynamically(dd_node.Attributes[DD_NODE_ATTR_NAME['module_name']].GetValueAsString(),\n dd_node.Attributes[DD_NODE_ATTR_NAME['class_name']].GetValueAsString(),\n dd_node.Attributes[DD_NODE_ATTR_NAME['method_name']].GetValueAsString(),\n dd_node)",
"def addDagDagPathCallback(*args, **kwargs):\n \n pass",
"def erode(self, grid, dt=None, node_elevs='topographic__elevation',\n node_drainage_areas='drainage_area',\n node_receiving_flow='flow_receiver',\n node_order_upstream='upstream_ID_order',\n node_slope='topographic__steepest_slope',\n steepest_link='links_to_flow_receiver',\n runoff_rate_if_used=None,\n #W_if_used=None, Q_if_used=None,\n stability_condition='loose',\n Dchar_if_used=None, io=None):\n\n if runoff_rate_if_used != None:\n runoff_rate = runoff_rate_if_used\n assert type(runoff_rate) in (int, float, np.ndarray)\n else:\n runoff_rate = 1.\n\n if dt==None:\n dt = self.tstep\n try:\n self.Dchar=self.Dchar_in\n except AttributeError:\n try:\n self.Dchar=grid.at_node[Dchar_if_used]\n except FieldError:\n assert type(Dchar_if_used)==np.ndarray\n self.Dchar=Dchar_if_used\n\n if type(node_elevs)==str:\n node_z = grid.at_node[node_elevs]\n else:\n node_z = node_elevs\n\n if type(node_drainage_areas)==str:\n node_A = grid.at_node[node_drainage_areas]\n else:\n node_A = node_drainage_areas\n\n if type(node_receiving_flow)==str:\n flow_receiver = grid.at_node[node_receiving_flow]\n else:\n flow_receiver = node_receiving_flow\n\n #new V3:\n if type(node_order_upstream)==str:\n s_in = grid.at_node[node_order_upstream]\n else:\n s_in = node_order_upstream\n\n if type(node_slope)==str:\n node_S = grid.at_node[node_slope]\n else:\n node_S = node_slope\n\n if self.lamb_flag:\n variable_shields_crit = 0.15*node_S**0.25\n try:\n variable_thresh = variable_shields_crit*self.shields_prefactor_to_shear\n except AttributeError:\n variable_thresh = variable_shields_crit*self.shields_prefactor_to_shear_noDchar*self.Dchar\n\n\n if type(steepest_link)==str:\n link_length = np.empty(grid.number_of_nodes,dtype=float)\n link_length.fill(np.nan)\n draining_nodes = np.not_equal(grid.at_node[steepest_link], BAD_INDEX_VALUE)\n core_draining_nodes = np.intersect1d(np.where(draining_nodes)[0], grid.core_nodes, assume_unique=True)\n link_length[core_draining_nodes] = grid.link_length[grid.at_node[steepest_link][core_draining_nodes]]\n #link_length=grid.node_spacing_horizontal\n else:\n link_length = grid.link_length[steepest_link]\n square_link_length = np.square(link_length) #nans propagate forward\n\n try:\n transport_capacities_thresh = self.thresh*self.Qs_thresh_prefactor*runoff_rate**(0.66667*self._b)*node_A**self.Qs_power_onAthresh\n except AttributeError:\n transport_capacities_thresh = variable_thresh*self.Qs_thresh_prefactor*runoff_rate**(0.66667*self._b)*node_A**self.Qs_power_onAthresh\n\n transport_capacity_prefactor_withA = self.Qs_prefactor*runoff_rate**(0.6+self._b/15.)*node_A**self.Qs_power_onA\n\n internal_t = 0.\n break_flag = False\n dt_secs = dt*31557600.\n counter = 0\n\n while 1: #use the break flag, to improve computational efficiency for runs which are very stable\n #we assume the drainage structure is forbidden to change during the whole dt\n #print \"loop...\"\n #note slopes will be *negative* at pits\n #track how many loops we perform:\n counter += 1\n downward_slopes = node_S.clip(0.)\n #positive_slopes = np.greater(downward_slopes, 0.)\n transport_capacities_S = transport_capacity_prefactor_withA*(downward_slopes)**0.7\n trp_diff = (transport_capacities_S - transport_capacities_thresh).clip(0.)\n transport_capacities = np.sqrt(trp_diff*trp_diff*trp_diff)\n\n if stability_condition == 'tight':\n mock_diffusivities = np.zeros_like(transport_capacities, dtype=float)\n mock_diffusivities = transport_capacities/downward_slopes\n tstep_each_node = 10.*square_link_length/mock_diffusivities 
#we're relaxing the condition fivefold here, as the true VonNeumann condition is VERY restrictive\n #if no node exceeds crit, tstep_each_node will just be nans and infs\n delta_t_internal = np.nanmin(tstep_each_node) #in seconds, nanmin avoids the pit nodes\n if delta_t_internal == np.inf: #no node exceeds crit\n delta_t_internal = dt_secs #nothing happened, so let the loop complete, awaiting more uplift\n if internal_t + delta_t_internal >= dt_secs:\n dt_this_step = dt_secs-internal_t #now in seconds\n break_flag = True\n else:\n dt_this_step = delta_t_internal #a min tstep was found (seconds). We terminate the loop\n else: #loose, gradient based method\n dt_this_step = dt_secs-internal_t #and the adjustment is made AFTER the dz calc\n\n sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)\n dz = np.zeros(grid.number_of_nodes, dtype=float)\n len_s_in = s_in.size\n cell_areas = self.cell_areas\n\n for i in s_in[::-1]: #work downstream\n sed_flux_into_this_node = sed_into_node[i]\n sed_flux_out_of_this_node = transport_capacities[i] #we work in volume flux, not volume per se here\n flux_excess = sed_flux_into_this_node - sed_flux_out_of_this_node #gets deposited\n dz[i] = flux_excess/cell_areas*dt_this_step\n sed_into_node[flow_receiver[i]] += sed_flux_out_of_this_node\n\n if stability_condition == 'loose':\n elev_diff = node_z - node_z[flow_receiver]\n delta_dz = dz[flow_receiver] - dz\n node_flattening = self.fraction_gradient_change*elev_diff - delta_dz #note the condition is that gradient may not change by >X%, not must be >0\n #note all these things are zero for a pit node\n most_flattened_nodes = np.argmin(node_flattening[grid.core_nodes])\n most_flattened_nodes = np.take(grid.core_nodes, most_flattened_nodes) #get it back to node number, not core_node number\n most_flattened_val = np.take(node_flattening, most_flattened_nodes)\n if most_flattened_val>=0.:\n break_flag = True #all nodes are stable\n else: # a fraction < 1\n dt_fraction = self.fraction_gradient_change*np.take(elev_diff, most_flattened_nodes)/np.take(delta_dz, most_flattened_nodes)\n #print dt_fraction\n #correct those elevs\n dz *= dt_fraction\n dt_this_step *= dt_fraction\n\n #print np.amax(dz), np.amin(dz)\n\n node_z[grid.core_nodes] += dz[grid.core_nodes]\n\n if break_flag:\n break\n #do we need to reroute the flow/recalc the slopes here? -> NO, slope is such a minor component of Diff we'll be OK\n #BUT could be important not for the stability, but for the actual calc. 
So YES.\n node_S = np.zeros_like(node_S)\n #print link_length[core_draining_nodes]\n node_S[core_draining_nodes] = (node_z-node_z[flow_receiver])[core_draining_nodes]/link_length[core_draining_nodes]\n internal_t += dt_this_step #still in seconds, remember\n\n self.grid=grid\n\n active_nodes = grid.get_active_cell_node_ids()\n if io:\n try:\n io[active_nodes] += node_z[active_nodes]\n except TypeError:\n if type(io)==str:\n elev_name = io\n else:\n return grid, io\n\n else:\n elev_name = node_elevs\n\n if self.return_ch_props:\n #add the channel property field entries,\n #'channel_width', 'channel_depth', and 'channel_discharge'\n Q = self.k_Q*runoff_rate*node_A**self._c\n W = self.k_w*Q**self._b\n H = Q**(0.6*(1.-self._b))*(self.mannings_n/self.k_w)**0.6*node_S**-0.3\n tau = self.fluid_density*self.g*H*node_S\n grid.at_node['channel_width'] = W\n grid.at_node['channel_depth'] = H\n grid.at_node['channel_discharge'] = Q\n grid.at_node['channel_bed_shear_stress'] = tau\n\n\n grid.at_node['fluvial_sediment_transport_capacity'] = transport_capacities\n grid.at_node['fluvial_sediment_flux_into_node'] = sed_into_node\n #elevs set automatically to the name used in the function call.\n if stability_condition == 'tight':\n grid.at_node['effective_fluvial_diffusivity'] = mock_diffusivities\n self.iterations_in_dt = counter\n\n return grid, grid.at_node[elev_name]",
"def wait_for_services(self, node_name='default_driver'):\n rospy.init_node(node_name)\n rospy.loginfo('rospy init node '+str(node_name))\n \n rospy.loginfo('waiting for services')\n\n rospy.wait_for_service('/lead/goal')\n rospy.wait_for_service('/lead/next')\n rospy.wait_for_service('/lead/start')\n rospy.wait_for_service('/lead/back')\n\n # Assign callables for the Path services\n self.lead_goal = rospy.ServiceProxy('/lead/goal', Goal)\n self.lead_next = rospy.ServiceProxy('/lead/next', Goal)\n self.lead_start = rospy.ServiceProxy('/lead/start', Goal)\n self.lead_back = rospy.ServiceProxy('/lead/back', Goal)",
"def traverse(self, node, branch=None, **kw):\n\n parent = node\n if branch:\n node = node[branch]\n\n if node is None:\n return\n\n if branch and 'type' in parent:\n self.debug('TRAVERSE {parent[type]} -> {branch}:{node[type]}',\n node=node, parent=parent, branch=branch)\n else:\n self.debug('TRAVERSE {node[type]}', node=node)\n\n assert '__traversed' not in node\n node['__traversed'] = True\n\n self.set_location(node)\n\n try:\n handler = self.node_handlers[node['type']]\n except KeyError:\n if self.err.debug_level:\n self.error(err_id=('traverser', 'traverse', 'unknown_node'),\n error='Unknown node type: {[type]}'.format(node))\n\n log.exception('Unknown node type: {[type]}'.format(node))\n key = 'unknown_node_types'\n self.err.metadata.setdefault(key, defaultdict(int))\n self.err.metadata[key][node['type']] += 1\n else:\n with self._debug_level:\n result = handler(node, **kw)\n if isinstance(result, (JSWrapper, JSValue)):\n result.parse_node = node\n return result",
"def visit(self, node, args=()):\n if not isinstance(node, broom.Node):\n node = node.node(*args)\n self.visitGraph(node)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
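A sketch pairing visits with departures: `GenericNodeVisitor` routes every node type to `default_visit`/`default_departure`, and `Node.walkabout()` calls `dispatch_departure` on the way back up the tree:

    from docutils import nodes

    class DepthTracker(nodes.GenericNodeVisitor):
        def __init__(self, document):
            super().__init__(document)
            self.depth = self.max_depth = 0

        def default_visit(self, node):
            self.depth += 1
            self.max_depth = max(self.max_depth, self.depth)

        def default_departure(self, node):
            self.depth -= 1

    # Usage (document built as in the earlier sketches):
    #     tracker = DepthTracker(doc); doc.walkabout(tracker); print(tracker.max_depth)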
Called when entering unknown `Node` types. Raise an exception unless overridden. | def unknown_visit(self, node):
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s visiting unknown node type: %s'
% (self.__class__, node.__class__.__name__)) | [
"def currentNodeHasUnknownType(*args, **kwargs):\n \n pass",
"def unknownNode(plugin=bool, realClassName=bool, realClassTag=bool):\n pass",
"def generic_visit(self, node):\n raise NotImplementedError('Unsupported AST node %s' % node)",
"def register_for_new_hierarchy_nodes(self):\n pass",
"def allNodeTypes(includeAbstract=bool):\n pass",
"def process(self):\n for node_class in self.setup[\"node_classes\"]:\n for node in nuke.allNodes(recurseGroups=True):\n class_name = node.Class()\n if class_name != node_class:\n continue\n\n self.logger.info(\"%s '%s' because its node class (%s) is \"\n \"included in %s\", self.setup[\"mode\"],\n node.name(), class_name,\n self.setup[\"node_classes\"])\n\n self.handle_node(node)",
"def add_node(self,node_type):\n #Our start node is more specific than this... Need to have another validation method\n if node_type not in node_types:\n raise Exception('node type must be one of greent.node_types')\n self.definition.node_types.append(node_type)",
"def warn_on_undocumented(self, node):\n # Ignore nodes that are created during the expansion of enum nodes:\n # users cannot add documentation for these.\n if node.base and node.base.is_enum_node:\n return\n\n # Likewise for the very abstract generic list type\n elif node.is_generic_list_type:\n return\n\n WarningSet.undocumented_nodes.warn_if(\n not node._doc, 'This node lacks documentation')",
"def handle_unknown(self, name, spec, attrs):\n inherited = self.find_inherited(name, spec, attrs)\n\n attributes = spec\n if attributes.get(\"__extend__\", True):\n attributes = self.combine_dicts(inherited, spec)\n\n kls = attributes.get(\"__main__\")\n kwargs = self.attributes_from(attributes)\n\n self.handle_attributes(name, {name.lower():(name, kls, kwargs)}, None, attrs, bookkeeper_method=\"add_custom\")",
"def _get_node_types(self):\n for type in self.cfg.node_types:\n self._node_types[type.name] = type.label",
"def register_for_deleted_hierarchy_nodes(self):\n pass",
"def resolve_type_nodes(self, root: ASTNode) -> None:\n\n errors = []\n for child in itertools.chain(self.properties,\n self.functions.values(),\n self.classes.values()):\n try:\n try:\n # Give priority to narrowest scope (class-level scope in this case)\n child.resolve_type_nodes(self) # type: ignore\n except TypeResolutionError:\n child.resolve_type_nodes(root) # type: ignore\n except TypeResolutionError as e:\n errors.append(str(e))\n if len(errors) > 0:\n raise TypeResolutionError(\n 'Failed to resolve \"{}\" class against \"{}\". Errors: {}'.format(\n self.full_export_name, root.full_export_name, errors\n )\n )",
"def register_for_new_hierarchies(self):\n pass",
"def unknown_meta_event(self, meta_type, data):\n pass",
"def nodeTypeNameBase(node):\n return ('',)",
"def pre_ImportFrom(self):\n if self.cur_node.module == \"typing\":\n self.replace(None)",
"def test_node_instance(self):\n self.assertTrue(isinstance(self.node, SuperTestNode))",
"def _mark_children(self, t):\n for elt_t in t.children():\n self._mark_type(elt_t)",
"def handle(self, tree, msg, lastRetVal=None):\n raise NotImplementedError, 'needs to be overridden in a subclass'",
"def onUnknown(self, data):\n return CONTINUE"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
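Sketch of relaxing the strict behaviour above: node type names listed in a visitor's `optional` tuple are skipped instead of raising `NotImplementedError`, unless the `strict_visitor` setting forces the exception:

    from docutils import nodes

    class MinimalVisitor(nodes.NodeVisitor):
        # Extend the base class's `optional` tuple rather than replacing it.
        optional = nodes.NodeVisitor.optional + ("paragraph", "Text")

        def visit_section(self, node):
            pass

        def depart_section(self, node):
            pass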
Quote attributes for pseudoxml | def pseudo_quoteattr(value):
return '"%s"' % value | [
"def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]",
"def quoteattr(data, entities={}):\r\n data = escape(data, entities)\r\n if '\"' in data:\r\n if \"'\" in data:\r\n data = '\"%s\"' % data.replace('\"', \""\")\r\n else:\r\n data = \"'%s'\" % data\r\n else:\r\n data = '\"%s\"' % data\r\n return data",
"def test_attr_escape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n <elem class=\"$myvar\"/>\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n <elem class=\""foo"\"/>\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))",
"def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)",
"def quoted_attribute_value(self, value):\n quote_with = '\"'\n if '\"' in value:\n if \"'\" in value:\n replace_with = '"'\n value = value.replace('\"', replace_with)\n else:\n quote_with = \"'\"\n return quote_with + value + quote_with",
"def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'"'))\n return _write",
"def _mcpyrate_quotes_attr(attr, *, force_import=False):\n return _mcpyrate_attr(f\"quotes.{attr}\", force_import=force_import)",
"def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quotes)\n node1.quotes = \"'\"\n node2.quotes = None\n self.assertEqual(\"'\", node1.quotes)\n self.assertIs(None, node2.quotes)\n self.assertRaises(ValueError, setattr, node1, \"quotes\", \"foobar\")\n self.assertRaises(ValueError, setattr, node3, \"quotes\", None)\n self.assertRaises(ValueError, Attribute, wraptext(\"id\"),\n wraptext(\"foo bar baz\"), None)",
"def _escape_attr_value(val):\n if isinstance(val, int):\n return val\n return val.replace(u'&', u'&') \\\n .replace(u'\\t', u'	') \\\n .replace(u'\\n', u'
') \\\n .replace(u'\\r', u'
') \\\n .replace(u'\"', u'"') \\\n .replace(u'<', u'<')",
"def __repr__(self):\n return str.format(\n 'TextAttributes(letter_spacing={}, paragraph_align={})',\n repr(self.letter_spacing), repr(self.paragraph_align))",
"def attributeEscapingDoneOutside(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n return data",
"def buildattributestring(self, attr):\n if not isinstance(attr, dict):\n attr = dict()\n\n parmlist = []\n for k, v in attr.items():\n if k not in self.EXCLUDEATTR:\n # any properly formed xml/json should have keywords already\n # escaped however this is just a sanity check. also, it\n # misses 'to' which is not a keyword in python, but is\n # treated as such in pymeta oh well\n if keyword.iskeyword(k):\n k += '_'\n\n v = repr(v)\n parmlist.append('%s=%s' % (k, v))\n\n attribstr = ', '.join(parmlist)\n\n return attribstr",
"def _attrprint(d, delimiter=', '):\n return delimiter.join(('\"%s\"=\"%s\"' % item) for item in sorted(d.items()))",
"def attrs2html(self, attrs):\n ks = []\n if 'class' in attrs.keys():\n ks.append('class')\n if 'id' in attrs.keys():\n ks.append('id')\n for k in attrs.keys():\n if k not in ['id', 'class']:\n ks.append(k)\n ls = []\n for k in ks:\n v = attrs[k]\n if k == 'id':\n v = f'{self.before_id}{v}'\n s = f'{k}=\"{v}\"'\n ls.append(s)\n return \" \".join(ls)",
"def text(self) -> str:\n attr_text = self.separator.join(self.values)\n return f'{self.name}=\"{attr_text}\"'",
"def format_custom_attr(ddic):\n s = \"\"\n for k1, d2 in ddic.items():\n if s:\n s += \" \"\n s += \"%s\" % k1\n s2 = \"\"\n for k2, v2 in d2.items():\n if s2:\n s2 += \" \"\n s2 += \"%s:%s;\" % (k2, v2)\n s += \" {%s}\" % s2\n return s",
"def _xmlattrs_str(self):\n return ''.join(self._xmlattrs)",
"def quote(key, as_key=True):\n return el.quote(key, as_key=as_key)",
"def quote(value):\n return '\"%(value)s\"' % {\n 'value': str(value),\n }",
"def catch_phrase_attribute(cls):\r\n return cls.random_element(cls.attributes)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
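The positive document in this row is docutils' one-line `pseudo_quoteattr`, which, unlike the `xml.sax.saxutils.quoteattr` wrappers that appear among the negatives, performs no escaping at all. A minimal sketch contrasting the two (illustrative only, using just the standard library):

```python
from xml.sax.saxutils import quoteattr

def pseudo_quoteattr(value):
    # Pseudo-XML is a human-readable debug rendering, so embedded quotes
    # and markup characters are deliberately left unescaped.
    return '"%s"' % value

print(pseudo_quoteattr('say "hi" & <bye>'))  # "say "hi" & <bye>"
print(quoteattr('say "hi" & <bye>'))         # 'say "hi" &amp; &lt;bye&gt;'
```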
Get CSV data from the directive content, from an external file, or from a URL reference. | def get_csv_data(self):
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source | [
"def get_csv_data(self):\r\n encoding = self.options.get(\r\n 'encoding', self.state.document.settings.input_encoding)\r\n error_handler = self.state.document.settings.input_encoding_error_handler\r\n if self.content:\r\n # CSV data is from directive content.\r\n if 'file' in self.options or 'url' in self.options:\r\n error = self.state_machine.reporter.error(\r\n '\"%s\" directive may not both specify an external file and'\r\n ' have content.' % self.name, nodes.literal_block(\r\n self.block_text, self.block_text), line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n source = self.content.source(0)\r\n csv_data = self.content\r\n elif 'file' in self.options:\r\n # CSV data is from an external file.\r\n if 'url' in self.options:\r\n error = self.state_machine.reporter.error(\r\n 'The \"file\" and \"url\" options may not be simultaneously'\r\n ' specified for the \"%s\" directive.' % self.name,\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n source_dir = os.path.dirname(\r\n os.path.abspath(self.state.document.current_source))\r\n source = os.path.normpath(os.path.join(source_dir,\r\n self.options['file']))\r\n source = utils.relative_path(None, source)\r\n try:\r\n self.state.document.settings.record_dependencies.add(source)\r\n csv_file = io.FileInput(source_path=source,\r\n encoding=encoding,\r\n error_handler=error_handler)\r\n csv_data = csv_file.read().splitlines()\r\n except IOError as error:\r\n severe = self.state_machine.reporter.severe(\r\n 'Problems with \"%s\" directive path:\\n%s.'\r\n % (self.name, SafeString(error)),\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(severe)\r\n elif 'url' in self.options:\r\n # CSV data is from a URL.\r\n # Do not import urllib2 at the top of the module because\r\n # it may fail due to broken SSL dependencies, and it takes\r\n # about 0.15 seconds to load.\r\n import urllib.request, urllib.error, urllib.parse\r\n source = self.options['url']\r\n try:\r\n csv_text = urllib.request.urlopen(source).read()\r\n except (urllib.error.URLError, IOError, OSError, ValueError) as error:\r\n severe = self.state_machine.reporter.severe(\r\n 'Problems with \"%s\" directive URL \"%s\":\\n%s.'\r\n % (self.name, self.options['url'], SafeString(error)),\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(severe)\r\n csv_file = io.StringInput(\r\n source=csv_text, source_path=source, encoding=encoding,\r\n error_handler=(self.state.document.settings.\\\r\n input_encoding_error_handler))\r\n csv_data = csv_file.read().splitlines()\r\n else:\r\n error = self.state_machine.reporter.warning(\r\n 'The \"%s\" directive requires content; none supplied.'\r\n % self.name, nodes.literal_block(\r\n self.block_text, self.block_text), line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n return csv_data, source",
"def load_csv(cls, path):\n with pyglet.resource.file(path, mode='r') as csv_file:\n csv_data = list(csv.reader(csv_file))\n\n return csv_data",
"def get_csv():\n def generate(header, lines):\n yield '\"'+header+'\"' + '\\n'\n for line in lines: # lines have already quoted fields\n yield line + '\\n'\n if request.form.get('variants_button'):\n header = request.form['vheader']\n lines = request.form.getlist('variant')\n filename = str(request.form.get('subm_id')) + '.Variant.csv'\n else:\n header = request.form['cdheader']\n lines = request.form.getlist('case')\n filename = str(request.form.get('subm_id')) + '.CaseData.csv'\n\n headers = Headers()\n headers.add('Content-Disposition','attachment', filename=filename)\n return Response(generate(header, lines), mimetype='text/csv', headers=headers)",
"def csv_reader(topology, schema, file, header=False, encoding=None, separator=None, ignoreExtraFields=False, hot=False, name=None):\n fe = streamsx.spl.op.Expression.expression(Format.csv.name)\n _op = _FileSource(topology, schema, file=file, format=fe, hotFile=hot,encoding=encoding,separator=separator,ignoreExtraCSVValues=ignoreExtraFields)\n return _op.outputs[0]",
"def download_csv(type, date_range):\n print('Beginning %s file download with requests' % (type))\n try:\n payload.update({'type': type, 'date_range': date_range})\n response = requests.get(url, params=payload)\n filename = type + '.csv'\n with open(filename, 'wb') as f:\n f.write(response.content)\n except:\n pass\n # todo\n return",
"def extract_data(path_to_csv):\n # Typically, one would try to catch exceptions, incase there is an IO error\n # or some other kind of problem. But because this entire script hinges on\n # the idea that we can open this file, I'd rather let the exception surface\n # to the command prompt.\n csvfile = open(path_to_csv)\n return csv.DictReader(csvfile)",
"def csv_file_download_with_stream():\n idPARSING_DSF = int(request.args.get('pdsf_id', 0))\n if idPARSING_DSF != 0:\n pdsf = services.estimator.pdsf_file_info(idPARSING_DSF)\n else:\n return redirect(\"/my_task\")\n\n filename = pdsf[\"ParsingFile\"]\n fname = filename.split(\"/\")[-1]\n temp_df = pd.read_csv(filename, encoding='utf-8')\n\n # 그 결과를 앞서 만든 IO stream에 저장\n output_stream = StringIO()\n\n temp_df.to_csv(output_stream, index=False, encoding='utf-8')\n response = Response(\n output_stream.getvalue(),\n mimetype='text/csv; charset=utf-8',\n content_type='application/octet-stream',\n )\n\n response.headers[\"Content-Disposition\"] = f\"attachment; filename={fname}\".encode('utf-8')\n\n return response",
"def csv(self):\r\n reader = csv.reader(self.text.splitlines())\r\n return [l for l in reader]",
"def extract_source(date):\n with open(\"config.json\") as config:\n config_dict = json.load(config)\n\n csv_source_link = config_dict[\"csv_source_link\"] + f\"{date}.csv\"\n source_link = config_dict[\"source_link\"]\n csv_xpath = config_dict[\"csv_xpath\"]\n\n try:\n source_csv = pd.read_csv(csv_source_link, low_memory=False)\n print(\"Found source csv using csv source link in config...\")\n return source_csv, csv_source_link\n except HTTPError:\n print(f\"Cannot find csv with specified date: {date}\")\n\n driver = webdriver.Safari()\n driver.get(source_link)\n try:\n csv_element = WebDriverWait(driver, 30).until(\n EC.presence_of_element_located((By.XPATH, csv_xpath))\n )\n csv_url = csv_element.get_attribute(\"href\")\n print(f'Source csv found using xpath: {csv_url}')\n finally:\n driver.quit()\n\n if csv_url is not None:\n print(f\"Found source csv using xpath...\")\n return pd.read_csv(csv_url, low_memory=False), csv_url\n\n print(\"Cannot find csv from source using xpath\")\n return None",
"def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url, dtype=str)",
"def read_csv(self, filename):\n\n self.response.read_csv(filename)",
"def get_report():\n response = requests.get(REPORT_URL)\n return csv.DictReader(response.content.decode().split('\\r\\n'))",
"def import_csv(in_csv, delimit=','):\n with open(in_csv, encoding='utf-8') as source:\n sourcereader = csv.reader(source, delimiter=delimit)\n data_list = []\n for row in sourcereader:\n data_list.append(row)\n return data_list",
"def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp",
"def csv_import(name, sep, header):\n csv_file = pd.read_csv(name, sep = sep, header = header) ##loading data using read_csv from pandas\n return csv_file #returning the data structure",
"def test_list_addresses_csv(self):\n test_service.list_addresses_csv(self)\n\n query_string = [('ids', \"1,2\")]\n headers = { \n 'Accept': 'application/csv',\n }\n response = self.client.open(\n '/{currency}/addresses.csv'.format(currency='btc'),\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def __append_to_csv(self, content):\n csv_content = []\n\n # csv headers\n headers = self.queries[self.key]['headers']\n\n issue_content = [content[ih] for ih in headers['issue']] if 'issue' in headers else []\n if 'comment' in headers:\n for c in content['comments']:\n csv_content.append([c[ch] for ch in headers['comment']] + issue_content) \n else:\n csv_content.append(issue_content)\n\n print(csv_content)\n with open(self.queries[self.key]['output_filename'], 'a+') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerows(csv_content)",
"def process_csv(self, file_name: str):",
"def parse(cls, path: str) -> List[QuoteModel]:\r\n if not cls.can_ingest(path):\r\n raise Exception('cannot ingest filetype')\r\n\r\n quotes = []\r\n data_frame = pandas.read_csv(path, header=0)\r\n\r\n for i, row in data_frame.iterrows():\r\n quote = QuoteModel(f'\"{row[\"body\"]}\"', row['author'])\r\n quotes.append(quote)\r\n\r\n return quotes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
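The directive method above resolves its CSV source with a strict precedence (inline content, then the `file` option, then the `url` option) and routes conflicts through the docutils reporter. A rough, docutils-free sketch of the same precedence logic; the function and parameter names here are illustrative and not part of the docutils API:

```python
import csv
import os
import urllib.request

def get_csv_rows(content=None, file=None, url=None, encoding='utf-8'):
    """Return parsed CSV rows from inline content, a file, or a URL."""
    if content is not None:
        # Inline content excludes both 'file' and 'url'.
        if file is not None or url is not None:
            raise ValueError('content and file/url may not both be given')
        lines = content.splitlines()
    elif file is not None:
        # 'file' and 'url' are mutually exclusive.
        if url is not None:
            raise ValueError('file and url may not both be given')
        with open(os.path.abspath(file), encoding=encoding) as f:
            lines = f.read().splitlines()
    elif url is not None:
        lines = urllib.request.urlopen(url).read().decode(encoding).splitlines()
    else:
        raise ValueError('no CSV data supplied')
    return list(csv.reader(lines))

# get_csv_rows(content='a,b\n1,2') -> [['a', 'b'], ['1', '2']]
```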
Locate and return a role function from its language-dependent name, along with a list of system messages. If the role is not found in the current language, English is checked as a fallback. | def role(role_name, language_module, lineno, reporter):
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
| [
"def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter",
"def find_role(cls, keyword):\n return _CompilerRole.find(keyword)",
"def get_role(obj, role_name):\n for role in obj.roles:\n if role.name == role_name:\n return role\n return None",
"def role_from_first_message(message: Message) -> Dialogue.Role:\n return BaseOefSearchDialogue.Role.AGENT",
"def get_by_name(self, name: str) -> tp.Optional[RoleType]:\n pass",
"def role_from_first_message(message: Message) -> Dialogue.Role:\n return DefaultDialogue.Role.AGENT",
"def validate_role(context, param, value):\n role = context.obj.api.role_by_name(value)\n if role:\n return role\n else:\n raise click.BadParameter(\"\\\"%s\\\" was not found\" % value)",
"def system_wide_role(self):\n # FIXME: This method should be in `ggrc_basic_permissions`, since it\n # depends on `Role` and `UserRole` objects\n\n if self.email in getattr(settings, \"BOOTSTRAP_ADMIN_USERS\", []):\n return u\"Superuser\"\n\n role_hierarchy = {\n u'Administrator': 0,\n u'Editor': 1,\n u'Reader': 2,\n u'Creator': 3,\n }\n unique_roles = set([\n user_role.role.name\n for user_role in self.user_roles\n if user_role.role.name in role_hierarchy\n ])\n if len(unique_roles) == 0:\n return u\"No Access\"\n else:\n # -1 as default to make items not in this list appear on top\n # and thus shown to the user\n sorted_roles = sorted(unique_roles,\n key=lambda x: role_hierarchy.get(x, -1))\n return sorted_roles[0]",
"def function_lookup(pymod_path):\n module_name, func_name = pymod_path.rsplit('.', 1)\n module = importlib.import_module(module_name)\n shell_function = getattr(module, func_name)\n assert callable(shell_function), shell_function\n return shell_function",
"def role(self, name):\n for r, n in itertools.chain(self._role_to_prop.items(), self._ref_role_to_prop.items()):\n if n == name:\n return r\n else:\n return -1",
"def get_role_class(expected_rolename):\n \n try:\n role_class = ROLE_CLASSES_BY_TYPE[expected_rolename]\n except KeyError:\n raise tuf.FormatError(repr(expected_rolename)+' not supported')\n else:\n return role_class",
"def get_role_name(self):\n try:\n return self.tags['Role']\n except KeyError:\n return None",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")",
"def parse_roles(s: str) -> Roles:\n #return Roles.PEDESTRIAN #stub\n #return ... (s) #template\n \n if s == \"Pedestrian\":\n return Roles.PEDESTRIAN\n elif s == \"Car Driver\":\n return Roles.CAR_DRIVER\n elif s == \"Car Passenger\":\n return Roles.CAR_PASSENGER\n elif s == \"Cyclist\":\n return Roles.CYCLIST \n elif s == \"Other\":\n return Roles.OTHER",
"def get_role(cls, name):\n return cls.query.filter_by(name=name).first()",
"def _get_role(role_name):\n known_roles = kv().get('charm.azure.roles', {})\n if role_name in known_roles:\n return known_roles[role_name]\n sub_id = kv().get('charm.azure.sub-id')\n role_file = Path('files/roles/{}.json'.format(role_name))\n role_data = json.loads(role_file.read_text())\n role_fullname = role_data['Name'].format(sub_id)\n scope = role_data['AssignableScopes'][0].format(sub_id)\n role_data['Name'] = role_fullname\n role_data['AssignableScopes'][0] = scope\n try:\n log('Ensuring role {}', role_fullname)\n _azure('role', 'definition', 'create',\n '--role-definition', json.dumps(role_data))\n except AlreadyExistsAzureError:\n pass\n known_roles[role_name] = role_fullname\n kv().set('charm.azure.roles', known_roles)\n return role_fullname",
"def _existing_only(func):\n\n @wraps(func)\n def _check_existence(db, entity, role=None, *, rolename=None):\n if isinstance(role, str):\n rolename = role\n if rolename is not None:\n # if given as a str, lookup role by name\n role = orm.Role.find(db, rolename)\n if role is None:\n raise ValueError(f\"Role {rolename} does not exist\")\n\n return func(db, entity, role)\n\n return _check_existence",
"def locate_qualified_function(qualified_name: str) -> Callable[[], Iterable[ET]]:\n if \".\" not in qualified_name:\n raise QueryException(\"Could not find a '.' in the function name, e.g. my.reddit.rexport.comments\")\n rdot_index = qualified_name.rindex(\".\")\n return locate_function(qualified_name[:rdot_index], qualified_name[rdot_index + 1:])",
"def _findFunction(self, functionPath):\n\n # Strip module.funcName type paths to deal with earlier versions\n # of the daemon. module is simply thrown away\n parts = functionPath.split(\".\")\n if len(parts)>1:\n calledName = parts[1]\n else:\n calledName = functionPath\n\n if calledName not in self._functions.keys():\n raise xmlrpc.NoSuchFunction(xmlrpc.XMLRPC.NOT_FOUND, \\\n \"Requested function (%s) does not exist!\" % calledName)\n func = self._functions[calledName]\n\n return func",
"def retrieve_role(role_name):\n return RolesManager.retrieve_role(role_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
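The lookup above layers four sources: roles already registered locally, the current language module, the English fallback module, and finally the canonical registry, caching successful hits. A stripped-down illustration of that ordering, with plain dicts standing in for the module-level registries and language modules:

```python
def resolve_role(name, language_roles, english_roles, registry, local_roles):
    """Return the role function for `name`, or None if it cannot be found."""
    normname = name.lower()
    if normname in local_roles:                       # 1. locally registered
        return local_roles[normname]
    canonical = language_roles.get(normname)          # 2. current language
    if canonical is None:
        canonical = english_roles.get(normname, normname)  # 3. English, else literal
    role_fn = registry.get(canonical)                 # 4. canonical registry
    if role_fn is not None:
        local_roles[normname] = role_fn               # cache for the next lookup
    return role_fn
```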
Register an interpreted text role by its canonical name. | def register_canonical_role(name, role_fn):
set_implicit_options(role_fn)
_role_registry[name] = role_fn | [
"def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '",
"def register_token(cls, a_name, a_re, a_type):\n \n if a_type not in cls._token_type:\n raise Exception(\"No token type with name %s has been registered\"%(a_name))\n else:\n cls._token_type[a_type].append(a_name)\n cls._tokens_re[a_name] = a_re",
"def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")",
"def register_role(name):\n role, created = Role.objects.get_or_create(name=name)\n if created:\n return role\n else:\n return False",
"def test_used_as_role_type (self):\n self._test_typed(self.create_role())",
"def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()",
"def register_access_role(cls):\n try:\n role_name = cls.ROLE\n REGISTERED_ACCESS_ROLES[role_name] = cls\n except AttributeError:\n log.exception(\"Unable to register Access Role with attribute 'ROLE'.\")\n return cls",
"def register_as(self, name):\n raise NotImplementedError()\n self.aliases.append(name)\n def gen_aliaser(k, v):\n cherrypy.config['tools.lg_authority.' + k] = v\n def req_handler():\n post_conf = getattr(cherrypy.serving, 'lg_authority_aliased', None)\n if post_conf is not None:\n log('Applying: {0}'.format(post_conf))\n cherrypy.request.namespaces(post_conf)\n log('New config: {0}'.format(cherrypy.request.config))\n def req_aliaser(k, v):\n import traceback\n traceback.print_stack()\n log('Aliasing {0}, {1}'.format(k,v))\n temp = getattr(cherrypy.serving, 'lg_authority_aliased', None)\n if temp is None:\n temp = {}\n cherrypy.serving.lg_authority_aliased = temp\n cherrypy.request.hooks.attach('on_start_resource', req_handler)\n log('Hook attached for aliases')\n temp['tools.lg_authority.' + k] = v\n cherrypy.config.namespaces[name] = gen_aliaser\n cherrypy.Application.namespaces[name] = req_aliaser\n cherrypy.Application.request_class.namespaces[name] = req_aliaser",
"def parse_role(self, s, nac):\n org_name = self.find_first_item(s, ('role',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find role in Role section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Role section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Role section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac",
"def set_role(self, role):\n self.role.set(role)",
"def RegisterName(self, name):\n self._node.RegisterNameForMBox(self, name)",
"def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter",
"def add(self, name, *args, **kwargs):\n members = {}\n for key, role in self._roles.items():\n try:\n member_arg = kwargs.pop(key)\n except KeyError:\n continue\n members[role] = (member_arg,) if isinstance(member_arg, str) else member_arg\n kwargs['members'] = members\n family = _CompilerFamily(self, name, *args, **kwargs)\n self._families[name] = family\n return family",
"def set_role(self, role):\n self.role = role\n for i, _var_ in enumerate(self.variants):\n self.variants[i].role = role",
"def init_role(role): # -> None:\n ...",
"def add_Snode(self,snode):\r\n \r\n if isinstance(snode, Message):\r\n\r\n self.add_role(snode.get_role1())\r\n self.add_role(snode.get_role2())\r\n \r\n if isinstance(snode, Choice):\r\n self.add_role(snode.get_role())\r\n \r\n self.add_node(snode)",
"def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
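A short sketch of registering a custom interpreted-text role through this helper; the role name and node attributes are illustrative, but the role-function signature matches the `pep_role`/`rfc_role` negatives above:

```python
from docutils import nodes
from docutils.parsers.rst import roles

def kbd_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    # Wrap the interpreted text in a literal node carrying a 'kbd' class.
    node = nodes.literal(rawtext, text, classes=['kbd'])
    return [node], []

roles.register_canonical_role('kbd', kbd_role)
```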
Register an interpreted text role by its local or language-dependent name. | def register_local_role(name, role_fn):
set_implicit_options(role_fn)
_roles[name] = role_fn | [
"def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '",
"def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")",
"def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"def register_token(cls, a_name, a_re, a_type):\n \n if a_type not in cls._token_type:\n raise Exception(\"No token type with name %s has been registered\"%(a_name))\n else:\n cls._token_type[a_type].append(a_name)\n cls._tokens_re[a_name] = a_re",
"def register(mgr):\n mgr.set_lang_info(lang,\n silvercity_lexer=XMLLexer(),\n buf_class=XMLBuffer,\n langintel_class=XMLLangIntel,\n import_handler_class=None,\n cile_driver_class=None,\n is_cpln_lang=True)",
"def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"def test_used_as_role_type (self):\n self._test_typed(self.create_role())",
"def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()",
"async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"def register_role(name):\n role, created = Role.objects.get_or_create(name=name)\n if created:\n return role\n else:\n return False",
"def register_processor( self, language, proc):\n self.registry[language] = proc",
"def register_shader(name, **kwargs):\n\n ShaderPart(name, **kwargs)",
"def register(mgr):\n mgr.set_lang_info(\"Less\",\n silvercity_lexer=LessLexer(),\n buf_class=LessBuffer,\n langintel_class=LessLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"SCSS\",\n silvercity_lexer=SCSSLexer(),\n buf_class=SCSSBuffer,\n langintel_class=SCSSLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"Sass\",\n silvercity_lexer=SassLexer(),\n buf_class=SassBuffer,\n langintel_class=SassLangIntel,\n is_cpln_lang=True)",
"def add(self, name, *args, **kwargs):\n members = {}\n for key, role in self._roles.items():\n try:\n member_arg = kwargs.pop(key)\n except KeyError:\n continue\n members[role] = (member_arg,) if isinstance(member_arg, str) else member_arg\n kwargs['members'] = members\n family = _CompilerFamily(self, name, *args, **kwargs)\n self._families[name] = family\n return family",
"def init_role(role): # -> None:\n ...",
"def set_role(self, role):\n self.role = role\n for i, _var_ in enumerate(self.variants):\n self.variants[i].role = role",
"def _add_translation_string(self, *args, **kwargs):\r\n self.stringset.add(GenericTranslation(*args, **kwargs))",
"def test_local_roles():\n vocab = roles.LocalRolesChoices\n\n assert len(vocab) == 9\n assert vocab['system'].value == 'system'\n assert vocab['system'].name == 'system'\n assert vocab['system'].label == 'r:system'",
"def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
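Local registration is the cache that the `role()` lookup above writes into; a minimal illustration of installing a project-specific role under a local name (the role name and CSS class are illustrative):

```python
from docutils import nodes
from docutils.parsers.rst import roles

def highlight_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    # Emit an inline node with a CSS class that a writer or theme can style.
    return [nodes.inline(rawtext, text, classes=['highlight'])], []

# Registered under its local name only; the canonical registry is untouched.
roles.register_local_role('highlight', highlight_role)
```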
Add customization options to role functions, unless explicitly set or disabled. | def set_implicit_options(role_fn):
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option | [
"def experimental_options(self):\n ...",
"def add_experimental_option(self, name, value):\n ...",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")",
"def _extrasetup(self, name, func):",
"def supports_function_admin(self):\n return False # Change to True when implemented.",
"def conf(func):\n\n func._is_conf = True\n return func",
"def manipOptions(hideManipOnShift=bool, pivotRotateHandleOffset=int, refreshMode=int, lineSize=float, showPivotRotateHandle=int, hideManipOnCtrl=bool, hideManipOnShiftCtrl=bool, showPlaneHandles=int, handleSize=float, planeHandleOffset=int, pointSize=float, relative=bool, rememberActiveHandleAfterToolSwitch=bool, scale=float, forceRefresh=bool, preselectHighlight=bool, linePick=float, rememberActiveHandle=bool):\n pass",
"def supports_function_admin(self):\n return # boolean",
"def add_function(self, func):\n self._conf['functions'].append(func)",
"def apply_customization(self, serializer, customization):\n # apply fields or exclude\n if customization.fields is not None:\n if len(customization.fields) == 0:\n # customization fields are empty, set Meta.fields to '__all__'\n serializer.Meta.fields = ALL_FIELDS\n else:\n serializer.Meta.fields = customization.fields\n if customization.exclude is not None:\n serializer.Meta.exclude = customization.exclude\n\n # apply extra_kwargs\n if customization.extra_kwargs is not None:\n serializer.Meta.extra_kwargs = customization.extra_kwargs\n\n # apply validate_methods\n for method_name, method in customization.validate_methods.items():\n setattr(serializer, method_name, method)",
"def _configure_iam_role(config, crm, iam):\n config = copy.deepcopy(config)\n\n email = SKYPILOT_SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=SKYPILOT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"],\n )\n service_account = _get_service_account(email, config, iam)\n\n permissions = VM_MINIMAL_PERMISSIONS\n roles = DEFAULT_SERVICE_ACCOUNT_ROLES\n if config[\"provider\"].get(HAS_TPU_PROVIDER_FIELD, False):\n roles = DEFAULT_SERVICE_ACCOUNT_ROLES + TPU_SERVICE_ACCOUNT_ROLES\n permissions = VM_MINIMAL_PERMISSIONS + TPU_MINIMAL_PERMISSIONS\n\n satisfied, policy = _is_permission_satisfied(\n service_account, crm, iam, permissions, roles\n )\n\n if not satisfied:\n # SkyPilot: Fallback to the old ray service account name for\n # backwards compatibility. Users using GCP before #2112 have\n # the old service account setup setup in their GCP project,\n # and the user may not have the permissions to create the\n # new service account. This is to ensure that the old service\n # account is still usable.\n email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=DEFAULT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"],\n )\n logger.info(f\"_configure_iam_role: Fallback to service account {email}\")\n\n ray_service_account = _get_service_account(email, config, iam)\n ray_satisfied, _ = _is_permission_satisfied(\n ray_service_account, crm, iam, permissions, roles\n )\n logger.info(\n \"_configure_iam_role: \"\n f\"Fallback to service account {email} succeeded? {ray_satisfied}\"\n )\n\n if ray_satisfied:\n service_account = ray_service_account\n satisfied = ray_satisfied\n elif service_account is None:\n logger.info(\n \"_configure_iam_role: \"\n \"Creating new service account {}\".format(SKYPILOT_SERVICE_ACCOUNT_ID)\n )\n # SkyPilot: a GCP user without the permission to create a service\n # account will fail here.\n service_account = _create_service_account(\n SKYPILOT_SERVICE_ACCOUNT_ID,\n SKYPILOT_SERVICE_ACCOUNT_CONFIG,\n config,\n iam,\n )\n satisfied, policy = _is_permission_satisfied(\n service_account, crm, iam, permissions, roles\n )\n\n assert service_account is not None, \"Failed to create service account\"\n\n if not satisfied:\n logger.info(\n \"_configure_iam_role: \" f\"Adding roles to service account {email}...\"\n )\n _add_iam_policy_binding(service_account, policy, crm, iam)\n\n account_dict = {\n \"email\": service_account[\"email\"],\n # NOTE: The amount of access is determined by the scope + IAM\n # role of the service account. Even if the cloud-platform scope\n # gives (scope) access to the whole cloud-platform, the service\n # account is limited by the IAM rights specified below.\n \"scopes\": [\"https://www.googleapis.com/auth/cloud-platform\"],\n }\n if _is_head_node_a_tpu(config):\n # SKY: The API for TPU VM is slightly different from normal compute instances.\n # See https://cloud.google.com/tpu/docs/reference/rest/v2alpha1/projects.locations.nodes#Node\n account_dict[\"scope\"] = account_dict[\"scopes\"]\n account_dict.pop(\"scopes\")\n config[\"head_node\"][\"serviceAccount\"] = account_dict\n else:\n config[\"head_node\"][\"serviceAccounts\"] = [account_dict]\n\n return config",
"def decorate(f, **kwargs):\n f = debug_option(f)\n f = verbose_option(f)\n f = click.help_option(\"-h\", \"--help\")(f)\n\n # if the format option is being allowed, it needs to be applied to `f`\n if \"format\" not in disable_opts:\n f = format_option(f)\n\n # if the --map-http-status option is being allowed, ...\n if \"map_http_status\" not in disable_opts:\n f = map_http_status_option(f)\n\n return f",
"def add_options(config):\n return config[\"module\"][\"application\"].add_options(config)",
"async def addRole(self, ctx, role: discord.Role):\n guild = self.bot.cache.get_setting(ctx.guild.id)\n allowed_roles = guild.allowed_roles\n if not allowed_roles:\n roles = []\n roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)\n return\n if role.id in allowed_roles:\n return await ctx.reply(\n f\":negative_squared_cross_mark: | `{role.name}` role already has permissions to make announcements!\"\n )\n allowed_roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n allowed_roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": allowed_roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` role can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)",
"def setup_function(func):\n func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)",
"async def _setfullaccessrole(self, ctx: commands.Context, role: discord.Role):\n await self.config.guild(ctx.guild).fullaccessrole.set(role.id)\n await ctx.send(f\"Full rcon access role has been set to {role}\")",
"async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)",
"async def readd_roles(self, ctx):\n config = hf.database_toggle(ctx, self.bot.db['readd_roles'])\n if config['enable']:\n if not ctx.me.guild_permissions.manage_roles:\n await ctx.send(\"I lack permission to manage roles. Please fix that before enabling this\")\n hf.database_toggle(ctx, self.bot.db['readd_roles'])\n return\n await ctx.send(f\"I will readd roles to people who have previously left the server\")\n else:\n await ctx.send(\"I will NOT readd roles to people who have previously left the server\")\n if 'users' not in config:\n config['users'] = {}\n await hf.dump_json()",
"def common_options(*args, **kwargs):\n\n disable_opts = kwargs.get(\"disable_options\", [])\n\n def decorate(f, **kwargs):\n \"\"\"\n Work of actually decorating a function -- wrapped in here because we\n want to dispatch depending on how `common_options` is invoked\n \"\"\"\n f = debug_option(f)\n f = verbose_option(f)\n f = click.help_option(\"-h\", \"--help\")(f)\n\n # if the format option is being allowed, it needs to be applied to `f`\n if \"format\" not in disable_opts:\n f = format_option(f)\n\n # if the --map-http-status option is being allowed, ...\n if \"map_http_status\" not in disable_opts:\n f = map_http_status_option(f)\n\n return f\n\n return detect_and_decorate(decorate, args, kwargs)",
"def test_roles_decorator_overrides_env_roles():\n @roles('r1')\n def command():\n pass\n eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles,\n 'roles': ['r2']})"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
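The effect of this helper, which the `register_*` functions above apply automatically, is to guarantee that every role function at least accepts the standard `:class:` customization option. A small demonstration, assuming the docutils package is importable:

```python
from docutils.parsers.rst import directives, roles

def plain_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    return [], []

assert not hasattr(plain_role, 'options')  # nothing declared by the author

roles.set_implicit_options(plain_role)     # normally invoked during registration

assert plain_role.options == {'class': directives.class_option}
```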
For roles which simply wrap a given `node_class` around the text. | def register_generic_role(canonical_name, node_class):
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role) | [
"def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname",
"def node_roles(node):\n return \"_\".join(sorted(node[\"roles\"]))",
"def add_child_classes(node):\n for para in node.traverse(nodes.paragraph):\n para[\"classes\"] = ([] if \"classes\" in para else para[\"classes\"]) + [\"card-text\"]\n for title in node.traverse(nodes.title):\n title[\"classes\"] = ([] if \"classes\" in title else title[\"classes\"]) + [\n \"card-title\"\n ]",
"def expand_roles(self):\n for i in range(len(self.roles)):\n role = self.roles[i]\n if role in NodeLayout.DEPRECATED_ROLES:\n AppScaleLogger.warn(\"'{}' role has been deprecated, please use '{}'\"\n .format(role, NodeLayout.DEPRECATED_ROLES[role]))\n self.roles.remove(role)\n self.roles.append(NodeLayout.DEPRECATED_ROLES[role])\n\n if 'master' in self.roles:\n self.roles.remove('master')\n self.roles.append('shadow')\n self.roles.append('load_balancer')\n\n # TODO: remove these, db_slave and taskqueue_slave are currently deprecated.\n if 'db_slave' in self.roles or 'db_master' in self.roles \\\n and 'database' not in self.roles:\n self.roles.append('database')\n\n if 'taskqueue_slave' in self.roles or 'taskqueue_master' in self.roles \\\n and 'taskqueue' not in self.roles:\n self.roles.append('taskqueue')\n\n # Remove any duplicate roles\n self.roles = list(set(self.roles))",
"def insert_role_node(self, node):\n self._insert_child(node)\n return self",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")",
"def add_class(self, value: str) -> HTMLNode:\n return self.add_attr(\"class\", value)",
"def add_Snode(self,snode):\r\n \r\n if isinstance(snode, Message):\r\n\r\n self.add_role(snode.get_role1())\r\n self.add_role(snode.get_role2())\r\n \r\n if isinstance(snode, Choice):\r\n self.add_role(snode.get_role())\r\n \r\n self.add_node(snode)",
"def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)",
"def massage_roles(self):\n if not self.opts.role:\n self.guess_role()\n if self.opts.role:\n self.opts.role = [xx.lower() for xx in self.opts.role]\n for role in [nrole for nrole in VALID_ROLES\n if nrole[:4] == 'node']:\n if role in self.opts.role and not 'node' in self.opts.role:\n self.opts.role.append('node')\n if 'broker' in self.opts.role and not 'client' in self.opts.role:\n self.opts.role.append('client')\n self.logger.info('Please note: --role=broker implicitly '\n 'enables --role=client to ensure /usr/bin/rhc '\n 'is available for testing and '\n 'troubleshooting.')",
"def add_tempest_roles():\n _add_additional_roles(TEMPEST_ROLES)",
"def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []",
"def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role = \"Rep Link\"\n\n return role",
"def replace_cluster_role(self, name, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_cluster_role_with_http_info(name, body, **kwargs)\n else:\n (data) = self.replace_cluster_role_with_http_info(name, body, **kwargs)\n return data",
"def nodeOutliner(string, replace=\"string\", docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", lastMenuChoice=\"string\", numberOfPopupMenus=bool, connectivity=\"string\", width=int, dragCallback=\"string\", showConnectedOnly=bool, highlightColor=float, annotation=\"string\", enable=bool, longNames=bool, preventOverride=bool, nodesDisplayed=bool, showNonKeyable=bool, showInputs=bool, showOutputs=bool, attrAlphaOrder=\"string\", pressHighlightsUnconnected=bool, menuCommand=\"string\", exists=bool, showPublished=bool, showNonConnectable=bool, showHidden=bool, multiSelect=bool, addObject=\"string\", niceNames=bool, enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, useTemplate=\"string\", noBackground=bool, fullPathName=bool, dropCallback=\"string\", selectCommand=\"string\", popupMenuArray=bool, addCommand=\"string\", removeAll=bool, backgroundColor=float, noConnectivity=bool, manage=bool, showReadOnly=bool, menuMultiOption=bool, isObscured=bool, currentSelection=bool, remove=\"string\"):\n pass",
"def spelling_ignore(role, rawtext, text, lineno, inliner,\n options={}, content=[]):\n node = nodes.Text(text)\n setattr(node, \"spellingIgnore\", True)\n return [node], []",
"def _process_wrap_node(\n self,\n wrap_node: nodes.Element,\n token: SyntaxTreeNode,\n explicit: bool,\n classes: list[str],\n path_dest: str,\n ):\n self.add_line_and_source_path(wrap_node, token)\n self.copy_attributes(token, wrap_node, (\"class\", \"id\", \"title\"))\n self.current_node.append(wrap_node)\n\n if explicit:\n inner_node = nodes.inline(\"\", \"\", classes=classes)\n with self.current_node_context(inner_node):\n self.render_children(token)\n elif isinstance(wrap_node, addnodes.download_reference):\n inner_node = nodes.literal(path_dest, path_dest, classes=classes)\n else:\n inner_node = nodes.inline(\"\", \"\", classes=classes)\n\n wrap_node.append(inner_node)",
"def wrap_namespace(self, node):\n self._push_splicer(\"class\")\n for cls in node.classes:\n if not cls.wrap.lua:\n continue\n name = cls.name\n self.reset_file()\n self._push_splicer(name)\n self.wrap_class(cls)\n # self.write_extension_type(cls)\n self._pop_splicer(name)\n self._pop_splicer(\"class\")\n\n self.reset_file()\n if node.functions:\n self._push_splicer(\"function\")\n self.wrap_functions(None, node.functions)\n self._pop_splicer(\"function\")\n\n for ns in node.namespaces:\n if ns.wrap.lua:\n self.wrap_namespace(ns)",
"def addNodeType(self, nodeClass, paths, override=True):\n return NodeLibrary.addNodeType(self, nodeClass=nodeClass, paths=paths, override=override)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
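This is the helper with which the standard simple roles (emphasis, strong, subscript, and so on) are wired up: the generated role does nothing more than wrap the given node class around the interpreted text. A sketch of equivalent registrations:

```python
from docutils import nodes
from docutils.parsers.rst import roles

# Each call builds a GenericRole and installs it under its canonical name.
roles.register_generic_role('strong', nodes.strong)
roles.register_generic_role('emphasis', nodes.emphasis)
roles.register_generic_role('subscript', nodes.subscript)
```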
Auxiliary function to set options['classes'] and delete options['class']. | def set_classes(options):
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class'] | [
"def reset_class(self, classes):\n self._clear_cached_op()\n self.classes = classes\n self.num_class = len(classes)",
"def remove_class(class_id):\r\n return 200",
"def __init__ (self, options=[]):\r\n for opt in options:\r\n setattr(self, opt, None)",
"def remove_classes(\n seg: np.ndarray,\n rm_classes: Sequence[int],\n classes: Dict[int, int] = None,\n background: int = 0,\n ) -> Union[np.ndarray, Tuple[np.ndarray, Dict[int, int]]]:\n for rc in rm_classes:\n seg[seg == rc] = background\n if classes is not None:\n classes.pop(rc)\n if classes is None:\n return seg\n else:\n return seg, classes",
"def remove_class(self, value: str) -> HTMLNode:\n return self.remove_attr(\"class\", value)",
"def _reset_options(self, cc: int):\n if cc != self.current_cc:\n if cc not in _generator_ccs:\n raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')\n self.current_cc = cc\n self.options = option_registry.options_for_cc(self.current_cc, self.operation_kind)",
"def removeClass(node, name):\n s = node.getAttribute(\"class\")\n if s:\n classes = s.split(None)\n classes.remove(name)\n if classes:\n node.setAttribute(\"class\", ' '.join(classes))\n else:\n node.removeAttribute(\"class\")",
"def _reset_class_weights(self):\n classifier = self._get_classifier()\n \n for attr in \"class_prior\", \"class_weight\":\n if hasattr(classifier, attr):\n setattr(classifier, attr, None)\n break",
"def remove_css_classes(self, *css_classes):\n for cls in css_classes:\n try:\n self._css_classes.remove(cls)\n except KeyError:\n pass",
"def remove_class(self, klass):\n self.attrs.remove_value(self.AttributeNames.CLASS, klass)\n return self",
"def cmd_remove_mo_class(self):\n self.save()\n self.add_remove(self.OPERATION_REMOVE_MO_CLASS)\n self.quit()",
"def set_class_attr(\n self, class_name: str, attr: str, value: Any\n ) -> \"DatacodeOptions\":\n import datacode as dc\n\n logger.debug(\n f\"Setting datacode options for class attr {class_name}.{attr} to {value}\"\n )\n\n klass = getattr(dc, class_name)\n self._set_class_attr(klass, attr, value)\n return self",
"def set_options(self, options):\n self.n_iter = options['n_iterations']\n self.k_folds = options['k_folds']\n self.splitting_method = 'kfold'\n self._split_type = 'user'\n self.evaluator.set_kfolds(self.k_folds)\n\n if self.k_folds == 1:\n self.splitting_method = 'naive'\n self.options = options.copy()\n\n for option, value in options.items():\n if hasattr(self, '_' + option):\n setattr(self, '_' + option, value)",
"def test_swarm_updates_parsed_options_when_single_userclass_specified(self):\n\n class User1(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n class User2(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n self.environment.web_ui.userclass_picker_is_active = True\n self.environment.available_user_classes = {\"User1\": User1, \"User2\": User2}\n\n response = requests.post(\n \"http://127.0.0.1:%i/swarm\" % self.web_port,\n data={\n \"user_count\": 5,\n \"spawn_rate\": 5,\n \"host\": \"https://localhost\",\n \"user_classes\": [\"User1\"],\n },\n )\n self.assertListEqual([\"User1\"], response.json()[\"user_classes\"])\n\n # stop\n gevent.sleep(1)\n response = requests.get(\"http://127.0.0.1:%i/stop\" % self.web_port)\n self.assertEqual(response.json()[\"message\"], \"Test stopped\")\n\n # Checking environment.parsed_options.user_classes was updated\n self.assertListEqual(self.environment.parsed_options.user_classes, [\"User1\"])",
"def remove_error_class(klass):\n if isinstance(klass, python.str_types):\n if klass not in ERROR_CLASS_MAP:\n raise ValueError('Code %s is not registered' % (klass,))\n elif isinstance(klass, python.class_types):\n classes = ERROR_CLASS_MAP.values()\n if klass not in classes:\n raise ValueError('Class %s is not registered' % (klass,))\n\n klass = ERROR_CLASS_MAP.keys()[classes.index(klass)]\n else:\n raise TypeError(\"Invalid type, expected class or string\")\n\n del ERROR_CLASS_MAP[klass]",
"def unregister():\n for c in classes:\n bpy.utils.unregister_class(c)\n ui.reset_avatar_properties()",
"def reset_class(cls):\n cls.infected.clear()\n cls.target = None",
"def test_swarm_updates_parsed_options_when_multiple_userclasses_specified(self):\n\n class User1(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n class User2(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n self.environment.web_ui.userclass_picker_is_active = True\n self.environment.available_user_classes = {\"User1\": User1, \"User2\": User2}\n\n response = requests.post(\n \"http://127.0.0.1:%i/swarm\" % self.web_port,\n data={\n \"user_count\": 5,\n \"spawn_rate\": 5,\n \"host\": \"https://localhost\",\n \"user_classes\": [\"User1\", \"User2\"],\n },\n )\n self.assertListEqual([\"User1\", \"User2\"], response.json()[\"user_classes\"])\n\n # stop\n gevent.sleep(1)\n response = requests.get(\"http://127.0.0.1:%i/stop\" % self.web_port)\n self.assertEqual(response.json()[\"message\"], \"Test stopped\")\n\n # Checking environment.parsed_options.user_classes was updated\n self.assertListEqual(self.environment.parsed_options.user_classes, [\"User1\", \"User2\"])",
"def remove_feature_classes():\r\n arcpy.env.workspace = OUTPUT_WORKSPACE\r\n feature_classes = arcpy.ListFeatureClasses(\"*\")\r\n\r\n for fc in feature_classes:\r\n count1 = str(arcpy.GetCount_management(fc))\r\n if count1 == \"0\":\r\n fclass = r\"{}\\{}\".format(OUTPUT_WORKSPACE, fc)\r\n arcpy.Delete_management(fclass)",
"def set_hypercube_class(self):\n self.class_dict = dict.fromkeys(list(set([x.class_id for x in self.examples])), 0)\n old_class = self.hypercube_class\n if not self.examples:\n self.hypercube_class = EMPTY_HYPERCUBE_INDICATOR\n else:\n max_class = -1\n for class_id in self.class_dict.keys():\n class_size = len(list(filter(lambda x: x.class_id == class_id, self.examples)))\n # adding the number of examples to the class\n self.class_dict[class_id] += class_size\n if class_size > max_class:\n max_class = class_size\n self.hypercube_class = class_id\n if not old_class == self.hypercube_class:\n print(\"Changed hypercube's class!\\tCoords: \" + str(\n self.coords) + \"\\tOld class: \" + old_class + \"\\tNew class: \" + self.hypercube_class)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse `input_lines` and modify the `document` node in place. | def run(self, input_lines, document, input_offset=0, match_titles=True,
        inliner=None):
    self.language = languages.get_language(
        document.settings.language_code)
    self.match_titles = match_titles
    if inliner is None:
        inliner = Inliner()
    inliner.init_customizations(document.settings)
    self.memo = Struct(document=document,
                       reporter=document.reporter,
                       language=self.language,
                       title_styles=[],
                       section_level=0,
                       section_bubble_up_kludge=False,
                       inliner=inliner)
    self.document = document
    self.attach_observer(document.note_source)
    self.reporter = self.memo.reporter
    self.node = document
    results = StateMachineWS.run(self, input_lines, input_offset,
                                 input_source=document['source'])
    assert results == [], 'RSTStateMachine.run() results should be empty!'
    self.node = self.memo = None # remove unneeded references
| [
"def process(self, lines):\n for line in lines:\n self._process_line(line)",
"def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)",
"def updateLineParsing(self):\n self.titleLine = self.parseLine(self.getTitleLine())\n self.outputLines = [self.parseLine(line) for line in\n self.getOutputLines(False)]\n if self.origOutputLines:\n self.origOutputLines = [self.parseLine(line) for line in\n self.getOutputLines(True)]",
"def _mux(docs: list, process_stdin: IO, q: queue.Queue):\n for i, doc in enumerate(docs):\n count = 0\n sents = doc.strip().split('\\n')\n for line in sents:\n line = line + '\\n'\n process_stdin.write(line.encode('utf-8'))\n count += 1\n q.put((i, count))\n q.put(None) #poison\n process_stdin.close()",
"def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n root = markdown.etree.Element(\"div\")\r\n self.parseChunk(root, '\\n'.join(lines))\r\n return markdown.etree.ElementTree(root)",
"def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)",
"def changeOutputLines(self, lines, keepBlanks=False):\n self.outputLines = []\n for line in lines:\n newLine = self.parseLine(line)\n if keepBlanks or newLine:\n self.outputLines.append(newLine)\n if self.useBullets:\n self.origOutputLines = self.outputLines[:]\n self.addBullets()\n if self.useTables:\n self.origOutputLines = self.outputLines[:]\n self.addTables()",
"def process_lines(self, lines, file):\n return lines",
"def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.document().markContentsDirty(block.position(),\r\n block.position() + block.length())\r\n self.rehighlightBlock(block)",
"def applyEventsToLines(lines, events):\n for event in events:\n for change in event[\"changes\"]:\n ix = change[\"lineIndex\"]\n lines[ix : ix + len(change[\"oldLines\"])] = change[\"newLines\"]",
"def import_doc(client: Client, input: list[str]):\n if not client.is_connected:\n ctx = click.get_current_context()\n ctx.fail(\"Import failed: Not connected to a neo4j instance.\")\n for fp in input:\n graph = read_doc(fp)\n client.import_doc(graph)",
"def _process_lines(lines: typing.List[str], offset: int, registration_processor: RegistrationProcessor):\n\n onnx_op = \"ONNX_OPERATOR_KERNEL_CLASS_NAME\"\n onnx_op_len = len(onnx_op)\n onnx_typed_op = \"ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME\"\n onnx_typed_op_len = len(onnx_typed_op)\n onnx_versioned_op = \"ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME\"\n onnx_versioned_op_len = len(onnx_versioned_op)\n onnx_versioned_typed_op = \"ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_typed_op_len = len(onnx_versioned_typed_op)\n onnx_two_typed_op = \"ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_two_typed_op_len = len(onnx_two_typed_op)\n onnx_versioned_two_typed_op = \"ONNX_OPERATOR_VERSIONED_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_two_typed_op_len = len(onnx_versioned_two_typed_op)\n end_marks = tuple([\");\", \")>\", \")>,\", \")>,};\", \")>};\"])\n\n end_mark = \"\"\n lines_to_process = []\n\n # merge line if split over multiple.\n # original lines will be in lines_to_process. merged and stripped line will be in code_line\n while True:\n lines_to_process.append(lines[offset])\n stripped = lines[offset].strip()\n line_end = False\n\n for mark in end_marks:\n if stripped.endswith(mark):\n end_mark = mark\n line_end = True\n break\n\n if line_end:\n break\n\n offset += 1\n if offset > len(lines):\n log.error(\"Past end of input lines looking for line terminator.\")\n sys.exit(-1)\n\n code_line = \"\".join([line.strip() for line in lines_to_process])\n\n if onnx_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, Cos)>,\n trim_at = code_line.index(onnx_op) + onnx_op_len + 1\n *_, domain, start_version, op_type = (arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\"))\n\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, None)\n\n elif onnx_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, double, Sin)>,\n trim_at = code_line.index(onnx_typed_op) + onnx_typed_op_len + 1\n *_, domain, start_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, type)\n\n elif onnx_versioned_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, Hardmax)>,\n trim_at = code_line.index(onnx_versioned_op) + onnx_versioned_op_len + 1\n *_, domain, start_version, end_version, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), None\n )\n\n elif onnx_versioned_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,\n trim_at = code_line.index(onnx_versioned_typed_op) + onnx_versioned_typed_op_len + 1\n *_, domain, start_version, end_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type\n )\n\n elif onnx_two_typed_op in code_line:\n # e.g. 
BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_two_typed_op) + onnx_two_typed_op_len + 1\n *_, domain, start_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), None, type1 + \", \" + type2\n )\n\n elif onnx_versioned_two_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_versioned_two_typed_op) + onnx_versioned_two_typed_op_len + 1\n *_, domain, start_version, end_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type1 + \", \" + type2\n )\n\n else:\n log.warning(f\"Ignoring unhandled kernel registration variant: {code_line}\")\n for line in lines_to_process:\n registration_processor.process_other_line(line)\n\n return offset + 1",
"def update_lines(self, lines, update_org_width=False):\n self._org_lines = lines\n self.init_layout(self._height, self._org_width, 0, update_org_width=update_org_width)",
"def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp",
"def process_tokens(self, tokens):\n self._tokens = list(tokens)\n self._pos = 0\n self._ast = self._assert(self._chunk(), 'input to be a program')\n self._ast.store_token_groups(self._tokens)",
"def update(self, new_documents: List[str]):\n if len(new_documents) > 0:\n tokenized_docs = [self.tokenizer(doc) for doc in new_documents]\n # for some reason mypy doesn't get that this converts from a list of tuples to a tuple of lists\n words, positions = map(list, zip(*tokenized_docs)) # type: ignore\n else:\n words, positions = [], []\n self.index.add_documents(words, positions)\n self.documents += new_documents\n self.tokenized_documents += words\n for ngram, index in self.ngram_indexes.items():\n ngram_tok = core.ngrams_from_documents(\n words,\n ngram,\n self.ngram_sep,\n self.ngram_prefix,\n self.ngram_suffix,\n )\n index.index.add_documents(ngram_tok)",
"def parse(cls, input):",
"def preproc_doc(document):\n\n # Each document is a list of lines\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # set a random seed for reproducability\n hash_object = hashlib.md5(document[0])\n rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))\n\n # Each document is composed of a list of text lines. Each text line is a\n # paragraph. We split the line into sentences but keep the paragraph grouping.\n # The utility functions below expect the document to be split by paragraphs.\n list_of_paragraphs = []\n for line in document:\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n sent_tokens = [tokenizer.tokenize(sent) for sent in sents if sent]\n list_of_paragraphs.append(sent_tokens)\n\n # In case of any empty paragraphs, remove them.\n list_of_paragraphs = [x for x in list_of_paragraphs if x]\n\n # Convert the list of paragraphs into TrainingInstance object\n # See preprocessing_utils.py for definition\n if FLAGS.format == FORMAT_BINARY:\n instances = create_instances_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n elif FLAGS.format == FORMAT_PARAGRAPH:\n instances = create_paragraph_order_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_examples = [\n convert_instance_to_tf_example(tokenizer, instance,\n FLAGS.max_seq_length)[0]\n for instance in instances\n ]\n\n # Serialize TFExample for writing to file.\n tf_examples = [example.SerializeToString() for example in tf_examples]\n\n return tf_examples",
"def _set_input(self, input_: str):\n if len(input_) != 0:\n rows = input_.split('\\n')\n self.command = []\n self.params = []\n for cmd in rows:\n split = cmd.split()\n self.command.append(split[0])\n self.params.append(int(split[1]))",
"def __process_input_file(self, output):\n with open(self.input_file, 'r') as f:\n for line in f:\n if line.replace(' ', '') == \"\\\"playlists\\\":[\\n\":\n # playlist_start = True\n output.write(line)\n self.__process_playlist(f, output)\n else:\n output.write(line)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to input line `abs_line_offset`, ignoring jumps past the end. | def goto_line(self, abs_line_offset):
    try:
        self.state_machine.goto_line(abs_line_offset)
    except EOFError:
        pass | [
"def jump_to_line(self, lineno=None):\r\n if lineno is not None:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(lineno)\r\n return\r\n\r\n maximum = self.blockCount()\r\n line = QInputDialog.getInt(self, self.tr(\"Jump to Line\"),\r\n self.tr(\"Line:\"), 1, 1, maximum, 1)\r\n if line[1]:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(line[0] - 1)",
"def jump_to_line(self, lineno):\r\n self._main.editor_jump_to_line(lineno=lineno)",
"def goToLine(self, lineno):\n # Go to start and move pointer to given line no\n self.goToStart()\n line_count = 1\n eof = False\n pos = 0\n while not eof and line_count != lineno:\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n pos = self.file_obj.tell()\n line_count += 1\n\n self.line_no = line_count\n self.offset = pos",
"def editor_go_to_line(self, line):\r\n editorWidget = self.get_current_editor()\r\n if editorWidget:\r\n editorWidget.jump_to_line(line)",
"def init_goto_line(self):\n self.local_state = State.GOTO_LINE\n self.setFocus()\n self.setValidator(self.int_validator)",
"def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)",
"def _validate_lineno(self, target_line):\n if target_line < 1:\n raise AtTopOfFile()\n elif target_line > self.number_of_lines():\n raise PastEndOfBuffer(str(target_line))",
"def jmp(self, offset):\n self.ip += int(offset)",
"def goto(self, line):\n\n self._text.mark_set(tk.INSERT, '%s.1' % line)\n self.highlight_line(line)\n self.update_info_bar()\n self.update_line_numbers()",
"def op_jump(self, offset):\n\n old_pc = self._opdecoder.program_counter\n\n # The offset to the jump instruction is known to be a 2-byte\n # signed integer. We need to make it signed before applying\n # the offset.\n if (offset >= (1<<15)):\n offset = - (1<<16) + offset\n log(\"Jump unconditionally to relative offset %d\" % offset)\n\n # Apparently reading the 2 bytes of operand *isn't* supposed\n # to increment the PC, thus we need to apply this offset to PC\n # that's still pointing at the 'jump' opcode. Hence the -2\n # modifier below.\n new_pc = self._opdecoder.program_counter + offset - 2\n self._opdecoder.program_counter = new_pc\n log(\"PC has changed from from %x to %x\" % (old_pc, new_pc))",
"def breakpoint(self, line):\n\n source = self.chrome.driver.find_element(By.ID, \"sources-panel-sources-view\")\n assert source is not None, \"Failed to find sources.\"\n lines = source.find_elements(By.CSS_SELECTOR, \"div[class=\\'CodeMirror-linenumber CodeMirror-gutter-elt\\']\")\n length = len(lines)\n assert len(lines) >= line, \"Line {0} not found! Total lines of code: {1}\".format(str(line), str(length))\n lines[line - 1].click()\n sleep(1)\n Log.info(\"Toggle breakpoint on line {0}\".format(str(line)))",
"def offset_from_line(line, firstlineno, lnotab):\n # TODO: Handle negetive offsets!\n n = len(lnotab)\n assert n & 1 == 0\n\n l = firstlineno\n tab = lnotab\n offset = 0\n index = 0\n while tab:\n index += 1\n b, d, *tab = tab\n l += d\n offset += b\n if l >= line:\n return offset, index\n raise IndexError(\"Line out of bound\")",
"def jump_to(self, bytes):\n new_pos = self.find(bytes, max(0, self.position))\n if new_pos > -1:\n new_pos -= self.position\n if self._position == -1:\n self._position = 0\n self._position += (new_pos + len(bytes) - 1)\n return True\n else:\n raise StopIteration",
"def gotoLine(self, n):\n self.fileIndex = n",
"def prev_line(rule):\n return shift_line(-1, rule)",
"def select_line(self, line):\n cursor = self.textCursor()\n cursor.movePosition(QTextCursor.Start, QTextCursor.MoveAnchor, 1)\n cursor.movePosition(QTextCursor.Down, QTextCursor.MoveAnchor, line-1)\n self.setTextCursor(cursor)",
"def go(self, offset: int) -> None:\n if len(self) == 0:\n return\n if offset < 0:\n offset = len(self) + offset\n if offset < 0:\n offset = 0\n if offset > len(self) - 1:\n offset = len(self) - 1\n self.focus.flow = self[offset]",
"def next_line(rule):\n return shift_line(1, rule)",
"def get_line_jump_seq(self):\n line_jump_seq = \"\"\n if not world.config.no_ansi and not world.config.no_line_jump and not world.config.write_steps_once:\n line_jump_seq = \"\\r\\033[A\\033[K\"\n return line_jump_seq",
"def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new StateMachine rooted at `node` and run it over the input `block`. | def nested_parse(self, block, input_offset, node, match_titles=False,
                 state_machine_class=None, state_machine_kwargs=None):
    use_default = 0
    if state_machine_class is None:
        state_machine_class = self.nested_sm
        use_default += 1
    if state_machine_kwargs is None:
        state_machine_kwargs = self.nested_sm_kwargs
        use_default += 1
    block_length = len(block)
    state_machine = None
    if use_default == 2:
        try:
            state_machine = self.nested_sm_cache.pop()
        except IndexError:
            pass
    if not state_machine:
        state_machine = state_machine_class(debug=self.debug,
                                            **state_machine_kwargs)
    state_machine.run(block, input_offset, memo=self.memo,
                      node=node, match_titles=match_titles)
    if use_default == 2:
        self.nested_sm_cache.append(state_machine)
    else:
        state_machine.unlink()
    new_offset = state_machine.abs_line_offset()
    # No `block.parent` implies disconnected -- lines aren't in sync:
    if block.parent and (len(block) - block_length) != 0:
        # Adjustment for block if modified in nested parse:
        self.state_machine.next_line(len(block) - block_length)
    return new_offset | [
"async def mine_new_block():\n block = await self.create_block_async_func(Address.create_empty_account())\n if not block:\n self.input_q.put((None, {}))\n return\n mining_params = self.get_mining_param_func()\n mining_params[\"consensus_type\"] = self.consensus_type\n # handle mining simulation's timing\n if \"target_block_time\" in mining_params:\n target_block_time = mining_params[\"target_block_time\"]\n mining_params[\"target_time\"] = (\n block.header.create_time\n + self._get_block_time(block, target_block_time)\n )\n work = MiningWork(\n block.header.get_hash_for_mining(),\n block.header.height,\n block.header.difficulty,\n )\n self.work_map[work.hash] = block\n if self.process:\n self.input_q.put((work, mining_params))\n return\n\n self.process = AioProcess(\n target=self.mine_loop,\n args=(work, mining_params, self.input_q, self.output_q),\n )\n self.process.start()\n await handle_mined_block()",
"def build_from_python(self, node, _id=None):\n if _id is None:\n self._check_and_raise(node, 'id')\n _id = node['id']\n self._check_and_raise(node, 'type', ' for node ' + _id)\n _type = copy.copy(node['type'])\n if _type == 'action':\n self._check_and_raise(node, 'script', ' for node ' + _id)\n return leaf.Action(name=_id, memory=self.memory, expression=node['script'])\n elif _type == 'condition':\n params = ['expression', 'true_state', 'false_state']\n self._check_and_raise(node, params, ' for node ' + _id)\n node_copy = copy.copy(node)\n for state in ['true_state', 'false_state']:\n if isinstance(node[state], str):\n node_copy[state] = State.from_str(node[state])\n return leaf.Condition(name=_id, memory=self.memory, **dict((k, node_copy[k]) for k in params))\n elif _type in ['sequence', 'fallback', 'skipper', 'selector']:\n if _type == 'selector':\n _type = 'fallback'\n seq = sequential.Sequential(skip_state=sequential.Sequential.Names[_type], name=_id, memory=self.memory)\n self._check_and_raise(node, 'children', ' for node ' + _id)\n seq.children = node['children']\n return seq",
"def execute_sm(self):\n rospy.loginfo(\"start state machine...\")\n return self.sm.execute()",
"def make_sm(self):\n return smach.StateMachine(outcomes=['succeeded','aborted','preempted'])",
"def start(self):\r\n if self.initial_state == None: # Check that an initial state was declared\r\n raise RuntimeError(\"No initial state set on the state machine.\")\r\n\r\n self.current_state = self.initial_state\r\n\r\n for state in self.states.values():\r\n state.generator = state.handler_func(self)\r\n next(state.generator) # start up the co-routine\r",
"def ex_run_node(self, node):\r\n # Refresh node state\r\n e_vm = self.connection.request(node.extra['uri_id']).object\r\n state = e_vm.findtext('state')\r\n\r\n if state != 'NOT_ALLOCATED':\r\n raise LibcloudError('Invalid Node state', self)\r\n\r\n # --------------------------------------------------------\r\n # Deploy the Node\r\n # --------------------------------------------------------\r\n self._deploy_remote(e_vm)\r\n\r\n # --------------------------------------------------------\r\n # Retrieve it again, to get some schedule-defined\r\n # values.\r\n # --------------------------------------------------------\r\n edit_vm = get_href(e_vm, 'edit')\r\n headers = {'Accept': self.NODE_MIME_TYPE}\r\n e_vm = self.connection.request(edit_vm, headers=headers).object\r\n return self._to_node(e_vm, self)",
"def start_node(self, node):\n return self._action(node, \"start\")",
"def apply_transaction(\n self,\n transaction,\n block):\n # Don't modify the given block\n block.make_immutable()\n self.set_state_root(block.header.state_root)\n computation = self.execute_transaction(transaction)\n\n # Set block.\n block, trie_data_dict = self.add_transaction(transaction, computation, block)\n block.header.state_root = self.state_root\n return computation, block, trie_data_dict",
"def __create_evm_state(\n self,\n trie_root_hash: Optional[bytes],\n sender_disallow_map: Dict[bytes, int],\n timestamp: Optional[int] = None,\n block_hash: Optional[bytes] = None,\n ):\n state = EvmState(\n env=self.env.evm_env, db=self.raw_db, qkc_config=self.env.quark_chain_config\n )\n state.shard_config = self.shard_config\n if trie_root_hash:\n state.trie.root_hash = trie_root_hash\n state.sender_disallow_map = sender_disallow_map\n if timestamp:\n state.timestamp = timestamp\n # iterate until reaches genesis or header list reaches 256\n # since most headers are in LRU cache, this should not affect performance too much\n while block_hash and len(state.prev_headers) < 256:\n h = self.db.get_minor_block_header_by_hash(block_hash)\n if not h:\n break\n state.prev_headers.append(h)\n block_hash = h.hash_prev_minor_block\n return state",
"def __init__(self,\n block: AbstractInstructionBlock,\n context: Dict[AbstractInstructionBlock, 'ImmutableInstructionBlock']=None) -> None:\n super().__init__()\n if context is None:\n context = dict()\n self.__return_ip = None\n return_ip = block.return_ip\n if return_ip is not None:\n self.__return_ip = InstructionPointer(context[return_ip.block], return_ip.offset)\n context[block] = self\n\n def make_immutable(instruction: Instruction) -> Instruction:\n if isinstance(instruction, GOTOInstruction):\n return GOTOInstruction(\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n elif isinstance(instruction, REPJInstruction):\n return REPJInstruction(\n instruction.count,\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n elif isinstance(instruction, CJMPInstruction):\n return CJMPInstruction(\n instruction.trigger,\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n else:\n return instruction\n\n self._instruction_tuple = tuple(make_immutable(instr) for instr in block.instructions)",
"def setStateMachine(self, arg2: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLMinimumEvaluator_setStateMachine(self, arg2)",
"def initialize(evm):\n contr = Contract()\n dfg = DiGraph() # Data Flow Graph\n cfg = DiGraph() # Control Flow Graph\n\n cur_blk = BasicBlock(0)\n pc = 0\n while pc < len(evm):\n op = evm[pc]\n if op not in opcodes.listing:\n raise KeyError('Invalid op. op: {:#x}, offset: {:#x}'.format(op, pc))\n\n name = opcodes.listing[op][0]\n size = opcodes.operand_size(op)\n if size != 0:\n arg = int.from_bytes(evm[pc + 1:pc + 1 + size], byteorder='big')\n else:\n arg = None\n\n instr = Instruction(op, name, arg)\n if name == 'JUMPDEST':\n if len(cur_blk.instructions) > 0:\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n new_blk = BasicBlock(pc)\n cur_blk.next = new_blk\n cur_blk = new_blk\n cur_blk.offset += 1\n contr.jump_destination[pc] = cur_blk\n contr.instructions[pc] = instr\n else:\n cur_blk.instructions.append(instr)\n contr.instructions[pc] = instr\n\n if opcodes.is_swap(op) or opcodes.is_dup(op):\n # Omit SWAP and DUP from IDG\n pass\n elif (name == 'JUMP' or name == 'JUMPI' or name == 'STOP' or name == 'RETURN' or\n name == 'REVERT' or name == 'INVALID' or name == 'SUICIDE'):\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n new_blk = BasicBlock(pc + 1)\n cur_blk.next = new_blk\n cur_blk = new_blk\n # Add DFG nodes, representing instructions\n dfg.graph.add_node(pc, instr=instr)\n else:\n # Add DFG nodes, representing instructions\n dfg.graph.add_node(pc, instr=instr)\n\n pc += size + 1\n\n if len(cur_blk.instructions) > 0 or cur_blk.offset - 1 in contr.jump_destination:\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n else:\n contr.blocks[-1].next = None\n\n return contr, dfg, cfg",
"def call_state(self, addr: Address, args=[]) -> ESILState:\n\n if type(addr) == str:\n addr = self.r2api.get_address(addr)\n\n # seek to function and init vm\n self.r2api.seek(addr)\n self.init_vm()\n state = self.init_state()\n self.set_args(state, addr, args)\n # state.registers[\"PC\"] = addr \n\n return state",
"def __init__(self, root_block):\n self.root_block = root_block\n self.blocks = {'@': root_block}\n self.block_names = {\"default\":[]}\n #registering blocks by id\n self.register_blocks(root_block.ch_blocks)\n self.register_block_names()",
"def test_group(self):\n\n class DoneState(State):\n def __init__(self):\n State.__init__(self,outcomes=['done'])\n def execute(self,ud=None):\n return 'done'\n\n sm = StateMachine(['succeeded','done'])\n with sm:\n StateMachine.add('FAILSAUCE',DoneState())\n transitions = {'aborted':'FAILSAUCE','preempted':'FAILSAUCE'}\n with sm:\n StateMachine.add('FIRST', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n StateMachine.add('SECOND', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g2), transitions)\n StateMachine.add('THIRD', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n spinner = threading.Thread(target=self.spin)\n spinner.start()\n outcome = sm.execute()\n\n assert outcome == 'done'",
"def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree",
"def generate(self):\n if len(self.network.chain) == 0:\n print(\n \"`generate` called, but chain is nonexistant;\",\n \"delegating to `genesis`...\")\n self.genesis()\n return\n\n block = Block(self.network.chain[-1].index+1,\n self.network.chain[-1].hash_val,\n random.choice(DATA_MESSAGES))\n\n # mine block\n block.hash(self.network.difficulty)\n\n # add block to this Node's chain and send it to all other Nodes in\n # network\n self.network.add_block(block)\n self.broadcast(block)",
"def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))",
"def start(self, *args, **kwargs):\n if self.started:\n raise RuntimeError(\"state machine has already been started\")\n if self.initial_state is None:\n raise ValueError(\"undefined initial state\")\n self._enter(self.initial_state, args, kwargs)",
"def create_initial_node(fsm):\n global_state = create_initial_global_state(len(fsm)) \n \n # check transitions of the state and add them to the stack \n transitions_list = []\n for i in range(len(fsm)):\n state = global_state[i][i]\n \n machine = fsm[i]\n transitions = machine.get_transition(state)\n for t in transitions: \n transitions_list.append(t)\n \n node = Node(global_state, transitions_list)\n return node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build, compile and return a regular expression based on `definition`. | def build_regexp(definition, compile=True):
    name, prefix, suffix, parts = definition
    part_strings = []
    for part in parts:
        if type(part) is tuple:
            part_strings.append(build_regexp(part, None))
        else:
            part_strings.append(part)
    or_group = '|'.join(part_strings)
    regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
    if compile:
        return re.compile(regexp, re.UNICODE)
    else:
        return regexp | [
"def compile(self):\n return re.compile(self.pattern, self.flags)",
"def get_compiled(self, name: str) -> re.compile:\n rx = re.compile(self.regexp)\n if self.flag_multiline:\n rx.flags ^= re.MULTILINE\n if self.flag_dotall:\n rx.flags ^= re.DOTALL\n return rx",
"def __compile_re(self, flags = '', rules = []):\n if not rules:\n return DEFAULT_RE\n regexp = RegExp(flags, *rules).re\n return regexp",
"def compile_regex(regex):\n return re.compile(regex, re.U)",
"def compileRegexp(class_):\n if not class_.allowParseDep:\n return\n\n d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat,\n WORD=class_.WORD, IDENT=class_.IDENT)\n\n # zero or more space-separated flags\n flagFmt = '(?:\\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\\))?'\n # add ^ and $ to ensure we match the entire string passed in\n regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d\n # word is a slightly larger group of chars than ident -\n # includes . and +, because those are used in paths and\n # sonames. May need to be larger some day, and probably\n # could be more restrictive for some groups. Should not contain\n # /, as that's used as a special char in many dep classes.\n regexp = regexp.replace('WORD', d['WORD'])\n regexp = regexp.replace('IDENT',d['IDENT'])\n class_.regexpStr = regexp\n class_.regexp = re.compile(regexp)",
"def make_regex(self):\n forwards_str = ')|('.join(self.forwards)\n reverses_str = ')|('.join(self.reverses)\n re_str = '^.*((' + forwards_str +')).*((' + reverses_str + ')).*$'\n return re.compile(re_str)",
"def __toRegExp(self, fname, targetName) -> re:\n fname = os.path.join(self.packageDir(), fname)\n if not os.path.isfile(fname):\n EmergeDebug.die(\"%s not found at: %s\" % (targetName.capitalize(), os.path.abspath(fname)))\n regex = \"(\"\n for line in fileinput.input(fname):\n # Cleanup white spaces / line endings\n line = line.splitlines()\n line = line[0].rstrip()\n if line.startswith(\"#\") or len(line) == 0:\n continue\n try:\n tmp = \"^%s$\" % line\n regex += \"%s|\" % tmp\n re.compile(tmp, re.IGNORECASE) #for debug\n EmergeDebug.debug(\"%s added to %s as %s\" % (line, targetName, tmp), 2)\n except re.error:\n EmergeDebug.die(\"%s is not a valid regexp\" % tmp)\n return re.compile(\"%s)\" % regex[:-2], re.IGNORECASE)",
"def build_regex(single_commenting_syntax, multi_commenting_syntax, extension):\n\n # Build single_line_regex and full regex\n regex = \"(\"\n single_line_regex = \"\"\n try:\n # Add single line syntax to regex\n for i in range(len(single_commenting_syntax[extension])):\n # single_syntax_length is the length of the single line commenting syntax\n single_syntax_length = len(single_commenting_syntax[extension][i])\n for j in range(single_syntax_length):\n regex += \"\\\\\" + single_commenting_syntax[extension][i][j]\n regex += \"(?:.*)$|\"\n\n single_line_regex = single_line_regex + regex[:-1]\n\n # Add multi line syntax to regex\n for i in range(0, len(multi_commenting_syntax[extension]), 2):\n for j in range(2):\n multi_syntax_length = len(multi_commenting_syntax[extension][i + j])\n # Add each character/symbol of a multi line commenting syntax one at a time into the regex\n for k in range(multi_syntax_length):\n regex += \"\\\\\" + multi_commenting_syntax[extension][i + j][k]\n # When done adding the start and end of a multi line commenting syntax, add regex\n if j == 0:\n regex += \"(?:(?:.|\\\\n)*?)\"\n # When we aren't finished adding the different ways to comment, add a or to the regex\n if (i + 2) < len(multi_commenting_syntax[extension]):\n regex += \"|\"\n\n regex += \")\"\n single_line_regex += \")\"\n except KeyError:\n print(\"Please add the syntax for commenting for that specific langauge in the .csv file to proceed.\")\n sys.exit(1)\n\n return single_line_regex, regex",
"def make_regex(style=None):\n # As new styles are added the current default should be moved into the\n # dict.\n # TODO: this smells terrible\n default = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n '[\\s\\t]*\\(*(\\d+)\\s*-\\s*(\\d+)\\)*\\s*$')\n d = {0: re.compile(r'(\\w{1,2}[\\$\\-%]\\w*|PADDING)\\s*CHARACTER\\*(\\d{3})'\n '\\s*\\.{0,1}\\s*\\((\\d*):(\\d*)\\).*'),\n 1: re.compile(r'D (\\w+) \\s* (\\d{1,2}) \\s* (\\d*)'),\n 2: default}\n return d.get(style, default)",
"def create_regex_factory(\n format_string=None, regex_type=None, ignore_case=False\n):\n if regex_type:\n format_string = REGEX_WRAPPERS.get(regex_type)\n if not format_string:\n raise KeyError(\"Unknown regex wrapper: {}\".format(regex_type))\n\n flags = 0\n if ignore_case:\n flags |= re.IGNORECASE\n\n if format_string:\n\n def create_regex(pattern):\n return re.compile(format_string.format(pattern), flags=flags)\n\n else:\n\n def create_regex(pattern):\n return re.compile(pattern, flags=flags)\n\n return create_regex",
"def _re_compile(regex):\n\n return re.compile(regex, re.I | re.UNICODE)",
"def build_custom_regex(text):\n\n # Match the final question mark\n text = re.sub(r\"\\?\", \"\\?\", text)\n # Because of optinal expensions, we need to be lenient on space matching. This will allow to skip some spaces\n text = re.sub(r\"\\s\", \"\\\\\\s*\", text)\n # Hack, because the templates in the dataset somehow don't match the templates exactly\n text = re.sub(\"another\", \"(?:another|a)\", text)\n text = re.sub(\"other\", \"(?:other)?\", text)\n # Replace all attributes by their possibilities, possibly in a group\n text = SIZE_REGEX.sub(partial(add_group, ALL_SIZES), text)\n text = COLOR_REGEX.sub(partial(add_group, ALL_COLORS), text)\n text = MATERIAL_REGEX.sub(partial(add_group, ALL_MATERIALS), text)\n text = SHAPE_REGEX.sub(partial(add_group, ALL_SHAPES), text)\n text = RELATION_REGEX.sub(partial(add_group, ALL_RELATIONS), text)\n # Optional text\n text = OPTIONAL_REGEX.sub(r\"(?:\\1)?\", text)\n # To match plurals in our groups, we detect -s suffixes\n text = PLURAL_REGEX.sub(r\")s)?\\1\", text)\n return re.compile(text)",
"def _createRegex(self, pattern):\n return '%s$' % pattern.replace( '*', '.*').replace( '?', '.')",
"def compile_match(pattern):\n\n regexp = \"\"\n\n while pattern:\n if pattern.startswith(\"**\"):\n regexp += r'.*'\n pattern = pattern[2:]\n elif pattern[0] == \"*\":\n regexp += r'[^/]*/?'\n pattern = pattern[1:]\n elif pattern[0] == '[':\n regexp += r'['\n pattern = pattern[1:]\n\n while pattern and pattern[0] != ']':\n regexp += pattern[0]\n pattern = pattern[1:]\n\n pattern = pattern[1:]\n regexp += ']'\n\n else:\n regexp += re.escape(pattern[0])\n pattern = pattern[1:]\n\n regexp += \"$\"\n\n return re.compile(regexp, re.I)",
"def _make_regex(self, *scopes):\n cmds = []\n # We go through all commands, and collect those\n # who are in one of the given scopes:\n for name in Cmd.commands:\n for scope in scopes:\n if Cmd.commands[name].scope == scope:\n cmds.append(name)\n # Build the regex using the the \"or\" operator\n cmd_list = '|'.join(cmd for cmd in cmds)\n regex = re.compile(\n \"^(?P<command>{})(?:\\s+(?P<arguments>.*))?$\".format(cmd_list)\n )\n return regex",
"def re_compiler(self, pattern):\n try:\n return re.compile(pattern, self.re_flags)\n except Exception as exc: # pylint: disable=broad-except\n _log(\"error\", \"failed to compile pattern `%s`: %s\", pattern, exc)",
"def regenerate_match_re(self):\n def find_broken_token_regex():\n \"\"\"Tries to find which token regex is broken.\n\n Returns:\n (str, str). Tuple of token name and token regex.\n \"\"\"\n trs = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n trs += r\"(?P<{}>{})\".format(token.name, token.pattern_str)\n try:\n re.compile(trs, re.MULTILINE)\n except Exception:\n return (token.name, token.pattern_str)\n trs += r\"|\"\n\n token_re_str = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n token_re_str += r\"(?P<{}>{})|\".format(token.name, token.pattern_str)\n # Remove trailing '|'\n token_re_str = token_re_str[0:-1]\n # Finally try to compile the regex\n try:\n self.__token_re = re.compile(token_re_str, re.MULTILINE)\n except Exception as e:\n tb = sys.exc_info()[2]\n token_name, broken_regex = find_broken_token_regex()\n emsg = str(e) + \" With token '{}' and regexp: '{}' and whole regexp: {}\".format(token_name, broken_regex, token_re_str)\n raise TokenizerRegexpError(emsg).with_traceback(tb)",
"def compile_response_regex(regexp):\n return re.compile(regexp, re.IGNORECASE | re.DOTALL)",
"def _make_re_from_phrase(phrase):\n paragraph_text = r'(^.+\\w.+\\n)*' # need \\S to ensure not just whitespace\n\n # TODO: check slowdown due to inclusion of '^.*' at start\n tmp = '^.*' + re.escape(phrase) + r'.*\\n' + paragraph_text + r'\\s+'\n tmp = tmp.replace(\"\\\\ \", \"(\\\\s|\\\\n)*\")\n tmp = tmp.replace(\":\", \"(:|\\\\s|\\\\n)*\")\n return re.compile(tmp, re.I | re.M) # make it case insensitive",
"def prepare_pattern(regex):\n version_regex = \"\"\n confidence_regex = \"\"\n search_regex = regex\n if '\\\\;' in regex:\n for reg in regex.split('\\\\;'):\n if 'version:' in reg:\n version_regex = rep_slashes(reg)\n elif 'confidence:' in reg:\n confidence_regex = rep_slashes(reg)\n else:\n search_regex = rep_slashes(reg)\n try:\n re.compile(search_regex, re.I)\n return search_regex, version_regex, confidence_regex\n except re.error as e:\n LOGGER.warning(f\"compiling regex: {regex} {e}\")\n # regex that never matches:\n # http://stackoverflow.com/a/1845097/413622\n return r'(?!x)x', \"\", \"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setting-based customizations; run when parsing begins. | def init_customizations(self, settings):
    if settings.pep_references:
        self.implicit_dispatch.append((self.patterns.pep,
                                       self.pep_reference))
    if settings.rfc_references:
        self.implicit_dispatch.append((self.patterns.rfc,
                                       self.rfc_reference)) | [
"def setup(self):\n # Call the base class setup first so that all of the variables are fully initialized and formatted.\n super().setup()\n\n # Write out the custom config\n self.writeCustomConfig()",
"def __call__(self, iperf):\n self.validate()\n for key, value in self.settings.items():\n setattr(iperf, self.settings_map[key], value)\n return",
"def reloadSettings(self) -> None:\n # Do basic inits.\n super().reloadSettings()\n # Bind methods.\n if self.use_pygments_styles:\n self.getDefaultFormat = QtGui.QTextCharFormat\n self.getFormat = self.getPygmentsFormat\n self.setFormat = self.setPygmentsFormat\n else:\n self.getDefaultFormat = self.getLegacyDefaultFormat\n self.getFormat = self.getLegacyFormat\n self.setFormat = self.setLegacyFormat",
"def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]",
"def set_render_settings(self, setting=None):\n file_location = 'C:/Users/cmj140030/code/artist_tools/surface_tools/turntable_tool/render_settings.xml'\n\n if not os.path.isfile(file_location):\n IO.error(\"The file, %s, does not exist\" % file_location)\n\n xml_fh = et.parse(file_location)\n root = xml_fh.getroot()\n xml_nodes = root.iter(setting)\n if not xml_nodes:\n print 'I could not find any child nodes'\n\n for xml_node in xml_nodes:\n # Loops through the first indented item, example: Low\n settings = xml_node.getchildren()\n for set in settings:\n # setting = defaultArnoldRenderOptions\n attrs = set.getchildren()\n for attr in attrs:\n # attr = AASamples\n val = attr.attrib['value']\n if str(val).isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),int(val))\n elif '.' in val and val.replace('.', '').isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),float(val))\n elif '-' in val and val.replace('-', '').isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),int(val))\n elif '-' and '.' in str(val):\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),float(val))\n elif '/' or '$' or '&' in str(val):\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),str(val),type=\"string\")\n elif str(val) == '':\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),'',type=\"string\")\n else:\n print 'The value is not valid'",
"def __setstate__(self, d):\n d = param_aliases(d)\n try:\n load_options = Store.load_counter_offset is not None\n if load_options:\n matches = [k for k in d if k.startswith('_custom_option')]\n for match in matches:\n custom_id = int(match.split('_')[-1])\n if not isinstance(d[match], dict):\n # Backward compatibility before multiple backends\n backend_info = {'matplotlib':d[match]}\n else:\n backend_info = d[match]\n for backend, info in backend_info.items():\n if backend not in Store._custom_options:\n Store._custom_options[backend] = {}\n Store._custom_options[backend][Store.load_counter_offset + custom_id] = info\n\n d.pop(match)\n\n if d['id'] is not None:\n d['id'] += Store.load_counter_offset\n else:\n d['id'] = None\n except:\n self.warning(\"Could not unpickle custom style information.\")\n self.__dict__.update(d)",
"def parse_settings(self):\n global DEBUG, LOGGING, G_SET_COMMAND_STRING\n global G_LOGGER, FILE_HANDLER, CONSOLE_HANDLER\n fname = os.path.join(PATH, \"general_settings\")\n if os.path.isfile(fname):\n general_settings_file = open(fname, \"r\")\n try:\n for line in general_settings_file:\n words = line.strip().split(\"=\")\n if words[0] == \"logging\":\n wrds1 = words[1].strip().lower()\n if wrds1 == \"true\":\n self.logging = True\n LOGGING = True\n DEBUG = True\n G_LOGGER = logging.getLogger(\"default\")\n G_LOGGER.setLevel(logging.INFO)\n # Install exception handler\n sys.excepthook = custom_exception_handler\n FILE_HANDLER = logging.FileHandler(\n \"{0}/{1}.log\".format(PATH, \"log\"),\n mode=\"w\")\n G_LOGGER.addHandler(FILE_HANDLER)\n CONSOLE_HANDLER = logging.StreamHandler()\n G_LOGGER.addHandler(CONSOLE_HANDLER)\n G_LOGGER.info(\"Enabled logging to file.\")\n elif words[0] == \"use hotkeys\":\n wrds1 = words[1].strip().lower()\n if wrds1 == \"true\":\n self.use_hotkeys = True\n else:\n self.use_hotkeys = False\n if DEBUG:\n G_LOGGER.info(\"use_hotkeys: %s\", self.use_hotkeys)\n elif words[0] == \"next wallpaper hotkey\":\n binding_strings = words[1].strip().split(\"+\")\n if binding_strings:\n self.hk_binding_next = tuple(binding_strings)\n if DEBUG:\n G_LOGGER.info(\"hk_binding_next: %s\", self.hk_binding_next)\n elif words[0] == \"pause wallpaper hotkey\":\n binding_strings = words[1].strip().split(\"+\")\n if binding_strings:\n self.hk_binding_pause = tuple(binding_strings)\n if DEBUG:\n G_LOGGER.info(\"hk_binding_pause: %s\", self.hk_binding_pause)\n elif words[0] == \"set_command\":\n G_SET_COMMAND_STRING = words[1].strip()\n self.set_command = G_SET_COMMAND_STRING\n elif words[0].strip() == \"show_help_at_start\":\n show_state = words[1].strip().lower()\n if show_state == \"false\":\n self.show_help = False\n else:\n pass\n else:\n G_LOGGER.info(\"Exception: Unkown general setting: %s\",\n words[0])\n finally:\n general_settings_file.close()\n else:\n # if file does not exist, create it and write default values.\n general_settings_file = open(fname, \"x\")\n general_settings_file.write(\"logging=false\\n\")\n general_settings_file.write(\"use hotkeys=true\\n\")\n general_settings_file.write(\"next wallpaper hotkey=control+super+w\\n\")\n self.hk_binding_next = (\"control\", \"super\", \"w\")\n general_settings_file.write(\"pause wallpaper hotkey=control+super+shift+p\\n\")\n self.hk_binding_pause = (\"control\", \"super\", \"shift\", \"p\")\n general_settings_file.write(\"set_command=\")\n general_settings_file.close()",
"def update_override_settings(self, override_settings: dict) -> None:",
"def settingsCollector(self):\n def converter(value, varType):\n if varType == float:\n value = float(value)\n elif varType == int:\n value = int(value)\n return value\n \n for setting in self.entries:\n settingType = self.settings.settings[setting].type\n value = self.entries[setting].get()\n self.settings.settings[setting].value = converter(value, settingType)\n for setting in self.buttons:\n value = self.buttons[setting].get()\n self.settings.settings[setting].value = value\n self.experimentName = self.nameEntry.get()",
"def __init__(self, settings):\n ColorDefParser.__init__(self, settings)\n self._define_parser_colordef_attr()",
"def _text2settings(self, color: str):\n t2xs = [\n (self.t2f, \"font\"),\n (self.t2s, \"slant\"),\n (self.t2w, \"weight\"),\n (self.t2c, \"color\"),\n ]\n # setting_args requires values to be strings\n\n default_args = {\n arg: getattr(self, arg) if arg != \"color\" else str(color) for _, arg in t2xs\n }\n\n settings = self._get_settings_from_t2xs(t2xs, default_args)\n settings.extend(self._get_settings_from_gradient(default_args))\n\n # Handle overlaps\n\n settings.sort(key=lambda setting: setting.start)\n for index, setting in enumerate(settings):\n if index + 1 == len(settings):\n break\n\n next_setting = settings[index + 1]\n if setting.end > next_setting.start:\n new_setting = self._merge_settings(setting, next_setting, default_args)\n new_index = index + 1\n while (\n new_index < len(settings)\n and settings[new_index].start < new_setting.start\n ):\n new_index += 1\n settings.insert(new_index, new_setting)\n\n # Set all text settings (default font, slant, weight)\n temp_settings = settings.copy()\n start = 0\n for setting in settings:\n if setting.start != start:\n temp_settings.append(TextSetting(start, setting.start, **default_args))\n start = setting.end\n if start != len(self.text):\n temp_settings.append(TextSetting(start, len(self.text), **default_args))\n settings = sorted(temp_settings, key=lambda setting: setting.start)\n\n line_num = 0\n if re.search(r\"\\n\", self.text):\n for start, end in self._find_indexes(\"\\n\", self.text):\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = line_num\n if start < setting.end:\n line_num += 1\n new_setting = copy.copy(setting)\n setting.end = end\n new_setting.start = end\n new_setting.line_num = line_num\n settings.append(new_setting)\n settings.sort(key=lambda setting: setting.start)\n break\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = line_num\n\n return settings",
"def do_load_settings(self):\n return run_trigger('set', arg=self.profile)\n # return run_alfred(':fzyset {}'.format(self.profile))",
"def customs(self, customs):\n\n self._customs = customs",
"def apply_configuration(self):\n pass # pragma: no cover",
"def _apply_configuration(self, terminal):\n terminal.set_colors(self._fg_color, self._bg_color, self._palette)\n terminal.set_font_scale(self._font_scale)\n if self._font_family:\n font = terminal.get_font()\n font.set_family(self._font_family)\n terminal.set_font(font)",
"def _init_from_dict(self, settings: Settings) -> None:\n # The valid ivars and reasonable defaults.\n valid = dict(\n ignore_case=False,\n node_only=False,\n pattern_match=False,\n search_body=True,\n search_headline=True,\n suboutline_only=False, # Seems safest. # Was True !!!\n whole_word=True,\n )\n # Set ivars to reasonable defaults.\n for ivar in valid:\n setattr(self, ivar, valid.get(ivar))\n # Override ivars from settings.\n errors = 0\n for ivar in settings.keys():\n if ivar in valid:\n val = settings.get(ivar)\n if val in (True, False):\n setattr(self, ivar, val)\n else: # pragma: no cover\n g.trace(\"bad value: {ivar!r} = {val!r}\")\n errors += 1\n else: # pragma: no cover\n g.trace(f\"ignoring {ivar!r} setting\")\n errors += 1\n if errors: # pragma: no cover\n g.printObj(sorted(valid.keys()), tag='valid keys')",
"def init_custom_fields(self):\n mapping = {\n 'application': self.config['sde_application'],\n 'project': self.config['sde_project'],\n 'context': self.config['alm_context']\n }\n\n config_custom_fields = ['alm_custom_fields']\n if self.feature_custom_lookup:\n config_custom_fields.append('alm_custom_lookup_fields')\n\n for config_option in config_custom_fields:\n self.transform_config_value(config_option, mapping)",
"def __parseAllHelper( self, parsed ):\n parsedDict = vars(parsed)\n for name, obj in vars(self).iteritems():\n if isinstance( obj, ConfigHelper ):\n for var in obj.getOptions():\n key = \"%s.%s\" %( name,var )\n if key in parsedDict:\n try:\n obj.setOption( var, parsedDict[key] )\n except RuntimeError as e:\n self._errorMessages.append( \"ERROR: %s \" % e )",
"def apply_customization(self, serializer, customization):\n # apply fields or exclude\n if customization.fields is not None:\n if len(customization.fields) == 0:\n # customization fields are empty, set Meta.fields to '__all__'\n serializer.Meta.fields = ALL_FIELDS\n else:\n serializer.Meta.fields = customization.fields\n if customization.exclude is not None:\n serializer.Meta.exclude = customization.exclude\n\n # apply extra_kwargs\n if customization.extra_kwargs is not None:\n serializer.Meta.extra_kwargs = customization.extra_kwargs\n\n # apply validate_methods\n for method_name, method in customization.validate_methods.items():\n setattr(serializer, method_name, method)",
"def set_config(self, data: dict[str, str]) -> None:\n for key, value in data.items():\n if key not in self.config:\n raise CoreError(f\"unknown config: {key}\")\n self.custom_config[key] = value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if inline markup start-string is 'quoted'. 'Quoted' in this context means the start-string is enclosed in a pair of matching opening/closing delimiters (not necessarily quotes) or at the end of the match. | def quoted_start(self, match):
    string = match.string
    start = match.start()
    if start == 0: # start-string at beginning of text
        return False
    prestart = string[start - 1]
    try:
        poststart = string[match.end()]
    except IndexError: # start-string at end of text
        return True # not "quoted" but no markup start-string either
    return punctuation_chars.match_chars(prestart, poststart) | [
"def quotedstart(self, match):\n string = match.string\n start = match.start()\n end = match.end()\n if start == 0: # start-string at beginning of text\n return 0\n prestart = string[start - 1]\n try:\n poststart = string[end]\n if self.inline.openers.index(prestart) \\\n == self.inline.closers.index(poststart): # quoted\n return 1\n except IndexError: # start-string at end of text\n return 1\n except ValueError: # not quoted\n pass\n return 0",
"def is_quoted(string):\n string = string.lstrip()\n return ((string.startswith('\"') and string.endswith('\"')) or\n (string.startswith(\"'\") and string.endswith(\"'\")))",
"def isQuotedString(self):\r\n return _osgDB.Field_isQuotedString(self)",
"def test_contains_quoted_with_escaped_newline(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\"Test\\\\\\ning\")',\n ['1'],\n flags=util.HTML\n )",
"def needs_quote(arg):\n for c in arg:\n if c in ('\"', \"'\"):\n return True\n if c.isspace():\n return True\n else:\n return False",
"def test_complete_html_start_tag_with_single_attribute_with_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show = '1' >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 13",
"def __processQuote(self, parentElem, lines, inList):\r\n dequoted = []\r\n i = 0\r\n blank_line = False # allow one blank line between paragraphs\r\n for line in lines:\r\n m = CORE_RE['quoted'].match(line)\r\n if m:\r\n dequoted.append(m.group(1))\r\n i += 1\r\n blank_line = False\r\n elif not blank_line and line.strip() != '':\r\n dequoted.append(line)\r\n i += 1\r\n elif not blank_line and line.strip() == '':\r\n dequoted.append(line)\r\n i += 1\r\n blank_line = True\r\n else:\r\n break\r\n\r\n blockquote = etree.SubElement(parentElem, \"blockquote\")\r\n\r\n self.parseChunk(blockquote, dequoted, inList)\r\n self.parseChunk(parentElem, lines[i:], inList)",
"def is_quoted(self, *args) -> \"bool\":\n return _ida_pro.channel_redir_t_is_quoted(self, *args)",
"def match_multiline(self, text, delimiter, in_state, style):\n\t\t# If inside triple-single quotes, start at 0\n\t\tif self.previousBlockState() == in_state:\n\t\t\tstart = 0\n\t\t\tadd = 0\n\t\t# Otherwise, look for the delimiter on this line\n\t\telse:\n\t\t\tstart = delimiter.indexIn(text)\n\t\t\t# Move past this match\n\t\t\tadd = delimiter.matchedLength()\n\n\t\t# As long as there's a delimiter match on this line...\n\t\twhile start >= 0:\n\t\t\t# Look for the ending delimiter\n\t\t\tend = delimiter.indexIn(text, start + add)\n\t\t\t# Ending delimiter on this line?\n\t\t\tif end >= add:\n\t\t\t\tlength = end - start + add + delimiter.matchedLength()\n\t\t\t\tself.setCurrentBlockState(0)\n\t\t\t# No; multi-line string\n\t\t\telse:\n\t\t\t\tself.setCurrentBlockState(in_state)\n\t\t\t\tlength = len(text) - start + add\n\t\t\t# Apply formatting\n\t\t\tself.setFormat(start, length, style)\n\t\t\t# Look for the next match\n\t\t\tstart = delimiter.indexIn(text, start + length)\n\n\t\t# Return True if still inside a multi-line string, False otherwise\n\t\tif self.currentBlockState() == in_state:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def is_multiline_string(self):\n return self.is_string and self.value.endswith(('\"\"\"', \"'''\"))",
"def test_complete_html_start_tag_with_single_attribute():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show=1>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 8",
"def test_tag_with_double_quote(self):\n code, out, err = self.t(\"start 1h ago 'this is a \\\"test\\\"'\")\n self.assertIn(\"Note: '\\\"this is a \\\\\\\"test\\\\\\\"\\\"' is a new tag\", out)\n self.t(\"stop\")\n self.t(\"delete @1\")",
"def is_inline_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"inline\"",
"def test_complete_html_start_tag_with_multiple_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show=1 maximize=1 opacity='70'>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 32",
"def test_complete_html_start_tag_with_single_no_value_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 7",
"def isNextString(self):\r\n reg = re.compile('^(\\\"[^\\\"]*\\\")', re.DOTALL)\r\n match = re.search(reg, self.lines)\r\n if match is None:\r\n return\r\n start , end = match.regs[0]\r\n if start == 0 and end != 0:\r\n self.token = self.lines[start+1:end-1]\r\n self.lines = self.lines[end:]\r\n self._tokenType = \"STRING_CONST\"\r\n return True",
"def test_smart_complex_cases_star(self):\n\n self.check_markdown(\n '''\n ***I'm italic and bold* I am just bold.**\n\n ***I'm bold and italic!** I am just italic.*\n ''',\n '''\n <p><strong><em>I'm italic and bold</em> I am just bold.</strong></p>\n <p><em><strong>I'm bold and italic!</strong> I am just italic.</em></p>\n ''',\n True\n )",
"def match(self, bot, user, msg, tag_info):\n cmd = msg.lower().strip()\n return cmd == \"!quote\" or cmd.startswith(\"!quote \")",
"def test_complete_html_start_tag_with_single_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 6",
"def test_contains_quoted_with_escaped_newline_with_carriage_return(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\"Test\\\\\\r\\ning\")',\n ['1'],\n flags=util.HTML\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
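A small self-contained sketch of the quoting test described in the entry above. The opener/closer table and the sample strings are assumptions standing in for docutils' punctuation_chars tables; they are only there to make the check runnable.

import re

# Assumed opener/closer pairs; docutils uses much larger punctuation tables.
OPENERS, CLOSERS = '("\'[', ')"\']'

def sketch_quoted_start(match):
    string, start = match.string, match.start()
    if start == 0:                        # start-string at beginning of text
        return False
    prestart = string[start - 1]
    try:
        poststart = string[match.end()]
    except IndexError:                    # start-string at end of text
        return True
    return (prestart in OPENERS and poststart in CLOSERS
            and OPENERS.index(prestart) == CLOSERS.index(poststart))

print(sketch_quoted_start(re.search(r'\*', 'a (*) b')))      # True: enclosed in ( )
print(sketch_quoted_start(re.search(r'\*', 'a *bold* b')))   # False: not enclosed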
Check each of the patterns in `self.implicit_dispatch` for a match, and dispatch to the stored method for the pattern. Recursively check the text before and after the match. Return a list of `nodes.Text` and inline element nodes. | def implicit_inline(self, text, lineno):
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))] | [
"def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]",
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l",
"def __code_pattern_analyzer__(self):\n\n if self.get_pattern() is not None and len(self.get_pattern()) == len(self.get_pattern_seperator()):\n for i in range(len(self.get_pattern())):\n pattern_sep = str(self.get_pattern_seperator()[i]) if self.get_pattern_seperator()[i] else None\n data, pattern = condition_checker.check_condition(str(self.get_pattern()[i]), self.dataframe,\n pattern_sep)\n if self.get_run_pattern_match():\n self.__report_xlsx__(data, \"%s_pattern\" % self.get_pattern()[i])\n pattern.to_html(\"%s.html\" % os.path.join(self.report_path, self.get_pattern()[i] + \"Pivot_\" +\n self.get_timestamp()))\n else:\n print(\"The pattern input is expected to be list and should be of same length as pattern separators\")",
"def explicit_construct(self, match):\r\n errors = []\r\n for method, pattern in self.explicit.constructs:\r\n expmatch = pattern.match(match.string)\r\n if expmatch:\r\n try:\r\n return method(self, expmatch)\r\n except MarkupError, error:\r\n lineno = self.state_machine.abs_line_number()\r\n message = ' '.join(error.args)\r\n errors.append(self.reporter.warning(message, line=lineno))\r\n break\r\n nodelist, blank_finish = self.comment(match)\r\n return nodelist + errors, blank_finish",
"def process_paths(\n\t\ttext: str, pattern: str, process_match: Callable[[str], None],\n\t\treplacement: Union[str, Callable[..., str]]):\n\n\t# all the matching files in the given text\n\tfiles = re.findall(pattern, text)\n\n\t# breakpoint()\n\n\t# every one of them...\n\tfor file in files:\n\t\t# ...is processed\n\t\tprocess_match(file)\n\n\t# replacement of matches\n\treturn re.sub(pattern, replacement, text)",
"def explicit_construct(self, match):\n errors = []\n for method, pattern in self.explicit.constructs:\n expmatch = pattern.match(match.string)\n if expmatch:\n try:\n return method(self, expmatch)\n except MarkupError, detail: # never reached?\n errors.append(\n self.statemachine.memo.reporter.warning('%s: %s'\n % (detail.__class__.__name__, detail)))\n break\n nodelist, blankfinish = self.comment(match)\n return nodelist + errors, blankfinish",
"def parse(self, text):\n #: Do not process empty strings (Issue #3)\n if text.strip() == \"\":\n return \"\"\n #: Do not process strings consisting of a single punctuation mark (Issue #4)\n elif text.strip() in PUNCTUATION:\n _sym = text.strip()\n if _sym in tuple('.?!'):\n _tag = \".\"\n else:\n _tag = _sym\n if self.lemmata:\n return \"{0}/{1}/O/O/{0}\".format(_sym, _tag)\n else:\n return \"{0}/{1}/O/O\".format(_sym, _tag)\n if self.tokenize:\n _tokenized = \" \".join(self.tokenizer.tokenize(text))\n else:\n _tokenized = text\n\n _parsed = pattern_parse(_tokenized,\n # text is tokenized before it is passed on to\n # pattern.de.parse\n tokenize=False,\n tags=self.tags, chunks=self.chunks,\n relations=self.relations, lemmata=self.lemmata,\n encoding=self.encoding, tagset=self.tagset)\n if self.pprint:\n _parsed = pattern_pprint(_parsed)\n\n return _parsed",
"def test_patterns2(text, patterns=[]):\n # look for each pattern in the text and print the results\n for pattern, desc in patterns:\n print 'Pattern %r (%s) \\n' % (pattern, desc)\n print ' %r' % text\n for match in re.finditer(pattern, text):\n s = match.start()\n e = match.end()\n prefix = ' ' * (s)\n print ' %s%s%s ' % (prefix, text[s:e], ' '*(len(text)-e)),\n print match.groups()\n if match.groupdict():\n print '%%s' % ( ' ' * (len(text)-s), match.groupdict())\n print\n return",
"def test_patterns(text, patterns=[]):\n # Look for each pattern in the text and print the results\n for pattern, desc, in patterns:\n print \"Pattern %r (%s)\\n\" % (pattern, desc)\n print ' %r' % text\n for match in re.finditer(pattern, text):\n s = match.start()\n e = match.end()\n substr = text[s:e]\n n_backslashes = text[:s].count('\\\\')\n prefix = '.' * (s + n_backslashes)\n print ' %s%r' % (prefix, substr)\n print\n return",
"def _render(self, tokens, options, env):\n pending_tags = []\n pending_content = [[]]\n for t, token in enumerate(tokens):\n if token.type == \"fence\": # Special case\n pending_content[-1].append(self.fence(tokens, t, options, env))\n elif token.tag != \"\":\n if not token.nesting: # Directly append to content\n c = [token.content] if token.content else []\n tag = getattr(dominate.tags, token.tag)\n tag = tag(*c) if token.attrs is None else tag(*c, **token.attrs)\n pending_content[-1].append(tag)\n elif len(pending_tags) > 0 and pending_tags[-1] == token.tag: # Closing tag\n t = pending_tags.pop()\n c = pending_content.pop()\n tag = getattr(dominate.tags, t)\n tag = tag(c) if token.attrs is None else tag(c, **token.attrs)\n pending_content[-1].append(tag)\n else: # Opening tag\n if token.tag == \"p\" and len(pending_tags) > 0 and pending_tags[-1] == \"li\":\n continue\n\n pending_tags.append(token.tag)\n pending_content.append([])\n elif token.children is not None:\n assert len(token.children) > 0\n pending_content[-1].extend(self._render(token.children, options, env))\n else:\n if not token.hidden:\n pending_content[-1].append(escapeHtml(token.content))\n\n assert len(pending_tags) == 0, pending_tags\n assert len(pending_content) == 1, pending_content\n\n return pending_content[-1]",
"def getcallback(self, txt):\n\n for i in self.relist:\n try:\n result = re.search(i.compiled, txt)\n if result:\n return i\n except:\n pass",
"def make_elements(tokens, text, start=0, end=None, fallback=None):\n # type: (List[Token], str, int, Optional[int], ElementType) -> List[InlineElement]\n result = [] # type: List[InlineElement]\n end = end or len(text)\n prev_end = start\n for token in tokens:\n if prev_end < token.start:\n result.append(fallback(text[prev_end : token.start])) # type: ignore\n result.append(token.as_element())\n prev_end = token.end\n if prev_end < end:\n result.append(fallback(text[prev_end:end])) # type: ignore\n return result",
"def gather_statements(self, text):\r\n position = 0\r\n while position < len(text):\r\n # Get the next statement match\r\n match = _dsl.match(text, pos=position)\r\n\r\n if match is None:\r\n end_position = min(\r\n text.find('\\n', position) + 1,\r\n len(text)\r\n )\r\n msg = f\"Couldn't find a match at position {position}.\"\r\n msg += f\"Mis-match starts at {repr(text[position:end_position])}\"\r\n raise ValueError(msg)\r\n\r\n position = match.end()\r\n yield match.groupdict()",
"def postprocess(self, block, switch=None):\n if constants.DETECT_RECURSION:\n assert not block.contains(self)\n \n # Pass the enclosing py:switch directive down in the hierarchy\n if isinstance(block, base_blocks.SwitchBlock):\n switch = block\n\n # Postprocess all the child blocks, it also allows for replacing them\n block.apply_transformation(self.postprocess, switch)\n\n # Collect py:when and py:otherwise directives for the enclosing py:switch one\n if isinstance(block, base_blocks.CaseBlock):\n assert switch, 'Found py:when directive without an enclosing py:choose on line #%d!' % block.lineno\n switch.when_blocks.append(block)\n return []\n if isinstance(block, base_blocks.OtherwiseBlock):\n assert switch, 'Found py:otherwise directive without an enclosing py:choose on line #%d!' % block.lineno\n switch.otherwise_blocks.append(block)\n return []\n\n # Mark the py:switch directive as \"prepared\" when all its children have been processed\n if isinstance(block, base_blocks.SwitchBlock):\n block.prepared = True\n \n # Do not escape the output of template functions defined in this template\n if isinstance(block, base_blocks.TextExpressionBlock):\n \n expression = block.data.strip()\n \n if expression.endswith(')'):\n \n function_name = expression.split('(', 1)[0].strip()\n \n if function_name == 'Markup':\n block = self.blocks_module.MarkupExpressionBlock(\n block.lineno, expression[7: -1].strip())\n \n if function_name in self.function_map:\n block = self.blocks_module.MarkupExpressionBlock(\n block.lineno, expression)\n \n elif expression == 'None':\n # Genshi converts None valued expressions to empty output\n return []\n \n # Finalize elements\n if isinstance(block, base_blocks.ElementBlock):\n \n if block.start_tag:\n \n # We can't shorten the element if there are any child elements\n # in it or we are outputting XHTML and this element does not\n # have a short form.\n # See also: http://www.w3.org/TR/xhtml1/#guidelines\n if (block.children or\n (self.output_standard == 'xhtml' and \n ':' not in block.data and\n block.data not in constants.SHORT_HTML_ELEMENTS_SET)):\n \n # Close start tag\n block.start_tag.append(\n self.blocks_module.MarkupBlock(block.lineno, u'>'))\n \n else:\n # Shorten the element\n block.start_tag.append(\n self.blocks_module.MarkupBlock(block.lineno, u' />'))\n block.end_tag = None\n \n return [block]",
"def mark_text_lines(self, arrow, conditions_panels):\n fig = self.fig\n average_height = np.median([cc.height for cc in fig.connected_components])\n\n areas = [cc.area for cc in fig.connected_components]\n areas.sort()\n def condition1(cc): return cc.role != FigureRoleEnum.STRUCTUREAUXILIARY\n if arrow.is_vertical:\n def condition2(cc): return cc.top > arrow.top and cc.bottom < arrow.bottom\n else:\n def condition2(cc): return cc.left > arrow.left and cc.right < arrow.right\n\n condition = condition1 and condition2\n middle_pixel = arrow.center_px\n def distance_fn(cc): return 2.2 * cc.height\n core_ccs = find_nearby_ccs(middle_pixel, fig.connected_components, (3 * average_height, distance_fn),\n condition=condition)\n if not core_ccs:\n for pixel in arrow.pixels[::10]:\n core_ccs = find_nearby_ccs(pixel, fig.connected_components, (2 * average_height, distance_fn),\n condition=condition)\n if len(core_ccs) > 1:\n break\n else:\n log.warning('No conditions were found in the initial scan. Aborting conditions search...')\n return []\n\n if conditions_panels:\n for panel in conditions_panels:\n core_ccs += find_nearby_ccs(panel, fig.connected_components, (3 * average_height, distance_fn),\n condition=condition)\n\n conditions_region = Panel.create_megarect(core_ccs)\n\n cropped_region = Crop(erase_elements(fig, conditions_panels), conditions_region) # Do not look at structures\n\n text_lines = [TextLine(None, None, top, bottom, crop=cropped_region, anchor=anchor) for (top, bottom, anchor) in\n self.identify_text_lines(cropped_region)]\n\n text_lines = [text_line.in_main_figure for text_line in text_lines]\n\n return text_lines",
"def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)",
"def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)",
"def _speak_as(\n self,\n element,\n regular_expression,\n data_property_value,\n operation\n ):\n\n children = []\n pattern = re.compile(regular_expression)\n content = element.get_text_content()\n while content:\n matches = pattern.search(content)\n if matches is not None:\n index = matches.start()\n children = operation(content, index, children)\n\n new_index = index + 1\n content = content[new_index:]\n else:\n break\n if children:\n if content:\n children.append(self._create_content_element(\n content,\n data_property_value\n ))\n while element.has_children():\n element.get_first_node_child().remove_node()\n for child in children:\n element.append_element(child)",
"def _parses(self, chart, start_sym, tree_class):\n return chart.parses(start_sym, tree_class=tree_class)",
"def get_events_from_text_msg(self, text_msg):\n\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
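A minimal sketch of the before/match/after recursion described in the entry above. The dispatch table, the URL pattern, and the tuple-based stand-ins for docutils nodes are all assumptions made for illustration.

import re

def sketch_implicit_inline(text, dispatch):
    if not text:
        return []
    for pattern, method in dispatch:
        match = pattern.search(text)
        if match:
            # Recurse on the text before and after the match, as above.
            return (sketch_implicit_inline(text[:match.start()], dispatch)
                    + method(match)
                    + sketch_implicit_inline(text[match.end():], dispatch))
    return [('Text', text)]

dispatch = [(re.compile(r'https?://\S+'), lambda m: [('reference', m.group())])]
print(sketch_implicit_inline('see http://example.org for details', dispatch))
# [('Text', 'see '), ('reference', 'http://example.org'), ('Text', ' for details')]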
Check attribution shape. Return the index past the end of the attribution, and the indent. | def check_attribution(self, indented, attribution_start):
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0) | [
"def indirection_level(self):\n return self.ty.count(\"*\") + self.ty.count(\"[\")",
"def getMarkPosition(self, i: int) -> int:\n ...",
"def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)(([*\\-+>]+|\\w+\\)|\\w+\\.) +)', line)\n if list_match:\n sub_indent = indent + len(list_match.group(2))\n else:\n sub_indent = indent\n\n return indent, sub_indent",
"def rowCheck(self, i):\n #row is list of tuples\n #row represents a row of pixels of a photo\n row = self.array[i]\n if row.count(self.outline) > self.size[0]/2:\n return (True, i)\n else: return (False,i)",
"def calc_sag_offset_idx(self):\n return self.offset_pnt-1",
"def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size",
"def getAbsLineIndent(self,pointer):\n p=pointer\n while p>0 and self.pihCode[p]!=\"\\n\":\n p=p-1\n p=p+1\n indent=0\n while p<len(self.pihCode) and self.pihCode[p] in string.whitespace:\n p+=1\n indent+=1\n return indent",
"def get_element_indent(file, encaps):\n line_nr = encaps.start.line - 1\n start_indent = get_indent_of_line(file, line_nr)\n if len(file[line_nr].rstrip()) <= encaps.start.column:\n indent = get_indent_of_line(file, line_nr + 1)\n else:\n indent = encaps.start.column\n\n indent = indent - start_indent if indent > start_indent else 0\n return indent",
"def entDimPos(ent):\n return entDimString(ent), entPosString(ent)",
"def test_offset():\n segmenter = NLTKSentencizer()\n text = ' This , text is... . Amazing !!'\n docs_chunks = segmenter.segment(np.stack([text, text]))\n for chunks in docs_chunks:\n assert len(chunks) - 1 == chunks[-1]['offset']",
"def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0",
"def check_shape(chunk, info):\n logger.warn(\"%s:%s[%d]: Checking shapes:\" % \n (os.path.basename(inspect.stack()[2].filename),\n inspect.stack()[2].function,\n inspect.stack()[2].lineno))\n if chunk:\n for key in chunk:\n logger.warn(\"%s %s\" % ( key, str(chunk[key].shape)))\n return chunk",
"def offset_at_position(self):\n offset = 0\n for i, curr_line in enumerate(self.doc.iter_lines()):\n if i == self.line:\n break\n offset += len(curr_line)\n\n return offset + self.col",
"def test_locations():\n segmenter = NLTKSentencizer()\n text = (\n \"This is a sentence. Here's another sentence. One more sentence? Aaand, yes, one more! \\n\"\n \"Lastly, this one is the last sentence.\"\n )\n docs_chunks = segmenter.segment(np.stack([text, text]))\n\n for chunks in docs_chunks:\n # first sentence should start at the first index or later\n assert chunks[0]['location'][0] >= 0\n # last sentence can not end at an index greater than the length of text\n assert chunks[-1]['location'][-1] <= len(text)\n # sentences beginning and ending indeces cannot overlap\n for i in range(1, len(chunks)):\n assert chunks[i]['location'][0] > chunks[i - 1]['location'][-1]",
"def test_paragraph_offsets_present(self):\n text = \"This (a) is a good (b) test for (c) something like this.\"\"\"\n self.assertEqual((5, 19), self.regParser.paragraph_offsets(text, 0, 0))\n self.assertEqual((19, 32),\n self.regParser.paragraph_offsets(text, 0, 1))\n self.assertEqual((32, len(text)),\n self.regParser.paragraph_offsets(text, 0, 2))",
"def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)",
"def _get_accession_number(self, doc_description):\n print(\"doc_description: \", doc_description)\n access_num = None\n try:\n access_num = util.extract_text_between_expressions(doc_description, \"Acc-no:\", \" \")\n except AssertionError:\n access_num = util.extract_text_between_expressions(doc_description, \"Acc-no: \", \" \")\n\n access_num = access_num.strip()\n assert len(access_num) == 20\n \n return access_num",
"def _getIndice(self,center):\n # See the method addHealthCenter, the keys of self.centers\n # are simply 0, 1, 2, 3, ..., len(self.centers) - 1\n for index in self.centers.keys():\n if self.centers[index]==center:\n return index\n return -1",
"def find_pos(self):\n self.y = 0\n for d in self.data:\n try:\n self.x = d.index('m')\n return\n except ValueError:\n self.y += 1",
"def get_IA_position(self, maze):\n for y in range(len(maze)):\n for x in range(len(maze[y])):\n if maze[y][x] == self.letter:\n self.posx = x\n self.posy = y\n break\n return 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
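To make the index/indent contract of the entry above concrete, here is the same shape check rewritten as a stand-alone helper over plain lists of strings; the sample attribution lines are invented.

def sketch_check_attribution(indented, attribution_start):
    indent = None
    i = attribution_start + 1
    for i in range(attribution_start + 1, len(indented)):
        line = indented[i].rstrip()
        if not line:
            break
        if indent is None:
            indent = len(line) - len(line.lstrip())
        elif len(line) - len(line.lstrip()) != indent:
            return None, None            # bad shape; not an attribution
    else:
        i += 1                           # index of the line after the attribution
    return i, (indent or 0)

good = ['-- Ralph Waldo', '   Emerson', '', 'next paragraph']
bad = ['-- Ralph Waldo', '   Emerson', '     III']
print(sketch_check_attribution(good, 0))   # (2, 3)
print(sketch_check_attribution(bad, 0))    # (None, None): inconsistent indent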
Construct and return the next enumerated list item marker, and an auto-enumerator ("#" instead of the regular enumerator). Return ``None`` for invalid (out of range) ordinals. | def make_enumerator(self, ordinal, sequence, format): #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator | [
"def get_next_item(self):\n return # osid.assessment.Item",
"def test_incorrect_start(start):\n with raises(TypeError):\n next(ienumerate([21], start))",
"def get_next_id(self):\n try:\n next_item = next(self)\n except StopIteration:\n raise IllegalState('no more elements available in this list')\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n else:\n return next_item",
"def next( self ):\n\n try: \n value = self.__sequence[ self.__nextValue ]\n except IndexError:\n raise StopIteration\n else:\n self.__nextValue += 1\n return value",
"def nextname(self, ea, ui=True):\n # don't count this item\n ea = Data.Data(ea).ea + Data.Data(ea).getSize()\n output = idaapi.BADADDR\n while ea < self.end_ea:\n d = Data.Data(ea)\n if d.getName():\n output = ea\n break\n ea += d.getSize()\n if ui: idaapi.jumpto(ea)\n return '%07X' % output",
"def nextimmref(self, ea, ui=True):\n # don't count this item\n ea = Data.Data(ea).ea + Data.Data(ea).getSize()\n output = idaapi.BADADDR\n while ea < self.end_ea:\n d = Data.Data(ea)\n if d.isCode() and '#' in d.getOrigDisasm():\n disasm = d.getOrigDisasm()\n # check out the xrefs from the data, see if it references to them\n xrefs = d.getXRefsFrom()\n for xref in xrefs[0]:\n if Data.Data(xref).getName() in disasm:\n output = ea\n break\n for xref in xrefs[1]:\n if Data.Data(xref).getName() in disasm:\n output = ea\n break\n if output != idaapi.BADADDR:\n break\n ea += d.getSize()\n if ui: idaapi.jumpto(ea)\n return '%07X' % output",
"def next(iterator, default=None): # real signature unknown; restored from __doc__\n pass",
"def enumerate1(it):\n return ((n+1, x) for n, x in enumerate(it))",
"def advance(self):\n assert(self.get_dotted_symbol() is not None)\n\n return LR1Item(self.p,\n self.index + 1,\n self.lookahead_set)",
"def __next__(self):\r\n\t\tif self.postion >= len(self.letters):\r\n\t\t\traise StopIteration\r\n\t\tletter = self.letters[self.postion]\r\n\t\tself.postion += 1\r\n\t\treturn letter",
"def getNextItem(self):\n if len(self.items) > 0:\n return self.items.pop(0)\n return None",
"async def next(\n itr: AnyIterator[T1], default: Union[T2, Sentinel] = Sentinel.MISSING\n) -> Union[T1, T2]:\n try:\n if isinstance(itr, AsyncIterator):\n return await itr.__anext__()\n\n try:\n return builtins.next(itr)\n except StopIteration:\n raise StopAsyncIteration\n except StopAsyncIteration:\n if default is Sentinel.MISSING:\n raise\n return default",
"def next(self):\n if self.next_value is not None:\n next_value = self.next_value\n self.next_value = None\n return next_value\n else:\n return next(self.iterator)",
"def nextval(iterable, value):\n i = iterable.index(value)\n i = 0 if i >= lastind(iterable) else i+1\n return iterable[i]",
"def next_inventory_item(self):\n self.current_item_index = (self.current_item_index + 1) % len(self.inventory.items)\n self.current_item = self.inventory.get_item_name(self.current_item_index)",
"def select_next_item(self) -> int:\n if not self.is_opened():\n self.open()\n self.selected_item += 1\n if self.selected_item == len(self.items):\n self.selected_item = -1\n return self.selected_item",
"def get_next_letter(coll: Collection, letter: str='r') -> int:\n s = list(map(lambda x: int(x[8:]), coll.distinct('label')))\n s.sort()\n return s[-1] + 1 if s else letter",
"def next(self):\n self.assert_open()\n if not infolist_next(self._infolist):\n raise StopIteration()\n self._old_item.close()\n new_item = InfoListItem(self, infolist_fields(self._infolist))\n self._old_item = new_item\n return new_item",
"def get_next_items(self, n):\n return # osid.assessment.Item",
"def next(self):\n\n try:\n next_token = next(self.token_generator)\n # print(n)\n return next_token\n except StopIteration:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
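An illustrative, self-contained sketch of the enumerator formatting described in the entry above. It covers only the 'arabic' and 'loweralpha' sequences with an assumed 'period' format (prefix '', suffix '.'); it is not the docutils implementation.

def sketch_make_enumerator(ordinal, sequence='arabic', fmt='period'):
    formats = {'period': ('', '.')}            # assumed prefix/suffix table
    if sequence == 'arabic':
        enumerator = str(ordinal)
    elif sequence == 'loweralpha':
        if ordinal > 26:                       # out of range -> invalid
            return None
        enumerator = chr(ordinal + ord('a') - 1)
    else:
        raise ValueError('unsupported sequence: %r' % sequence)
    prefix, suffix = formats[fmt]
    next_marker = prefix + enumerator + suffix + ' '
    auto_marker = prefix + '#' + suffix + ' '  # the auto-enumerator
    return next_marker, auto_marker

print(sketch_make_enumerator(3, 'loweralpha'))   # ('c. ', '# ')
print(sketch_make_enumerator(27, 'loweralpha'))  # None (out of range)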
Extract & return field name from a field marker match. | def parse_field_marker(self, match):
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field | [
"def parse_field_marker(self, match):\n field = match.string[1:] # strip off leading ':'\n field = field[:field.find(':')] # strip off trailing ':' etc.\n tokens = field.split()\n return tokens[0], tokens[1:] # first == name, others == args",
"def _get_field_name(cls, rule_content):\n return rule_content.get(cls.fieldname, None)",
"def getFieldName(self, *args) -> \"PyObject *\":\n return _coin.SoFieldContainer_getFieldName(self, *args)",
"def _get_field_name(self):\n return self.field_name",
"def __extractField(self, raw: dict, name: str):\n if not 'fields' in raw:\n return None\n fields = raw['fields']\n if not name in fields:\n return None\n return fields[name]",
"def _get_field(self, line):\n field_name, _ = line.split(\",\", 1)\n field_name = field_name.strip()\n return field_name",
"def extract_name(key):\n # we assume that the \"key\" is like \"(Full Name, blah...)\"\n fields = key.lstrip('(').rstrip(')').split(',')\n return fields[0]",
"def get_field_name(content_disposition: str) -> str:\n parts = content_disposition.split(';')\n for part in parts:\n part_stripped = part.strip()\n search_result = re.search(\"^name=\\\"(.*)\\\"$\", part_stripped)\n if search_result:\n return search_result.group(1)",
"def field_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"field_name\")",
"def get_field(self, field_name):\n\n field_names = field_name.split('.')\n return _find_field(self.__msg, field_names)",
"def parseField(self, text):\n fieldMatch = _fieldPartRe.match(text)\n if fieldMatch:\n modifier = fieldMatch.group(1)\n fieldName = fieldMatch.group(2)\n try:\n if not modifier:\n return self.fieldDict[fieldName]\n elif modifier == '*' * len(modifier):\n return fieldformat.AncestorLevelField(fieldName,\n len(modifier))\n elif modifier == '?':\n return fieldformat.AnyAncestorField(fieldName)\n elif modifier == '&':\n return fieldformat.ChildListField(fieldName)\n elif modifier == '#':\n match = _levelFieldRe.match(fieldName)\n if match and match.group(1) != '0':\n level = int(match.group(1))\n return fieldformat.DescendantCountField(fieldName,\n level)\n elif modifier == '!':\n return (self.parentFormats.fileInfoFormat.\n fieldDict[fieldName])\n except KeyError:\n pass\n return text",
"def __getField(self, record, field):\n\t\t(offset, length) = (self.allFields[field].ffOffset, self.allFields[field].maxlength)\n\t\treturn record[offset:offset+length].strip()",
"def _get_field(extras: dict, field_name: str):\n backcompat_prefix = \"extra__dataprep__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this method.\"\n )\n if field_name in extras:\n return extras[field_name] or None\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n return extras.get(prefixed_name) or None",
"def _extract_field_with_regex(self, field):\n matched = re.search(field, self.text)\n if not matched:\n err_msg = f\"Failed to extract data with regex! => {field}\\n\"\n err_msg += f\"response body: {self.text}\\n\"\n logger.error(err_msg)\n raise exceptions.ExtractFailure(err_msg)\n\n return matched.group(1)",
"def name(field: BaseField) -> str:\n return field.NAME",
"def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n prefix = field_name.split(\"$\")[0]\n if prefix not in self.field_prefix_map:\n return None\n\n field = self.field_prefix_map[prefix]\n if isinstance(field, fields.BaseTemplateField):\n # We use the regex here since we want to also match template fields.\n if \"$\" in field_name and not re.match(field.get_regex(), field_name):\n return None\n return field",
"def GetNamedFieldInformation(self, vtkInformation, p_int, string):\n ...",
"def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]",
"def get_field(self, name):\n return self._fields[name]",
"def field(self, tag):\n return self[self.index(tag)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
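A short runnable illustration of the slicing described in the entry above; the field-marker regex is an assumption that only needs to produce a match object for the demo.

import re

def sketch_parse_field_marker(match):
    field = match.group()[1:]            # strip off leading ':'
    field = field[:field.rfind(':')]     # strip off trailing ':' etc.
    return field

m = re.match(r':[^:]+:', ':Author: J. Smith')
print(sketch_parse_field_marker(m))      # -> 'Author'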
Return a list of `nodes.option` and `nodes.option_argument` objects, parsed from an option marker match. | def parse_option_marker(self, match):
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=', 1)
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist | [
"def argv(self):\n optlist = []\n for n in range(self.count):\n optlist.append(self.flag)\n if self.values is not None:\n optlist.append(self.values[n])\n return optlist",
"def ParseOptions():\n parser = optparse.OptionParser()\n parser.add_option('--version_file', dest='version_file')\n parser.add_option('--outdir', dest='outdir')\n\n (options, _) = parser.parse_args()\n return options",
"def getValuesFromOption(opt, cmd):\n values = []\n pat = opt + r'\\s*([\\w\\_\\/\\\\]+)'\n m = re.search(pat, cmd)\n while m:\n # found\n val = m.group(1)\n values.append(val)\n # remove the option-value pair\n cmd = re.sub(opt + '\\s*' + val, '', cmd)\n m = re.search(pat, cmd)\n return values",
"def commandLineOptionsToList(stringOptions):\n return stringOptions.split()",
"def options(self) -> List[OptionInfo]:\n return []",
"def multiget(self, option, section = None):\n\n matches = []\n if section is None:\n section = self.default_section\n if self.cfg.has_option(section, option):\n yield self.cfg.get(section, option)\n option += \".\"\n matches = [o for o in self.cfg.options(section)\n if o.startswith(option) and o[len(option):].isdigit()]\n matches.sort()\n for option in matches:\n yield self.cfg.get(section, option)",
"def _do_arg_parse(parser):\n m_index = index_(\"-m\", sys.argv[1:])\n c_index = index_(\"-c\", sys.argv[1:])\n\n if m_index == NOT_IN_LIST and c_index == NOT_IN_LIST:\n args_to_parse = sys.argv[1:]\n remainder = None\n\n elif m_index < c_index:\n args_to_parse = sys.argv[1:m_index+3]\n remainder = sys.argv[m_index+3:]\n\n elif c_index < m_index:\n args_to_parse = sys.argv[1:c_index+3]\n remainder = sys.argv[c_index+3:]\n\n opts = parser.parse_args(args_to_parse)\n\n if remainder is None:\n remainder = opts.remainder\n\n return opts, remainder",
"def read_options(data: Any) -> List[CommandOption]:\n def __fn(doc_option: Dict[str, Any]) -> CommandOption:\n name = doc.read(doc_option, \"name\", doc.typed(str))\n description = doc.read(doc_option, \"description\", doc.typed(str))\n validate_meta(MetaType.OPTION, name, description)\n kind_key = doc.read(doc_option, \"type\", doc.typed(Union[str, int]))\n try:\n kind = CommandOptionType.from_str(kind_key) if isinstance(kind_key, str) else CommandOptionType(kind_key)\n except ValueError as err:\n raise InvalidOptionTypeError(\" \".join(\n [f\"{kind_key} is not a valid command option type.\",\n \"It must be between 1 and 9 (both inclusive).\"])) from err\n option = {\n \"name\": name,\n \"description\": description,\n \"type\": kind.value,\n }\n doc.read(doc_option, \"required\", doc.typed(bool, optional=True), to=option)\n if \"choices\" in doc_option and isinstance(doc_option[\"choices\"], list):\n choices = []\n for doc_choice in doc_option[\"choices\"]:\n if isinstance(doc_choice, (str, int)):\n doc_choice = {\"name\": doc_choice}\n if not isinstance(doc_choice, dict):\n raise doc.ValueTypeError(f\"Choice of unexpected type '{type(doc_choice).__name__}'\")\n choice_name = doc.read(doc_choice, \"name\", doc.typed(str))\n validate_length(\"Choice name\", choice_name)\n choices.append({\n \"name\": choice_name,\n \"value\": doc.read(doc_choice, [\"value\", \"name\"], doc.typed(Union[str, int]))\n })\n option[\"choices\"] = choices\n doc.read(doc_option, \"options\", doc.with_default(read_options, None), to=option)\n return option # type: ignore\n\n return __read_list_or_keyed(\"Option\", data, __fn)",
"def launcher_argv(self, is_geopmctl):\n result = []\n result.extend(self.num_node_option())\n result.extend(self.exclude_list_option())\n result.extend(self.num_rank_option(is_geopmctl))\n if self.config and self.config.do_affinity:\n result.extend(self.affinity_option(is_geopmctl))\n result.extend(self.preload_option())\n result.extend(self.timeout_option())\n result.extend(self.time_limit_option())\n result.extend(self.job_name_option())\n result.extend(self.node_list_option())\n result.extend(self.host_file_option())\n result.extend(self.partition_option())\n result.extend(self.reservation_option())\n result.extend(self.performance_governor_option())\n return result",
"def options(self, section: str) -> List[str]:",
"def get_option_values(self):\n \n class CommandLineOptions(object):\n def __getattr__(self, name):\n # if an attribute can not be found, this is the last function called\n all_option_names=\", \".join(vars(self).keys())\n error_message=\"Unable to find option '{0}' in command line options.\\n\".format(name)\n error_message+=\"The available options are: {0}\".format(all_option_names)\n raise AttributeError(error_message)\n \n # get arguments from the command line (will not run again if already parsed)\n if not self._user_asked:\n self.ask_user()\n \n args=CommandLineOptions()\n for option in list(self._user_arguments.keys()) + list(self._arguments.keys()):\n option = re.sub(r'-', '_', option)\n value = self.get(option)\n setattr(args,option,value)\n \n return args",
"def extract_option(L):\n for k in xrange(len(L)):\n e = L[k]\n if e[0] == '-':\n if e[1] == 'L': return ('L',None)\n try:\n return (e[1],L[k+1])\n except:\n print 'no data with option', e[1]\n return None\n return None",
"def GetCommandLineOptions(self):\n return self.args_",
"def gen_command_options(self, command):\n for option in self.opts[command]:\n yield option",
"def _parse_arguments(\n arguments_ast: Optional[List[dict]]\n) -> List[\"ArgumentNode\"]:\n if arguments_ast:\n return [_parse_argument(argument) for argument in arguments_ast]\n return []",
"def parse_extension_options(self, option_spec, datalines):\r\n node = nodes.field_list()\r\n newline_offset, blank_finish = self.nested_list_parse(\r\n datalines, 0, node, initial_state='ExtensionOptions',\r\n blank_finish=True)\r\n if newline_offset != len(datalines): # incomplete parse of block\r\n return 0, 'invalid option block'\r\n try:\r\n options = utils.extract_extension_options(node, option_spec)\r\n except KeyError, detail:\r\n return 0, ('unknown option: \"%s\"' % detail.args[0])\r\n except (ValueError, TypeError), detail:\r\n return 0, ('invalid option value: %s' % ' '.join(detail.args))\r\n except utils.ExtensionOptionError, detail:\r\n return 0, ('invalid option data: %s' % ' '.join(detail.args))\r\n if blank_finish:\r\n return 1, options\r\n else:\r\n return 0, 'option data incompletely parsed'",
"def visitOption(self, opt, visitor):\n for arg in opt.args:\n arg.accept(visitor)",
"def all_flag_args(flags, key, argc):\n match_indices = filter(lambda i: flags[i] == key, range(len(flags)))\n if argc == 1:\n return [flags[i + 1] for i in match_indices]\n elif argc > 1:\n return [flags[i + 1 : i + argc + 1] for i in match_indices]",
"def match_options(self): # pragma: no cover",
"def visitAll(self, visitor):\n for opt in self.m.options:\n for arg in opt.args:\n arg.accept(visitor)\n \n self.optionParam.accept(visitor)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
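A sketch of just the option-string tokenisation rules handled in the entry above (the '--opt=value', '-ovalue' and '-o VALUE' forms) without constructing docutils nodes; the sample option strings are made up.

def sketch_split_option(optionstring):
    tokens = optionstring.split()
    delimiter = ' '
    firstopt = tokens[0].split('=', 1)
    if len(firstopt) > 1:                            # "--opt=value" form
        tokens[:1] = firstopt
        delimiter = '='
    elif (len(tokens[0]) > 2
          and ((tokens[0].startswith('-') and not tokens[0].startswith('--'))
               or tokens[0].startswith('+'))):       # "-ovalue" form
        tokens[:1] = [tokens[0][:2], tokens[0][2:]]
        delimiter = ''
    return tokens, delimiter

for opt in ('-o FILE', '--output=FILE', '-oFILE', '+v'):
    print(opt, '->', sketch_split_option(opt))
# -o FILE -> (['-o', 'FILE'], ' ')
# --output=FILE -> (['--output', 'FILE'], '=')
# -oFILE -> (['-o', 'FILE'], '')
# +v -> (['+v'], ' ')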
Return one line element of a line_block. | def line_block_line(self, match, lineno):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish | [
"def getLine(self) -> \"SbLine const &\":\n return _coin.SbLineProjector_getLine(self)",
"def getLine(self, line_id: int) -> Line:\n return self.pool[line_id]",
"def get_line(self, line_name):\n for line in self.line_list:\n if line.name == line_name:\n return line\n return None",
"def parse_row(self, row):\n return LineItem(row)",
"def get_line(node: Any) -> Optional[int]:\n if node is None:\n return None\n if node.line is not None:\n return node.line\n else:\n return get_line(node.parent)",
"def use_single_line(self, line_index, line_slice = None, align = align_none):\n if not (line_index >= 0 and line_index < len(self._display_lines)):\n raise AssertionError\n display = self._display_lines[line_index]\n display = line_slice is not None and display.subdisplay[line_slice]\n layer = Layer(priority=MESSAGE_BOX_PRIORITY, display_line1=display)\n return _CallbackControl(self._token_control, partial(self._set_message_box_layout, layer, maybe(partial(align, display.width))))",
"def _line(self, line: _Line) -> Text:\n\n out = \"\\n\".join(\n [\n *([\"\"] if line.errors and line.level == self._previous_level else []),\n *(f\"{self._indent(line.level)}// {e}\" for e in line.errors),\n f\"{self._indent(line.level)}{line.content}\",\n ]\n )\n\n self._previous_level = line.level\n\n return out",
"def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line",
"def get_start_line(self):\n if self._start_line == 0 and self._ast_elem_list != []:\n self._start_line = self._ast_elem_list[0].coord.line\n\n return self._start_line",
"def getLine(self) -> \"SbLine const &\":\n return _coin.SoRayPickAction_getLine(self)",
"def first_line(self) -> str | None:\n if self.output:\n return self.output.split(\"\\n\", 1)[0]\n return None",
"def get_line_by_line(self, line_width=60):\n # Returns header as first line\n yield self.get_header().decode()\n\n if line_width > 0:\n for i in range(self.get_seq_length() // line_width):\n start_pos = i * line_width\n # Returns line by line - linewidth is self.line_width\n yield self._seq[start_pos:start_pos + line_width].decode()\n\n # Last line if not full length\n if self.get_seq_length() % line_width != 0:\n yield self._seq[self.get_seq_length() // line_width * line_width:].decode()\n else:\n # No line width returns all in one line\n yield self._seq.decode()",
"def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def l(line):\n\treturn line[:-1]",
"def _line_parser(self, line):\n line = (line.rstrip()).split(' ')\n return line",
"def parse_line(cls, line):\n if not line:\n node = nodes.EmptyNode()\n elif line[0] in (cls.HTML_TAG_PREFIX, '.', '#'):\n node = nodes.HtmlNode.from_haml(line)\n elif line[0] in (cls.HTML_COMMENT_PREFIX, ):\n node = nodes.HtmlCommentNode(line[1:])\n elif line[0] in (cls.JINJA_TAG_PREFIX, ):\n node = nodes.JinjaNode.from_haml(line)\n elif line[0] in (cls.CUSTOM_BLOCK_PREFIX, ):\n node = nodes.CustomBlockNode(line[1:])\n elif line[0] in (cls.PREFORMATTED_PREFIX, ):\n node = nodes.PreformattedTextNode(line[1:])\n elif line[0] in (cls.ESCAPE_PREFIX, ):\n node = nodes.TextNode(line[1:])\n else:\n node = nodes.TextNode(line)\n\n return node",
"def first_sel_line(self):\n ret_val = self._first_sel_line()\n return ret_val",
"def put_plain_line(self, line: str) -> None:\n x = self\n if x.marker.isSentinel(line):\n x.results.append(x.verbatim_line)\n x.results.append(line)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
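A tiny sketch of the indent arithmetic used in the entry above. The marker pattern (group 1 capturing the whitespace after '|') and the sample lines are assumptions for illustration only.

import re

line_block_pattern = re.compile(r'\|( +|$)')     # assumed marker pattern
for text in ('| first line', '|    indented line', '|'):
    m = line_block_pattern.match(text)
    if m.string.rstrip() != '|':                 # not an empty line-block line
        print(repr(text), '-> indent', len(m.group(1)) - 1)
    else:
        print(repr(text), '-> empty line in the line block')
# '| first line' -> indent 0
# '|    indented line' -> indent 3
# '|' -> empty line in the line block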
Parse a directive then run its directive function. | def run_directive(self, directive, match, type_name, option_presets):
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
line=lineno)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank()) | [
"def _parse_directive(directive_ast: dict) -> \"DirectiveNode\":\n return DirectiveNode(\n name=_parse_name(directive_ast[\"name\"]),\n arguments=_parse_arguments(directive_ast[\"arguments\"]),\n location=_parse_location(directive_ast[\"loc\"]),\n )",
"async def on_directive(self, directive: 'WorkerDirective'):\n pass",
"def directive(self, name: str) -> Callable | None:\n if name in self._directive_cache:\n return self._directive_cache[name]\n if name not in self.directives:\n return None\n fullname = f'{self.name}:{name}'\n BaseDirective = self.directives[name]\n\n class DirectiveAdapter(BaseDirective): # type: ignore[valid-type,misc]\n def run(self) -> list[Node]:\n self.name = fullname\n return super().run()\n self._directive_cache[name] = DirectiveAdapter\n return DirectiveAdapter",
"def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp",
"def connect_directive_node(self, name, f_visit, f_depart):\r\n self.builder._function_node.append((name, f_visit, f_depart))",
"def parse(self, node):\n pm = getattr(self, \"parse_%s\"%node.__class__.__name__)\n pm(node)",
"def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.arguments == ('henson.cli:parser',)",
"def testDirectiveParser_parse_translate(self):\n translateDirective = \"xhr-src http://localhost\"\n cspTranslateDirective = DirectiveParser().parse(translateDirective)\n assert cspTranslateDirective == Directive(\"connect-src\", (URISourceExpression(\"http\", \"localhost\", None, None),))",
"def run_parser(self, parser: ArgumentParser):",
"def parse(self, node):\n parseMethod = getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)",
"def _parse_directives(\n directives_ast: Optional[List[dict]]\n) -> List[\"DirectiveNode\"]:\n if directives_ast:\n return [_parse_directive(directive) for directive in directives_ast]\n return []",
"def parse_directive(directive):\n\n whitelist = []\n blacklist = []\n substitutions = {}\n\n for segment in directive.split(\",\"):\n if segment.startswith(\"*=\"):\n scope = [\"*\"]\n elif \":\" in segment:\n start, stop = segment.strip(\"!\").split(\"=\")[0].split(\":\")\n scope = range(*sorted([int(start), int(stop) + 1]))\n else:\n start = int(segment.strip(\"!\").split(\"=\")[0])\n scope = range(start, start + 1)\n\n for code in scope:\n if \"=\" in segment:\n sub = int(segment.split(\"=\")[1])\n\n if segment.startswith(\"!\"):\n blacklist.append(code)\n substitutions[\"*\"] = sub\n else:\n substitutions[code] = sub\n elif segment.startswith(\"!\"):\n blacklist.append(code)\n else:\n whitelist.append(code)\n\n return whitelist, blacklist, substitutions",
"def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)",
"def parse_Element(self, node):\n name = node.tagName\n ignores = self.ignores\n if name in ignores:\n return\n attr = \"do_%s\" % name\n if hasattr(self, attr):\n handlerMethod = getattr(self, attr)\n handlerMethod(node)\n else:\n self.generic_parse(node)\n #if name not in self.generics: self.generics.append(name)",
"def apply_visitor(visitor, decl_inst):\n\n fname = 'visit_' + \\\n decl_inst.__class__.__name__[:-2] # removing '_t' from class name\n if not hasattr(visitor, fname):\n raise visit_function_has_not_been_found_t(visitor, decl_inst)\n return getattr(visitor, fname)()",
"def parse(self, stringDirective):\n \n # extract/translate directive type\n stringDirective = stringDirective.strip()\n if stringDirective == \"inline style base restriction\":\n return Directive.INLINE_STYLE_BASE_RESTRICTION()\n elif stringDirective == \"inline script base restriction\":\n return Directive.INLINE_SCRIPT_BASE_RESTRICTION()\n elif stringDirective == \"eval script base restriction\":\n return Directive.EVAL_SCRIPT_BASE_RESTRICTION()\n \n directiveParts = stringDirective.partition(\" \")\n if directiveParts[0] == stringDirective:\n return Directive.INVALID() # could not split as expected (no \" \")\n directiveType = directiveParts[0].strip().lower()\n if directiveType in self._typeTranslations:\n directiveType = self._typeTranslations[directiveType]\n if directiveType == \"\" or directiveType not in self._allowedTypes:\n return Directive.INVALID() # parsing error or type not allowed (e.g., report-uri or sandbox)\n\n # extract whitelisted source expressions\n whitelistedResources = directiveParts[2].strip().split()\n \n # handle 'none' in list\n # (list of length 0 might be invalid, but we handle it as 'none', too)\n if (\"'none'\" in map(lambda x: x.lower(), whitelistedResources)\n and len(whitelistedResources) > 1 \n and self._strict):\n return Directive.INVALID() # 'none' must be only resource if present\n \n # clean up URIs (and make unique set)\n validWhitelistedSourceExpressions = set([])\n for res in whitelistedResources:\n if res.lower() == \"'none'\":\n continue\n srcExpr = self._sourceExpressionParser.parse(res)\n # check some error conditions\n if srcExpr == SourceExpression.INVALID():\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n if srcExpr == SourceExpression.UNSAFE_EVAL() and not directiveType in (\"script-src\", \"default-src\"):\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n if srcExpr == SourceExpression.UNSAFE_INLINE() and not directiveType in (\"script-src\", \"style-src\", \"default-src\"):\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n validWhitelistedSourceExpressions.add(srcExpr)\n return Directive(directiveType, validWhitelistedSourceExpressions)",
"def compile_do(self):\n # write <do_statement>\n self.non_terminal_open(XML_DO_STATEMENT)\n # write <keyword> do <keyword>\n self.one_liner(XML_KEY_WORD, self.tokenizer.current_token)\n # advance to next token (subroutine call)\n self.tokenizer.advance()\n # write <identifier> name_of_func <identifier>\n self.one_liner(XML_IDENTIFIER, self.tokenizer.current_token)\n self.tokenizer.advance()\n # compile the subroutine call\n self.compile_subroutine_call()\n # write <symbol> ; <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n # write <do_statement>\n self.non_terminal_end(XML_DO_STATEMENT)\n self.tokenizer.advance()",
"def parse(self, player, message):\n #test if the message match a command available for the player state\n matched = self.cmd_regex[player.get_state()].match(message)\n if matched:\n # execute the relative function\n cmd = matched.group(\"command\")\n arg = matched.group(\"arguments\") or ''\n getattr(self, Cmd.commands[cmd].fn)(player, arg)\n else:\n #self.game.log(\n # \"Unknown command <{}> for state {}.\"\n # .format(message, player.get_state()))\n info(player, \"<code>Arglebargle !?</code>\")",
"def directives(self, directives):\n\n self._directives = directives"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse `datalines` for a field list containing extension options matching `option_spec`. | def parse_extension_options(self, option_spec, datalines):
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=True)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed' | [
"def parse_extension_attributes(self, attribute_spec, datalines, blankfinish):\n node = nodes.field_list()\n newlineoffset, blankfinish = self.nestedlistparse(\n datalines, 0, node, initialstate='FieldList',\n blankfinish=blankfinish)\n if newlineoffset != len(datalines): # incomplete parse of block\n return 0, 'invalid attribute block', blankfinish\n try:\n attributes = utils.extract_extension_attributes(node, attribute_spec)\n except KeyError, detail:\n return 0, ('unknown attribute: \"%s\"' % detail), blankfinish\n except (ValueError, TypeError), detail:\n return 0, ('invalid attribute value:\\n%s' % detail), blankfinish\n except utils.ExtensionAttributeError, detail:\n return 0, ('invalid attribute data: %s' % detail), blankfinish\n return 1, attributes, blankfinish",
"def extract_extension_options(fields, options_spec, raise_fail=True, errors=[]):\n \"\"\"\n :Parameters:\n - `fields`: A list or field_list of fields with field_name, field_body pairs.\n - `options_spec`: Dictionary mapping known option names to a\n conversion function such as `int` or `float`.\n\n :Exceptions:\n - `UnknownOptionError` for unknown option names.\n - `DuplicateOptionError` for duplicate options.\n - `OptionValueError` for invalid option values (raised by conversion\n function).\n - `OptionTypeError` for invalid option value types (raised by conversion\n function).\n \"\"\"\n options = {}\n seen = [] # track seen names, raise on missing required fields\n for field in fields:\n\n field_name, field_body = field[0:2]\n name = extract_field_name(field_name)\n\n if not name in options_spec or not options_spec[name][0]:\n error = UnknownOptionError( name, ) # if explicitly disabled\n if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n spec = options_spec[name]\n\n # XXX:BVB: dont like this\n convertor = spec[0]\n required = not (len(spec)>1 and spec[1])\n append = len(spec)>2 and spec[2]\n\n if name in options and not append:\n error = DuplicateOptionError( name, )\n if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n if name not in seen:\n seen.append(name)\n\n if len(field_body):\n pass\n\n error = None\n try:\n if not callable(convertor):\n body = field_body[0]\n if len(convertor)==2:\n converted = list(parse_list(body, *convertor))\n elif len(convertor)==3:\n converted = list(parse_nested_list(body, *convertor))\n elif len(convertor)==4:\n converted = list(parse_nested_list_with_headers(body, *convertor))\n elif len(field_body):\n converted = convertor(field_body[0])\n else:\n converted = ''\n\n except ValueError, e:\n error = OptionValueError( name, field_body, e )\n except TypeError, e:\n error = OptionValueError( name, field_body, e )\n if error:\n if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n if append:\n if not name in options:\n options[name] = []\n if isinstance(converted, list):\n options[name].extend(converted)\n else:\n options[name].append(converted)\n else:\n options[name] = converted\n\n if len(options_spec) > len(seen):\n # Report missing fields\n names = options_spec.keys()\n [names.remove(name) for name in seen]\n for name in names:\n spec = options_spec[name]\n if len(spec)<2 or spec[1]:\n error = MissingOptionError(name,)\n if raise_fail:\n raise error\n errors.append((None, error))\n\n return options",
"def test_multiple_extra_sections(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(spec={'bar': IntOption()}))\n\n config = StringIO('[__main__]\\nfoo=d1\\n d2\\n d3\\n'\n '[d1]\\nbar=1\\n[d2]\\nbar=2\\n[d3]\\nbar=3')\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n self.assertTrue(parser.is_valid())",
"def parse_delimited_data_lines(data_lines,delimiter=\"\\t\"):\n for line in data_lines: \n \n if line.startswith(\"#\"): \n continue \n if not line.strip(): \n continue \n \n fields = line.strip().split(delimiter) \n yield fields",
"def read_options(data: Any) -> List[CommandOption]:\n def __fn(doc_option: Dict[str, Any]) -> CommandOption:\n name = doc.read(doc_option, \"name\", doc.typed(str))\n description = doc.read(doc_option, \"description\", doc.typed(str))\n validate_meta(MetaType.OPTION, name, description)\n kind_key = doc.read(doc_option, \"type\", doc.typed(Union[str, int]))\n try:\n kind = CommandOptionType.from_str(kind_key) if isinstance(kind_key, str) else CommandOptionType(kind_key)\n except ValueError as err:\n raise InvalidOptionTypeError(\" \".join(\n [f\"{kind_key} is not a valid command option type.\",\n \"It must be between 1 and 9 (both inclusive).\"])) from err\n option = {\n \"name\": name,\n \"description\": description,\n \"type\": kind.value,\n }\n doc.read(doc_option, \"required\", doc.typed(bool, optional=True), to=option)\n if \"choices\" in doc_option and isinstance(doc_option[\"choices\"], list):\n choices = []\n for doc_choice in doc_option[\"choices\"]:\n if isinstance(doc_choice, (str, int)):\n doc_choice = {\"name\": doc_choice}\n if not isinstance(doc_choice, dict):\n raise doc.ValueTypeError(f\"Choice of unexpected type '{type(doc_choice).__name__}'\")\n choice_name = doc.read(doc_choice, \"name\", doc.typed(str))\n validate_length(\"Choice name\", choice_name)\n choices.append({\n \"name\": choice_name,\n \"value\": doc.read(doc_choice, [\"value\", \"name\"], doc.typed(Union[str, int]))\n })\n option[\"choices\"] = choices\n doc.read(doc_option, \"options\", doc.with_default(read_options, None), to=option)\n return option # type: ignore\n\n return __read_list_or_keyed(\"Option\", data, __fn)",
"def test_multiple_extra_sections(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(spec={'bar': IntOption()}))\n\n config = StringIO('[__main__]\\nfoo=d1\\n d2\\n d3\\n'\n '[d1]\\nbar=1\\n[d2]\\nbar=2\\n[d3]\\nbar=3')\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n expected_sections = set(['d1', 'd2', 'd3'])\n extra_sections = parser.extra_sections\n self.assertEqual(expected_sections, extra_sections)",
"def _parse_fields(self, unf_str):\n unf_str = unf_str.strip(self.BORDER_CHAR)\n _, desc_token = unf_str.split(\": \")\n fields = []\n suite_name = re.search(VPatterns.get_suite_name(), desc_token).group(0)\n fields.append(suite_name)\n fields.append(desc_token)\n return fields",
"def parse_options(data: bytearray) -> Generator[BaseOption, None, None]:\n while data:\n kind = data[0]\n opt = _PARSE_KIND_TBL.get(kind, SizedOption).from_bytes(data)\n yield opt\n\n if opt is end_of_options:\n return",
"def _read_lst_file(config: MutableMapping[str, Any]):\n cur_file = ReadMeta(\n filename=config[\"outputs\"][\"data_filename\"],\n input_start=config[\"inputs\"][\"start\"],\n input_stop1=config[\"inputs\"][\"stop1\"],\n input_stop2=config[\"inputs\"][\"stop2\"],\n input_stop3=config[\"inputs\"][\"stop3\"],\n input_stop4=config[\"inputs\"][\"stop4\"],\n input_stop5=config[\"inputs\"][\"stop5\"],\n binwidth=config[\"advanced\"][\"binwidth\"],\n use_sweeps=config[\"advanced\"][\"sweeps_as_lines\"],\n mirror_phase=config[\"advanced\"][\"phase\"],\n )\n cur_file.run()\n raw_data_obj = ReadData(\n filename=config[\"outputs\"][\"data_filename\"],\n start_of_data_pos=cur_file.start_of_data_pos,\n timepatch=cur_file.timepatch,\n is_binary=cur_file.is_binary,\n debug=config[\"advanced\"][\"debug\"],\n )\n raw_data = raw_data_obj.read_lst()\n if cur_file.is_binary:\n relevant_columns, dict_of_data = binary_parsing(cur_file, raw_data, config)\n else:\n relevant_columns, dict_of_data = ascii_parsing(cur_file, raw_data, config)\n lst_metadata = cur_file.lst_metadata\n fill_frac = (\n config[\"advanced\"][\"fill_frac\"]\n if cur_file.fill_fraction == -1.0\n else cur_file.fill_fraction\n )\n return relevant_columns, dict_of_data, lst_metadata, fill_frac",
"def _extract_non_default_list(\n config_data: Dict, ctx: click.Context, field: str, process: Callable[[List], Any]\n) -> None:\n try:\n # Check if `field` was given in config file\n config_paths = config_data.pop(field)\n except KeyError:\n # No value for field was provided\n pass\n else:\n # Use config default if `field` was not provided as CLI argument\n if not ctx.params.get(field) and config_paths:\n if isinstance(config_paths, str):\n config_paths = [config_paths]\n ctx.params[field] = process(config_paths)",
"def list_cmd_parser(buf):\n records = []\n\n # Assumption: Each record is separated by empty line.\n for record in buf.split('\\n\\n'):\n records.append(Record.parse(record))\n\n return records",
"def parse_dd(fp):\n expr = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n r'[\\s\\t]*\\(*(\\d+)\\s*[\\-–]\\s*(\\d+)\\)*\\s*$')\n with open(fp) as f:\n lines = (expr.match(x) for x in f)\n matches = filter(None, lines)\n groups = (x.groups() for x in matches)\n\n df = (pd.DataFrame(list(groups),\n columns=['field', 'width', 'desc', 'start', 'end'])\n .convert_objects(convert_numeric=True))\n return df",
"def parse_sparkDatasourceInfo_tag(spec):\n def parse_datasource(spec):\n toks = spec.split(\",\")\n dct = {}\n for tok in toks:\n k,v = tok.split(\"=\")\n dct[k] = v\n return dct\n toks = spec.split(\"\\n\")\n return [ parse_datasource(tok) for tok in toks ]",
"def _parse_fields(self, unf_str):\n unf_str = unf_str.strip(self.BORDER_CHAR)\n unf_str = unf_str.lstrip(\"Test Case \")\n number, desc_token = unf_str.split(\": \")\n case_name = re.search(VPatterns.get_test_case_name(),\n desc_token).group(0)\n fields = []\n fields.append(case_name)\n fields.append(int(number))\n fields.append(desc_token)\n return fields",
"def _add_fields(cls):\n for opt in CmdLine._flatten(cls._supported_options):\n if not opt.opt_name.isidentifier():\n raise CmdLineException(\"Specified option name '{}' must be \"\n \"a valid Python identifier\".\n format(opt.opt_name))\n if opt.opt_name in dir(CmdLine):\n raise CmdLineException(\"Specified option name '{}' clashes\".\n format(opt.opt_name))\n setattr(cls, opt.opt_name, opt.value)",
"def resolve_validation_dataloaders(model: 'ModelPT'):\n if not _HAS_HYDRA:\n logging.error(\"This function requires Hydra/Omegaconf and it was not installed.\")\n exit(1)\n cfg = copy.deepcopy(model._cfg)\n dataloaders = []\n\n # process val_loss_idx\n if 'val_dl_idx' in cfg.validation_ds:\n cfg = OmegaConf.to_container(cfg)\n val_dl_idx = cfg['validation_ds'].pop('val_dl_idx')\n cfg = OmegaConf.create(cfg)\n else:\n val_dl_idx = 0\n\n # Set val_loss_idx\n model._val_dl_idx = val_dl_idx\n\n ds_key = resolve_dataset_name_from_cfg(cfg.validation_ds)\n\n if ds_key is None or val_dl_idx < 0:\n logging.debug(\n \"Could not resolve file path from provided config - {}. \"\n \"Disabling support for multi-dataloaders.\".format(cfg.validation_ds)\n )\n\n model.setup_validation_data(cfg.validation_ds)\n return\n\n ds_values = cfg.validation_ds[ds_key]\n\n if isinstance(ds_values, (list, tuple, ListConfig)):\n\n for ds_value in ds_values:\n if isinstance(ds_value, (dict, DictConfig)):\n # this is a nested dataset\n cfg.validation_ds = ds_value\n else:\n cfg.validation_ds[ds_key] = ds_value\n\n model.setup_validation_data(cfg.validation_ds)\n dataloaders.append(model._validation_dl)\n\n model._validation_dl = dataloaders\n if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)):\n # using the name of each of the nested dataset\n model._validation_names = [ds.name for ds in ds_values]\n else:\n model._validation_names = [parse_dataset_as_name(ds) for ds in ds_values]\n unique_names_check(name_list=model._validation_names)\n return\n\n else:\n model.setup_validation_data(cfg.validation_ds)\n model._validation_names = [parse_dataset_as_name(ds_values)]\n unique_names_check(name_list=model._validation_names)",
"def iter_field_groups(input_file, line_group_separator='', field_separator='\\t', strip_spaces_at_ends=True,\r\n ignore_empty_groups=True, min_field_list_len=0, default_field_value=''):\r\n field_group = []\r\n with open(input_file) as f:\r\n for line in f:\r\n if strip_spaces_at_ends:\r\n line = line.strip()\r\n if line == line_group_separator:\r\n if field_group or (not ignore_empty_groups):\r\n yield field_group\r\n field_group = []\r\n else:\r\n fields = line.split(field_separator)\r\n if len(fields) < min_field_list_len:\r\n fields += [default_field_value] * (min_field_list_len - len(fields))\r\n field_group.append(fields)\r\n if field_group or (not ignore_empty_groups):\r\n yield field_group",
"def parse_spec(spec):\n for row in spec:\n yield FrTest(**row)",
"def _parseField(self, value, filename=None):\n if value is None:\n value = ''\n if filename is None:\n # Split the text into a list for diffs\n return value.splitlines()\n else:\n return [self.filenameTitle(filename)] + value.splitlines()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine which explicit construct this is, parse & return it. | def explicit_construct(self, match):
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error:
lineno = self.state_machine.abs_line_number()
message = ' '.join(error.args)
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish | [
"def explicit_construct(self, match):\n errors = []\n for method, pattern in self.explicit.constructs:\n expmatch = pattern.match(match.string)\n if expmatch:\n try:\n return method(self, expmatch)\n except MarkupError, detail: # never reached?\n errors.append(\n self.statemachine.memo.reporter.warning('%s: %s'\n % (detail.__class__.__name__, detail)))\n break\n nodelist, blankfinish = self.comment(match)\n return nodelist + errors, blankfinish",
"def getConstructor(self) -> ghidra.app.plugin.processors.sleigh.Constructor:\n ...",
"def read_constructor(is_private, datatype, tokens):\n args = parse_args(tokens)\n body = parse_body(tokens)\n return {'op': 'constructor',\n 'name': datatype,\n 'args': args,\n 'body': body,\n 'private': is_private }",
"def parse(name):\n\n pass",
"def find_input(self, input_format):\n input_cls = find_needed_input(input_format)\n return input_cls",
"def _build_topology(topology):\n if isinstance(topology, str):\n return get_topology(topology)\n if isinstance(topology, list):\n return topology",
"def GetConstructorName(typename, platform):\n\n return 'Create' + typename + platform.capitalize()",
"def test_initialization_state_construction_approach(self):\n\n self.assertIsInstance(self.owl_nets.kg_construct_approach, str)\n self.assertTrue(self.owl_nets.kg_construct_approach == 'subclass')\n self.assertFalse(self.owl_nets.kg_construct_approach == 'instance')\n\n return None",
"def _parse(txt):\n \n from mwlib import scanner, parser\n \n tokens = scanner.tokenize(txt)\n res=parser.Parser(tokens, \"unknown\").parse()\n\n # res is an parser.Article. \n if len(res.children)!=1:\n res.__class__ = parser.Node\n return res\n\n res = res.children[0]\n if res.__class__==parser.Paragraph:\n res.__class__ = parser.Node\n \n if len(res.children)!=1:\n return res\n return res.children[0]",
"def parse(cls, string):\n obj, i = cls.match(string, 0)\n if i != len(string):\n raise NotParseable(f\"Found unexpected {string[i]}.\", i + 1)\n return obj",
"def _construct_metadata(self):\n if self.properties:\n return self._step_type_to_output_format_map[self.type]()\n return None",
"def get_parser(data):\n\n # If there are only two lines and the first begins with a '>', the\n # data is in FASTA format. Remove the first line to get the\n # sequence.\n if len(data.splitlines()) == 2:\n if data.startswith('>'):\n data = data.splitlines()[1]\n\n # Test for SnpSequence\n pattern = regex.compile(r'\\w|\\[.\\/.\\]')\n matched_chars = ''.join(regex.findall(pattern, data))\n if matched_chars == data:\n return SnpSequence(data)\n\n # Test for TwoAlleles\n lines = data.splitlines()\n if len(lines) == 4 and lines[0].startswith('>') and lines[2].startswith('>'):\n return TwoAlleles(data)\n\n # Test for Single Blast Sequence\n if '|' in data:\n return SingleBlastParser(data)\n\n # Format not recognized.\n raise StarpError(\"SNP Format Not Recognized\")",
"def _find_primary_component(self):\n progcomps = {}\n spec = {}\n primary_component = None\n for comp in self._component_classes:\n if comp == \"CPL\":\n continue\n spec[comp] = self.get_value(\"COMP_{}\".format(comp))\n notprogcomps = (\"D{}\".format(comp), \"X{}\".format(comp), \"S{}\".format(comp))\n if spec[comp].upper() in notprogcomps:\n progcomps[comp] = False\n else:\n progcomps[comp] = True\n expect(\n \"ATM\" in progcomps\n and \"LND\" in progcomps\n and \"OCN\" in progcomps\n and \"ICE\" in progcomps,\n \" Not finding expected components in {}\".format(self._component_classes),\n )\n if (\n progcomps[\"ATM\"]\n and progcomps[\"LND\"]\n and progcomps[\"OCN\"]\n and progcomps[\"ICE\"]\n ):\n primary_component = \"allactive\"\n elif progcomps[\"LND\"] and progcomps[\"OCN\"] and progcomps[\"ICE\"]:\n # this is a \"J\" compset\n primary_component = \"allactive\"\n elif progcomps[\"ATM\"] and progcomps[\"OCN\"] and progcomps[\"ICE\"]:\n # this is a ufs s2s compset\n primary_component = \"allactive\"\n elif progcomps[\"ATM\"]:\n if \"DOCN%SOM\" in self._compsetname and progcomps[\"LND\"]:\n # This is an \"E\" compset\n primary_component = \"allactive\"\n else:\n # This is an \"F\" or \"Q\" compset\n primary_component = spec[\"ATM\"]\n elif progcomps[\"LND\"]:\n # This is an \"I\" compset\n primary_component = spec[\"LND\"]\n elif progcomps[\"OCN\"]:\n # This is a \"C\" or \"G\" compset\n primary_component = spec[\"OCN\"]\n elif progcomps[\"ICE\"]:\n # This is a \"D\" compset\n primary_component = spec[\"ICE\"]\n elif \"GLC\" in progcomps and progcomps[\"GLC\"]:\n # This is a \"TG\" compset\n primary_component = spec[\"GLC\"]\n elif progcomps[\"ROF\"]:\n # This is a \"R\" compset\n primary_component = spec[\"ROF\"]\n elif progcomps[\"WAV\"]:\n # This is a \"V\" compset\n primary_component = spec[\"WAV\"]\n else:\n # This is \"A\", \"X\" or \"S\"\n primary_component = \"drv\"\n\n return primary_component",
"def guess_scene_parser(scene_id):\n from ..scihub.parser import Sentinel2Scene\n from ..usgs.parser import LandsatScene\n\n parsers = [GoogleLandsat, GoogleSentinel]\n\n found = None\n\n for parser in parsers:\n try:\n found = parser(scene_id)\n break\n except RuntimeError:\n continue\n\n if found is None:\n raise RuntimeError('Cant guess parser')\n\n return found",
"def extract_una_or_generate_default(edifact_string):\n if edifact_string[:3] == 'UNA':\n return UNA(src_string=edifact_string[:9])\n else:\n return UNA()",
"def grab_one(type_str, resolver, top_combiner, combiner):\n bits = re.split(r'([,<>()])', type_str, 1)\n first, sep, rest = [bits[0], '', ''] if (len(bits) == 1) else bits\n args = [resolver(first)]\n if sep == '<' or sep == '(':\n lastsep = '>' if (sep == '<') else ')'\n while sep != lastsep:\n next, rest = grab_one(rest, resolver, combiner, combiner)\n args.append(next)\n sep, rest = rest[0], rest[1:]\n else:\n rest = sep + rest\n return top_combiner(*args), rest",
"def get_lang_obj(name: str) -> AbstractLanguage:\n if name.lower() == \"abap\":\n return Abap()\n raise Exception(\"Unknown language: \" + name)",
"def __init__(self, parser_instance):\n self.parser_instance = parser_instance\n # Dict of sorted lists of constructs by [head_or_tail][trigger_token_label]\n self.construct_lookup_dict = {}\n self.construct_lookup_dict[HEAD] = {}\n self.construct_lookup_dict[TAIL] = {}",
"def _reconstruct(self):\n\n # We assemble the unknown which is an expression\n # of the first eliminated variable.\n self._sub_unknown()\n # Recover the eliminated unknown\n self._elim_unknown()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RFC2822-style field list item. | def rfc2822(self, match, context, next_state):
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, [] | [
"def __getitem__(self, i: 'int const') -> \"SoField *\":\n return _coin.SoFieldList___getitem__(self, i)",
"def parseField(f):\n k = f.id\n if f.has_value('alternate_name'):\n k = f.get_value('alternate_name') or f.id\n v = getattr(request, k, MARKER)\n if hasattr(v, 'edit'):\n # This is an encapsulated editor\n # call it\n encapsulated_editor_list.append(v)\n elif v is not MARKER:\n if k.startswith(field_prefix):\n # We only take into account\n # the object attributes\n k = k[field_prefix_len:]\n # Form: '' -> ERP5: None\n if v == '':\n v = None\n kw[k] = v",
"def addField(self,field=\"\"):\r\n self._NMEAFieldList.append(field)",
"def fields(self, *fields):\n if len(fields) == 0:\n return [el.split() for el in self]\n \n res = SList()\n for el in [f.split() for f in self]:\n lineparts = []\n\n for fd in fields:\n try:\n lineparts.append(el[fd])\n except IndexError:\n pass\n if lineparts:\n res.append(\" \".join(lineparts))\n \n return res",
"def map_listcs(item):\n fields = item.split()\n\n fields = [x.split(\"=\", 1)[-1] for x in fields]\n\n return tuple( fields )",
"def fields(self):\n yield from self._field_list",
"def visit_list_item(self, node):\n if len(node.children)==0 or node.children[0].tagname not in \\\n ['paragraph', 'compact_paragraph']:\n self.extend_node_attrs(node, bias=0)\n\n self.visit_list_item_original(self, node)\n\n # For compound list items (e.g. bullet point with two paragraphs):\n # the second paragraph should be recorded as a paragraph, not as\n # a `list_item`\n node._ucomment_num_nodes = 0",
"def _parseField(self, value, filename=None):\n if value is None:\n value = ''\n if filename is None:\n # Split the text into a list for diffs\n return value.splitlines()\n else:\n return [self.filenameTitle(filename)] + value.splitlines()",
"def field(self, *args):\r\n return _osgDB.FieldReaderIterator_field(self, *args)",
"def __getitem__(self, item_num):\n if self._type == self.TYPE_LIST:\n return self._val[item_num].resolve()\n else:\n raise TypeError(\"RFValue is not a list.\")",
"def FieldHandle(self) -> _n_2_t_10:",
"def print_field(field):\n\n print('\\n'.join([''.join(['{:4}'.format(str(item)) for item in row])\n for row in field]))\n print('---------------')",
"def format_field(field):\n if field.tag < '010' and self.tag.isdigit():\n return field.getData()\n fielddata = ''\n for subfield in field.subfields:\n if subfield.code == '6':\n continue\n if not field.tag.startswith('6'):\n fielddata += ' {0}'.format(subfield.getData())\n else:\n if subfield.code not in ('v','x','y','z'):\n fielddata += ' {0}'.format(subfield.getData())\n else:\n fielddata += ' -- {0}'.format(subfield.getData())\n return fielddata.strip()",
"def fillFieldList(self):\n count = self.dlgCtrls.listWhatToGrab.getItemCount()\n self.dlgCtrls.listWhatToGrab.removeItems(0, count)\n self.titles = [(\"\", \"\")]\n if self.filetype in PhonReader.supportedNames():\n self.titles.extend(lingex_structs.LingPhonExample.GRAB_FIELDS)\n elif self.filetype in InterlinReader.supportedNames():\n self.titles.extend(lingex_structs.LingInterlinExample.GRAB_FIELDS)\n elif self.filetype in DocReader.supportedNames():\n self.titles.append((WhatToGrab.WHOLE_DOC, \"Whole Document\"))\n elif self.filetype in CalcFileReader.supportedNames():\n for char in string.ascii_uppercase:\n self.titles.append(\n (char, \"%s %s\" % (theLocale.getText(\"Column\"), char)))\n if len(self.titles) > 1:\n stringList = [theLocale.getText(display)\n for dummy_key, display in self.titles]\n self.dlgCtrls.listWhatToGrab.addItems(tuple(stringList), 0)",
"def define_nested_list_field(*args):\n\n # Deal with the optional subclass name\n largs=len(args)\n if largs == 1:\n subclass_name=\"AnonymousNestedListField\"\n efield=args[0]\n elif largs == 2:\n subclass_name=args[0]\n efield=args[1]\n else:\n raise TypeError(\"define_nested_list_field() missing or invalid arguments\")\n\n # The element_field must be a RawField sub-class\n if not inspect.isclass(efield) or not issubclass(efield,RawField):\n raise TypeError(\"'{}' is not a RawField or a sub-class\".format(efield))\n\n def _pytocl(v):\n if isinstance(v,str) or not isinstance(v,collections.Iterable):\n raise TypeError(\"'{}' is not a collection (list/seq/etc)\".format(v))\n nested=clingo.Function(\"\",[])\n for ev in reversed(v):\n nested=clingo.Function(\"\",[efield.pytocl(ev),nested])\n return nested\n\n def _get_next(raw):\n if raw.type != clingo.SymbolType.Function or raw.name != \"\":\n raise TypeError(\"'{}' is not a nested list\".format(raw))\n rlen = len(raw.arguments)\n if rlen == 0: return None\n if rlen == 2: return raw.arguments\n else:\n raise TypeError(\"'{}' is not a nested list\".format(raw))\n\n def _cltopy(raw):\n elements=[]\n result = _get_next(raw)\n while result:\n elements.append(efield.cltopy(result[0]))\n result = _get_next(result[1])\n return elements\n\n return type(subclass_name, (RawField,),\n { \"pytocl\": _pytocl,\n \"cltopy\": _cltopy})",
"def _parse_fields(self, unf_str):\n pass",
"def __init__(self, field_list):\n self.field_list = field_list\n self.data = None",
"def field(self,name,occurrence=None):\n proto_field = self.fields_by_name[name]\n field_internal = self.fields[name]\n if isinstance(field_internal,list):\n if occurrence is None:\n occurrence = 0\n field_internal = field_internal[occurrence]\n return proto_field, field_internal",
"def item_info(sf, row, field=None):\n fields = [item[0] for item in sf.fields[1:]]\n record = sf.record(row)\n if field:\n print(\"{}: {}\".format(fields[field], record[field]))\n else:\n for i, field in enumerate(fields):\n print(\"{} - {}: {}\".format(i, field, record[field]))",
"def __setitem__(self, i: 'int const', value: 'SoField') -> \"void\":\n return _coin.SoFieldList___setitem__(self, i, value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Analyze the text `block` and return a table data structure. Given a plaintext-graphic table in `block` (list of lines of text; no whitespace padding), parse the table, construct and return the data necessary to construct a CALS table or equivalent. Raise `TableMarkupError` if there is any problem with the markup. | def parse(self, block):
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure | [
"def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None",
"def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def pba(self, block_attributes, element=None):\r\n style = []\r\n aclass = ''\r\n lang = ''\r\n colspan = ''\r\n rowspan = ''\r\n block_id = ''\r\n\r\n if not block_attributes:\r\n return ''\r\n\r\n matched = block_attributes\r\n if element == 'td':\r\n m = re.search(r'\\\\(\\d+)', matched)\r\n if m:\r\n colspan = m.group(1)\r\n\r\n m = re.search(r'/(\\d+)', matched)\r\n if m:\r\n rowspan = m.group(1)\r\n\r\n if element == 'td' or element == 'tr':\r\n m = re.search(r'(%s)' % self.vertical_align_re, matched)\r\n if m:\r\n style.append(\"vertical-align:%s;\" % self.vAlign[m.group(1)])\r\n\r\n m = re.search(r'\\{([^}]*)\\}', matched)\r\n if m:\r\n style.append(m.group(1).rstrip(';') + ';')\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'\\[([^\\]]+)\\]', matched, re.U)\r\n if m:\r\n lang = m.group(1)\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'\\(([^()]+)\\)', matched, re.U)\r\n if m:\r\n aclass = m.group(1)\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'([(]+)', matched)\r\n if m:\r\n style.append(\"padding-left:%sem;\" % len(m.group(1)))\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'([)]+)', matched)\r\n if m:\r\n style.append(\"padding-right:%sem;\" % len(m.group(1)))\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'(%s)' % self.horizontal_align_re, matched)\r\n if m:\r\n style.append(\"text-align:%s;\" % self.hAlign[m.group(1)])\r\n\r\n m = re.search(r'^(.*)#(.*)$', aclass)\r\n if m:\r\n block_id = m.group(2)\r\n aclass = m.group(1)\r\n\r\n if self.restricted:\r\n if lang:\r\n return ' lang=\"%s\"' % lang\r\n else:\r\n return ''\r\n\r\n result = []\r\n if style:\r\n result.append(' style=\"%s\"' % \"\".join(style))\r\n if aclass:\r\n result.append(' class=\"%s\"' % aclass)\r\n if lang:\r\n result.append(' lang=\"%s\"' % lang)\r\n if block_id:\r\n result.append(' id=\"%s\"' % block_id)\r\n if colspan:\r\n result.append(' colspan=\"%s\"' % colspan)\r\n if rowspan:\r\n result.append(' rowspan=\"%s\"' % rowspan)\r\n return ''.join(result)",
"def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encoding) == 'b':\n text = b64decode(raw_text)\n elif str.lower(encoding) == 'q':\n text = quopri.decodestring(raw_text)\n else:\n raise ValueError(f\"Unknown encoding '{encoding}'\") from None\n exit(1)\n\n decoded = text.decode(charset)\n return decoded",
"def table(self, text):\r\n text = text + \"\\n\\n\"\r\n pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\\. ?\\n)?^(%(a)s%(c)s\\.? ?\\|.*\\|)\\n\\n'\r\n % {'s': self.table_span_re,\r\n 'a': self.align_re,\r\n 'c': self.c},\r\n re.S | re.M | re.U)\r\n return pattern.sub(self.fTable, text)",
"async def parse_block(ctx, block):\n # some questions are inputted from macs and have weird apostrophes. Kill them, and empty newlines\n # also escape underscores so when shown as a question in discord, they do not format, and normalize iOS apostrophes\n rawlines = block.replace('´', '\\'').replace('\\n\\n', '\\n').replace('_', '\\\\_').replace('´', '\\'').split('\\n')\n lines = []\n for line in rawlines:\n if not line.lower().startswith('source:'):\n lines.append(line)\n print(lines)\n # check validity of input\n try:\n if len(lines) % 2:\n raise UserWarning('Ope, I didn\\'t get that. Try not to separate any questions from their answers')\n for i in range(len(lines)):\n if i % 2 and not lines[i].startswith('Answer: '):\n raise UserWarning('Answer did not start with \"Answer: \"\\n```' + lines[i] + '```')\n if (1 + i) % 2 and not lines[i].startswith('Question: '):\n raise UserWarning('Question did not start with \"Question: \"\\n```' + lines[i] + '```')\n except UserWarning as e:\n await ctx.send(e)\n return\n\n out = []\n while lines:\n out.append(parse_next(lines))\n\n await ctx.send(display(out))\n return out",
"def find_block(self, block):\n startlines = {\n 'surcharge': ('Node Surcharge Summary', 9),\n 'depth': ('Node Depth Summary', 8),\n # todo:\n #'inflow':,\n #'flooding':,\n #'volume':,\n #'loading':,\n #'link_flow':,\n #'classification':,\n #'conduit_surcharge':,\n }\n\n\n blockstart, comment_lines = startlines[block]\n\n return self._find_line(blockstart) + comment_lines #b/c variable comment lines",
"def _parse_table(self, node, state):\n if not self.tabular:\n logger.error(\"Called _parse_table without tabular activated.\")\n return state\n\n if node.tag == \"table\":\n table_idx = state[\"table\"][\"idx\"]\n stable_id = \"{}::{}:{}\".format(\n state[\"document\"].name, \"table\", state[\"table\"][\"idx\"]\n )\n # Create the Table in the DB\n parts = {}\n parts[\"document\"] = state[\"document\"]\n parts[\"stable_id\"] = stable_id\n parts[\"position\"] = table_idx\n parent = state[\"parent\"][node]\n if isinstance(parent, Cell):\n parts[\"section\"] = parent.table.section\n elif isinstance(parent, Section):\n parts[\"section\"] = parent\n else:\n raise NotImplementedError(\"Table is not within a Section or Cell\")\n\n state[\"context\"][node] = Table(**parts)\n\n # Local state for each table. This is required to support nested\n # tables\n state[\"table\"][table_idx] = {\n \"grid\": defaultdict(int),\n \"cell_pos\": 0,\n \"row_idx\": -1,\n \"col_idx\": 0,\n }\n\n # Increment table counter\n state[\"table\"][\"idx\"] += 1\n\n elif node.tag == \"tr\":\n if not isinstance(state[\"parent\"][node], Table):\n raise NotImplementedError(\"Table row parent must be a Table.\")\n\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] = 0\n state[\"table\"][state[\"parent\"][node].position][\"row_idx\"] += 1\n\n elif node.tag in [\"td\", \"th\"]:\n if not isinstance(state[\"parent\"][node], Table):\n raise NotImplementedError(\"Cell parent must be a Table.\")\n\n if not state[\"table\"][state[\"parent\"][node].position][\"row_idx\"] >= 0:\n raise NotImplementedError(\"Table cell encountered before a table row.\")\n\n # calculate row_start/col_start\n while state[\"table\"][state[\"parent\"][node].position][\"grid\"][\n (\n state[\"table\"][state[\"parent\"][node].position][\"row_idx\"],\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"],\n )\n ]: # while a cell on the grid is occupied, keep moving\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] += 1\n col_start = state[\"table\"][state[\"parent\"][node].position][\"col_idx\"]\n row_start = state[\"table\"][state[\"parent\"][node].position][\"row_idx\"]\n\n # calculate row_end/col_end\n row_end = row_start\n if \"rowspan\" in node.attrib:\n row_end += int(node.get(\"rowspan\")) - 1\n col_end = col_start\n if \"colspan\" in node.attrib:\n col_end += int(node.get(\"colspan\")) - 1\n\n # update grid with occupied cells\n for r, c in itertools.product(\n list(range(row_start, row_end + 1)), list(range(col_start, col_end + 1))\n ):\n state[\"table\"][state[\"parent\"][node].position][\"grid\"][(r, c)] = 1\n\n # construct cell\n parts = defaultdict(list)\n parts[\"document\"] = state[\"document\"]\n parts[\"table\"] = state[\"parent\"][node]\n parts[\"row_start\"] = row_start\n parts[\"row_end\"] = row_end\n parts[\"col_start\"] = col_start\n parts[\"col_end\"] = col_end\n parts[\"position\"] = state[\"table\"][state[\"parent\"][node].position][\n \"cell_pos\"\n ]\n stable_id = \"{}::{}:{}:{}:{}\".format(\n parts[\"document\"].name,\n \"cell\",\n parts[\"table\"].position,\n row_start,\n col_start,\n )\n parts[\"stable_id\"] = stable_id\n # Create the Cell in the DB\n state[\"context\"][node] = Cell(**parts)\n\n # Update position\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] += 1\n state[\"table\"][state[\"parent\"][node].position][\"cell_pos\"] += 1\n\n return state",
"def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht",
"def make_dataframe(block_name, blocks):\n names = {} # store names corresponding to column ids\n all_rows = [] # store list of dicts of column_id: value\n for k, v in blocks.iteritems():\n # to hold table info for this file\n info = {}\n for line in v:\n # split around the #. parts[0] is the contents, parts[1] is the column header\n # (but note programs use diff conventions...)\n parts = [p.strip() for p in line.split('#')]\n data, comment = parts\n\n # for most blocks, we use the first part of parts[0] to ID what the row means\n # BUT this doens't work for all e.g. DCINFO\n id_not_first_blocks = [\"DCINFO\"]\n if block_name in id_not_first_blocks:\n pass\n else:\n col_id, contents = data.split()\n names[col_id] = comment\n info[col_id] = contents\n all_rows.append(info)\n # make a DataFrame for this block\n df = pd.DataFrame(all_rows, index=blocks.keys())\n # convert column IDs to string names\n df.rename(columns=names, inplace=True)\n df.reindex_axis(sorted(df.columns), axis=1)\n df.sort_index(inplace=True)\n print df\n return df",
"def handleBlock(block):\n mlines = filter(lambda line : line.startswith('-'), block)\n plines = filter(lambda line : line.startswith('+'), block)\n mcount = len(mlines)\n pcount = len(plines)\n if mcount > pcount:\n plines.extend([''] * (mcount - pcount))\n elif pcount > mcount:\n mlines.extend([''] * (pcount - mcount))\n count = max(mcount, pcount)\n return [(mlines[i],plines[i]) for i in range(count)]",
"def parseMT(self):\n print(\"starting\");\n ans = RowBox()\n if self.getStart():\n print(\"Found start\")\n nends = 0\n while self.checkNext(): \n print(\"Starting interpreter\")\n mb = self.nextRecord(True)\n if mb != None: #if this is true, parseMT terminates: we come here only once\n if self.recType == 1: # type LINE add all children of mb to ans\n for nmb in mb.c :\n if not nmb.isEmpty():\n ans.addChild(nmb)\n ## if(endct == 0) return ans;\n self.endct -= 1\n #elif self.subType == 1: # take last element of ans, put it in a rowbox, replace first of mb with the rowbox, finally insert mb in ans\n## used for adding exponent and index to elem\n #zb = ans.c.removeLast() #? is ans ever non-empty, here???\n #zbnew = RowBox()\n #zbnew.addChild(zb)\n #lb = mb\n #lb.c.remove(0)\n #lb.c.add(0, zbnew)\n #ans.addChild(mb)\n else: # add mb (as a block) as a single child of ans \n ans.addChild(mb)\n return ans #\n if self.recType == 0: #mb == None, if we find more than 6, stop\n nends += 1\n if nends > 6: \n return ans\n return ans #we've hit end of file",
"def executeblock(self, block):\r\n \r\n block_text = \"\\n\\n\" + block.string\r\n line_number = block.start_row\r\n #self.options._update_loose(block.options)\r\n out_value = \"\"\r\n \r\n # This import should not be needed, but it works around a very\r\n # strange bug I encountered once.\r\n import cStringIO\r\n # create file-like string to capture output\r\n code_out = cStringIO.StringIO()\r\n code_err = cStringIO.StringIO()\r\n \r\n captured_exception = None\r\n # capture output and errors\r\n sys.stdout = code_out\r\n sys.stderr = code_err\r\n try:\r\n exec block_text in self.namespace\r\n except Exception, captured_exception:\r\n if isinstance(captured_exception, KeyboardInterrupt):\r\n raise captured_exception\r\n print >> sys.stderr, format_exc() \r\n \r\n # restore stdout and stderr\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__\r\n \r\n out_value = code_out.getvalue()\r\n error_value = code_err.getvalue()\r\n \r\n code_out.close()\r\n code_err.close()\r\n\r\n if captured_exception: \r\n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \r\n print >> sys.stderr, error_value\r\n self.namespace = globals()\r\n self.namespace.update(locals())\r\n\r\n if out_value and not self.options.noecho:\r\n if self.options.outfilename == \"-\" :\r\n print >> sys.stderr, out_value\r\n else:\r\n print out_value\r\n if self.myshow:\r\n self.current_figure_list = self.myshow.figure_list[\r\n len(self.total_figure_list):]\r\n self.total_figure_list = self.myshow.figure_list\r\n\r\n #if self.options.silent:\r\n # error_value = \"\"\r\n \r\n return (block.start_row, block.string, out_value, error_value, \r\n self.current_figure_list)",
"def executeblock(self, block):\n \n block_text = \"\\n\\n\" + block.string\n line_number = block.start_row\n #self.options._update_loose(block.options)\n out_value = \"\"\n \n # This import should not be needed, but it works around a very\n # strange bug I encountered once.\n import cStringIO\n # create file-like string to capture output\n code_out = cStringIO.StringIO()\n code_err = cStringIO.StringIO()\n \n captured_exception = None\n # capture output and errors\n sys.stdout = code_out\n sys.stderr = code_err\n try:\n exec block_text in self.namespace\n except Exception, captured_exception:\n if isinstance(captured_exception, KeyboardInterrupt):\n raise captured_exception\n print >> sys.stderr, format_exc() \n \n # restore stdout and stderr\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n out_value = code_out.getvalue()\n error_value = code_err.getvalue()\n \n code_out.close()\n code_err.close()\n\n if captured_exception: \n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \n print >> sys.stderr, error_value\n self.namespace = globals()\n self.namespace.update(locals())\n\n if out_value and not self.options.noecho:\n if self.options.outfilename == \"-\" :\n print >> sys.stderr, out_value\n else:\n print out_value\n if self.myshow:\n self.current_figure_list = self.myshow.figure_list[\n len(self.total_figure_list):]\n self.total_figure_list = self.myshow.figure_list\n\n #if self.options.silent:\n # error_value = \"\"\n \n return (block.start_row, block.string, out_value, error_value, \n self.current_figure_list)",
"def test_block_in_inline():\r\n box = parse('''\r\n<style>\r\n p { display: inline-block; }\r\n span, i { display: block; }\r\n</style>\r\n<p>Lorem <em>ipsum <strong>dolor <span>sit</span>\r\n <span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')\r\n box = build.inline_in_block(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n # No whitespace processing here.\r\n ('strong', 'Text', '\\n '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])])]),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse'),\r\n ('i', 'Block', [])])])])])])])])])\r\n\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [\r\n # Whitespace processing not done yet.\r\n ('strong', 'Text', '\\n ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])]),\r\n\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [])])])]),\r\n ('span', 'Block', [\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse')])])]),\r\n ('i', 'Block', []),\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [])])])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [])])])])])])",
"def _get_block_plain_text(self, block):\n cursor = QtGui.QTextCursor(block)\n cursor.movePosition(QtGui.QTextCursor.StartOfBlock)\n cursor.movePosition(QtGui.QTextCursor.EndOfBlock,\n QtGui.QTextCursor.KeepAnchor)\n return cursor.selection().toPlainText()",
"def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree",
"def check_rst_block(block):\r\n publisher = docCore.Publisher( source_class = docIO.StringInput,\r\n destination_class = docIO.StringOutput )\r\n publisher.set_components('standalone', 'restructuredtext', 'pseudoxml')\r\n publisher.process_programmatic_settings(None, None, None)\r\n if block[0] == \"textBlock\":\r\n publisher.set_source(block[1], None)\r\n compiled_rst = publisher.reader.read(publisher.source,\r\n publisher.parser, publisher.settings)\r\n if compiled_rst.parse_messages:\r\n # FIXME: It would be nice to add the line number where the error \r\n # happened\r\n print >> sys.stderr, \"\"\"Error reading rst on literate comment line \r\nfalling back to plain text\"\"\"\r\n else:\r\n block[0] = \"rstBlock\"\r\n return block",
"def parse_content_block(content_block, tm_, privkey=None, pubkey=None):\n stix_bindings = (t.CB_STIX_XML_10,\n t.CB_STIX_XML_101,\n t.CB_STIX_XML_11,\n t.CB_STIX_XML_111,\n \"urn:stix.mitre.org:xml:1.2\")\n\n binding = str(content_block.content_binding)\n if binding == 'application/x-pkcs7-mime':\n if not privkey or not pubkey:\n msg = \"Encrypted data found, but certificate or key not provided\"\n return (None, msg)\n\n inbuf = BIO.MemoryBuffer(BytesIO(content_block.content).read())\n s = SMIME.SMIME()\n try:\n s.load_key(str(privkey), str(pubkey))\n p7, data = SMIME.smime_load_pkcs7_bio(inbuf)\n buf = s.decrypt(p7)\n except SMIME.PKCS7_Error:\n return (None, \"Decryption Failed\")\n f = BytesIO(buf)\n new_block = f.read()\n f.close()\n return parse_content_block(tm_.ContentBlock.from_xml(new_block),\n tm_, privkey, pubkey)\n elif binding in stix_bindings:\n f = BytesIO(content_block.content)\n data = f.read()\n f.close()\n return (data, None)\n else:\n msg = 'Unknown content binding \"%s\"' % binding\n return (None, msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Look for a head/body row separator line; store the line index. | def find_head_body_sep(self):
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators '
'(table lines %s and %s); only one allowed.'
% (self.head_body_sep+1, i+1), offset=i)
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.',
offset=i) | [
"def parse_special_header(self, linenum, info):\n if linenum + 1 < len(self.lines) and \\\n self.lines[linenum].startswith(\"Index: \") and \\\n self.lines[linenum + 1] == self.INDEX_SEP:\n # This is an Index: header, which is common in CVS and Subversion,\n # amongst other systems.\n try:\n info['index'] = self.lines[linenum].split(None, 2)[1]\n except ValueError:\n raise DiffParserError(\"Malformed Index line\", linenum)\n linenum += 2\n\n return linenum",
"def find_next_nearest_delimiter(self, line, index):\n while line[index] != self.delimiter and index < len(line)-1:\n index += 1\n return index",
"def _find_linesep(self, s):\r\n if \"\\r\\n\" in s: # windows line ending\r\n self.linesep = \"\\r\\n\"\r\n else:\r\n self.linesep = \"\\n\"",
"def find_head(self):\n switch = [0, 0]\n for row in self.sheet_r.rows:\n if switch == [1, 0]:\n break\n switch[0] = switch[1]\n\n try:\n if row[0].fill.start_color.index != '00000000':\n switch[1] = 1\n self.head_pos.append(row[0].row)\n else:\n switch[1] = 0\n except AttributeError:\n pass",
"def get_line_identifier(self):",
"def get_number_header_lines(docbody, page_break_posns):\n remaining_breaks = len(page_break_posns) - 1\n num_header_lines = empty_line = 0\n # pattern to search for a word in a line:\n p_wordSearch = re.compile(ur'([A-Za-z0-9-]+)', re.UNICODE)\n if remaining_breaks > 2:\n if remaining_breaks > 3:\n # Only check odd page headers\n next_head = 2\n else:\n # Check headers on each page\n next_head = 1\n keep_checking = 1\n while keep_checking:\n cur_break = 1\n if docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)].isspace():\n # this is a blank line\n empty_line = 1\n\n if (page_break_posns[cur_break] + num_header_lines + 1) \\\n == (page_break_posns[(cur_break + 1)]):\n # Have reached next page-break: document has no\n # body - only head/footers!\n keep_checking = 0\n\n grps_headLineWords = \\\n p_wordSearch.findall(docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)])\n cur_break = cur_break + next_head\n while (cur_break < remaining_breaks) and keep_checking:\n grps_thisLineWords = \\\n p_wordSearch.findall(docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)])\n if empty_line:\n if len(grps_thisLineWords) != 0:\n # This line should be empty, but isn't\n keep_checking = 0\n else:\n if (len(grps_thisLineWords) == 0) or \\\n (len(grps_headLineWords) != len(grps_thisLineWords)):\n # Not same num 'words' as equivilent line\n # in 1st header:\n keep_checking = 0\n else:\n keep_checking = \\\n check_boundary_lines_similar(grps_headLineWords,\n grps_thisLineWords)\n # Update cur_break for nxt line to check\n cur_break = cur_break + next_head\n if keep_checking:\n # Line is a header line: check next\n num_header_lines = num_header_lines + 1\n empty_line = 0\n return num_header_lines",
"def split_line(self):\n # coordinate of the # symbol or end of the line (-1) if not found\n hash_or_end = self.line.find(\"#\")\n temp = self.line[self.region_end:hash_or_end].strip(\" |\")\n self.coord_str = regex_paren.sub(\"\", temp)\n\n # don't want any meta_str if there is no metadata found\n if hash_or_end >= 0:\n self.meta_str = self.line[hash_or_end:]\n else:\n self.meta_str = \"\"",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0",
"def leyline_head_changer(self, leyline: 'Leyline') -> None:\n cell_id_list = []\n for cells in leyline.cell_list:\n cell_id_list.append(cells.id)\n numberof1s = cell_id_list.count(1)\n numberof2s = cell_id_list.count(2)\n if numberof1s >= (len(leyline.cell_list) / 2) and leyline.head == '@':\n leyline.head = 1\n elif numberof2s >= (len(leyline.cell_list) / 2) and leyline.head == '@':\n leyline.head = 2",
"def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)",
"def get_line_number(self):\n return self.line_number",
"def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish",
"def _get_line_numbers(self):\n\n output = ''\n row, col = self._text.index('end').split('.')\n i = 0\n for i in range(1, int(row) - 1):\n output += str(i) + '\\n'\n\n return output + str(i + 1)",
"def get_containing_line(self, pos):\n _, col, lino = self.contentTextCtrl.PositionToXY(pos)\n left = pos - col\n return (left, left + self.contentTextCtrl.GetLineLength(lino))",
"def indexByLineNumber(self,n):\n for idx in range(len(self.__data)):\n if self.__data[idx].lineno() == n:\n return idx\n raise IndexError,\"No line number %d\" % n",
"def test_find_line(self):\n a = NRT.ConvToCsv()\n s= a.find_line(self.test2,\"Empty_Category20\")\n self.assertEqual(s, 38, \"Dont find the write line\")",
"def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)",
"def first_non_whitespace_index (line): \n return len (line) - len (line.lstrip ())",
"def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
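
The head/body separator check in the code above rejects an '=' row that falls on the first or last line of a grid table, and otherwise records its position and rewrites it to '-'. A minimal illustrative sketch of the kind of input involved (the table literal below is hypothetical, not drawn from this data):

# Hypothetical grid-table text with a legal head/body separator: the '='
# border sits strictly between the top and bottom borders, so the check
# above accepts it and rewrites it to '-' after noting its line index.
valid_grid_table = [
    "+--------+--------+",
    "| head 1 | head 2 |",
    "+========+========+",
    "| body 1 | body 2 |",
    "+--------+--------+",
]
# Moving the '=' row to the first or last position would instead raise
# the "may not be the first or last line of the table" markup error.
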
Start with a queue of upper-left corners, containing the upper-left corner of the table itself. Trace out one rectangular cell, remember it, and add its upper-right and lower-left corners to the queue of potential upper-left corners of further cells. Process the queue in top-to-bottom order, keeping track of how much of each text column has been seen. We'll end up knowing all the row and column boundaries, cell positions and their dimensions. | def parse_table(self):
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.') | [
"def iter_cells_greater_than(self, row_zb: int, col_zb: int) \\\n -> Generator[Tuple[int, int], None, None]:\n # Cell above?\n if (row_zb > 0 and\n self.inequalities_down[row_zb - 1][col_zb] == BOTTOM_LT_TOP):\n other = row_zb - 1, col_zb\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell below?\n if (row_zb < self.n - 1 and\n self.inequalities_down[row_zb][col_zb] == TOP_LT_BOTTOM):\n other = row_zb + 1, col_zb\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell left?\n if (col_zb > 0 and\n self.inequalities_right[row_zb][col_zb - 1] == LEFT_GT_RIGHT):\n other = row_zb, col_zb - 1\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell right?\n if (col_zb < self.n - 1 and\n self.inequalities_right[row_zb][col_zb] == LEFT_LT_RIGHT):\n other = row_zb, col_zb + 1\n yield other\n yield from self.iter_cells_greater_than(*other)",
"def draw_grid(self, cell_edge_size):\r\n left = 0\r\n top = 0\r\n for row in range(1, self.game.num_of_rows + 1):\r\n for col in range(1, self.game.num_of_cols + 1):\r\n cell = self.grid[row][col]\r\n if cell.alive_next_round:\r\n cell.color = cell.alive_color\r\n cell.alive = True\r\n cell.alive_next_round = False\r\n else:\r\n cell.color = cell.dead_color\r\n cell.alive = False\r\n\r\n square = pygame.Rect(left, top, cell_edge_size, cell_edge_size)\r\n pygame.draw.rect(cell.surface, cell.color, square)\r\n left += cell_edge_size\r\n\r\n left = 0\r\n top += cell_edge_size",
"def border(self):\n #pylint: disable=R0912\n # Too many branches (17/12)\n rstr = self.colors.get('border', u'')\n thoriz = self.glyphs.get('top-horiz', u'') * (self.width - 2)\n bhoriz = self.glyphs.get('bot-horiz', u'') * (self.width - 2)\n topright = self.glyphs.get('top-right', u'')\n botright = self.glyphs.get('bot-right', u'')\n for row in range(0, self.height):\n # top to bottom\n for col in range (0, self.width):\n # left to right\n if (col == 0) or (col == self.width - 1):\n rstr += self.pos(col, row)\n if (row == 0) and (col == 0):\n # top left\n rstr += self.glyphs.get('top-left', u'')\n elif (row == self.height - 1) and (col == 0):\n # bottom left\n rstr += self.glyphs.get('bot-left', u'')\n elif (row == 0):\n # top right\n rstr += self.glyphs.get('top-right', u'')\n elif (row == self.height - 1):\n # bottom right\n rstr += self.glyphs.get('bot-right', u'')\n elif col == 0:\n # left vertical line\n rstr += self.glyphs.get('left-vert', u'')\n elif col == self.width - 1:\n # right vertical line\n rstr += self.glyphs.get('right-vert', u'')\n elif (row == 0):\n # top row (column 1)\n if thoriz == u'':\n if topright != u'':\n # prepare for top-right, (horiz skipped)\n rstr += self.pos(self.width -1, row)\n else:\n # horizontal line\n rstr += thoriz\n # top-right,\n rstr += topright\n break\n elif (row == self.height - 1):\n # bottom row (column 1)\n if bhoriz == u'':\n if botright != u'':\n # prepare for bot-right, (horiz skipped)\n rstr += self.pos(self.width -1, row)\n else:\n # horizontal line\n rstr += bhoriz\n # top-right,\n rstr += botright\n break\n rstr += self.colors.get('border', u'')\n return rstr",
"def create_neighbors(self):\n for row in self._currentGrid:\n for cell in row:\n row = cell.get_row()\n column = cell.get_column()\n if row == 0:\n # 1. upper left corner (3 neighbors)\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # 2. rest of the top row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # upper right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n # middle row\n elif row < (self._rows - 1):\n #print('middle')\n # 1. middle left edge (5 neighbors)\n if column == 0:\n #print('middle left edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n # 2. rest of the middle row (8 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n # 3. 
middle right edge (5 neighbors)\n else:\n #print('middle right edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n # bottom row\n else:\n #print('lower')\n # 1. bottom left corner (3 neighbors)\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # 2. rest of the bottom row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # bottom right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[row - 1][0])\n cell.add_neighbor(self._currentGrid[row][0])",
"def add_corners(df):\n col_corners, col_ra_min, col_ra_max, col_dec_min, col_dec_max = [], [], [], [], []\n for idx, row in tqdm(df.iterrows(), desc=\"Adding corner coordinates\", total=len(df)):\n corners = mask_corners(row)\n # String serialization\n str_repr = \";\".join([\"{:.6f},{:.6f}\".format(corners[0][idx], corners[1][idx])\n for idx in range(len(corners[0]))])\n col_corners.append(str_repr)\n # Bounding box in equatorial coordinates\n ra_min, ra_max = corners[0].min(), corners[0].max()\n dec_min, dec_max = corners[1].min(), corners[1].max()\n col_ra_min.append(\"{:.6f}\".format(ra_min))\n col_ra_max.append(\"{:.6f}\".format(ra_max))\n col_dec_min.append(\"{:.6f}\".format(dec_min))\n col_dec_max.append(\"{:.6f}\".format(dec_max))\n df['corners'] = col_corners\n df['ra_min'] = col_ra_min\n df['ra_max'] = col_ra_max\n df['dec_min'] = col_dec_min\n df['dec_max'] = col_dec_max\n return df",
"def iter_cells_less_than(self, row_zb: int, col_zb: int) \\\n -> Generator[Tuple[int, int], None, None]:\n # Cell above?\n if (row_zb > 0 and\n self.inequalities_down[row_zb - 1][col_zb] == TOP_LT_BOTTOM):\n other = row_zb - 1, col_zb\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell below?\n if (row_zb < self.n - 1 and\n self.inequalities_down[row_zb][col_zb] == BOTTOM_LT_TOP):\n other = row_zb + 1, col_zb\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell left?\n if (col_zb > 0 and\n self.inequalities_right[row_zb][col_zb - 1] == LEFT_LT_RIGHT):\n other = row_zb, col_zb - 1\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell right?\n if (col_zb < self.n - 1 and\n self.inequalities_right[row_zb][col_zb] == LEFT_GT_RIGHT):\n other = row_zb, col_zb + 1\n yield other\n yield from self.iter_cells_less_than(*other)",
"def _cellborders(self, ix, iy, nx, ny, kwargs):\r\n\r\n ret = kwargs.copy()\r\n\r\n def corners(ret):\r\n \"Handle corners of table\"\r\n if ix == 0 and iy == 0:\r\n ret[\"corner_top_left\"] = self.corner_top_left\r\n if ix == nx and iy == 0:\r\n ret[\"corner_top_right\"] = self.corner_top_right\r\n if ix == 0 and iy == ny:\r\n ret[\"corner_bottom_left\"] = self.corner_bottom_left\r\n if ix == nx and iy == ny:\r\n ret[\"corner_bottom_right\"] = self.corner_bottom_right\r\n return ret\r\n\r\n def left_edge(ret):\r\n \"add vertical border along left table edge\"\r\n if ix == 0:\r\n ret[\"border_left\"] = bwidth\r\n return ret\r\n\r\n def top_edge(ret):\r\n \"add border along top table edge\"\r\n if iy == 0:\r\n ret[\"border_top\"] = bwidth\r\n return ret\r\n\r\n def right_edge(ret):\r\n \"add vertical border along right table edge\"\r\n if ix == nx:# and 0 < iy < ny:\r\n ret[\"border_right\"] = bwidth\r\n return ret\r\n\r\n def bottom_edge(ret):\r\n \"add border along bottom table edge\"\r\n if iy == ny:\r\n ret[\"border_bottom\"] = bwidth\r\n return ret\r\n\r\n def cols(ret):\r\n \"Adding vertical borders inside the table\"\r\n if 0 <= ix < nx:\r\n ret[\"border_right\"] = bwidth\r\n return ret\r\n\r\n def rows(ret):\r\n \"Adding horizontal borders inside the table\"\r\n if 0 <= iy < ny:\r\n ret[\"border_bottom\"] = bwidth\r\n return ret\r\n\r\n def head(ret):\r\n \"Add header underline\"\r\n if iy == 0:\r\n # put different bottom line for header\r\n ret[\"border_bottom\"] = bwidth\r\n ret[\"border_bottom_char\"] = headchar\r\n return ret\r\n\r\n\r\n # handle the various border modes\r\n border = self.border\r\n header = self.header\r\n\r\n bwidth = self.border_width\r\n headchar = self.header_line_char\r\n\r\n # use the helper functions to define various\r\n # table \"styles\"\r\n\r\n if border in (\"table\", \"tablecols\",\"cells\"):\r\n ret = bottom_edge(right_edge(top_edge(left_edge(corners(ret)))))\r\n if border in (\"cols\", \"tablecols\", \"cells\"):\r\n ret = cols(right_edge(left_edge(ret)))\r\n if border in (\"incols\"):\r\n ret = cols(ret)\r\n if border in (\"rows\", \"cells\"):\r\n ret = rows(bottom_edge(top_edge(ret)))\r\n if header and not border in (\"none\", None):\r\n ret = head(ret)\r\n\r\n return ret",
"def test_custom_corners(self):\n tab = tabl.Tabl()\n tab.set_corner('*')\n string = tab.to_table([['a']])\n self.assertEqual('*-*\\n' + \\\n '|a|\\n' + \\\n '*-*\\n', string)",
"def open_value_border_cells(self) -> None:\n\n for row in range(self.rows):\n for col in range(self.columns):\n\n # boolean condition variables:\n cond_if_cell_isnt_opnd = self.cells_list[row][col].value > 0 and \\\n self.cells_list[row][col].state == False\n\n if self.create_check_range_horiz_cells_cond(col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row, col - 1)\n or\n self.create_open_value_border_cells_cond(row, col + 1)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_vert_cells_cond(row):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row - 1, col)\n or\n self.create_open_value_border_cells_cond(row + 1, col)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_cross_top_cells_cond(row, col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row - 1, col - 1)\n or\n self.create_open_value_border_cells_cond(row - 1, col + 1)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_cross_bottom_cells_cond(row, col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row + 1, col - 1)\n or\n self.create_open_value_border_cells_cond(row + 1, col + 1)\n ):\n self.open_value_border_cell(row, col)",
"def _cell_tree_traversal(self, start):\n queue = deque(\n chain(\n self.tiling.cells_in_col(start[0]), self.tiling.cells_in_row(start[1])\n )\n )\n visited = set([start])\n while queue:\n cell = queue.popleft()\n if cell not in visited:\n yield cell\n visited.add(cell)\n queue.extend(self.tiling.cells_in_row(cell[1]))\n queue.extend(self.tiling.cells_in_col(cell[0]))",
"def reportBoxes(self):\n res = ''\n for indx in self.shapesTable:\n res += 'Value {0!s} has {1!s} boxes'.format(indx, len(self.shapesTable[indx].boxes))\n return res\n \n # Concurrent version with threadpool does not seem to work - only one thread seems to run at a time, and speed is much less than sequential version",
"def StringBoxRects():\r\n pass",
"def get_cell(melb_grid, coordinates, X_coords,Y_coords):\r\n\r\n #Initialize labels for grid rows\r\n grid_rows = {1: 'A', 2: 'B', 3: 'C', 4: 'D'}\r\n\r\n list_match = []\r\n cell = \"\"\r\n\r\n # Case 1: tweet lies ALONG the boundaries on any cell;\r\n # If so, the tweet score will be added either to the left and/or the below adjacent cell\r\n if coordinates[0] in X_coords or coordinates[1] in Y_coords:\r\n for grid_box in melb_grid:\r\n if (coordinates[1] >= grid_box[3] and coordinates[1] <= grid_box[4]) \\\r\n and (coordinates[0] >= grid_box[1] and coordinates[0] <= grid_box[2]):\r\n list_match.append(grid_box[0]) #id\r\n\r\n #print(list_match)\r\n\r\n #case 1.1 - when the tweet point lies ON the intersecting points of 4 cells\r\n # select the left-below cell\r\n if(len(list_match)==4): #matches 4 grid boxes\r\n cell = sorted(list_match, reverse = False)[2]\r\n\r\n #case 1.2 - when the tweet point lies either ON intersecting points of B4,C4, C5\r\n # or ON intersecting points of C2, C3, D3 -- ASSUME tweet belongs to LEFT box\r\n elif(len(list_match)==3):\r\n cell = sorted(list_match, reverse = False)[0]\r\n\r\n #case 1.2 - when the tweet point lies ALONG the boundary connecting 2 grid cells:\r\n # select either left and/or below cell\r\n elif len(list_match) == 2:\r\n if list_match[0][1] == list_match[1][1]: #comparison between top and bottom boxes\r\n cell = max(sorted(list_match, reverse = False))\r\n elif list_match[0][0] == list_match[1][0]: #comparison between left and right boxes\r\n cell = min(sorted(list_match, reverse = False))\r\n elif len(list_match) == 1:\r\n cell = list_match[0]\r\n\r\n #Case 2: when the point doesn't lie on the grid lines but lies within each cell\r\n else:\r\n cell = (grid_rows[sum([1 if coordinates[1] < i else 0 for i in Y_coords])]\r\n + str(sum([1 if coordinates[0] > i else 0 for i in X_coords])))\r\n\r\n #for example: coordiztes[1] = -37.51\r\n #print(\"Tweet Cell \", cell)\r\n #To test, point [144.9,-37.8] should lie on C2 and not B2\r\n\r\n return cell",
"def tight_fit(tables, text_boxes):\n for i in range(len(tables)):\n boxes_contained = []\n for b in text_boxes:\n if compute_contain(b, tables[i]) > 0.5:\n boxes_contained.append(b)\n if len(boxes_contained) > 0:\n tables[i] = bounding_box(tables[i], boxes_contained)\n return tables",
"def bottom_left_fill(data, width, upperbound, debug_mode=False, buffer=0):\n\n free_area = _create_rectangle(0, 0, width, upperbound) # set available area\n total_area = _create_rectangle(0, 0, width, upperbound)\n solns = []\n\n for i in data.data:\n i_id = i[0]\n i_w = i[1] + buffer\n i_h = i[2] + buffer\n\n poly_rep = Polygon.Shapes.Rectangle(i_w, i_h) # polygon representation of this shape, floating in space\n if debug_mode: #debugging method, step through placing one rectangle at a time\n x, y, triangles = no_fill_polygon(total_area, free_area, poly_rep, debug_mode=debug_mode)\n free_area = free_area - _create_rectangle(x, y, i_w, i_h) # calculate new free area\n free_area.simplify()\n filled_area = total_area - free_area\n\n view.view_debug(solns, triangles, filled_area, width, upperbound)\n else:\n x, y = no_fill_polygon(total_area, free_area, poly_rep,\n debug_mode=debug_mode) # calculate position of polygon\n free_area = free_area - _create_rectangle(x, y, i_w, i_h) # calculate new free area\n free_area.simplify()\n filled_area = total_area - free_area\n\n solns.append((i_id, x, y, i_w - buffer, i_h - buffer)) # add soln\n\n\n return main.Solution(solns)",
"def countCornerRectangles(self, grid):\n if not grid or not len(grid[0]):\n return 0\n\n if len(grid) == 1 or len(grid[0]) == 1:\n return 0\n\n r, c = len(grid), len(grid[0])\n\n col_dict = collections.defaultdict(set)\n for j in range(c):\n for i in range(r):\n if grid[i][j] == 1:\n col_dict[j].add(i)\n ans = 0\n cols = list(col_dict.keys())\n for c1 in range(len(cols)):\n for c2 in range(0, c1):\n s1, s2 = col_dict[cols[c1]], col_dict[cols[c2]]\n ans += self.combination(len(s1.intersection(s2)), 2)\n\n return ans",
"def _borders(self):\r\n nx, ny = self.ncols-1, self.nrows-1\r\n options = self.options\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n cell.reformat(**self._cellborders(ix,iy,nx,ny,options))",
"def check_board(self, row, col, dirn):\n\n parts = {}\n if dirn == \"H\":\n left = self.anchor_strings[(row, col)][0]\n if left:\n letts = [(key, self.anchor_strings[key][1]) for key in self.anchor_strings\n if (self.anchor_strings[key][1] and key[0] == row\n and col <= key[1] < col + 9)]\n letts = sorted(letts, key=lambda x: x[0][1])\n if letts:\n parts = {max(t[0][1] + 1 - col, 0):\n chars[1] for t in letts for chars in enumerate(t[1])}\n else:\n left = self.anchor_strings[(row, col)][1]\n else:\n left = self.anchor_strings[(row, col)][2]\n if left:\n letts = [(key, self.anchor_strings[key][3]) for key in self.anchor_strings\n if (self.anchor_strings[key][3] and key[1] == col\n and row <= key[0] < row + 9)]\n letts = sorted(letts, key=lambda x: x[0][0])\n if letts:\n parts = {max(t[0][0] + 1 - row, 0):\n chars[1] for t in letts for chars in enumerate(t[1])}\n else:\n left = self.anchor_strings[(row, col)][3]\n\n if parts:\n words = self.get_list(left, self.graph.contains_lett_patt, [self.rack, parts])\n else:\n words = self.get_list(left, self.graph.contains_lett, [self.rack])\n words = [t for t in words if t[1] != left]\n return words",
"def fixMasks(image, table_mask, column_mask):\r\n table_mask = table_mask.reshape(1024,1024).astype(np.uint8)\r\n column_mask = column_mask.reshape(1024,1024).astype(np.uint8)\r\n \r\n #get contours of the mask to get number of tables\r\n contours, table_heirarchy = cv2.findContours(table_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n \r\n table_contours = []\r\n #ref: https://www.pyimagesearch.com/2015/02/09/removing-contours-image-using-python-opencv/\r\n #remove bad contours\r\n\r\n #print(contours)\r\n\r\n for c in contours:\r\n # if the contour is bad, draw it on the mask\r\n\r\n\r\n #if not is_contour_bad(c):\r\n if cv2.contourArea(c) > 2000:\r\n table_contours.append(c)\r\n \r\n if len(table_contours) == 0:\r\n return None\r\n\r\n #ref : https://docs.opencv.org/4.5.2/da/d0c/tutorial_bounding_rects_circles.html\r\n #get bounding box for the contour\r\n \r\n table_boundRect = [None]*len(table_contours)\r\n for i, c in enumerate(table_contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n table_boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #table bounding Box\r\n table_boundRect.sort()\r\n \r\n col_boundRects = []\r\n for x,y,w,h in table_boundRect:\r\n \r\n col_mask_crop = column_mask[y:y+h,x:x+w]\r\n \r\n #get contours of the mask to get number of tables\r\n contours, col_heirarchy = cv2.findContours(col_mask_crop, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n #get bounding box for the contour\r\n boundRect = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #adjusting columns as per table coordinates\r\n boundRect[i] = (boundRect[i][0] + x ,\r\n boundRect[i][1] + y ,\r\n boundRect[i][2],\r\n boundRect[i][3])\r\n \r\n col_boundRects.append(boundRect)\r\n \r\n image = image[...,0].reshape(1024, 1024).astype(np.uint8)\r\n \r\n #draw bounding boxes\r\n color = (0,255,0)\r\n thickness = 4\r\n \r\n for x,y,w,h in table_boundRect:\r\n image = cv2.rectangle(image, (x,y),(x+w,y+h), color, thickness)\r\n \r\n return image, table_boundRect, col_boundRects",
"def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
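
The corners-queue routine above follows the grid-table parsing approach used in docutils. Assuming that provenance (an assumption; the record itself does not name a library), a minimal usage sketch might look like the following, with the table text and printed fields purely illustrative:

# Hedged sketch: driving a grid-table parser of the kind shown above.
# Assumes the docutils entry point (GridTableParser.parse over a
# StringList); if the code originates elsewhere, the API will differ.
from docutils.statemachine import StringList
from docutils.parsers.rst.tableparser import GridTableParser

text = """\
+------+------+
| head | head |
+======+======+
| body | cell |
+------+------+"""

block = StringList(text.splitlines())
colwidths, head_rows, body_rows = GridTableParser().parse(block)
print(colwidths)                       # per-column widths from the border
print(len(head_rows), len(body_rows))  # 1 head row, 1 body row
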
First determine the column boundaries from the top border, then process rows. Each row may consist of multiple lines; accumulate lines until a row is complete. Call `self.parse_row` to finish the job. | def parse_table(self):
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1 | [
"def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\"\"\"\n\t\t\tif colsIndex == None:\n\t\t\t\tcolsIndex = [(0,len(line.split(\"_\",1)[0])-1)] # give the width of the first column of the lines\n\t\t\t\tif line[0] != \" \" : \n\t\t\t\t\traise ValueError(\"The first line should start with white spaces.\")\n\t\t\t\tfor char, nb in ((label, sum(1 for _ in group)) for label, group in gb(line)):\n\t\t\t\t\tif not char in \" _\":\n\t\t\t\t\t\traise ValueError(\"The first line should only contain white spaces and underscores.\")\n\t\t\t\t\tif char == \" \" and nb > 1 and len(colsIndex) > 1:\n\t\t\t\t\t\traise ValueError(\"The column separator between col \"+str(len(colsIndex)-1)+\" and col \"+str(len(colsIndex))+\" is too wide.\")\n\t\t\t\t\tif char == \"_\":\n\t\t\t\t\t\tcolsIndex.append(((colsIndex[-1][1]+1), (nb+colsIndex[-1][1]+1)))\n\t\t\t\tself.__l = len(colsIndex)-1\n\t\t\t\tself.__values[\"v\"] = [-1]*self.__l\n\t\t\t\tcontinue\n\n\t\t\t\"\"\"Prepare the parsing of other lines\"\"\"\n\t\t\t\"\"\"try:\n\t\t\t\tsplitted_line = [line[x:y] for x,y in colsIndex]\n\t\t\texcept Exception as e:\n\t\t\t\traise e\"\"\"\n\n\t\t\t\"\"\"Parse the last line\"\"\"\n\t\t\tif line[colsIndex[0][1]] != \"|\": \n\t\t\t\tself.__values[\"v\"] = [self.__strToVal(line[x:y],len(self.__barrier[\"v\"])) for x,y in colsIndex[1:]]\n\n\t\t\t\t\"\"\"Parse all the other lines\"\"\"\n\t\t\telse : \n\t\t\t\tbarrier = {\"v\":[], \"h\":[]}\n\t\t\t\tself.__values[\"h\"].append(self.__strToVal(line[0:colsIndex[0][1]], len(colsIndex)-1))\n\t\t\t\tfor x,y in colsIndex[1:] :\n\t\t\t\t\ts = line[x:y]\n\t\t\t\t\tif not (s[0] in \" _\") or len(list(gb(s))) > 1 :\n\t\t\t\t\t\traise ValueError(\"La grille a une erreur ligne \"+str(len(self.__values[\"h\"])))\n\n\t\t\t\t\tif s[0] == '_':\n\t\t\t\t\t\tbarrier[\"h\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"h\"].append(False)\n\n\t\t\t\t\tif line[y] == '|':\n\t\t\t\t\t\tbarrier[\"v\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"v\"].append(False)\n\n\t\t\t\tself.__barrier[\"h\"].append(barrier[\"h\"])\n\t\t\t\tbarrier[\"v\"].pop()\n\t\t\t\tself.__barrier[\"v\"].append(barrier[\"v\"])\n\n\t\tself.__barrier[\"h\"].pop()\n\t\tself.__h = len(self.__barrier[\"v\"])",
"def _generate_rows(self):\n margin_str = ' ' * self.column_margin\n # Loop over each data row\n for n, data_row in enumerate(self.data):\n if self.use_row_separators and n > 0:\n # Add row separator before every row except the first\n self._text_lines.append(self._row_separator)\n # Create a list where each element is a cell, represented by\n # a list of lines with its contents\n cells = [\n col.get_cell(data_row[i]) for i, col in enumerate(self.columns)\n if i < len(data_row)\n ]\n # The size of the tallest cell\n max_lines = max(len(cell) for cell in cells) if cells else 1\n # Loop over the columns to do vertical alignment\n for i, col in enumerate(self.columns):\n # Calculate how many lines are \"missing\" from each cell\n # with respect to the tallest\n delta = max_lines - (len(cells[i]) if i < len(cells) else 0)\n if delta > 0:\n if col.v_alignment == Alignment.MIDDLE:\n # Insert half as many missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * (delta // 2)\n elif col.v_alignment == Alignment.BOTTOM:\n # Insert all missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * delta\n for m in range(max_lines):\n row = '│'\n for i, col in enumerate(self.columns):\n row += margin_str\n if i >= len(cells) or m >= len(cells[i]):\n row += col.get_empty_cell()\n else:\n row += cells[i][m]\n row += margin_str + '│'\n self._text_lines.append(row)\n self._text_lines.append(self._bottom)",
"def _process_Raw_Lap_Rows(self):\n\n # For each racer we are going to add a list to to lapRowsTime and lapRowsPosition\n # lapRowsTime[0] will contain Racer #1's lap data (may be an empty string).\n\n # Example of self._columnHeaders.split()\n # ['___1___', '___2___', '___3___', '___4___', '___5___',\n # '___6___', '___7___', '___8___', '___9___', '___10__']\n split_columns = self._columnHeaders.split()\n\n max_racers = len(split_columns)\n for index in range(len(split_columns)):\n self.lapRowsTime.append([])\n self.lapRowsPosition.append([])\n\n '''\n For each _lapRowsRaw we are going to parse using a FIXED width, which we\n calculate using the _columnHeaders.\n NOTE - Split is not going to work because it trims: racerSplit = row.split()\n (the empty spaces have meaning).\n\n Need to parse the row\n Example:\n \"3/3.804 1/2.936 7/6.013 2/3.487 4/4.118 6/5.817 10/7.72 5/4.512 8/6.310 9/6.941\"\n Another Example:\n \" 1/20.27 3/20.87 2/19.54 \"\n\n Print line debugging\n Row:' 1/23.70 3/23.00 2/21.27 '\n raceIndex:2 lapWidth:6\n i: 17 '3/23.00'\n lapRowsTime:['27.50', '20.19', '21.93', '24.01', '20.81',\n '19.15', '21.15', '21.07', '21.00', '22.12', '20.87', '20.39']\n lapRowsPos:['3', '3', '2', '3', '3', '3', '3', '2', '2', '2', '3', '3']\n\n print\n print \"Row:'\" + row + \"'\"\n print \"racer_index:\" + str(racer_index) + \" lap_width:\" + str(lap_width)\n print \"index:\", index, \"'\" + row[index:index+lap_width + 1] + \"'\"\n print \"lapRowsTime:\" + str(self.lapRowsTime[racer_index])\n print \"lapRowsPos:\" + str(self.lapRowsPosition[racer_index])\n '''\n # WARNING Special Code - we use the columnHeaders to identify the fixed with\n # that the columns are using.\n # Explanation - Ignoring the first character [1:], find the next empty space.\n lap_width = self._columnHeaders[1:].find(' ') - 1\n\n # Walk through each line of the raw lap times and extract racers position and time.\n for row in self._lapRowsRaw:\n\n index = 1\n racer_index = 0\n while index < len(row):\n if (racer_index >= max_racers):\n raise Exception(\"Error in the _lapRowsRaw resulting in\" +\n \" incorrect parsing (laps for more racers than expected\")\n pos, lap = self._parse_Lap(row[index:index + lap_width + 1])\n\n self.lapRowsPosition[racer_index].append(pos)\n self.lapRowsTime[racer_index].append(lap)\n\n index += lap_width + 2 # +2 to skip the ' ' space.\n racer_index += 1\n\n '''\n Example - note that the white spaces only extend as far right as the last race on the line\n ___1___ ___2___ ___3___ ___4___ ___5___ ___6___ ___7___ ___8___ ___9___ ___10__\n 5/35.95 1/26.24 4/30.95 2/27.01 3/29.63\n 1/17.48 4/18.05 2/17.83 3/17.74\n 1/17.14 2/17.25 3/19.69\n 1/17.61 2/17.11\n 1/20.71\n ------- ------- ------- ------- ------- ------- ------- ------- ------- -------\n '''\n # Create empty records for the rest of the racers in this row\n # even if it ended early.\n while racer_index < max_racers:\n self.lapRowsPosition[racer_index].append('')\n self.lapRowsTime[racer_index].append('')\n racer_index += 1",
"def parse_lines(self, lines):\n row_tree = None\n for line in lines:\n if not line:\n continue\n m = self.row_regex.search(line)\n if m is None:\n continue\n groups = list(m.groups())\n name_raw = str(groups.pop(0))\n name = name_raw.strip()\n name = re.sub('\\s+', ' ', name)\n data = [float(v.strip()) for v in groups]\n row = RowData(\n name, **{n: d for n, d in zip(self.data_col_names, data)})\n self.rows.append(row)\n\n ind = self.subcategory_indention\n depth = int((len(name_raw)-len(name_raw.lstrip(ind)))/len(ind))\n\n if row_tree is None:\n row_tree = [row]\n elif len(row_tree) < depth:\n raise ValueError(\n 'A hirarchical level was skipped! Found element of '\n 'depth {}. However parent element is of depth '\n '{}.'.format(depth, len(row_tree)-1))\n elif len(row_tree) >= depth:\n row_tree = row_tree[:depth]\n try:\n parent_row = row_tree[-1]\n RowData.set_child_parent_relation(row, parent_row)\n except IndexError:\n pass\n row_tree += [row]",
"def _fill_next_rows(self, rows: _Rows, line: int) -> _Rows:\n unmerged_rows = {}\n\n for column, cell in enumerate(rows[line]):\n if isinstance(cell, TableCell) and cell.rowspan > 1:\n nb_lines = cell.rowspan - 1\n lines = [cell]\n if \"\\n\" in cell:\n lines = cell.replace(\"\\n\", \"<fg=default;bg=default>\\n</>\").split(\n \"\\n\"\n )\n if len(lines) > nb_lines:\n nb_lines = cell.count(\"\\n\")\n\n rows[line][column] = TableCell(\n lines[0], colspan=cell.colspan, style=cell.style\n )\n\n # Create a two dimensional dict (rowspan x colspan)\n placeholder = dict(\n [(k, {}) for k in range(line + 1, line + 1 + nb_lines)]\n )\n for k, v in unmerged_rows.items():\n if k in placeholder:\n for l, m in unmerged_rows[k].items():\n if l in placeholder[k]:\n placeholder[k][l].update(m)\n else:\n placeholder[k][l] = m\n else:\n placeholder[k] = v\n\n unmerged_rows = placeholder\n\n for unmerged_row_key, _ in unmerged_rows.items():\n value = \"\"\n if unmerged_row_key - line < len(lines):\n value = lines[unmerged_row_key - line]\n\n unmerged_rows[unmerged_row_key][column] = TableCell(\n value, colspan=cell.colspan, style=cell.style\n )\n if nb_lines == unmerged_row_key - line:\n break\n\n for unmerged_row_key, unmerged_row in unmerged_rows.items():\n # we need to know if unmerged_row will be merged or inserted into rows\n if (\n unmerged_row_key < len(rows)\n and isinstance(rows[unmerged_row_key], list)\n and (\n (\n self._get_number_of_columns(rows[unmerged_row_key])\n + self._get_number_of_columns(\n list(unmerged_rows[unmerged_row_key].values())\n )\n )\n <= self._number_of_columns\n )\n ):\n # insert cell into row at cell_key position\n for cell_key, cell in unmerged_row.items():\n rows[unmerged_row_key].insert(cell_key, cell)\n else:\n row = self._copy_row(rows, unmerged_row_key - 1)\n for column, cell in unmerged_row.items():\n if len(cell):\n row[column] = unmerged_row[column]\n\n rows.insert(unmerged_row_key, row)\n\n return rows",
"def _initial_Processing_Raw_Lines(self):\n pacedata_included = None # If there is pace data, this will be used as a counter.\n\n lapData = False\n for index in range(len(self._singleRaceLines)):\n\n line = self._singleRaceLines[index]\n\n # Look for the column data\n if not lapData and (line.find('__10') != -1):\n # ___1___ ___2___ ___3___ ___4___ ___5___\n if not line.find('__1__'):\n raise Exception(\"The column header data spilled into a new line\")\n self._columnHeaders = line.strip('\\r\\n')\n lapData = True\n\n # Check to see if pace data is mixed in - this is a strong indicator.\n index = self._singleRaceLines.index(line)\n\n pacedata_included = self._check_for_pace_data(index)\n\n # Get the laps in row format\n elif lapData:\n # If we are the end of the lap data\n if (line.find('-----') != -1):\n # Example: ' ------- ------- ------- ------- '\n index += 2 # WARNING - This is for additional laps logic below.\n break\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # Special code for dealing with pace data.\n if (pacedata_included is None): # Common case (no pace data)\n # Warning - we dont want to blanket strip this (white space matters)\n self._lapRowsRaw.append(line.strip('\\r\\n'))\n else: # Special case (pace data mixed in).\n if (pacedata_included % 3 == 0):\n self._lapRowsRaw.append(line.strip('\\r\\n'))\n pacedata_included += 1\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n # Get race header data.\n if not lapData:\n # 3/17.20 2/20.37 10/18.1 1/20.19\n self._raceHeaderData_RAW.append(line)\n\n # ===================================================\n # Check to see if there additional racer data - (MORE THAN 10 RACERS)\n # ===================================================\n\n # Starting at the index, lets look for another column row.\n found_additional_laps = False\n additional_lap_index = 0\n\n for trail_index in range(index, len(self._singleRaceLines)):\n line = self._singleRaceLines[trail_index].strip('\\r\\n')\n\n if ((not found_additional_laps) and (line.find('__11__') != -1)):\n found_additional_laps = True\n self._columnHeaders += line\n if (pacedata_included is not None):\n pacedata_included = 0\n\n elif found_additional_laps:\n if (line.find('-----') != -1):\n # Indicates there is no more data\n # Example: ' ------- ------- ------- ------- '\n break\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # Special code for dealing with pace data.\n if (pacedata_included is None): # Common case (no pace data)\n self._lapRowsRaw[additional_lap_index] += line\n additional_lap_index += 1\n else: # Special case (pace data mixed in)\n if (pacedata_included % 3 == 0):\n self._lapRowsRaw[additional_lap_index] += line\n additional_lap_index += 1\n pacedata_included += 1\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
"def _readRowData(self, unit_data, file_line):\r\n end_line = int(unit_data[file_line].strip())\r\n file_line += 1\r\n try:\r\n # Load the geometry data\r\n for i in range(file_line, end_line + file_line):\r\n chain = unit_data[i][0:10].strip()\r\n elev = unit_data[i][10:20].strip()\r\n rough = unit_data[i][20:30].strip()\r\n\r\n self.row_data['main'].addRow(\r\n {rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough},\r\n # We don't need to make backup copies here. If it fails the\r\n # load fails anyway and this will just really slow us down\r\n no_copy=True\r\n )\r\n\r\n except NotImplementedError:\r\n logger.ERROR('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')\r\n raise\r\n\r\n return end_line + file_line",
"def _process_row(self, row):\n # Must be overridden.",
"def sort_into_lines(result):\n # the sorted data will be grouped into each line\n lines_of_table = {}\n wait_list = []\n column_wait_list = []\n current_bottom = 0\n for cell in result:\n if cell[0] == 1: # if this is a row title\n cells_in_line = [cell]\n current_bottom = cell[4]\n current_top = cell[2]\n no_row_title = [[-1, -1, -1, -1, -1, -1, \"-1\"]]\n no_row_bottom = 0\n for c in wait_list: # handling wait_list\n if c[4] - current_top < 5:\n if c[0] == 3:\n no_row_bottom = no_row_bottom + c[4]\n no_row_title.append(c)\n else:\n column_wait_list.append(c)\n else:\n cells_in_line.append(c)\n if len(column_wait_list) > 0:\n top = column_wait_list[0][2]\n column_title = [column_wait_list[0]]\n lines_of_table[top] = column_title\n for col in column_wait_list[1:]:\n if abs(top - col[2]) < 0.6 * (col[4] - col[2]):\n lines_of_table[top].append(col)\n else:\n top = col[2]\n column_title = [col]\n lines_of_table[top] = column_title\n if no_row_title.__len__() > 1:\n lines_of_table[no_row_bottom / no_row_title.__len__()] = no_row_title\n lines_of_table[current_bottom] = cells_in_line\n wait_list = []\n else: # have to decide which row it belongs to\n if current_bottom == 0: # if no row has been detected, then go to wait list\n wait_list.append(cell)\n else: # if there is one active row, check whether belongs to it or not\n if abs(current_bottom - cell[4]) < 0.6 * (cell[4] - cell[2]):\n lines_of_table[current_bottom].append(cell)\n else:\n wait_list.append(cell)\n return lines_of_table",
"def _row_iter(self, upper_row):\n row = [x-1 for x in upper_row[1:]]\n row_len = len(row)\n pos = 0\n while pos >= 0:\n if pos == row_len:\n yield row[:]\n pos -= 1\n continue\n # If it would create an invalid entry, backstep\n if ( pos > 0 and (row[pos] >= row[pos-1] \\\n or (self._strict and row[pos] == row[pos-1]-1)) ) \\\n or row[pos] >= upper_row[pos] \\\n or (self._k is not None and row[pos] >= self._k):\n row[pos] = upper_row[pos+1] - 1\n pos -= 1\n continue\n row[pos] += 1\n pos += 1",
"def _readRowData(self, unit_data, file_line):\r\n end_line = int(unit_data[file_line].strip())\r\n file_line += 1\r\n try:\r\n # Load the geometry data\r\n for i in range(file_line, end_line + file_line):\r\n chain = unit_data[i][0:10].strip()\r\n elev = unit_data[i][10:20].strip()\r\n rough = unit_data[i][20:30].strip()\r\n panel = unit_data[i][30:35].strip()\r\n rpl = unit_data[i][35:40].strip()\r\n bank = unit_data[i][40:50].strip()\r\n east = unit_data[i][50:60].strip()\r\n north = unit_data[i][60:70].strip()\r\n deact = unit_data[i][70:80].strip()\r\n special = unit_data[i][80:90].strip()\r\n\r\n if east == '':\r\n east = None\r\n if north == '':\r\n north = None\r\n if rpl == '':\r\n rpl = 1.000\r\n\r\n self.row_data['main'].addRow(\r\n {rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough,\r\n rdt.RPL: rpl, rdt.PANEL_MARKER: panel, rdt.BANKMARKER: bank,\r\n rdt.EASTING: east, rdt.NORTHING: north,\r\n rdt.DEACTIVATION: deact, rdt.SPECIAL: special\r\n },\r\n # We don't need to make backup copies here. If it fails the\r\n # load fails anyway and this will just really slow us down\r\n no_copy=True\r\n )\r\n\r\n except NotImplementedError:\r\n logger.ERROR('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')\r\n raise\r\n\r\n return end_line + file_line",
"def process_rows(self, row_fn, init_fn=None, final_fn=None):\n self._impl.process_rows(row_fn, init_fn, final_fn)",
"def break_rows(rows):\n rows_to_break = []\n for i in rows:\n try:\n for j in range(NUM_COLUMNS):\n if not BOARD[i][j].full:\n raise MyException\n except MyException:\n continue\n insert_sorted(i, rows_to_break)\n if rows_to_break:\n num_rows_to_break = len(rows_to_break)\n rows_to_break.append(0)\n blit_rect = BOARD_RECT.inflate(- BORDER_DISTANCE * 2, - BORDER_DISTANCE * 2)\n blit_rect.move_ip(0, rows_to_break[0] * ROW_STEP)\n for k in range(num_rows_to_break):\n dist = rows_to_break[k] - rows_to_break[k + 1]\n for i in reversed(range(rows_to_break[k + 1] + 2 + k, rows_to_break[k] + 1 + k)):\n for j in range(NUM_COLUMNS):\n BOARD[i][j].full = BOARD[i - k - 1][j].full\n blit_rect.move_ip(0, - dist * ROW_STEP)\n blit_rect.height = dist * ROW_STEP - BORDER_DISTANCE * 2\n SCREEN.blit(SCREEN, blit_rect.move(0, (k + 1) * ROW_STEP), blit_rect)\n for i in range(num_rows_to_break + 1):\n for j in range(NUM_COLUMNS):\n BOARD[i][j].full = False\n blit_rect.height = num_rows_to_break * ROW_STEP\n blit_rect.top = SQUARE_OFFSET[1]\n SCREEN.blit(BACKGROUND, blit_rect, blit_rect)\n pygame.display.update(BOARD_RECT)",
"def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n # combining characters do not contribute to the column width\r\n lines = [strip_combining_chars(line) for line in lines]\r\n\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin '\r\n 'in table line %s.' % (first_line+offset+1),\r\n offset=first_line+offset)\r\n offset += 1\r\n columns.pop()",
"def split_columns(row):\n cells = split_rows(row,vertical=False)\n merged_cells = []\n for C in cells:\n\n C.sort(key=lambda x:(top(x),left(x)))\n for i in range(len(C)-1):\n merge_text_fields(C[-2-i],C[-1])\n\n if verbose and len(C)>1:\n print(\"\\n------ Unexpected merge: perhaps a bad value for vskip? ------\")\n print(C[-1]['text'])\n\n merged_cells.append(C[-1])\n\n return merged_cells",
"def validate_lines(grid, expected_height, expected_width):\n # String of exceptions that will be built as/if they occur.\n reports = \"\"\n valid_chars = (\"X\", \".\")\n try: \n # List of offenses and specific locations.\n bad_chars = []\n for row in range(len(grid)):\n # Check last character of each line is a \"\\n\"\n if grid[row][-1] != \"\\n\":\n bad_chars.append(\"Line %s does not end with \\n\" % str(row + 1))\n for char in range(len(grid[row]) - 1):\n # Check all other characters are valid.\n if grid[row][char] not in valid_chars:\n bad_chars.append(grid[row][char]) \n # True if bad_chars isn't empty. \n if bad_chars:\n raise BadCharacter(bad_chars)\n except BadCharacter as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n # List of offenses and specific locations.\n bad_lines = []\n for row in range(len(grid)):\n # Ignore last element as should be \"\\n\". Checked previously.\n actual_width = len(grid[row]) - 1 \n if actual_width < expected_width or actual_width > expected_width: \n bad_lines.append((actual_width, expected_width, row + 1))\n # True if bad_lines isn't empty.\n if bad_lines:\n raise BadLineLength(tuple(bad_lines)) \n except BadLineLength as error:\n reports += str(error)\n \n # Store actual height \n actual_height = len(grid)\n \n try:\n if actual_height > expected_height:\n raise TooManyLines(actual_height, expected_height)\n except TooManyLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n if actual_height < expected_height:\n raise TooFewLines(actual_height, expected_height) \n except TooFewLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n # True if reports isn't empty. \n if reports:\n print \"File format is invalid. Errors found:\\n\"\n print reports\n else:\n print \"File format okay\\n\"",
"def _process_Raw_Header_Rows(self):\n\n #\n # Step 1 - is to get the general race information.\n #\n if len(self._raceHeaderData_RAW) < 5:\n raise Exception(\"The header for this race is malformed:%s\" % self._raceHeaderData_RAW)\n self.date = self._parse_Header_Date(self._raceHeaderData_RAW[0])\n\n self.trackName = self._raceHeaderData_RAW[2].strip()\n\n race_class_raw, self.roundNumber, self.raceNumber = \\\n self._parse_Class_And_Race_Data(self._raceHeaderData_RAW[4])\n\n # Extract the main event and main event round info from the class data.\n # Example: race classes often contain information like \"Mod Buggy A-main\"\n self.raceClass, self.mainEvent, self.mainEventRoundNum, self.mainEventParsed = \\\n self._parse_Class_Main_Event_Info(race_class_raw)\n\n #\n # Step 2 - is to process the general race results for each racer.\n #\n individualResult = self._raceHeaderData_RAW[7:-1]\n finalRacePosition = 0\n\n '''\n We tackle this part in several distinct peices.\n\n 1. Starting with the line:\n 'Fname RacerLastName\\t\\t\\t#9 \\t\\t26\\t\\t 8:07.943\\t\\t 17.063\\t\\t 6.008\\n'\n\n 2. We break up the line based on the '#'\n 'Fname RacerLastName' and '#9 \\t\\t26\\t\\t 8:07.943\\t\\t 17.063\\t\\t 6.008\\n'\n\n 3. Then we perform a split on the rest of the data\n ['#9', '26', '8:07.943', '17.063', '6.008']\n\n We must do additional checking because the final three columns are not\n guaranteed to be there.\n '''\n for line in individualResult:\n carnum_start_index = line.rfind(\"#\")\n finalRacePosition += 1\n driver = line[:carnum_start_index].strip()\n\n # Cut off the racer names to simplify things.\n racedata = line[carnum_start_index:]\n lineList = racedata.split()\n\n carRaw = lineList[0]\n if (carRaw[0] != '#'):\n raise Exception(\"Incorrect format for header data, execting a '#' in the car number, line: \" + line)\n car = int(carRaw[1:])\n\n laps = int(lineList[1])\n\n # WARNING - The following fields may not be present.\n racetime = lineList[2]\n if (line.find(':') <= 0): # Checking to see if the racer even has a race time.\n racetime = ''\n\n fastlap = ''\n behind = ''\n if (len(lineList) >= 4):\n fastlap = lineList[3]\n if len(lineList) == 5:\n behind = lineList[4]\n\n self.raceHeaderData.append({\"Driver\": driver,\n \"Car#\": car,\n \"Laps\": laps,\n \"RaceTime\": racetime,\n \"Fast Lap\": fastlap,\n \"Behind\": behind,\n \"Final Position\": finalRacePosition})",
"def test_parse_row(self):\n rows = produce_rows_lst()\n parsed_rows = [AS_Rank_Website_Parser()._parse_row(row) for row in rows]\n\n # The row should only have 5 elements that represents the 5 columns#\n for parsed_row in parsed_rows:\n assert len(parsed_row) == 5\n\n # Each element within the row should be a string\n for elem in parsed_row:\n assert isinstance(elem, str)\n\n # The fourth element (country) should only have 2 letters\n assert len(parsed_row[3]) == 2\n\n # Verify that the elements that should be numbers are numbers\n assert parsed_row[0].isdigit()\n assert parsed_row[1].isdigit()\n assert parsed_row[4].isdigit()",
"def _generate_lines(self):\r\n for iy in range(self.nrows):\r\n cell_row = [col[iy] for col in self.worktable]\r\n # this produces a list of lists, each of equal length\r\n cell_data = [cell.get() for cell in cell_row]\r\n cell_height = min(len(lines) for lines in cell_data)\r\n for iline in range(cell_height):\r\n yield ANSIString(\"\").join(_to_ansi(celldata[iline] for celldata in cell_data))",
"def get_lines_in_reading_order(trp_lines: Iterable[trp.Line]) -> List[trp.Line]:\n columns = []\n lines = []\n\n for item in trp_lines:\n column_found = False\n bbox_left = item.geometry.boundingBox.left\n bbox_right = item.geometry.boundingBox.left + item.geometry.boundingBox.width\n bbox_centre = (bbox_left + bbox_right) / 2\n for index, column in enumerate(columns):\n column_centre = (column[\"left\"] + column[\"right\"]) / 2\n if (bbox_centre > column[\"left\"] and bbox_centre < column[\"right\"]) or (\n column_centre > bbox_left and column_centre < bbox_right\n ):\n # BBox appears inside the column\n lines.append([index, item])\n column_found = True\n break\n if not column_found:\n columns.append(\n {\n \"left\": item.geometry.boundingBox.left,\n \"right\": item.geometry.boundingBox.left + item.geometry.boundingBox.width,\n }\n )\n lines.append([len(columns) - 1, item])\n lines.sort(key=lambda x: x[0])\n return list(map(lambda x: x[1], lines))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
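
The simple-table parse_table above (and the parse_row helper in the following record) likewise corresponds to docutils' SimpleTableParser. Under the same assumption, an equivalent sketch for the borderless column syntax:

# Hedged sketch: the same exercise with a simple (non-grid) table.
# Assumes docutils' SimpleTableParser; adjust if the source differs.
from docutils.statemachine import StringList
from docutils.parsers.rst.tableparser import SimpleTableParser

text = """\
=====  =====
col 1  col 2
=====  =====
1      Second column of row one.
2      Second column of row two.
=====  ====="""

block = StringList(text.splitlines())
colwidths, head_rows, body_rows = SimpleTableParser().parse(block)
print(colwidths)       # last column widens to fit overflowing text
print(len(body_rows))  # two body rows
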
Given the text `lines` of a row, parse it and append to `self.table`. The row is parsed according to the current column spec (either `spanline` if provided or `self.columns`). For each column, extract text from each line, and check for text in column margins. Finally, adjust for insignificant whitespace. | def parse_row(self, lines, start, spanline=None):
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row) | [
"def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\"\"\"\n\t\t\tif colsIndex == None:\n\t\t\t\tcolsIndex = [(0,len(line.split(\"_\",1)[0])-1)] # give the width of the first column of the lines\n\t\t\t\tif line[0] != \" \" : \n\t\t\t\t\traise ValueError(\"The first line should start with white spaces.\")\n\t\t\t\tfor char, nb in ((label, sum(1 for _ in group)) for label, group in gb(line)):\n\t\t\t\t\tif not char in \" _\":\n\t\t\t\t\t\traise ValueError(\"The first line should only contain white spaces and underscores.\")\n\t\t\t\t\tif char == \" \" and nb > 1 and len(colsIndex) > 1:\n\t\t\t\t\t\traise ValueError(\"The column separator between col \"+str(len(colsIndex)-1)+\" and col \"+str(len(colsIndex))+\" is too wide.\")\n\t\t\t\t\tif char == \"_\":\n\t\t\t\t\t\tcolsIndex.append(((colsIndex[-1][1]+1), (nb+colsIndex[-1][1]+1)))\n\t\t\t\tself.__l = len(colsIndex)-1\n\t\t\t\tself.__values[\"v\"] = [-1]*self.__l\n\t\t\t\tcontinue\n\n\t\t\t\"\"\"Prepare the parsing of other lines\"\"\"\n\t\t\t\"\"\"try:\n\t\t\t\tsplitted_line = [line[x:y] for x,y in colsIndex]\n\t\t\texcept Exception as e:\n\t\t\t\traise e\"\"\"\n\n\t\t\t\"\"\"Parse the last line\"\"\"\n\t\t\tif line[colsIndex[0][1]] != \"|\": \n\t\t\t\tself.__values[\"v\"] = [self.__strToVal(line[x:y],len(self.__barrier[\"v\"])) for x,y in colsIndex[1:]]\n\n\t\t\t\t\"\"\"Parse all the other lines\"\"\"\n\t\t\telse : \n\t\t\t\tbarrier = {\"v\":[], \"h\":[]}\n\t\t\t\tself.__values[\"h\"].append(self.__strToVal(line[0:colsIndex[0][1]], len(colsIndex)-1))\n\t\t\t\tfor x,y in colsIndex[1:] :\n\t\t\t\t\ts = line[x:y]\n\t\t\t\t\tif not (s[0] in \" _\") or len(list(gb(s))) > 1 :\n\t\t\t\t\t\traise ValueError(\"La grille a une erreur ligne \"+str(len(self.__values[\"h\"])))\n\n\t\t\t\t\tif s[0] == '_':\n\t\t\t\t\t\tbarrier[\"h\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"h\"].append(False)\n\n\t\t\t\t\tif line[y] == '|':\n\t\t\t\t\t\tbarrier[\"v\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"v\"].append(False)\n\n\t\t\t\tself.__barrier[\"h\"].append(barrier[\"h\"])\n\t\t\t\tbarrier[\"v\"].pop()\n\t\t\t\tself.__barrier[\"v\"].append(barrier[\"v\"])\n\n\t\tself.__barrier[\"h\"].pop()\n\t\tself.__h = len(self.__barrier[\"v\"])",
"def parse_lines(self, lines):\n row_tree = None\n for line in lines:\n if not line:\n continue\n m = self.row_regex.search(line)\n if m is None:\n continue\n groups = list(m.groups())\n name_raw = str(groups.pop(0))\n name = name_raw.strip()\n name = re.sub('\\s+', ' ', name)\n data = [float(v.strip()) for v in groups]\n row = RowData(\n name, **{n: d for n, d in zip(self.data_col_names, data)})\n self.rows.append(row)\n\n ind = self.subcategory_indention\n depth = int((len(name_raw)-len(name_raw.lstrip(ind)))/len(ind))\n\n if row_tree is None:\n row_tree = [row]\n elif len(row_tree) < depth:\n raise ValueError(\n 'A hirarchical level was skipped! Found element of '\n 'depth {}. However parent element is of depth '\n '{}.'.format(depth, len(row_tree)-1))\n elif len(row_tree) >= depth:\n row_tree = row_tree[:depth]\n try:\n parent_row = row_tree[-1]\n RowData.set_child_parent_relation(row, parent_row)\n except IndexError:\n pass\n row_tree += [row]",
"def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n # combining characters do not contribute to the column width\r\n lines = [strip_combining_chars(line) for line in lines]\r\n\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin '\r\n 'in table line %s.' % (first_line+offset+1),\r\n offset=first_line+offset)\r\n offset += 1\r\n columns.pop()",
"def insert_lines(self, row=None, lines=1):\n if row is None:\n row = self.term_cursor[1]\n else:\n row = self.scrollregion_start\n\n if lines == 0:\n lines = 1\n\n while lines > 0:\n self.term.insert(row, self.empty_line())\n self.term.pop(self.scrollregion_end)\n lines -= 1",
"def _generate_rows(self):\n margin_str = ' ' * self.column_margin\n # Loop over each data row\n for n, data_row in enumerate(self.data):\n if self.use_row_separators and n > 0:\n # Add row separator before every row except the first\n self._text_lines.append(self._row_separator)\n # Create a list where each element is a cell, represented by\n # a list of lines with its contents\n cells = [\n col.get_cell(data_row[i]) for i, col in enumerate(self.columns)\n if i < len(data_row)\n ]\n # The size of the tallest cell\n max_lines = max(len(cell) for cell in cells) if cells else 1\n # Loop over the columns to do vertical alignment\n for i, col in enumerate(self.columns):\n # Calculate how many lines are \"missing\" from each cell\n # with respect to the tallest\n delta = max_lines - (len(cells[i]) if i < len(cells) else 0)\n if delta > 0:\n if col.v_alignment == Alignment.MIDDLE:\n # Insert half as many missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * (delta // 2)\n elif col.v_alignment == Alignment.BOTTOM:\n # Insert all missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * delta\n for m in range(max_lines):\n row = '│'\n for i, col in enumerate(self.columns):\n row += margin_str\n if i >= len(cells) or m >= len(cells[i]):\n row += col.get_empty_cell()\n else:\n row += cells[i][m]\n row += margin_str + '│'\n self._text_lines.append(row)\n self._text_lines.append(self._bottom)",
"def assert_lines_in_text(text, lines,\n remove_white_spaces=True, remove_newline=True):\n filtered_lines = lines\n if remove_white_spaces:\n text = text.replace(\" \", \"\")\n filtered_lines = filtered_lines.replace(\" \", \"\")\n if remove_newline:\n text = text.replace(os.linesep,\"\")\n filtered_lines = filtered_lines.replace(os.linesep,\"\")\n assert text.find(filtered_lines) >= 0, \\\n \"Lines:\\n %s\\n are not found\" % (lines)",
"def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)",
"def _wrap(self, availWidth):\n\n self._lines = []\n minWidthRequired = 0\n\n if len(self._prewrapLines) == 0:\n return minWidthRequired\n\n spaceWidth = self._fontManager.textWidth(\" \", self._fontSize)\n\n tempLines = self._prewrapLines\n currentTempLine = 0\n #logger.debug(\"TableText::_wrap> availWidth: \" + str(availWidth) + \", tempLines: \" + str(tempLines))\n for currentTempLine, tempLine in enumerate(tempLines):\n tempLineWidth = self._fontManager.textWidth(tempLine, self._fontSize)\n #logger.debug(\"TableText::_wrap> tempLine: \" + tempLine + \", tempLineWidth: \" + str(tempLineWidth))\n\n if tempLineWidth <= availWidth:\n # easy case: the entire line fits within availWidth\n\n #logger.debug(\"TableText::_wrap> tempLineWidth <= availWidth\")\n self._lines.append(tempLine)\n minWidthRequired = tempLineWidth\n else:\n # the line needs to be wrapped in order to fit in availWidth\n # break the line into tokens, each token is a word or number or a punctuation character\n\n tempWords = re.split(\"(\\W)\", tempLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n while len(tempWords) > 0 and totalLinesHeight < self._maxCellHeight:\n #logger.debug(\"TableText::_wrap> starting new line. Words left: \" + str(tempWords))\n currentLineWords = []\n remainingWidth = availWidth\n\n fillingCurrentLine = True\n # TODO: remove any leading spaces\n\n while fillingCurrentLine:\n tempWord = tempWords.pop(0)\n\n # reportlab doesn't handle \\t character. replace with space\n if tempWord == '\\t':\n tempWord = ' '\n\n #start = time.time()\n tempWordWidth = self._fontManager.textWidth(tempWord, self._fontSize)\n #finish = time.time()\n #stringWidthTimes.append(finish-start)\n\n\n #addSpace = False\n #logger.debug(\"TableText::_wrap> word: \" + tempWord + \", wordWidth: \" + str(tempWordWidth) + \", remainingWidth: \" + str(remainingWidth))\n if len(currentLineWords) > 0:\n tempWordWidth = tempWordWidth + spaceWidth\n #addSpace = True\n\n if tempWordWidth <= remainingWidth:\n # temp word can fit in the remaining space\n #logger.debug(\"TableText::_wrap> can fit within remaining space\")\n\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(tempWord)\n remainingWidth = remainingWidth - tempWordWidth\n elif tempWordWidth <= availWidth:\n # temp word cannot fit in the remaining space, but can fit on a new line\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, but can fit on next line\")\n\n tempWords.insert(0, tempWord)\n remainingWidth = 0\n fillingCurrentLine = False\n else:\n # temp word cannot fit in the remaining space, nor can it fit on a new line\n # hard-break a segment off the word that will fit in the remaining space\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, and cannot fit on next line\")\n\n #if addSpace:\n #\tremainingWidth = remainingWidth - spaceWidth\n firstSegment, restOfWord = self._wrapWord(tempWord, remainingWidth, wordWidth = tempWordWidth)\n #logger.debug(\"TableText::_wrap> broke word \" + tempWord + \" into: \" + firstSegment + \" and \" + restOfWord)\n tempWords.insert(0, restOfWord)\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(firstSegment)\n fillingCurrentLine = False\n\n if len(tempWords) == 0:\n # we're done filling the current line, given that there are no more words\n fillingCurrentLine = False\n\n currentLine = \"\".join(currentLineWords)\n self._lines.append(currentLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n minWidthRequired = 
max(minWidthRequired, availWidth - remainingWidth)\n\n # check to see if we need to truncate the cell's contents\n if (len(self._lines) * self._lineHeight) >= self._maxCellHeight:\n break\n\n if (currentTempLine + 1) < len(tempLines):\n # we truncated\n percentageShown = (100.0 * float(currentTempLine) / float(len(tempLines)))\n logger.info(\"TableText::_wrap> truncated cell contents. %s%% shown.\" % percentageShown)\n # TODO: this needs to be internationalized\n self._lines.append(\"... Truncated. %s%% shown.\" % percentageShown)\n\n logger.debug(\"TableText::_wrap> minWidthRequired: \" + str(minWidthRequired) + \", self._lines: \" + str(self._lines))\n return minWidthRequired",
"def remove_lines(self, row=None, lines=1):\n if row is None:\n row = self.term_cursor[1]\n else:\n row = self.scrollregion_start\n\n if lines == 0:\n lines = 1\n\n while lines > 0:\n self.term.pop(row)\n self.term.insert(self.scrollregion_end, self.empty_line())\n lines -= 1",
"def parse_row(line):\n\n if type(line) is not str:\n raise TypeError('line must be a non-empty string.')\n if not line.strip():\n raise ValueError('line must be a non-empty string.')\n\n row_instructions = line[line.index(':') + 2 :]\n\n number = _find_first_num(line)\n\n side = None\n if re.search('rs|right side', line, re.IGNORECASE):\n side = 'RS'\n elif re.search('ws|wrong side', line, re.IGNORECASE):\n side = 'WS'\n\n row = Row([Annotation(row_instructions)], number, side)\n\n if re.search(IN_ROW_REPEAT_REGEX, line, re.IGNORECASE):\n return Row(parse_in_row_repeat(row_instructions), number, side)\n\n return row",
"def test_multi_line(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),\n ('|', ' Column One ', '|', ' ', '|', ' ', '|'),\n ]\n assert actual == expected",
"def makeTextCell(table, span, widths, heights, use_headers):\n width = getTotalSpanWidth(span, widths)\n height = getTotalSpanHeight(span, heights)\n text_row = span[0][0]\n text_column = span[0][1]\n text = table[text_row][text_column]\n\n lines = text.split(\"\\n\")\n for i in range(len(lines)):\n width_difference = width - len(lines[i])\n lines[i] = lines[i] + lineBreak(width_difference, \" \")\n\n height_difference = height - len(lines)\n empty_lines = []\n for i in range(0, height_difference):\n empty_lines.append(lineBreak(width, \" \"))\n lines.extend(empty_lines)\n\n output = [\"+\" + lineBreak(width, \"-\") + \"+\"]\n for i in range(0, height):\n output.append(\"|\" + lines[i] + \"|\")\n\n if use_headers and span[0][0] == 0:\n symbol = \"=\"\n else:\n symbol = \"-\"\n output.append(\"+\" + lineBreak(width, symbol) + \"+\")\n\n text = \"\\n\".join(output)\n row_count = getSpanRowCount(span)\n column_count = getSpanColumnCount(span)\n cell = Cell(text, text_row, text_column, row_count, column_count)\n\n return cell",
"def _fill_next_rows(self, rows: _Rows, line: int) -> _Rows:\n unmerged_rows = {}\n\n for column, cell in enumerate(rows[line]):\n if isinstance(cell, TableCell) and cell.rowspan > 1:\n nb_lines = cell.rowspan - 1\n lines = [cell]\n if \"\\n\" in cell:\n lines = cell.replace(\"\\n\", \"<fg=default;bg=default>\\n</>\").split(\n \"\\n\"\n )\n if len(lines) > nb_lines:\n nb_lines = cell.count(\"\\n\")\n\n rows[line][column] = TableCell(\n lines[0], colspan=cell.colspan, style=cell.style\n )\n\n # Create a two dimensional dict (rowspan x colspan)\n placeholder = dict(\n [(k, {}) for k in range(line + 1, line + 1 + nb_lines)]\n )\n for k, v in unmerged_rows.items():\n if k in placeholder:\n for l, m in unmerged_rows[k].items():\n if l in placeholder[k]:\n placeholder[k][l].update(m)\n else:\n placeholder[k][l] = m\n else:\n placeholder[k] = v\n\n unmerged_rows = placeholder\n\n for unmerged_row_key, _ in unmerged_rows.items():\n value = \"\"\n if unmerged_row_key - line < len(lines):\n value = lines[unmerged_row_key - line]\n\n unmerged_rows[unmerged_row_key][column] = TableCell(\n value, colspan=cell.colspan, style=cell.style\n )\n if nb_lines == unmerged_row_key - line:\n break\n\n for unmerged_row_key, unmerged_row in unmerged_rows.items():\n # we need to know if unmerged_row will be merged or inserted into rows\n if (\n unmerged_row_key < len(rows)\n and isinstance(rows[unmerged_row_key], list)\n and (\n (\n self._get_number_of_columns(rows[unmerged_row_key])\n + self._get_number_of_columns(\n list(unmerged_rows[unmerged_row_key].values())\n )\n )\n <= self._number_of_columns\n )\n ):\n # insert cell into row at cell_key position\n for cell_key, cell in unmerged_row.items():\n rows[unmerged_row_key].insert(cell_key, cell)\n else:\n row = self._copy_row(rows, unmerged_row_key - 1)\n for column, cell in unmerged_row.items():\n if len(cell):\n row[column] = unmerged_row[column]\n\n rows.insert(unmerged_row_key, row)\n\n return rows",
"def __create_lines_table(self):\r\n i = 0\r\n rows = []\r\n cols = []\r\n self.__add_item('number', self.tab1, i, 0, cols)\r\n self.__add_item('last stop', self.tab1, i, 1, cols)\r\n self.__add_item('route', self.tab1, i, 2, cols)\r\n self.__add_item('frequency', self.tab1, i, 3, cols)\r\n self.__add_item('bus capacity', self.tab1, i, 4, cols)\r\n rows.append(cols)\r\n i += 1\r\n for line in self.simulation.lines:\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(0), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[0] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[0], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(1), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[1] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[1], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n rows.append(cols)",
"def test_no_padding_no_borders(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n table.inner_column_border = False\n table.outer_border = False\n table.padding_left = 0\n table.padding_right = 0\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('Row One ', 'Two', 'Three'),\n ('Column One', ' ', ' '),\n ]\n assert actual == expected",
"def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)",
"def validate_lines(grid, expected_height, expected_width):\n # String of exceptions that will be built as/if they occur.\n reports = \"\"\n valid_chars = (\"X\", \".\")\n try: \n # List of offenses and specific locations.\n bad_chars = []\n for row in range(len(grid)):\n # Check last character of each line is a \"\\n\"\n if grid[row][-1] != \"\\n\":\n bad_chars.append(\"Line %s does not end with \\n\" % str(row + 1))\n for char in range(len(grid[row]) - 1):\n # Check all other characters are valid.\n if grid[row][char] not in valid_chars:\n bad_chars.append(grid[row][char]) \n # True if bad_chars isn't empty. \n if bad_chars:\n raise BadCharacter(bad_chars)\n except BadCharacter as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n # List of offenses and specific locations.\n bad_lines = []\n for row in range(len(grid)):\n # Ignore last element as should be \"\\n\". Checked previously.\n actual_width = len(grid[row]) - 1 \n if actual_width < expected_width or actual_width > expected_width: \n bad_lines.append((actual_width, expected_width, row + 1))\n # True if bad_lines isn't empty.\n if bad_lines:\n raise BadLineLength(tuple(bad_lines)) \n except BadLineLength as error:\n reports += str(error)\n \n # Store actual height \n actual_height = len(grid)\n \n try:\n if actual_height > expected_height:\n raise TooManyLines(actual_height, expected_height)\n except TooManyLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n if actual_height < expected_height:\n raise TooFewLines(actual_height, expected_height) \n except TooFewLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n # True if reports isn't empty. \n if reports:\n print \"File format is invalid. Errors found:\\n\"\n print reports\n else:\n print \"File format okay\\n\"",
"def split_rows(sentences, column_names):\r\n new_sentences = []\r\n texts=[]\r\n root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']\r\n start = [dict(zip(column_names, root_values))]\r\n for sentence in sentences:\r\n info=[]\r\n rows = sentence.split('\\n')\r\n sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']\r\n sentence = start + sentence\r\n new_sentences.append(sentence)\r\n if \"newdoc id\" in rows[0]: # beginnings of new docs\r\n info.append(rows[1])\r\n info.append(rows[2])\r\n texts.append(info)\r\n else:\r\n info.append(rows[0])\r\n info.append(rows[1])\r\n texts.append(info)\r\n return new_sentences, texts",
"def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)",
"def _generate_lines(self):\r\n for iy in range(self.nrows):\r\n cell_row = [col[iy] for col in self.worktable]\r\n # this produces a list of lists, each of equal length\r\n cell_data = [cell.get() for cell in cell_row]\r\n cell_height = min(len(lines) for lines in cell_data)\r\n for iline in range(cell_height):\r\n yield ANSIString(\"\").join(_to_ansi(celldata[iline] for celldata in cell_data))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
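Not the docutils implementation, just a minimal illustration of the column-slicing idea the parse_row record above relies on: each (start, end) column span cuts the same character range out of every text line of the row. The sample lines and offsets are made up.

# Slice a row's text lines into per-column cell blocks by character offsets.
lines = ["alpha   beta  ", "a2      b2    "]
columns = [(0, 7), (8, 14)]   # (start, end) character offsets per column
cells = [[line[start:end].rstrip() for line in lines] for start, end in columns]
print(cells)  # [['alpha', 'a2'], ['beta', 'b2']]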
Check for text in column margins and text overflow in the last column. Raise TableMarkupError if anything but whitespace is in column margins. Adjust the end value for the last column if there is text overflow. | def check_columns(self, lines, first_line, columns):
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
# combining characters do not contribute to the column width
lines = [strip_combining_chars(line) for line in lines]
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin '
'in table line %s.' % (first_line+offset+1),
offset=first_line+offset)
offset += 1
columns.pop() | [
"def text_error_cols(text): \n po = ParseOptions(min_null_count=0, max_null_count=999)\n en_dir = Dictionary() # open the dictionary only once\n sent = Sentence(text, en_dir, po)\n linkages = sent.parse()\n if sent.null_count() == 0 :\n return []\n else:\n error_cols=[]\n iws=[]\n for lkg in linkages:\n words=[w for w in lkg.words()]\n #desc(lkg)\n for k,w in enumerate(words):\n if is_no_link_ward(w):\n if k in iws:\n break\n else:\n iws.append(k)\n js=text_words2col_begin_end(text,words)\n error_cols.append(js[k-1])\n return error_cols",
"def calcColWidth(self):",
"def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)",
"def rewrap(self) -> None:\n self.measured_widths = self.colwidth[:]\n for cell in self.cells:\n cell.wrap(width=self.cell_width(cell, self.colwidth))\n if not cell.wrapped:\n continue\n if cell.row is None or cell.col is None:\n msg = 'Cell co-ordinates have not been set'\n raise ValueError(msg)\n width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)\n for col in range(cell.col, cell.col + cell.colspan):\n self.measured_widths[col] = max(self.measured_widths[col], width)",
"def test_inside_column(self):\n col1, col2, col3 = st.columns([2.5, 1.5, 0.5])\n\n with col1:\n st.text_input(\"foo\")\n\n all_deltas = self.get_all_deltas_from_queue()\n\n # 5 elements will be created: 1 horizontal block, 3 columns, 1 widget\n self.assertEqual(len(all_deltas), 5)\n text_input_proto = self.get_delta_from_queue().new_element.text_input\n\n self.assertEqual(text_input_proto.label, \"foo\")",
"def test_columns_with_large_gap(self):\n\n columns = st.columns(3, gap=\"LARGE\")\n\n all_deltas = self.get_all_deltas_from_queue()\n\n horizontal_block = all_deltas[0]\n columns_blocks = all_deltas[1:4]\n\n # 4 elements will be created: 1 horizontal block, 3 columns, each receives \"large\" gap arg\n self.assertEqual(len(all_deltas), 4)\n self.assertEqual(horizontal_block.add_block.horizontal.gap, \"large\")\n self.assertEqual(columns_blocks[0].add_block.column.gap, \"large\")\n self.assertEqual(columns_blocks[1].add_block.column.gap, \"large\")\n self.assertEqual(columns_blocks[2].add_block.column.gap, \"large\")",
"def EllipsisMiddleTruncate(text, available_space, line_length):\n ...",
"def _isEndOfRow(self):\r\n\t\tinfo=self.copy()\r\n\t\tinfo.expand(textInfos.UNIT_CHARACTER)\r\n\t\treturn info._rangeObj.getText(-1)==u'\\u0007'",
"def test_unsupported_columns(self):\n self.dlg.set_focus()\n table = self.dlg.Table\n self.assertRaises(NotImplementedError, table.column_count)\n self.assertRaises(NotImplementedError, table.get_column, 0)",
"def _check_valid_docstring_spacing(self) -> None:\n if self.Modifier.FLOATING in self.type_mod:\n return # floating docstring sections need not be checked for this\n\n end_line = self.extent.end.line + 1\n cursor_start = self.cursor.extent.start\n if end_line != cursor_start.line:\n # there is at least 1 (probably empty) line between the comment end and whatever it\n # is describing\n diag = self.diags.symbol_spacing\n mess = 'Invalid line-spacing between docstring and the symbol it describes. The docstring must appear immediately above its target'\n eloc = self.make_source_range('', '', end_line)\n floc = SourceRange.from_locations(self.make_source_location(end_line, 1), cursor_start)\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, eloc, highlight=False, patch=Patch(floc, '')\n )\n return",
"def test_console_width_is_positive():\n assert console.columns() > 0",
"def typeset(self, container, text_align, line_spacing, last_descender,\n last_line=False, force=False):\n document = container.document\n\n # drop spaces (and empty spans) at the end of the line\n while len(self) > 0:\n last_span = self[-1]\n if last_span and last_span.ends_with_space:\n self.cursor -= last_span.space.width\n self.pop()\n else:\n break\n else: # abort if the line is empty\n return last_descender\n\n descender = min(glyph_span.span.descender(container)\n for glyph_span in self)\n if last_descender is None:\n advance = max(glyph_span.span.ascender(container)\n for glyph_span in self)\n else:\n advance = line_spacing.advance(self, last_descender, container)\n container.advance(advance)\n self.advance = advance\n\n container.advance(- descender)\n for glyph_span in self:\n glyph_span.span.before_placing(container)\n container.advance(descender)\n\n # horizontal displacement\n left = self.indent\n\n if self._has_tab or text_align == TextAlign.JUSTIFY and last_line:\n text_align = 'left'\n extra_space = self.width - self.cursor\n if text_align == TextAlign.JUSTIFY:\n # TODO: padding added to spaces should be prop. to font size\n nr_spaces = sum(glyph_span.number_of_spaces for glyph_span in self)\n if nr_spaces > 0:\n add_to_spaces = extra_space / nr_spaces\n for glyph_span in self:\n if glyph_span.number_of_spaces > 0:\n glyph_span.space.width += add_to_spaces\n elif text_align == TextAlign.CENTER:\n left += extra_space / 2.0\n elif text_align == TextAlign.RIGHT:\n left += extra_space\n\n canvas = container.canvas\n cursor = container.cursor\n current_annotation = AnnotationState(container)\n for span, glyph_and_widths in group_spans(self):\n try:\n width = canvas.show_glyphs(left, cursor, span, glyph_and_widths,\n container)\n except InlineFlowableException:\n ascender = span.ascender(document)\n if ascender > 0:\n top = cursor - ascender\n else:\n inline_height = span.virtual_container.height\n top = cursor - span.descender(document) - inline_height\n span.virtual_container.place_at(container, left, top)\n width = span.width\n current_annotation.update(span, left, width)\n left += width\n current_annotation.place_if_any()\n container.advance(- descender)\n return descender",
"def realign_punctuated_text(df, text, skip_1st=0, margin=2):\n # Built-in str.split doesn't retain starting/trailing spaces correctly.\n # Probably would be fine but just keep this since it took a while to get\n # right and I don't want to break it.\n words = re.split(' ', text)\n rows = []\n start_i = 0\n for i, chunk in df.iterrows():\n chunk_words = re.split(' ', chunk.text)\n length = len(chunk_words)\n punct_words = words[start_i:start_i + length + margin]\n suff = ' '.join(chunk_words[-2:])\n scores = []\n bigrams = zip(punct_words[skip_1st:], punct_words[skip_1st + 1:])\n # Avoid list comp so we can exit early if we find a perfect match.\n for j, gram in enumerate(bigrams):\n score = fuzz.ratio(suff, ' '.join(gram).lower())\n if score == 100:\n argmax = j\n break\n scores.append(score)\n else:\n argmax = np.argmax(scores)\n if max(scores) < 80:\n warnings.warn(\n 'Max score < 80. Your rows may have gotten misaligned '\n f'at row {i}: {chunk.text}'\n )\n punct_len = skip_1st + argmax + 2\n rows.append(' '.join(words[start_i:start_i + punct_len]))\n start_i += punct_len\n\n new_df = pd.DataFrame(rows, columns=['text'])\n return pd.concat((new_df, df.reset_index()[['start', 'duration']].copy()),\n axis=1)",
"def column_offset_validation(arguments):\n inputfile = arguments[1]\n header = inputfile.readline()\n splitter = arguments[4]\n attributesCount = len(header.split(splitter))\n operands = arguments[0].split(',')\n hasheader = arguments[3]\n\n if hasheader:\n for operand in operands:\n\n # if you are here the column offset can be a integer or string\n if operand[1:].isdecimal():\n data_error_handler(operand, attributesCount, arguments)\n else:\n # This block of code is executed for float or string\n if operand[1:] not in header:\n print(f'column reference {operand} entered is incorrect')\n free_resources(arguments)\n sys.exit(-1)\n\n else:\n # no header so setting the file pointer back to first line\n # if inputtype != None: (while going back is an option in files not for stdin)\n # inputfile.seek(0)\n for operand in operands:\n if operand[1:].isdecimal():\n data_error_handler(operand, attributesCount, arguments)\n else:\n print(\n f'column reference {operand} cannot be a string, perhaps you forgot to pass \"-h\" arg')\n free_resources(arguments)\n sys.exit(-1)\n return header",
"def test_split_columns_invalid_values():\n with pytest.raises(ValueError):\n split_columns(\"example\", -1)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", -200)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", 0)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", 200)",
"def test_no_padding_no_borders(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n table.inner_column_border = False\n table.outer_border = False\n table.padding_left = 0\n table.padding_right = 0\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('Row One ', 'Two', 'Three'),\n ('Column One', ' ', ' '),\n ]\n assert actual == expected",
"def _borders(self):\r\n nx, ny = self.ncols-1, self.nrows-1\r\n options = self.options\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n cell.reformat(**self._cellborders(ix,iy,nx,ny,options))",
"def _check_valid_indentation(self, lineno: int, line: str, left_stripped: str) -> None:\n if linelen := len(line):\n indent = linelen - len(left_stripped)\n expected_ind = 0 if line.startswith(('.', '+', '-', '$')) else self.indent\n if indent != expected_ind:\n diag = self.diags.indentation\n loc = self.make_source_range(' ' * indent, line, lineno)\n mess = f'Invalid indentation ({indent}), all regular (non-empty, non-parameter, non-seealso) text must be indented to {self.indent} columns'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, loc, patch=Patch(loc, ' ' * expected_ind)\n )\n return",
"def truncate_like_pd_max_colwidth(x: any) -> str:\n max_colwidth = pd.get_option(\"display.max_colwidth\")\n if max_colwidth is None:\n return x\n else:\n s = str(x)\n if len(s) <= max_colwidth:\n return s\n else:\n return s[:max_colwidth - 3] + '...'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
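A tiny, generic sketch (not the docutils code) of the margin check the check_columns record above performs: any non-whitespace text between one column's end and the next column's start is flagged. The sample line and offsets are invented.

# Flag non-blank text that falls between adjacent column spans.
line = "cell one   stray  cell two"
columns = [(0, 8), (18, 26)]
for (start, end), (next_start, _) in zip(columns, columns[1:]):
    margin = line[end:next_start]
    if margin.strip():
        print("text in column margin:", margin.strip())  # -> stray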
Extend the list values of `master` with those from `newdata`. Both parameters must be dictionaries containing list values. | def update_dict_of_lists(master, newdata):
for key, values in newdata.items():
master.setdefault(key, []).extend(values) | [
"def update_dictargs( list_of_dicts, master_dict, issuer = 'alberta_treasury' ):\n key, default_dict = create_default_dictargs( issuer = issuer )\n if master_dict.get( key, None ) is None:\n master_dict[ key ] = list()\n for append_dict in list_of_dicts:\n d = dict( default_dict.items() + append_dict.items() )\n master_dict[ key ].append( d )\n return master_dict",
"def update_data(self, new_dict):\n\n self.data.update(new_dict)",
"def updateWith(self,new=None):\n assert isinstance(new, Chemplate)\n for id in new.data:\n self.data[id] = new.data[id]",
"def extend(self, list):",
"def _merge_dict(self,\n base_items,\n new_items,\n list_extend=True,\n yml_multilines=False):\n if isinstance(new_items, dict):\n for key, value in new_items.items():\n if isinstance(value, dict):\n base_items[key] = self._merge_dict(\n base_items=base_items.get(key, {}),\n new_items=value,\n list_extend=list_extend\n )\n elif (not isinstance(value, int) and (\n ',' in value or (\n '\\n' in value and not yml_multilines))):\n base_items[key] = re.split(',|\\n', value)\n base_items[key] = [\n i.strip() for i in base_items[key] if i\n ]\n elif isinstance(value, list):\n if isinstance(base_items.get(key), list) and list_extend:\n base_items[key].extend(value)\n else:\n base_items[key] = value\n elif isinstance(value, (tuple, set)):\n le = list_extend # assigned for pep8\n if isinstance(base_items.get(key), tuple) and le:\n base_items[key] += tuple(value)\n elif isinstance(base_items.get(key), list) and le:\n base_items[key].extend(list(value))\n else:\n base_items[key] = value\n else:\n base_items[key] = new_items[key]\n elif isinstance(new_items, list):\n if list_extend:\n base_items.extend(new_items)\n else:\n base_items = new_items\n return base_items",
"def merge(self, another_list: object) -> None:\n # for loop to iterate through data to add/append Dynamic Array\n for i in range(another_list.size):\n self.append(another_list.data[i])\n return",
"def extend(self, datasets: Iterable[_TypeMultiBlockLeaf]) -> None:\n # Code based on collections.abc\n if isinstance(datasets, MultiBlock):\n for key, data in zip(datasets.keys(), datasets):\n self.append(data, key)\n else:\n for v in datasets:\n self.append(v)",
"def master_info(self, master_info):\n if master_info is None:\n raise ValueError(\"Invalid value for `master_info`, must not be `None`\")\n\n self._master_info = master_info",
"def update(self, new_body_values):\n self.data = merge_dicts(self.data, new_body_values)\n return self",
"def deep_merge_lists(original, incoming):\n common_length = min(len(original), len(incoming))\n for idx in range(common_length):\n if isinstance(original[idx], dict) and isinstance(incoming[idx], dict):\n deep_merge_dicts(original[idx], incoming[idx])\n\n elif isinstance(original[idx], list) and isinstance(incoming[idx], list):\n deep_merge_lists(original[idx], incoming[idx])\n\n else:\n original[idx] = incoming[idx]\n\n for idx in range(common_length, len(incoming)):\n original.append(incoming[idx])",
"def splitMasterData(mdata):\n # Create empty dictionaries with keys\n print(\"Splitting master data into subsets...\", end = ' ')\n learndata = {}\n testdata = {}\n for i in mdata.keys():\n learndata[i] = []\n testdata[i] = []\n\n for i in range(len(mdata['subset'])):\n if mdata['subset'][i] == 'learn':\n for x in mdata:\n learndata[x].append(mdata[x][i])\n elif mdata['subset'][i] == 'test':\n for x in mdata:\n testdata[x].append(mdata[x][i])\n elif mdata['subset'][i] == 'exclude':\n pass\n else:\n print(\"Invalid subset read(valid= 'learn', 'test', or 'exclude')\")\n print(mdata['subset'][i])\n sys.exit()\n print(\"Done!\")\n return testdata, learndata",
"def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:\n if isinstance(config, str):\n import json\n config = json.loads(config)\n properties = self.all_properties()\n config['fields'] = config.get('fields', dict())\n fields = config['fields']\n\n d_color = defaults.get('color', 'white')\n d_icon = defaults.get('icon', 'icons:default')\n\n if delete_orphan_fields:\n exist = {p.name() for p in properties}\n unexist = set(fields.keys()) - exist\n for name in unexist:\n del fields[name]\n\n for p in properties:\n field = fields.get(p.name(), {'show_in_search': False,\n 'combine_fields': False,\n 'number_of_rules': 0,\n 'glossaries': [],\n 'use_in_network_search': False,\n 'case_sensitive': False,\n 'show_as_link': 'text',\n 'blacklists': [],\n 'show_in_result': 'no',\n 'rule_extractor_enabled': False,\n 'search_importance': 1,\n 'group_name': '',\n 'show_in_facets': False,\n 'predefined_extractor': 'none',\n 'rule_extraction_target': ''})\n config['fields'][p.name()] = field\n field['screen_label'] = ' '.join(p.label())\n field['description'] = '\\n'.join(p.definition())\n field['name'] = p.name()\n\n # color\n if 'color' not in field:\n color = self.__merge_close_ancestor_color(p, fields, attr='color')\n field['color'] = color if color else d_color\n # icon\n if 'icon' not in field:\n icon = self.__merge_close_ancestor_color(p, fields, attr='icon')\n field['icon'] = icon if icon else d_icon\n # type\n if isinstance(p, OntologyObjectProperty):\n field['type'] = 'kg_id'\n else:\n try:\n field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))\n except StopIteration:\n field['type'] = None\n return config",
"def mastered_instruments(self, mastered_instruments):\n\n self._mastered_instruments = mastered_instruments",
"def test_override_custom(self):\n\n default = {\n 'list': ['two']\n }\n\n override1 = {\n 'list': append('three', 'four')\n }\n\n override2 = {\n 'list': prepend('one')\n }\n\n merged = merge(default, override1, override2)\n\n self.assertEquals(merged['list'], ['one', 'two', 'three', 'four'])",
"def new_data(self):\n\t\tif len(self.data) == 10:\n\t\t\tfor i in range(5):\n\t\t\t\ttt = self.data[5+i]\n\t\t\t\tself.data_new.append(tt)",
"def override_master_items(master_name, spread=False):\n script = 'try\\n'\n page_nums = [2, 3] if spread else [1]\n for num in page_nums:\n script += (\n f'override (every item of master page items of page {num}'\n 'whose item layer\\'s name is \"Work\") destination page '\n f'page {num}\\n')\n script += 'end try'\n return wrap_and_run(script)",
"def merge(self, other) -> None:\n if other.new:\n raise ValueError(\"This patch should not have a .new set.\")\n if not other.old:\n raise ValueError(\"No data in .old\")\n self.old = other.old + self.old\n self.old_hash = get_sha256(self.old)",
"def update_symbol_master(self):\n new_symbol_master = self.pull_symbol_master()\n ts = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n \n # first check if symbolmaster exists\n if not os.path.exists(self.symbol_master_filepath):\n \n # if it currently does not exist, create one and update\n new_symbol_master['updateTimeLocal'] = ts\n self.final_symbol_master = new_symbol_master\n self.final_symbol_master.to_feather(self.symbol_master_filepath)\n \n else: \n # pull existing symbol master\n current_symbol_master = pd.read_feather(self.symbol_master_filepath)\n\n # find difference between old and new\n current_body = current_symbol_master[self.symbol_master_cols]\n new_body = new_symbol_master[self.symbol_master_cols]\n check_rows = new_body.merge(current_body, how='outer', indicator=True)\n new_rows = check_rows[check_rows['_merge'] == 'left_only'].copy()\n new_rows.drop('_merge', axis=1, inplace=True)\n\n # update new rows\n if len(new_rows) > 0:\n new_rows['entryTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n existing_symbol_master = current_symbol_master[self.symbol_master_cols + ['entryTimeLocal']]\n final_symbol_master = pd.concat([existing_symbol_master, new_rows], axis=0)\n final_symbol_master['updateTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('Number of new symbols appended: {}'.format(len(new_rows)))\n else:\n final_symbol_master = current_symbol_master[self.symbol_master_cols + ['entryTimeLocal']].copy()\n final_symbol_master['updateTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('No new symbols appended')\n\n # save final symbol master as feather file\n self.final_symbol_master = final_symbol_master\n self.final_symbol_master.reset_index().to_feather(self.symbol_master_filepath)\n \n # reset index\n self.final_symbol_master.reset_index(drop=True, inplace=True)\n return self.final_symbol_master",
"def updateLists(self):\r\n\r\n self.parentOf = self.relationType.sourceDataTypeNames\r\n self.childOf = self.relationType.targetDataTypeNames",
"def nfvi_kube_rootca_host_update_list(self, new_list):\n self._nfvi_kube_rootca_host_update_list = new_list"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
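A quick usage sketch of the dict-of-lists merge in the record above; the helper is restated so the snippet is self-contained, and the sample data is made up.

def update_dict_of_lists(master, newdata):
    # Append every value list in newdata onto the matching list in master.
    for key, values in newdata.items():
        master.setdefault(key, []).extend(values)

master = {"a": [1, 2], "b": [3]}
update_dict_of_lists(master, {"a": [4], "c": [5]})
print(master)  # {'a': [1, 2, 4], 'b': [3], 'c': [5]}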
Run the state machine on `input_lines`. Return results (a list). Reset `self.line_offset` and `self.current_state`. Run the beginning-of-file transition. Input one line at a time and check for a matching transition. If a match is found, call the transition method and possibly change the state. Store the context returned by the transition method to be passed on to the next transition matched. Accumulate the results returned by the transition methods in a list. Run the end-of-file transition. Finally, return the accumulated results. | def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>self._stderr, (
u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, u'\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>self._stderr, '\nStateMachine.run: bof transition'
context, result = state.bof(context)
results.extend(result)
while True:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>self._stderr, (
u'\nStateMachine.run: line (source=%r, '
u'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results | [
"def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions)), file=self._stderr)\r\n for name in transitions:\r\n pattern, method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__)), file=self._stderr)\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__), file=self._stderr)\r\n return state.no_match(context, transitions)",
"def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions))\r\n for name in transitions:\r\n pattern, method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__))\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__)\r\n return state.no_match(context, transitions)",
"def process(self, lines):\n for line in lines:\n self._process_line(line)",
"def build_from(lines:[str]) -> [object]:\n lines = iter(lines)\n current_line = None\n while True:\n try:\n line = next(lines).strip()\n except StopIteration:\n break\n if not line: break\n if REG_CHARACTER.match(line): # new line\n if current_line:\n yield current_line\n try:\n character, content, refs = parse_line(line)\n except TypeError: # parse_line returned None ?!\n print(f\"ERROR: parse_line didn't parse '{line}'\")\n current_line = Line(character.strip(), content.strip(), refs)\n else: # continuation of previous line\n # print('CURRENT LINE:', current_line)\n # print(' :', line)\n current_line.content += '\\n' + line\n if current_line:\n yield current_line",
"def lex(self, line):\n\n # only add line if we are in a continuation or line is not empty\n if self.continuation is True or line.strip() != '':\n self.line += line\n\n self.continuation = False\n # keep running states until out of data or we need a continuation\n while self.continuation is False and len(self.line) > 0:\n for token in self.state():\n if token.ident == Lexer.error.ident:\n yield token\n # reset state on error\n self._reset()\n return\n yield token",
"def parse_initial_state_transitions(lines: List[Line]) -> Tuple[Dict[str, Line], List[Line]]:\n remaining_lines = []\n initial_state_names = {}\n\n for line in lines:\n m = re.fullmatch(r'^\\[\\*\\]\\s+-{1,2}>\\s+(\\w+)\\s*(.*)', line.text)\n if not m:\n remaining_lines.append(line)\n continue\n\n name, trailing_text = m.groups()\n assert name not in initial_state_names, f'Duplicate initial transition for state {name} in {line}'\n assert not trailing_text, f'Additional text after initial transition in {line}: {line.orig_text}'\n initial_state_names[name] = line\n\n return initial_state_names, remaining_lines",
"def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)",
"def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)",
"def _eagerly_parse_lines(self, lines, skeleton_regex, event_parsers, events, time=None):\n\n # Recompile all regex so that they work on bytes rather than strings.\n # This simplifies the rest of the code while allowing the raw output\n # from a process to be fed\n def encode(string):\n return string.encode('ascii')\n\n events = list(map(encode, events))\n event_parsers = {\n encode(event): parser\n for event, parser in event_parsers.items()\n }\n\n # Only add an extra iterator and tuple unpacking if that is strictly\n # necessary, as it comes with a performance cost\n time_is_provided = time is not None\n skel_search = skeleton_regex.search\n if time_is_provided:\n lines = zip(time, lines)\n drop_filter = lambda line: not skel_search(line[1])\n else:\n drop_filter = lambda line: not skel_search(line)\n\n # First, get rid of all the lines coming before the trace\n lines = itertools.dropwhile(drop_filter, lines)\n\n # Appending to lists is amortized O(1). Inside the list, we store\n # tuples since they are:\n # 1) the most compact Python representation of a product type\n # 2) output directly by regex.search()\n skeleton_data = []\n events_data = {\n **{event: (None, None) for event in events},\n **{\n event: (parser.bytes_regex.search, [])\n for event, parser in event_parsers.items()\n },\n }\n available_events = set()\n\n begin_time = None\n end_time = None\n time_type = getattr(np, self.HEADER_FIELDS['__timestamp'])\n\n # THE FOLLOWING LOOP IS A THE MOST PERFORMANCE-SENSITIVE PART OF THAT\n # CLASS, APPLY EXTREME CARE AND BENCHMARK WHEN MODIFYING\n # Best practices:\n # - resolve all dotted names ahead of time\n # - minimize the amount of local variables. Prefer anonymous\n # expressions\n # - Catch exceptions for exceptional cases rather than explicit check\n\n # Pre-lookup methods out of the loop to speed it up\n append = list.append\n group = self._RE_MATCH_CLS.group\n groups = self._RE_MATCH_CLS.groups\n nextafter = np.nextafter\n inf = math.inf\n line_time = 0\n parse_time = '__timestamp' in skeleton_regex.groupindex.keys()\n\n for line in lines:\n prev_time = line_time\n if time_is_provided:\n line_time, line = line\n\n match = skel_search(line)\n # Stop at the first non-matching line\n try:\n event = group(match, '__event')\n line_time = time_type(group(match, '__timestamp'))\n # Assume only \"time\" is not in the regex. Keep that out of the hot\n # path since it's only needed in rare cases (like nesting parsers)\n except IndexError:\n # If we are supposed to parse time, let's re-raise the\n # exception\n if parse_time:\n raise\n else:\n # Otherwise, make sure \"event\" is defined so that we only\n # go a match failure on \"time\"\n event # pylint: disable=pointless-statement\n # The line did not match the skeleton regex, so skip it\n except TypeError:\n if b'EVENTS DROPPED' in line:\n raise DroppedTraceEventError('The trace buffer got overridden by new data, increase the buffer size to ensure all events are recorded')\n # Unknown line, could be coming e.g. from stderr\n else:\n continue\n\n # Do a global deduplication of timestamps, across all\n # events regardless of the one we will parse. 
This ensures\n # stable results and joinable dataframes from multiple\n # parser instance.\n if line_time <= prev_time:\n line_time = nextafter(prev_time, inf)\n\n if begin_time is None:\n begin_time = line_time\n\n # If we can parse it right away, let's do it now\n try:\n search, data = events_data[event]\n append(\n data,\n # Add the fixedup time\n groups(search(line)) + (line_time,)\n )\n # If we don't have a parser for it yet (search == None),\n # just store the line so we can infer its parser later\n except TypeError:\n # Add the fixedup time and the full line for later\n # parsing as well\n append(\n skeleton_data,\n groups(match) + (line_time, line)\n )\n # We are not interested in that event, but we still remember the\n # pareseable events\n except KeyError:\n available_events.add(event)\n\n # This should have been set on the first line.\n # Note: we don't raise the exception if no events were asked for, to\n # allow creating dummy parsers without any line\n if begin_time is None and events:\n raise ValueError('No lines containing events have been found')\n\n end_time = line_time\n available_events.update(\n event\n for event, (search, data) in events_data.items()\n if data\n )\n\n events_df = {}\n for event, parser in event_parsers.items():\n try:\n # Remove the tuple data from the dict as we go, to free memory\n # before proceeding to the next event to smooth the peak memory\n # consumption\n _, data = events_data.pop(event)\n except KeyError:\n pass\n else:\n decoded_event = event.decode('ascii')\n df = self._make_df_from_data(parser.regex, data, ['__timestamp'])\n # Post-process immediately to shorten the memory consumption\n # peak\n df = self._postprocess_df(decoded_event, parser, df)\n events_df[decoded_event] = df\n\n # Compute the skeleton dataframe for the events that have not been\n # parsed already. It contains the event name, the time, and potentially\n # the fields if they are needed\n skeleton_df = self._make_df_from_data(skeleton_regex, skeleton_data, ['__timestamp', 'line'])\n # Drop unnecessary columns that might have been parsed by the regex\n to_keep = {'__event', '__fields', 'line'}\n skeleton_df = skeleton_df[sorted(to_keep & set(skeleton_df.columns))]\n # Make the event column more compact\n skeleton_df['__event'] = skeleton_df['__event'].astype('category', copy=False)\n # This is very fast on a category dtype\n available_events.update(skeleton_df['__event'].unique())\n\n available_events = {event.decode('ascii') for event in available_events}\n return (events_df, skeleton_df, (begin_time, end_time), available_events)",
"def run(self, s):\n state = self.init_state\n for c in s:\n state = self.transition(state, c)\n return state",
"def process_lines(self, lines, file):\n return lines",
"def process_ops_input(self):\n input_data = self.text()\n if (self.local_state == State.GOTO_LINE):\n self.goto_line.emit(int(input_data))\n else:\n self.search.emit(input_data)",
"def input(self, i):\n self.i_count += 1\n rlist = self.rules.get(self.state, [])\n for (test, dst, action, tag) in rlist + self.rules.get(None, []): # Rules starting from None are added to all states\n t_info = TransitionInfo(self.state, dst, self.i_count, None)\n result = test(i, t_info) if callable(test) else test == i\n t_info = t_info._replace(result=result)\n if result:\n if dst is not None: # Transitions ending in None stay in the same state\n self.state = dst\n # Run the action after the state change so it could override the end state (e.g. pop state from a stack)\n out = action(i, t_info) if callable(action) else action\n # Be sure to trace the actual end state after `action` is done\n self.tracer(i, TraceInfo(t_info, test, action, tag, out, self.state))\n return out\n self.tracer(i, TraceInfo(t_info, test, action, tag, None, self.state))\n\n return self.unrecognized(i, self.state, self.i_count)",
"def update_line_search(self):\n # Collect information on a forward evaluation that just took place\n alpha_try = self.load_vector(\"alpha\") # step length\n f_try = self.load_vector(\"f_try\") # misfit for the trial model\n\n # Update the line search with a new step length and misfit value\n self._line_search.step_count += 1\n self._line_search.update_search_history(step_len=alpha_try,\n func_val=f_try)\n\n # Calculate a new step length based on the current step length and its\n # corresponding misfit.\n alpha, status = self._line_search.calculate_step_length()\n\n # Note: if status is 'PASS' then `alpha` represents the step length of\n # the lowest misfit in the line search and we reconstruct `m_try` w/ it\n if status.upper() in [\"PASS\", \"TRY\"]:\n # Create a new trial model based on search direction, step length\n # and the initial model vector\n _m = self.load_vector(\"m_new\")\n _p = self.load_vector(\"p_new\")\n\n # Sets the latest trial model using the current `alpha` value\n m_try = _m.copy()\n m_try.update(vector=_m.vector + alpha * _p.vector)\n logger.info(\"line search model 'm_try' parameters: \")\n m_try.check()\n elif status.upper() == \"FAIL\":\n # Failed line search skips over costly vector manipulations\n m_try = None\n\n return m_try, alpha, status",
"def step(self):\n if self.__global_state != DFAGlobalState.START:\n raise RuntimeError('DFA is not started!')\n\n if len(self.__input_list) > 0:\n ch = self.__input_list[0]\n transit_to = self.__find_current_state_transition(ch)\n if transit_to:\n self.__logging_list.add_event(DfaLoggingEvent(self.__current_state, ch, transit_to))\n self.__current_state = transit_to\n self.__input_list = self.__input_list[1:]\n else:\n self.__logging_list.set_error(f'no transition for symbol \"{ch}\" in state \"{self.__current_state}\"')\n self.halt()\n return\n else:\n if self.__current_state not in self.__dfa_dict['end_states']:\n self.__logging_list.set_error(f'input string ended at non end state \"{self.__current_state}\"')\n self.halt()",
"def run(self):\n print (\"Worker is now running at step {} with step_size {} starting \"\n \"at time {}\".format(self.step, self.step_size, self.start_time))\n # read in the entries for this step\n processed = 0\n for line in self.inputf.xreadlines():\n entry = self.process_line(line)\n\n processed += 1\n if (processed % 1) == 0:\n print \"Processed {} entries\".format(processed)\n\n # if we are moving beyond this timestep, then wait for\n # more data from the master\n if entry['step'] > self.step:\n self.upload_data()\n time.sleep(UPLOAD_WAIT)\n self.get_master_updates()\n\n # now update the skyline using this point\n self.update_skyline(entry)\n self.inputf.close()\n self.upload_data()\n req = requests.get(self.master_url + \"/worker_done\")\n req.raise_for_status()",
"def expectedRuns(lineup):\n transitionsMatrices = list(map(lambda Batter: Batter.transitionMatrixSimple(), lineup))\n return simulateMarkovChain(transitionsMatrices)[:, 216]",
"def processLines(self, lines):\n\n for line in lines:\n if len(line) == 0:\n continue\n\n if line[-1] == \"\\r\":\n line = line[:-1]\n\n # Automatically make P10 protocols have their lines parsed\n # differently\n lineobj = IRCLine(line, self.protocol.p10)\n\n #debug output\n if self.config[\"etc\"][\"debug\"]:\n self.log(line, \"<<<\")\n\n if lineobj.verb == \"ERROR\":\n #If ERROR is sent, it's already fatal.\n raise IOError\n\n #Handle server commands\n try:\n for impl in self.s2scommands[lineobj.verb]:\n try:\n impl(cod, lineobj)\n except KeyError as e:\n continue\n except Exception as e:\n if not self.config[\"etc\"][\"production\"]:\n self.servicesLog(\"%s %s %s\" %(type(e), e.message, lineobj))\n traceback.print_exc(file=sys.stdout)\n continue\n except KeyError:\n pass",
"def _parse_line(\n self, line: str, handler_lookup: Dict[str, Callable[[str, Path], str]],\n path_file: Optional[Path] = None,\n ) -> List[str]:\n lines: List[str] = []\n if '{cte}' in line and self.state == self.state_auto: # end\n self.end()\n elif '{cts}' in line: # start\n self.start_auto()\n matches = [text_match for text_match in handler_lookup if text_match in line]\n if len(matches) == 1:\n lines.extend(handler_lookup[matches[0]](line, path_file))\n else:\n logger.error('Could not parse: {line}', line=line)\n lines.append(line)\n self.end()\n elif self.state == self.state_user:\n lines.append(line)\n # else: discard the lines in the auto-section\n return lines"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return current state object; set it first if `next_state` given. | def get_state(self, next_state=None):
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state) | [
"def go_to_state(self, next_state):\n for t in self.transitions:\n if t.next_state == None:\n t.next_state = next_state\n return self.root",
"def next_state(self):\r\n s = max(self.states)\r\n self.states.remove(s)\r\n return s[1]",
"def get_next_state(self, index_next_state):\n raise NotImplementedError()",
"def next_state(self, state, move):\n\n pass",
"def estimate_next_state(self):\n return self.__transition_function(self.__state)",
"def next_available_state(self) -> 'State':\n i = len(self.states) - 1\n while i >= 0:\n if self.states[i].is_ready():\n num_incomplete = self.states[i].num_incomplete_deps()\n if num_incomplete == 0:\n # This is perfect; no need to look for the best match.\n return self.states[i]\n i -= 1\n return None",
"def get_state(self, state_name: str):\n if state_name == self.start.name:\n return self.start\n for state in self.state_list:\n if state.name == state_name:\n return state\n return None",
"def get_starting_state(self):\n\t\treturn self._current_state # state 0",
"def calculate_next_state(self):\n self.current_step = self.current_step + 1\n self.current_state = self.game.next_state(current_state=self.current_state, actions=self.next_action)",
"def get_next(current):\n for index,value in enumerate(STATES):\n if value == current:\n if index == len(STATES)-1:\n return STATES[0]\n else:\n return STATES[index+1]",
"def gen_next_state(self, direction):\r\n # Find the current zero-location (blank space).\r\n zero_row = self.zero_location[0]\r\n zero_col = self.zero_location[1]\r\n\r\n # Store the zero location values for our swap tile calculations.\r\n swap_row = zero_row\r\n swap_col = zero_col\r\n\r\n # Find the value in the appropriate direction.\r\n if direction == 'up':\r\n swap_row -= 1\r\n if direction == 'down':\r\n swap_row += 1\r\n if direction == 'left':\r\n swap_col -= 1\r\n if direction == 'right':\r\n swap_col += 1\r\n\r\n # Move the zero-location in the direction specified,\r\n # swapping with the number in the location it moves to.\r\n new_puzzle = np.copy(self.puzzle)\r\n new_puzzle[zero_row, zero_col], new_puzzle[swap_row, swap_col] = (\r\n new_puzzle[swap_row, swap_col], new_puzzle[zero_row, zero_col]\r\n )\r\n\r\n # Create the new state.\r\n path_cost = self.g_cost + 1\r\n predecessor_state = self\r\n next_state = PuzzleState(new_puzzle, path_cost, predecessor_state)\r\n\r\n # Set the predecessor's direction being moved.\r\n next_state.action_from_predecessor = direction\r\n\r\n return next_state",
"def move_next(self, prev_state):\n new_state = SceneState.objects.create(scene=prev_state.scene,\n previous_state=prev_state, target_singular=prev_state.target_singular)\n new_state.rectangles = prev_state.rectangles.all()\n new_state.selected_rectangles = prev_state.selected_rectangles.all()\n return new_state",
"def state(self, as_tuple = False):\n if as_tuple:\n return self.current_state\n else:\n return self.legal_states.index(self.current_state)",
"def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state",
"def find_next_state(self, state, action):\n reward = -1 # set all rewards default is -1\n if state == self.states[0] or state == self.states[2]:\n if action == 1: # take right action\n next_state = state + 1\n else: # take left action\n next_state = max(0, state - 1)\n else:\n if action == 1:\n next_state = state - 1\n else:\n next_state = state + 1\n\n if next_state == self.end_state: # reach the terminal state\n reward = 0\n\n return next_state, reward",
"def get_state(self):\n if self.state:\n return self.state\n\n from timon.state import TMonState\n self.state = state = TMonState(self.cfg['statefile'], config=self)\n return state",
"def _state(self, thread_id):\n while len(self._states) <= thread_id:\n self._states.append(State(self._l2c, self.config.state_config(thread_id)))\n return self._states[thread_id]",
"def from_state(self):\n return self._from_state",
"def next_state(self, state: State, jointaction: JointAction) -> State:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load `self.line` with the `n`'th next line and return it. | def next_line(self, n=1):
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers() | [
"def read_line(file_path, n):\n return linecache.getline(file_path, n)",
"def NthLineOfFile( fname, n = 0 ):\n with open( fname ) as f:\n while n > 0:\n f.readline()\n n -= 1\n return f.readline().strip()",
"def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")",
"def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line",
"def nth_item(line, n: int = 0):\n return line.split()[n]",
"def next_line(self, oldLine):\n nextLine = ''\n #nextLine += self.rule[neighbor.index('0' + oldLine[:2])]\n for i in range(len(oldLine) - 2):\n nextLine += self.rule[neighbor.index(oldLine[i:i+3])]\n if len(self.lines) == 1: # if we work on the second line\n nextLine = self.rule[neighbor.index(oldLine[-2:] + '0')] +\\\n nextLine +\\\n self.rule[neighbor.index(oldLine[-2:] + '0')]\n else:\n nextLine = oldLine[0] + nextLine + oldLine[-1]\n #nextLine += self.rule[neighbor.index(oldLine[-2:] + '0')]\n return nextLine",
"def gotoLine(self, n):\n self.fileIndex = n",
"def NextLine(self):\n self.lineNum = self.lineNum + 1\n self.linePos = 0",
"def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line",
"def next(self, n = 1):\n return NonStandardInteger(self.non_st_part, self.st_part + n, self.non_st_ring)",
"def readline(self) -> Optional[str]:\n # N-Triples lines end in either CRLF, CR, or LF\n # Therefore, we can't just use f.readline()\n if not self.buffer:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer:\n return None\n self.buffer = buffer\n\n while True:\n m = r_line.match(self.buffer)\n if m: # the more likely prospect\n self.buffer = self.buffer[m.end() :]\n return m.group(1)\n else:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer and not self.buffer.isspace():\n # Last line does not need to be terminated with a newline\n buffer += \"\\n\"\n elif not buffer:\n return None\n self.buffer += buffer",
"def getNext(self):\n line = self._file.readline()\n if line:\n return tuple(line.strip('\\n').split('\\t'))\n else: \n return None",
"def next_line(self):\n self.line = next(self.lines) # Will raise StopIteration when there are no more lines\n self.values = self.line.split()",
"def line(n, rule):\n return lambda l, i: (\n i['lineno'] == (n if n >= 0 else i['nlines'] + n) and rule(l, i)\n )",
"def next_line(rule):\n return shift_line(1, rule)",
"def next(self):\n # apply implicit line ending conversion\n line = self.readline()\n if line:\n return line\n else:\n raise StopIteration",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def nth(n, generator):\n return next(itertools.islice(generator, n-1, n))",
"def goto_recnum(self, n):\n if n == -1:\n self.fp.seek(0, 2)\n else:\n self.fp.seek(n * self.blocksize, 0)",
"def __getitem__(self, n):\n if not (0 <= n < self.nrows):\n raise ValueError('0 >= row > %d, but %d given.'\n % (self.nrows, n))\n return self._sheet.row(n)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return 1 if the next line is blank or nonexistent. | def is_next_line_blank(self):
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1 | [
"def non_blank_lines(thing):\n \n count = 0\n for line in thing:\n if line.strip():\n count += 1\n return count",
"def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line",
"def _not_empty_line(line):\n return len(line) > 0",
"def first_line_number(self):\n self._assert_buffer_not_empty()\n return 1",
"def _is_empty_line(self, line):\r\n return re.match('\\s*$', line) is not None",
"def count_last_empty_lines(s):\n cnt = 0\n lines = s.splitlines()\n lines.reverse()\n for l in lines:\n if re.match(\"^\\s*$\", l):\n cnt += 1\n else:\n return cnt\n return cnt",
"def equal_num(line):\n\treturn not (line.count('1') > len(line) // 2 or line.count('0') > len(line) // 2)",
"def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'",
"def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")",
"def count_never_executed(self):\n lineno = self.firstlineno\n counter = 0\n for line in self.source:\n if self.sourcelines.get(lineno) == 0:\n if not self.blank_rx.match(line):\n counter += 1\n lineno += 1\n return counter",
"def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line",
"def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)",
"def _should_skip(self, line):\r\n return self._is_empty_line(line) or\\\r\n self._is_comment_line(line) or\\\r\n self._is_group_header_line(line) or\\\r\n self.delimiter not in line",
"def first_non_whitespace_index (line): \n return len (line) - len (line.lstrip ())",
"def has_next(self):\n\n return self.index < len(self.string)",
"def get_n_lines(input_path):\r\n count = 0\r\n with open(input_path, \"r\") as input_file:\r\n for line in input_file:\r\n if line.strip() == \"\":\r\n print(\"WARN: Found empty line while counting lines, will not count.\")\r\n continue\r\n count += 1\r\n return count",
"def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0",
"def should_count_spines(line):\n return line != \"\" and line != config.MEASURE_SYMBOL",
"def mylen(self):\n self.ilen=len(self.line)\n \n itab = 0 # flag if tab before 1st non-whitespace character\n self.ifnb = 0\n isig = 0\n while self.ifnb < self.ilen and isig == 0:\n if string.find(string.whitespace,self.line[self.ifnb]) <> -1:\n if self.line[self.ifnb] == '\\t':\n itab = 1\n self.ifnb += 1\n else:\n isig=1\n \n return itab",
"def _getStartExcludingNewlines(self, line_list):\n\n for count, item in enumerate(line_list):\n item = item.strip()\n if item != \"\":\n return count\n return -1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load `self.line` with the `n`'th previous line and return it. | def previous_line(self, n=1):
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line | [
"def previous(self, n = 1):\n return NonStandardInteger(self.non_st_part, self.st_part - n, self.non_st_ring)",
"def prev_line(rule):\n return shift_line(-1, rule)",
"def read_line(file_path, n):\n return linecache.getline(file_path, n)",
"def NthLineOfFile( fname, n = 0 ):\n with open( fname ) as f:\n while n > 0:\n f.readline()\n n -= 1\n return f.readline().strip()",
"def undo(self, n=1):\n if not self.history:\n return None\n\n if len(self.history) < n:\n n = len(self.history)\n\n entries = list(self.rl_history.entries)\n\n self.history.entries = self.history[:-n]\n\n self.reevaluate()\n\n self.rl_history.entries = entries",
"def gotoLine(self, n):\n self.fileIndex = n",
"def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line",
"def shift_line(n, rule, skip_comments=True):\n def wrap(line, info):\n old_index = info['line_index']\n new_index = old_index + n\n\n if 0 <= new_index < info['nlines']:\n new_lineno, new_line = info['lines'][new_index]\n info['line_index'] = new_index\n old_lineno, info['lineno'] = info['lineno'], new_lineno\n res = rule(new_line, info)\n info['lineno'], info['line_index'] = old_lineno, old_index\n return res\n return False\n\n return wrap",
"def get_line_number(self):\n return self.line_number",
"def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")",
"def nth_item(line, n: int = 0):\n return line.split()[n]",
"def up(self, n_lines=1):\n self.down(-n_lines)",
"def _take_previous_line_pos(self, pos):\r\n\t\t(row, col) = self.view.rowcol(pos.begin())\r\n\t\tpoint = self.view.text_point(row - 1, col)\r\n\t\treturn sublime.Region(point, point)",
"def tail(filepath, n):\n tail = []\n with open(filepath, 'rt') as fin:\n for line in fin:\n tail.append(line.strip())\n\n return tail[len(tail) - n:]\n #return tail[-n:] ---this was the orignal code",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line",
"def get_timestep(self, n):\n if n < 0:\n for h in self.real_handles:\n h.seek(0, SEEK_END)\n for n in range(-n):\n self.skip_back_timestep()\n elif n == 0:\n raise ValueError(\"step number must be positive or negative, not zero\")\n else:\n # should we seek(0) first?\n for n in range(n-1):\n self.skip_next_timestep()\n\n return self.get_next_timestep()",
"def tail(file, n):\n with open(file) as in_fh:\n lines = in_fh.readlines()\n print(\"\".join(lines[-n:]))",
"def prev_num(self):\n self.current_page - 1",
"def next_line(self, oldLine):\n nextLine = ''\n #nextLine += self.rule[neighbor.index('0' + oldLine[:2])]\n for i in range(len(oldLine) - 2):\n nextLine += self.rule[neighbor.index(oldLine[i:i+3])]\n if len(self.lines) == 1: # if we work on the second line\n nextLine = self.rule[neighbor.index(oldLine[-2:] + '0')] +\\\n nextLine +\\\n self.rule[neighbor.index(oldLine[-2:] + '0')]\n else:\n nextLine = oldLine[0] + nextLine + oldLine[-1]\n #nextLine += self.rule[neighbor.index(oldLine[-2:] + '0')]\n return nextLine"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to absolute line offset `line_offset`, load and return it. | def goto_line(self, line_offset):
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers() | [
"def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))",
"def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None",
"def load_offset(offset_file):\n offset_file = realpath(offset_file)\n return np.loadtxt(offset_file)",
"def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1",
"def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None",
"def jump_to_line(self, lineno):\r\n self._main.editor_jump_to_line(lineno=lineno)",
"def offset_from_line(line, firstlineno, lnotab):\n # TODO: Handle negetive offsets!\n n = len(lnotab)\n assert n & 1 == 0\n\n l = firstlineno\n tab = lnotab\n offset = 0\n index = 0\n while tab:\n index += 1\n b, d, *tab = tab\n l += d\n offset += b\n if l >= line:\n return offset, index\n raise IndexError(\"Line out of bound\")",
"def getAddressFromFileOffset(self,offset):\n return HopperLowLevel.getAddressFromFileOffset(self.__internal_document_addr__, offset)",
"def getLoc(self, file, line, join=False):\n with open(file if not join else os.path.join(PATH, file), \"r\") as f:\n i = 0\n while i < line - 1:\n f.readline()\n i += 1\n return f.readline()",
"def goToLine(self, lineno):\n # Go to start and move pointer to given line no\n self.goToStart()\n line_count = 1\n eof = False\n pos = 0\n while not eof and line_count != lineno:\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n pos = self.file_obj.tell()\n line_count += 1\n\n self.line_no = line_count\n self.offset = pos",
"def load_by_offset(self, offset, size):\n raise NotImplementedError()",
"def _read_entity_from_offset(self, offset):\n self.entities_mm.seek(offset)\n l = self.entities_mm.readline()\n return self._string_to_entity(l)",
"def jump_to_line(self, lineno=None):\r\n if lineno is not None:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(lineno)\r\n return\r\n\r\n maximum = self.blockCount()\r\n line = QInputDialog.getInt(self, self.tr(\"Jump to Line\"),\r\n self.tr(\"Line:\"), 1, 1, maximum, 1)\r\n if line[1]:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(line[0] - 1)",
"def _lower_bound(self, query: str, offset_l: int, offset_h: int) -> int:\n logging.debug('lower bound 2 %s %s %s', query, offset_l, offset_h)\n if offset_l >= offset_h:\n return self._seek_back_to_line_start(offset_l)\n\n mid = (offset_l + offset_h) // 2\n\n line_start = self._seek_back_to_line_start(mid)\n #current_id = self._id_from_line(line_start)\n current_line = self._get_line(line_start)\n next_line_start = self._seek_to_next_line(mid)\n\n #if current_id >= query:\n if current_line >= query:\n return self._lower_bound(query=query, offset_l=offset_l, offset_h=line_start - 1)\n return self._lower_bound(query=query, offset_l=next_line_start, offset_h=offset_h)",
"def _resolve_lineno(self, lineno):\n if lineno is None:\n return self.line_number()\n return lineno",
"def get_section_by_offset(self, offset):\n\n for section in self.sections:\n if section.contains_offset(offset):\n return section\n\n return None",
"def fine_tuning(raw_line, offset_step=0.05):\n def _offset(symbols):\n if not symbols:\n return 0\n sign = int('{}1'.format(symbols[0]))\n return len(symbols) * offset_step * sign\n\n result = re.match(OFFSET_PATTERN, raw_line).groupdict()\n line = result.pop('line').strip()\n return {line: {k: _offset(v) for k, v in result.items()}}",
"def __init__(self, pos_team=None, yardline=None, offset=None):\r\n if isinstance(offset, int):\r\n self.offset = offset\r\n return\r\n if yardline == '50':\r\n self.offset = 0\r\n return\r\n\r\n territory, yd_str = yardline.split()\r\n yd = int(yd_str)\r\n if territory == pos_team:\r\n self.offset = -(50 - yd)\r\n else:\r\n self.offset = 50 - yd",
"def editor_go_to_line(self, line):\r\n editorWidget = self.get_current_editor()\r\n if editorWidget:\r\n editorWidget.jump_to_line(line)",
"def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return source of line at absolute line offset `line_offset`. | def get_source(self, line_offset):
return self.input_lines.source(line_offset - self.input_offset) | [
"def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line",
"def raise_source_exception(\n source: str,\n rel_path: Path,\n source_lineno: int,\n file_lineno: int,\n source_offset: int | None = None,\n exception: Exception | None = None,\n) -> None:\n message = exception.msg if exception else \"\"\n source_lines = [\n (\"....\" if n != source_lineno - 1 else \" >\") + line\n for n, line in enumerate(source.splitlines())\n ]\n if source_offset:\n source_lines.insert(source_lineno, f\"{' '*(source_offset+3)}^ {message}\")\n annotated_source = \"\\n\".join(source_lines)\n exception = type(exception) if exception else SyntaxError\n msg = f\"{rel_path}:{file_lineno}: {message}\\n{annotated_source}\"\n raise exception(\n msg,\n ) from None",
"def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))",
"def GetSymbolSourceLine(symbol):\n return SourceSymbolSourceLine.get(symbol, 0)",
"def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')",
"def analyze_last_line(line, offset=None):\n tokens = utils.tokenize_source(line) # tokens do not include spaces nor comments\n\n if not tokens:\n return\n\n for analyzer in LINE_ANALYZERS:\n cause = analyzer(tokens, offset=offset)\n if cause:\n return cause\n return",
"def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None",
"def make_source_range(self, token: str, string: str, lineno: int, offset: int = 0) -> SourceRange:\n col_begin = string.index(token, offset) + 1\n col_end = col_begin + len(token)\n return SourceRange.from_positions(self.cursor.translation_unit, lineno, col_begin, lineno, col_end)",
"def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None",
"def getAddressFromFileOffset(self,offset):\n return HopperLowLevel.getAddressFromFileOffset(self.__internal_document_addr__, offset)",
"def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1",
"def get_string(self, offset):\r\n table_offset = self['sh_offset']\r\n s = parse_cstring_from_stream(self.stream, table_offset + offset)\r\n return s",
"def _get_line(self, regex):\n return self._match(regex).group(1)",
"def get_string_from_table(self, offset):\r\n return parse_cstring_from_stream(self.debug_str_sec.stream, offset)",
"def handle_source(self, line):\n self._source_lines_buffered.append(line)\n ## Ask client if line is complete; get indent for next line:\n if self.use_kernel_is_complete:\n msg_id = self.client.is_complete(\"\\n\".join(self._source_lines_buffered))\n return self.handle_is_complete_reply(msg_id, timeout=self.kernel_is_complete_timeout)\n else:\n more = (line != \"\")\n return more, \"\"",
"def _highlit_line(content, offsets, markup, markdown, encoding):\n def chunks():\n try:\n # Start on the line the highlights are on:\n chars_before = content.rindex('\\n', 0, offsets[0][0]) + 1\n except ValueError:\n chars_before = None\n for start, end in offsets:\n yield cgi.escape(content[chars_before:start].decode(encoding,\n 'replace'))\n yield markup\n yield cgi.escape(content[start:end].decode(encoding, 'replace'))\n yield markdown\n chars_before = end\n # Make sure to get the rest of the line after the last highlight:\n try:\n next_newline = content.index('\\n', chars_before)\n except ValueError: # eof\n next_newline = None\n yield cgi.escape(content[chars_before:next_newline].decode(encoding,\n 'replace'))\n return ''.join(chunks()).lstrip()",
"def getLine(self, line_id: int) -> Line:\n return self.pool[line_id]",
"def _read_entity_from_offset(self, offset):\n self.entities_mm.seek(offset)\n l = self.entities_mm.readline()\n return self._string_to_entity(l)",
"def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1",
"def getLine(self) -> \"SbLine const &\":\n return _coin.SbLineProjector_getLine(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return (source, line) tuple for current or given line number. Looks up the source and line number in the `self.input_lines` StringList instance to account for included source files. If the optional argument `lineno` is given, convert it from an absolute line number to the corresponding (source, line) pair. | def get_source_and_line(self, lineno=None):
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except (TypeError):
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except (IndexError): # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline) | [
"def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1",
"def _resolve_lineno(self, lineno):\n if lineno is None:\n return self.line_number()\n return lineno",
"def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line",
"def line_number(self, line):\n ret_val = self._line_number(line)\n return ret_val",
"def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None",
"def indexByLineNumber(self,n):\n for idx in range(len(self.__data)):\n if self.__data[idx].lineno() == n:\n return idx\n raise IndexError,\"No line number %d\" % n",
"def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1",
"def scan_source_line_comments(\n self,\n fp: TextIO,\n line_numbers: Iterable[int]\n ) -> Tuple[List[Tuple[int, SourceCodeComments]], List[str]]:\n comments: List[Tuple[int, SourceCodeComments]] = []\n misspelled_comments: List[str] = []\n if not contains_codechecker_comment(fp):\n return comments, misspelled_comments\n\n line_numbers = sorted(line_numbers)\n for num in line_numbers:\n try:\n comments.append((num, self.get_source_line_comments(fp, num)))\n except SpellException as ex:\n misspelled_comments.append(str(ex))\n return comments, misspelled_comments",
"def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line",
"def _FindFileLine(outbuffer, line, fname, regex):\n match = regex.findall(outbuffer.GetLine(line))\n ifile = None\n if len(match):\n ifile = match[0][0]\n try:\n line = max(int(match[0][1]) - 1, 0)\n except (IndexError, TypeError):\n line = 0\n\n # If not an absolute path then the error is relative to the\n # script that produced this error message.\n if ifile is not None and not os.path.isabs(ifile):\n dname = os.path.split(fname)[0]\n ifile = os.path.join(dname, ifile)\n\n return (ifile, line)",
"def line_search(self, regex, lineno=None):\n return regex.search(self.line_text(lineno))",
"def get_line_number(self):\n return self.line_number",
"def eval_line(self, number: int) -> object:\n\n if self.ast:\n interpreter = Interpreter(self.filename)\n\n cur_line = self.buf[number-1].strip()\n if (not cur_line) or any(cur_line.startswith(token)\n for token in self.ignored_tokens):\n raise ValueError()\n\n # constuction area:\n # =================\n pre_nodes = []\n\n def find_cur_node(node):\n if hasattr(node, 'lineno') and node.lineno == number:\n return node\n\n if hasattr(node, 'body'):\n for subn in node.body:\n if subn.lineno > number:\n break\n pre_nodes.append(subn)\n nextn = subn\n else:\n return None\n\n pre_nodes.pop()\n return find_cur_node(nextn)\n\n node = find_cur_node(self.ast)\n\n compiled = interpreter.compile(ast.Module(pre_nodes), 'exec')\n interpreter.exec_code(compiled)\n\n if isinstance(node, ast.If):\n source = node.test\n else:\n source = node.value\n\n compiled = interpreter.compile(ast.Expression(source), 'eval')\n value, error = interpreter.eval_code(compiled)\n # =================\n\n return (self._format_exc(error) if error\n else self._format_value(value))\n else:\n return self.msg",
"def software_source_line_number(self):\n return self._software_source_line_number",
"def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None",
"def _compute_lineno(cls, table, code):\n for offset, lineno in dis.findlinestarts(code):\n adj_offset = offset + _FIXED_OFFSET\n if adj_offset in table:\n table[adj_offset].lineno = lineno\n # Assign unfilled lineno\n # Start with first bytecode's lineno\n known = code.co_firstlineno\n for inst in table.values():\n if inst.lineno >= 0:\n known = inst.lineno\n else:\n inst.lineno = known\n return table",
"def get_lines(name, source = None):\n\n cmd = ['./llvm-to-source', name]\n if source:\n #cmd.append('-lines-only')\n cmd.append(source)\n p = Popen(cmd, cwd = srcdir, stdout=PIPE, stderr=PIPE)\n out, errs = p.communicate()\n if p.poll() != 0:\n sys.stderr.write(errs)\n sys.exit(1)\n\n assert not out is None\n return frozenset(map(int, out.split()))",
"def getSnippetIdentifier(self, file, line):\n for i in self.fileInfo[file]:\n if i == \"path\":\n continue\n if line in range(self.fileInfo[file][i][\"start\"], self.fileInfo[file][i][\"stop\"] + 1):\n return i",
"def parse_position(errmsg, arg):\n colon = arg.rfind(':') \n if colon >= 0:\n filename = arg[:colon].rstrip()\n m, f = lookupmodule(filename)\n if not f:\n errmsg(\"'%s' not found using sys.path\" % filename)\n return (None, None, None)\n else:\n filename = file_pyc2py(f)\n arg = arg[colon+1:].lstrip()\n pass\n try:\n lineno = int(arg)\n except TypeError:\n errmsg(\"Bad line number: %s\", str(arg))\n return (None, filename, None)\n return (None, filename, lineno)\n return (None, None, None)",
"def lineno(self) -> int:\n return self.node.lineno"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Examine one line of input for a transition match & execute its method. | def check_line(self, context, state, transitions=None):
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions) | [
"def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions)), file=self._stderr)\r\n for name in transitions:\r\n pattern, method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__)), file=self._stderr)\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__), file=self._stderr)\r\n return state.no_match(context, transitions)",
"def parse_transition_line(line: Line, trans_txt: str, from_state: State, to_state: State) -> Transition:\n m = re.fullmatch(r'^(\\w+)\\s*(\\[\\s*(.*?)\\s*\\]\\s*)?(/(.*))?', trans_txt.replace('\\\\n', ''))\n assert m, f'Invalid transition format in {line}: {line.orig_text}'\n\n event_name, _, guard_code, _, actions_txt = m.groups()\n actions_code = [] if not actions_txt else [x.strip() for x in actions_txt.split('/') if x.strip()]\n\n event = Event(event_name)\n guard = None if not guard_code else Guard(guard_code)\n actions = [Action(x) for x in actions_code]\n transition = Transition(event, guard, from_state, to_state, actions)\n\n return transition",
"def run(self, input_lines, input_offset=0, context=None,\r\n input_source=None, initial_state=None):\r\n self.runtime_init()\r\n if isinstance(input_lines, StringList):\r\n self.input_lines = input_lines\r\n else:\r\n self.input_lines = StringList(input_lines, source=input_source)\r\n self.input_offset = input_offset\r\n self.line_offset = -1\r\n self.current_state = initial_state or self.initial_state\r\n if self.debug:\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: input_lines (line_offset=%s):\\n| %s'\r\n % (self.line_offset, u'\\n| '.join(self.input_lines)))\r\n transitions = None\r\n results = []\r\n state = self.get_state()\r\n try:\r\n if self.debug:\r\n print >>self._stderr, '\\nStateMachine.run: bof transition'\r\n context, result = state.bof(context)\r\n results.extend(result)\r\n while True:\r\n try:\r\n try:\r\n self.next_line()\r\n if self.debug:\r\n source, offset = self.input_lines.info(\r\n self.line_offset)\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: line (source=%r, '\r\n u'offset=%r):\\n| %s'\r\n % (source, offset, self.line))\r\n context, next_state, result = self.check_line(\r\n context, state, transitions)\r\n except EOFError:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: %s.eof transition'\r\n % state.__class__.__name__)\r\n result = state.eof(context)\r\n results.extend(result)\r\n break\r\n else:\r\n results.extend(result)\r\n except TransitionCorrection, exception:\r\n self.previous_line() # back up for another try\r\n transitions = (exception.args[0],)\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: TransitionCorrection to '\r\n 'state \"%s\", transition %s.'\r\n % (state.__class__.__name__, transitions[0]))\r\n continue\r\n except StateCorrection, exception:\r\n self.previous_line() # back up for another try\r\n next_state = exception.args[0]\r\n if len(exception.args) == 1:\r\n transitions = None\r\n else:\r\n transitions = (exception.args[1],)\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: StateCorrection to state '\r\n '\"%s\", transition %s.'\r\n % (next_state, transitions[0]))\r\n else:\r\n transitions = None\r\n state = self.get_state(next_state)\r\n except:\r\n if self.debug:\r\n self.error()\r\n raise\r\n self.observers = []\r\n return results",
"def parse(self, player, message):\n #test if the message match a command available for the player state\n matched = self.cmd_regex[player.get_state()].match(message)\n if matched:\n # execute the relative function\n cmd = matched.group(\"command\")\n arg = matched.group(\"arguments\") or ''\n getattr(self, Cmd.commands[cmd].fn)(player, arg)\n else:\n #self.game.log(\n # \"Unknown command <{}> for state {}.\"\n # .format(message, player.get_state()))\n info(player, \"<code>Arglebargle !?</code>\")",
"def transition(self):\n statedef = self.transitions[self.state]\n for path in statedef:\n pat, dest = path[:2]\n retval = None\n if type(pat).__name__ == \"str\" and pat == self.char or \\\n type(pat).__name__ == \"SRE_Pattern\" and pat.match(self.char): # Regexp objects match like regexps\n for action in path[2:]:\n retval = action(self) # Keep the return value to return from ourselves\n self.state = dest\n return retval\n raise Exception(\"No matching path for char %s from state %d.\" % (self.char, self.state))",
"def consume(self, inp):\n if self.state.is_end:\n raise ValueError(\"state %s is terminal\" % self.state.name)\n # Follow the first matched rule of current state.\n for predicate, target, action in self.state.rules:\n if predicate(inp, self.stack):\n if action is not None:\n action(inp, self.stack)\n self.state = target\n break\n else: # No match found, follow default.\n if self.state.default_action is not None:\n self.state.default_action(inp, self.stack)\n self.state = self.state.default_target",
"def process_line(self, line):\n if not line:\n return\n msg = self.line_to_message(line)\n self.handle_message(msg)",
"def parse(line):\n if line.startswith('turn on'):\n action = 'on'\n elif line.startswith('turn off'):\n action = 'off'\n elif line.startswith('toggle'):\n action = 'toggle'\n else:\n raise Exception('Unexpected input: \"{}\"'.format(line))\n start, end = map(parse_pair, re.findall(r'\\d+,\\d+', line))\n return action, start, end",
"def test_match_sentence_with_steps(given_sentence, given_steps, expected_argument_match_type, expected_func_match):\n # given & when\n match = matcher.match_step(given_sentence, given_steps)\n\n # then\n assert isinstance(match.argument_match, expected_argument_match_type)\n assert match.func == expected_func_match",
"def process_ops_input(self):\n input_data = self.text()\n if (self.local_state == State.GOTO_LINE):\n self.goto_line.emit(int(input_data))\n else:\n self.search.emit(input_data)",
"def input(self, i):\n self.i_count += 1\n rlist = self.rules.get(self.state, [])\n for (test, dst, action, tag) in rlist + self.rules.get(None, []): # Rules starting from None are added to all states\n t_info = TransitionInfo(self.state, dst, self.i_count, None)\n result = test(i, t_info) if callable(test) else test == i\n t_info = t_info._replace(result=result)\n if result:\n if dst is not None: # Transitions ending in None stay in the same state\n self.state = dst\n # Run the action after the state change so it could override the end state (e.g. pop state from a stack)\n out = action(i, t_info) if callable(action) else action\n # Be sure to trace the actual end state after `action` is done\n self.tracer(i, TraceInfo(t_info, test, action, tag, out, self.state))\n return out\n self.tracer(i, TraceInfo(t_info, test, action, tag, None, self.state))\n\n return self.unrecognized(i, self.state, self.i_count)",
"def testLineParsingNormal(self):\n\n a = LedSwitcher(\"../test/testinputs/input_assign3.txt\")\n a.parseFile()\n self.assertTrue(a.parseEachLine(\"turn on 619,181 through 736,944\") == [True, 619, 181, 736, 944])",
"def process_line(self, line):\n args = line.split(' ')\n command = args[0]\n try:\n handler = getattr(self, f'c_{command}')\n except AttributeError:\n log.warning(f'command {command!r} not found')\n\n try:\n handler(args)\n except ShutdownClient as err:\n self.shutdown(err.args[0])\n except Exception:\n log.exception('error executing command')",
"def parse(cls, input):",
"def parseInput(input):\n # parse=bash(\"sh ../bitpar/parse '\"+input+\"'\") # ouput: [.VP [.V draw][.NP [.D a][.N-bar [.N square]]]]\n bash(\"java -jar ../lambda/lambda-auto.jar ../lambda/input.txt > ../lambda/input.tex\")\n fml=bash(\"make -C ../lambda input.fml\")\n print fml\n cmd=`fml`.split('true ')[1]\n \n # TEST CASES\n # cmd=\"draw(Gy[red(y) & square(y)])\" \n cmd=\"draw(\\gamma y(red(y) & square(y))).\"\n\n print cmd\n parse(cmd)",
"def __get_transition(self, i: int) -> int:\n line = self.contents[i]\n pieces = [x for x in line.split() if (x.find(':') == -1)]\n action = self.actions.index(pieces[0])\n\n if len(pieces) == 4:\n # case 1: T: <action> : <start-state> : <next-state> %f\n start_state = self.states.index(pieces[1])\n next_state = self.states.index(pieces[2])\n prob = float(pieces[3])\n self.T[(action, start_state, next_state)] = prob\n return i + 1\n\n elif len(pieces) == 3:\n # case 2: T: <action> : <start-state> : <next-state>\n # %f\n start_state = self.states.index(pieces[1])\n next_state = self.states.index(pieces[2])\n next_line = self.contents[i + 1]\n prob = float(next_line)\n self.T[(action, start_state, next_state)] = prob\n return i + 2\n\n elif len(pieces) == 2:\n # case 3: T: <action> : <start-state>\n # %f %f ... %f\n start_state = self.states.index(pieces[1])\n next_line = self.contents[i + 1]\n probs = next_line.split()\n assert len(probs) == len(self.states)\n for j in range(len(probs)):\n prob = float(probs[j])\n self.T[(action, start_state, j)] = prob\n return i + 2\n\n elif len(pieces) == 1:\n next_line = self.contents[i + 1]\n if next_line == 'identity':\n # case 4: T: <action>\n # identity\n for j in range(len(self.states)):\n for k in range(len(self.states)):\n prob = 1.0 if j == k else 0.0\n self.T[(action, j, k)] = prob\n return i + 2\n\n elif next_line == 'uniform':\n # case 5: T: <action>\n # uniform\n prob = 1.0 / float(len(self.states))\n for j in range(len(self.states)):\n for k in range(len(self.states)):\n self.T[(action, j, k)] = prob\n return i + 2\n\n else:\n # case 6: T: <action>\n # %f %f ... %f\n # %f %f ... %f\n # ...\n # %f %f ... %f\n for j in range(len(self.states)):\n probs = next_line.split()\n assert len(probs) == len(self.states)\n for k in range(len(probs)):\n prob = float(probs[k])\n self.T[(action, j, k)] = prob\n next_line = self.contents[i + 2 + j]\n return i + 1 + len(self.states)\n\n else:\n raise Exception('Cannot parse line ' + line)",
"def lex(self, line):\n\n # only add line if we are in a continuation or line is not empty\n if self.continuation is True or line.strip() != '':\n self.line += line\n\n self.continuation = False\n # keep running states until out of data or we need a continuation\n while self.continuation is False and len(self.line) > 0:\n for token in self.state():\n if token.ident == Lexer.error.ident:\n yield token\n # reset state on error\n self._reset()\n return\n yield token",
"def step(self):\n if self.__global_state != DFAGlobalState.START:\n raise RuntimeError('DFA is not started!')\n\n if len(self.__input_list) > 0:\n ch = self.__input_list[0]\n transit_to = self.__find_current_state_transition(ch)\n if transit_to:\n self.__logging_list.add_event(DfaLoggingEvent(self.__current_state, ch, transit_to))\n self.__current_state = transit_to\n self.__input_list = self.__input_list[1:]\n else:\n self.__logging_list.set_error(f'no transition for symbol \"{ch}\" in state \"{self.__current_state}\"')\n self.halt()\n return\n else:\n if self.__current_state not in self.__dfa_dict['end_states']:\n self.__logging_list.set_error(f'input string ended at non end state \"{self.__current_state}\"')\n self.halt()",
"def parse(self, transitions):\n #print \"\\n\\n\\n\\n%s\\n\\n\\n\\n\\n\\n\"%self.sentence\n # print transitions\n for transition in transitions:\n self.parse_step(transition)\n #print \"#######################\\n%s##############\\n\"%self.sentence\n return self.dependencies"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize & add a `state_class` (`State` subclass) object. | def add_state(self, state_class):
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug) | [
"def setup_class(cls):\n cls.state = State()\n cls.state.name = \"Oregon\"",
"def set_classy_state(self, state: Dict[str, Any]) -> None:\n raise NotImplementedError",
"def fsm_factory(name, states):\n className = name.capitalize() + \"State\"\n attribs = dict(\n __mapper_args__={\"polymorphic_identity\": name},\n table=name,\n values=states,\n )\n class_ = type(className, (State,), attribs)\n return class_",
"def __init__(self):\n this = _coin.new_ScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def _state(self):\n self.state = _State(self.ct)",
"def __init__(self):\n this = _coin.new_SoScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, state=False):\r\n \r\n # If state is a string, convert it to all caps (for case insensitivity)\r\n # and convert it into bool\r\n # (True if it is \"ON\", False if it is \"OFF\")\r\n if(isinstance(state, str)):\r\n state = state.upper()\r\n if (state == \"ON\"):\r\n state = True\r\n elif (state == \"OFF\"):\r\n state = False\r\n else:\r\n raise InvalidStateException\r\n \r\n # Set the state of the switch\r\n self._on = state\r\n \r\n return",
"def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state",
"def __init__(self):\n this = _coin.new_ScXMLStateElt()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def state_class(self):\n return self._stateclass",
"def test_init_state(self) -> None:\n # Execute\n state = self.state_factory()\n\n # Assert\n assert isinstance(state, State)",
"def __init__(self, state_name: str, changes: List[int]) -> None:\n self.__changes = changes\n super(GuineaPigState, self).__init__(state_name.upper())",
"def state_class(self):\n return self._state_class",
"def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_enable_bfd_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_enable_bfd_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()",
"def initial_state(self):\n return GeneralGameState(self)",
"def __init__(self, state_type=None, name=None, path=None, vibs_path=None, sigma=None,\n mass=None, inertia=None, gasdata=None, add_to_energy=None, path_to_pickle=None,\n read_from_alternate=None, truncate_freq=True, energy_source=None, freq_source=None,\n freq=None, i_freq=None, Gelec=None, Gzpe=None, Gvibr=None, Gtran=None, Grota=None, Gfree=None):\n\n if path_to_pickle:\n assert (os.path.isfile(path_to_pickle))\n newself = pickle.load(open(path_to_pickle, 'rb'))\n assert (isinstance(newself, State))\n for att in newself.__dict__.keys():\n setattr(self, att, getattr(newself, att))\n else:\n if name is None:\n name = os.path.basename(path)\n self.state_type = state_type\n self.name = name\n self.path = path\n self.vibs_path = vibs_path\n self.sigma = sigma\n self.mass = mass\n self.inertia = inertia\n self.gasdata = gasdata\n self.add_to_energy = add_to_energy\n self.read_from_alternate = read_from_alternate\n self.truncate_freq = truncate_freq\n self.energy_source = energy_source\n self.freq_source = freq_source\n self.Gelec = Gelec\n self.Gzpe = Gzpe\n self.Gtran = Gtran\n self.Gvibr = Gvibr\n self.Grota = Grota\n self.Gfree = Gfree\n self.tran_source = None if self.Gtran is None else 'inputfile'\n self.rota_source = None if self.Grota is None else 'inputfile'\n self.vibr_source = None if self.Gvibr is None else 'inputfile'\n self.free_source = None if self.Gfree is None else 'inputfile'\n self.freq = None\n self.i_freq = None\n self.shape = None\n self.atoms = None\n if freq is not None:\n self.freq_source = 'inputfile'\n self.freq = np.array(sorted(freq, reverse=True))\n if i_freq is not None:\n self.i_freq = np.array(sorted(i_freq, reverse=True))\n if self.state_type == 'gas':\n assert(self.sigma is not None)\n\n if self.inertia is not None:\n inertia_cutoff = 1.0e-12\n self.inertia = np.array([i if i > inertia_cutoff else\n 0.0 for i in self.inertia])\n self.shape = len([i for i in self.inertia if i > 0.0])\n if self.shape < 2:\n print('Too many components of the moments of inertia are zero.'\n 'Please specify atoms differently.')",
"def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLStateElt_addState(self, state)",
"def addState(self, state):\n id = len(self.states)\n self.states.append(state)\n return id",
"def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addState(self, state)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add `state_classes` (a list of `State` subclasses). | def add_states(self, state_classes):
for state_class in state_classes:
self.add_state(state_class) | [
"def add_css_classes(self, *css_classes):\n for cls in css_classes:\n self._css_classes.add(cls)",
"def add_class(self, class_):\n self.classes.append(class_)",
"def addState(self, state):\n id = len(self.states)\n self.states.append(state)\n return id",
"def _register_classes(classes, addon_name_for_counter=None):\n\n from bpy.utils import register_class\n\n class_count = 0\n for cls in classes:\n register_class(cls)\n class_count += 1\n if addon_name_for_counter:\n print(f\"{addon_name_for_counter}: Registered {str(class_count)} classes\")",
"def register_event_handler_classes(self, *event_handler_classes: Type[EventHandler]) -> None:\n _logger.info(\"Registering event handler classes: {}\",\n \", \".join(str(c) for c in event_handler_classes))\n self._handlers += [self.injector.get_instance(cls) for cls in event_handler_classes]",
"def _registerClasses(classes) -> None:\n global _registered_classes\n _registered_classes = classes",
"def set_classy_state(self, state: Dict[str, Any]) -> None:\n raise NotImplementedError",
"def update_state_history(self, state: int) -> None:\n self.state_history.append(state)",
"def add_states(self, name, state_group):\n try:\n self._state_groups[name] = state_group\n except AttributeError:\n self._state_groups = { name: state_group }",
"def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLStateElt_addState(self, state)",
"def pushState(self, state):\n self.statebuff = [state] + self.statebuff[:len(self.statebuff) - 1]",
"def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addState(self, state)",
"def add_state_group(\n self, name: str, *states: t.Union[ManagedState, ManagedStateGroup]\n ) -> None:\n # See `_add_state_internal` for explanation of the following\n if hasattr(self, name):\n raise AttributeError(\n f\"State group name {name!r} conflicts with an existing \"\n f\"attribute in the state manager\"\n )\n mstate = ManagedStateGroup(name, self, states)\n self.states[name] = mstate\n setattr(self, name, mstate)",
"def add_state(self, state):\n self.Q.add(state)\n self.delta[state] = defaultdict(set)",
"def add(self, state):\r\n # The hash function is a Python builtin that generates\r\n # a hash value from its argument. Use this to create\r\n # a dictionary key. Handle collisions by storing \r\n # states that hash to the same key in a bucket list.\r\n # Note that when you access a Python dictionary by a\r\n # non existant key, it throws a KeyError\r\n \r\n # if the hash key of the given state is not in this\r\n # explored instance's set of keys\r\n if state.__hash__() not in self.explored_set.keys():\r\n # create a new set for the particular hash key\r\n self.explored_set[state.__hash__()] = set()\r\n # then just add the state to the set of the\r\n # particular hash key\r\n self.explored_set[state.__hash__()].add(state)",
"def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLParallelElt_addState(self, state)",
"def setStateList (self, states):\n\t\tself.state_list = states",
"def register_classes_factory(classes):\n def register():\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n\n def unregister():\n from bpy.utils import unregister_class\n for cls in reversed(classes):\n unregister_class(cls)\n\n return register, unregister",
"def add(self, state):\n raise Exception(\"TreeSearch is abstract\")",
"def add_state(frontier,state, cost,stateStr,depth):\n\n count = next(counter)\n entry = [cost, count, state,stateStr,depth]\n entry_finder[stateStr] = entry\n heappush(frontier, entry)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
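A minimal, self-contained sketch of how the `add_states`/`add_state` pattern in the record above might be exercised. The `StateMachine` registry, the `add_state` body, and the subclass names are illustrative assumptions; only the `add_states` loop mirrors the documented code.

class State:
    """Base class; subclasses are registered by class name."""

class StateMachine:
    def __init__(self):
        self.states = {}  # class name -> State instance

    def add_state(self, state_class):
        # Register one State subclass under its class name (assumed behaviour).
        self.states[state_class.__name__] = state_class()

    def add_states(self, state_classes):
        # Mirrors the documented method: register each subclass in turn.
        for state_class in state_classes:
            self.add_state(state_class)

class Body(State): pass
class Text(State): pass

sm = StateMachine()
sm.add_states([Body, Text])
assert sorted(sm.states) == ["Body", "Text"]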
Make and add transitions listed in `self.initial_transitions`. | def add_initial_transitions(self):
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions) | [
"def getAutomaticTransitions():",
"def set_transitions(self, cell_transition, orientation, new_transitions):\n raise NotImplementedError()",
"def setup_transition_list(self):\n \n # Create an empty transition list\n xn_list = []\n \n # Append four transitions to the list.\n # Note that the arguments to the Transition() object constructor are:\n # - Tuple representing starting pair state\n # (left cell, right cell, orientation [0=horizontal])\n # - Tuple representing new pair state\n # (bottom cell, top cell, orientation [1=vertical])\n # - Transition rate (cells per time step, in this case 1 sec)\n # - Name for transition\n # - Flag indicating that the transition involves an exchange of properties\n # - Function to be called after each transition, to update a property\n # (in this case, to simulate bleaching of the luminescence signal)\n xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )\n xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )\n xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )\n xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )\n \n return xn_list",
"def __init__(self, transitions):\n if isinstance(transitions, types.GeneratorType):\n transitions = [t for t in transitions]\n assert isinstance(transitions, list) # currently, a list is wanted for a MDD (and not a set); to be changed?\n super().__init__(transitions)",
"def add_transition(self, from_state, to_state,\n trans_func, output_func):\n\n if from_state in self.transitions.keys():\n self.transitions[from_state].append((\n to_state,\n trans_func,\n output_func\n ))\n\n return\n\n self.transitions[from_state] = []\n self.transitions[from_state].append((\n to_state,\n trans_func,\n output_func\n ))",
"def add_transition(self, prev_time_step, prev_action, prev_legal_actions,\n time_step):\n assert prev_time_step is not None\n next_legal_actions = (\n time_step.observations[\"legal_actions\"][self.player_id])\n next_legal_one_hots = self._to_one_hot(next_legal_actions)\n # Added for deep OMD: keep previous action mask.\n prev_legal_one_hots = self._to_one_hot(prev_legal_actions)\n\n transition = Transition(\n info_state=(\n prev_time_step.observations[\"info_state\"][self.player_id][:]),\n action=prev_action,\n legal_one_hots=prev_legal_one_hots,\n reward=time_step.rewards[self.player_id],\n next_info_state=time_step.observations[\"info_state\"][self.player_id][:],\n is_final_step=float(time_step.last()),\n next_legal_one_hots=next_legal_one_hots)\n self._replay_buffer.add(transition)",
"def _start_transition(self, transition: TransitionBase, current_text: str, new_text: str,\n current_colors: List[RGBColor], new_colors: List[RGBColor],\n update_hz: float, flashing, flash_mask):\n current_colors = self._expand_colors(current_colors, len(current_text))\n new_colors = self._expand_colors(new_colors, len(new_text))\n if self._current_transition:\n self._stop_transition()\n self._current_transition = TransitionRunner(self.machine, transition, current_text, new_text,\n current_colors, new_colors)\n transition_text = next(self._current_transition)\n self._update_display(SegmentDisplayState(transition_text, flashing, flash_mask))\n self._transition_update_task = self.machine.clock.schedule_interval(self._update_transition, 1 / update_hz)",
"def lease_transitions(self, lease_transitions):\n\n self._lease_transitions = lease_transitions",
"def start(transition):",
"def run(self, s):\n state = self.init_state\n for c in s:\n state = self.transition(state, c)\n return state",
"def addTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_addTransition(self, transition)",
"def transitions(jira, args):\n print(\"Available JIRA transitions:\")\n pprint.pprint(jira.transitions(args.issue))",
"def setDefaultStateTransitionHandlers(self):\n StateTransition.setDefaultStateTransitionHandler(\n MemoryStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.Memory,\n )\n StateTransition.setDefaultStateTransitionHandler(\n VectorRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.VectorRegister,\n )\n StateTransition.setDefaultStateTransitionHandler(\n SystemRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.SystemRegister,\n )\n StateTransition.setDefaultStateTransitionHandler(\n GprStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.GPR,\n )\n StateTransition.setDefaultStateTransitionHandler(\n VmContextStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.VmContext,\n )\n StateTransition.setDefaultStateTransitionHandler(\n PrivilegeLevelStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.PrivilegeLevel,\n )\n StateTransition.setDefaultStateTransitionHandler(\n PcStateTransitionHandlerRISCV(self.genThread), EStateElementType.PC\n )\n StateTransition.setDefaultStateTransitionHandler(\n FloatingPointRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.FloatingPointRegister,\n )",
"def setTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLInitialElt_setTransition(self, transition)",
"def bindTransitions( form_instance, transitions, wf_name=None, wf=None):\n\n if wf_name:\n success_factory = lambda tid: TransitionHandler( tid, wf_name )\n else:\n success_factory = TransitionHandler\n\n actions = []\n for tid in transitions:\n d = {}\n if success_factory:\n d['success'] = success_factory( tid )\n if wf is not None:\n title = _(unicode(wf.getTransitionById( tid ).title))\n action = form.Action(title, **d)\n else:\n action = form.Action( tid, **d)\n action.form = form_instance\n action.__name__ = \"%s.%s\"%(form_instance.prefix, action.__name__)\n \n actions.append( action )\n return actions",
"def output_transitions(self, output_transitions):\n\n self._output_transitions = output_transitions",
"def add_to_fsm(self, fsm):\n final = fsm.get_next_state()\n if fsm.finals:\n final = fsm.finals[0]\n fsm.add_final(final)\n current = fsm.initial\n for i in range(len(self.relations) - 1):\n next_node = fsm.get_next_state()\n if i in self.inputs:\n fsm.add_transition(current,\n next_node,\n self.relations[i][0] + \"_IN\" +\n self.relations[i][1] * \"m\")\n else:\n fsm.add_transition(current,\n next_node,\n self.relations[i][0] + \"_OUT\" +\n self.relations[i][1] * \"m\")\n current = next_node\n if i in self.outputs:\n fsm.add_final(next_node)\n last = len(self.relations) - 1\n # Normally this is stupid...\n if last in self.inputs:\n fsm.add_transition(current,\n final,\n self.relations[last][0] + \"_IN\" +\n self.relations[last][1] * \"m\")\n else:\n fsm.add_transition(current,\n final,\n self.relations[last][0] + \"_OUT\" +\n self.relations[last][1] * \"m\")\n return fsm",
"def _update_transition(self):\n try:\n transition_text = next(self._current_transition)\n self._update_display(SegmentDisplayState(transition_text, self._current_state.flashing,\n self._current_state.flash_mask))\n\n except StopIteration:\n self._stop_transition()",
"def addTransition( self, wf, trans_id, REQUEST=None ):\n workflow = self[wf]\n workflow.transitions.addTransition(trans_id)\n if REQUEST is not None:\n REQUEST['RESPONSE'].redirect( self.portal_url() + '/transitions?wf=' + wf + \\\n '&portal_status_message=Transition+added' )",
"def addTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLParallelElt_addTransition(self, transition)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a transition by `name`. | def remove_transition(self, name):
try:
del self.transitions[name]
self.transition_order.remove(name)
except (KeyError, ValueError):
raise UnknownTransitionError(name) | [
"def remove(self, name):\n if self.states[name]:\n del self.states[name]",
"def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_removeTransition(self, transition)",
"def removeScene(self, name: str) -> None:\r\n\r\n for scene in self.scenes:\r\n if scene.name == name:\r\n self.scenes.remove(scene)\r\n return",
"def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLParallelElt_removeTransition(self, transition)",
"def remove_layer(self, name: str):\n for i, layer in enumerate(self.layers):\n if layer.name == name:\n del self.layers[i]",
"def remove(self, name):\n try:\n self.schedule_lock.acquire()\n the_task = None\n for task in self.schedule:\n if task.name == name:\n the_task = task\n if the_task is not None:\n self.schedule.remove(the_task)\n except:\n raise\n finally:\n self.schedule_lock.release()",
"def delete(self, name):\n if name in self.steps:\n self.steps.pop(name)\n else:\n self.log('{} not in steps dict'.format(name), level='warn')\n if name in self.order:\n ind = self.order.index(name)\n self.order = self.order[:ind] + self.order[ind + 1:]\n else:\n self.log('{} not in order tuple'.format(name), level='warn')\n self.save()",
"def remove(self, name):\n if not isinstance(name, string_types):\n raise TypeError('Tensor name must be a string.')\n if not self.has(name):\n raise RuntimeError(\"Can't find tensor: {}\".format(name))\n\n tensor = self._data[name]\n del self._data[name]\n\n return tensor",
"def remove(self, name):\n # remove and return the sentence sequence\n return self.sentences.pop(name, None)",
"def remove(self, name):\r\n if name not in self._task_map: return True\r\n\r\n id = [x.name() for x in self._task_list].index(name)\r\n self._task_list[id].stop(self)\r\n self.log.msg('Removed task`' + name +'`')\r\n del self._task_list[id]\r\n del self._task_map[name]\r\n return True",
"def removeCalendarWithName(name): # @NoSelf",
"def remove_input(self, name):\n self._input.remove(name)",
"def remove_transition(self, component):\n # check if component is valid\n if component != None:\n # check object type\n if type(component) == transition.Transition:\n # remove transition\n del self._transitions[component.key]\n return True\n return False",
"def remove_curve(self, name):\n logger.debug(\"Removing %s from TyphonTimePlot ...\", name)\n self.timechart.remove_curve(name)",
"def delete(self, name):\n LOG.info(\"Delete workflow [name=%s]\" % name)\n\n db_api.delete_workflow_definition(name)",
"def removeReference(self, name: 'SbName') -> \"void\":\n return _coin.SoInput_removeReference(self, name)",
"def remove_transition_key(self, key):\n # check if key is valid\n if key != \"\" and self._transitions.has_key(key):\n # remove transition\n del self._transitions[key]\n return True\n return False",
"def remove_hero(self, name):\n if self.name in self.heroes:\n z = index(self.name)\n self.heroes.pop(z)\n else:\n return 0",
"def removeEntityName(name):\n ierr = c_int()\n lib.gmshModelRemoveEntityName(\n c_char_p(name.encode()),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelRemoveEntityName returned non-zero error code: \",\n ierr.value)",
"def Remove(self, name):\n new_waypoints = []\n removed_waypoint = None\n with self._lock:\n for waypoint in self._waypoints:\n if waypoint.name != name:\n new_waypoints.append(waypoint)\n else:\n removed_waypoint = waypoint\n self._waypoints = new_waypoints\n if self._waypoints:\n self._waypoints[0].active = True\n return removed_waypoint"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
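A small sketch of the transition bookkeeping that `remove_transition` relies on: a name-to-transition mapping plus an ordered list of names, with lookup failures translated into one domain error. The `UnknownTransitionError` definition and the `add_transition` helper are assumptions for this illustration; the removal logic mirrors the record.

class UnknownTransitionError(Exception):
    pass

class State:
    def __init__(self):
        self.transitions = {}       # name -> transition object
        self.transition_order = []  # names, in priority order

    def add_transition(self, name, transition):
        self.transitions[name] = transition
        self.transition_order.insert(0, name)

    def remove_transition(self, name):
        # Translate the two possible lookup failures into one domain error.
        try:
            del self.transitions[name]
            self.transition_order.remove(name)
        except (KeyError, ValueError):
            raise UnknownTransitionError(name)

s = State()
s.add_transition("blank", object())
s.remove_transition("blank")
assert not s.transitions and not s.transition_order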
Return a list of transition names and a transition mapping. | def make_transitions(self, name_list):
names = []
transitions = {}
for namestate in name_list:
if isinstance(namestate, str):
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions | [
"def create_transition_dict(self):\n out = {}\n for state in self.states:\n to_states, probas = self.transition_from(state)\n out[state] = {s: p for s, p in zip(to_states, probas)}\n return out",
"def getAutomaticTransitions():",
"def transitions(self) -> list:\n transitions = [\n key\n for key in self.trans_exec\n if self.idf.as_version >= key > self.idf.file_version\n ]\n transitions.sort()\n return transitions",
"def transitions(self):\n transitions = [\n key\n for key in self.trans_exec\n if self.idf.as_version >= key > self.idf.file_version\n ]\n transitions.sort()\n return transitions",
"def rename(self):\n\n ids = {s: str(i) for (i, s) in enumerate(sorted(list(self.Q())))}\n\n self.transitions = [(ids[t[0]], t[1], ids[t[2]]) for t in self.transitions]\n self.F = [ids[f] for f in self.F]\n self.q0 = ids[self.q0]",
"def getTransitions(source, type):",
"def state_names(model):\n return tuple(n for n, v in model[\"state\"])",
"def transitions(jira, args):\n print(\"Available JIRA transitions:\")\n pprint.pprint(jira.transitions(args.issue))",
"def get_state_names() -> List[str]:\n names = []\n names += get_state_names_1qubit()\n names += get_state_names_2qubit()\n names += get_state_names_3qubit()\n names += get_state_names_1qutrit()\n names += get_state_names_2qutrit()\n return names",
"def __transitions(self):\n # Initialize the transition probailities tensor (S,S,A)\n dimensions = (self.n_states,self.n_states,self.n_actions)\n transition_probabilities = np.zeros(dimensions)\n\n # Compute the transition probabilities. Note that the transitions\n # are deterministic.\n for s in range(self.n_states):\n for a in range(self.n_actions):\n n = 0\n next_s_vec = list()\n for a_batman in self.actions_batman:\n next_s, caught = self.__move(s, a, a_batman)\n \n if caught:\n n = 1\n next_s_vec = [next_s]\n break\n\n elif next_s != None:\n n += 1\n next_s_vec.append(next_s)\n \n for next_s in next_s_vec:\n transition_probabilities[next_s, s, a] = 1/n\n return transition_probabilities",
"def edit_names(a):\n\n dictionary={}\n i=0\n for state in a.states:\n dictionary[str(i)]=state\n i+=1\n\n # rename states\n a.states=list(a.states)\n for i in range(len(a.states)):\n a.states[i]=list(dictionary.keys())[list(dictionary.values()).index(a.states[i])]\n a.states=set(a.states)\n\n # rename start states\n a.start=list(a.start)\n for i in range(len(a.start)):\n a.start[i]=list(dictionary.keys())[list(dictionary.values()).index(a.start[i])]\n a.start=set(a.start)\n\n # rename accept states\n a.accept=list(a.accept)\n for i in range(len(a.accept)):\n a.accept[i]=list(dictionary.keys())[list(dictionary.values()).index(a.accept[i])]\n a.accept=set(a.accept)\n\n # rename transitions\n for i in range(len(a.transitions)):\n a.transitions[i][0]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][0])]\n a.transitions[i][2]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][2])]",
"def getTransition(self) -> \"ScXMLTransitionElt *\":\n return _coin.ScXMLHistoryElt_getTransition(self)",
"def parse(self, transitions):\n #print \"\\n\\n\\n\\n%s\\n\\n\\n\\n\\n\\n\"%self.sentence\n # print transitions\n for transition in transitions:\n self.parse_step(transition)\n #print \"#######################\\n%s##############\\n\"%self.sentence\n return self.dependencies",
"def get_transition(counts, label):\r\n counts.inc_no_visible()\r\n return petri.petrinet.PetriNet.Transition(label, label)",
"def get_transitions(self, cell_transition, orientation):\n raise NotImplementedError()",
"def get_transitions(self, player):\n\n return self.transitions.get(player, [])",
"def get_transitions(content, request, from_state=None):",
"def parse_initial_state_transitions(lines: List[Line]) -> Tuple[Dict[str, Line], List[Line]]:\n remaining_lines = []\n initial_state_names = {}\n\n for line in lines:\n m = re.fullmatch(r'^\\[\\*\\]\\s+-{1,2}>\\s+(\\w+)\\s*(.*)', line.text)\n if not m:\n remaining_lines.append(line)\n continue\n\n name, trailing_text = m.groups()\n assert name not in initial_state_names, f'Duplicate initial transition for state {name} in {line}'\n assert not trailing_text, f'Additional text after initial transition in {line}: {line.orig_text}'\n initial_state_names[name] = line\n\n return initial_state_names, remaining_lines",
"def _MapToMathsNames(startingPoint, GoalFunction, Inequalities, Equalities):\n x = startingPoint\n F = GoalFunction\n gs = Inequalities\n hs = Equalities\n return x, F, gs, hs",
"def get_transition(self,route_id,stop_id):\n if stop_id in self.routes[route_id].trans:\n return self.routes[route_id].trans[stop_id]\n else:\n return []"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
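`make_transitions` accepts a mixed list of plain names and `(name, next_state)` tuples. The sketch below shows that dispatch in isolation; the `make_transition` body here is a placeholder assumption, while the `make_transitions` loop follows the record.

class State:
    def make_transition(self, name, next_state=None):
        # Placeholder: a real implementation would compile a pattern and
        # bind the method named `name`.
        return (name, next_state)

    def make_transitions(self, name_list):
        names = []
        transitions = {}
        for namestate in name_list:
            if isinstance(namestate, str):
                transitions[namestate] = self.make_transition(namestate)
                names.append(namestate)
            else:
                transitions[namestate[0]] = self.make_transition(*namestate)
                names.append(namestate[0])
        return names, transitions

s = State()
names, table = s.make_transitions(["blank", ("indent", "Body")])
assert names == ["blank", "indent"]
assert table["indent"] == ("indent", "Body")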
A "do nothing" transition method. Return unchanged `context` & `next_state`, empty result. Useful for simple state changes (actionless transitions). | def nop(self, match, context, next_state):
return context, next_state, [] | [
"def noop_context():\n yield",
"def noop(value, state = None):\n return value, None",
"def strip_state(e: Expression) -> None:\n if hasattr(e, \"state\"):\n e.state = None\n for c in e.children():\n strip_state(c)",
"def reset(self):\n self.env.reset()\n\n repeat_noop_times = self.unwrapped.np_random.randint(1, self.max_noop_times + 1)\n for _ in range(repeat_noop_times):\n state, _, done, _ = self.env.step(self.noop_action)\n if done:\n state = self.env.reset()\n\n return state",
"def erase_state(self, *args):\n return _wali.WFA_erase_state(self, *args)",
"def SoAction_nullAction(action: 'SoAction', node: 'SoNode') -> \"void\":\n return _coin.SoAction_nullAction(action, node)",
"def nullAction(action: 'SoAction', node: 'SoNode') -> \"void\":\n return _coin.SoAction_nullAction(action, node)",
"def removeStateChangeCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_removeStateChangeCallback(self, *args)",
"def patch_opp_state(opp):\n opp.state = core.CoreState.not_running",
"def _reset_state_wrapper(self):\n self._reset_state_impl()\n self._is_adapted = False",
"def next_state(self, state: State, jointaction: JointAction) -> State:\n pass",
"def go_to_state(self, next_state):\n for t in self.transitions:\n if t.next_state == None:\n t.next_state = next_state\n return self.root",
"def reset_context(self):\n self.current.clear()",
"def test_disable_running_transition():\n\n def assert_new(instance):\n \"\"\"\n ensure the state is still the original state\n \"\"\"\n assert instance.state == \"new\"\n\n x = get_thing()\n x.disable_running_state(assert_new)",
"def remove_state(self) -> None:\n self._state = {}",
"def reset(self):\n self.mutate(WorldState())",
"def transition(self):\n next_state = self.current_state.transition()\n # self.printStateChange(self.current_state, next_state)\n self.current_state = next_state",
"def restore_state(self):\n if self:\n self.pop()\n else:\n log.warning(\"Can't reset empty state\")",
"def prepare_forward(self, seeding_pos):\n self.hidden_recurrent_states = None\n\n return super().prepare_forward(seeding_pos)",
"def reset(self, s = None, as_tuple = False):\n if (s is None):\n self.current_state = self.legal_states[np.random.choice(self.states())]\n else:\n s = s if isinstance(s, tuple) else self.legal_states[s]\n self.current_state = s\n return self.state(as_tuple = as_tuple), self.rewards[self.state(as_tuple=True)], self.terminals[self.state(as_tuple=True)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
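The `nop` record shows the three-value protocol every transition method follows: it returns the context, the next state name, and a list of results. A tiny sketch of a caller consuming that protocol; the standalone function and the sample values are illustrative assumptions.

def nop(match, context, next_state):
    # Actionless transition: pass everything through untouched.
    return context, next_state, []

context, state, results = nop(None, ["pending line"], "Body")
assert results == [] and state == "Body" and context == ["pending line"]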
Initialize a `StateWS` object; extends `State.__init__()`. Check for indent state machine attributes, set defaults if not set. | def __init__(self, state_machine, debug=False):
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs | [
"def __init__(self, token, state, extra):\n self.state = state\n self.token = token\n self.extra = extra\n pass",
"def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state",
"def __init__(self):\n self.action_space = [(0, 0)] + list(permutations([i for i in range(m)], 2))\n self.action_space = [list(i) for i in self.action_space]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()",
"def __init__(self, state=False):\r\n \r\n # If state is a string, convert it to all caps (for case insensitivity)\r\n # and convert it into bool\r\n # (True if it is \"ON\", False if it is \"OFF\")\r\n if(isinstance(state, str)):\r\n state = state.upper()\r\n if (state == \"ON\"):\r\n state = True\r\n elif (state == \"OFF\"):\r\n state = False\r\n else:\r\n raise InvalidStateException\r\n \r\n # Set the state of the switch\r\n self._on = state\r\n \r\n return",
"def __init__(self):\n self.action_space = tuple([(pick_up,drop) for pick_up in (1,2,3,4,5) for drop in (1,2,3,4,5) if pick_up!=drop])\n self.state_space = [(loc, time, day) for loc in np.arange(1,m+1) for time in range(t) for day in range(d)]\n self.state_init = random.choice(self.state_space)\n self.state_input = (np.arange(1,m+1) , np.arange(0,t) , np.arange(0,d))\n # Start the first round\n self.reset()",
"def __init__(self):\n this = _coin.new_ScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self):\n this = _coin.new_SoScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, agent_id=None, agent_positions=None,\n food_positions=None, fragile_agents=None, wall_positions=None,\n legal_actions=None, reward=None, executed_action=None,\n test_mode=None, realPosition=None):\n super(StateMessage, self).__init__(msg_type=STATE_MSG)\n\n self.agent_id = agent_id\n self.agent_positions = agent_positions\n self.food_positions = food_positions\n self.fragile_agents = fragile_agents\n self.wall_positions = wall_positions\n self.legal_actions = legal_actions\n self.reward = reward\n self.executed_action = executed_action\n self.test_mode = test_mode\n self.realPosition = realPosition",
"def __init__(self):\n # Create a initialized state map where all tiles are assumed unknown\n self._state = [TileState.Unknown] * StateMap.TILE_NUMBER\n self._state.append(False) # isClaim bit\n self._state.append(False) # Claim action bit",
"def _sgf_init_gamestate(sgf_root):\n\tprops = sgf_root.properties\n\ts_size = props.get('SZ', ['19'])[0]\n\ts_player = props.get('PL', ['B'])[0]\n\t# init board with specified size\n\tgs = go.GameState(int(s_size))\n\t# handle 'add black' property\n\tif 'AB' in props:\n\t\tfor stone in props['AB']:\n\t\t\tgs.do_move(_parse_sgf_move(stone), go.BLACK)\n\t# handle 'add white' property\n\tif 'AW' in props:\n\t\tfor stone in props['AW']:\n\t\t\tgs.do_move(_parse_sgf_move(stone), go.WHITE)\n\t# setup done; set player according to 'PL' property\n\tgs.current_player = go.BLACK if s_player == 'B' else go.WHITE\n\treturn gs",
"def __initialise_states(self):\n\n # Start not dead and not powered up\n self.powered_up = False\n self.dead = False",
"def initial_state(self):\n return GeneralGameState(self)",
"def setup_class(cls):\n cls.state = State()\n cls.state.name = \"Oregon\"",
"def __init__(self, *, indent: int = 0, indent_step: int = 4):\n self._indent = indent\n self._code = []\n self.INDENT_STEP = indent_step",
"def init_level(self, level='lowest'):\n # TODO: init encoders, handle different size aligners\n if len(self.aligners) > 1:\n if level == 'lowest':\n state_dict = self.aligners[1].state_dict()\n self.aligners[0].load_state_dict(state_dict)\n elif level == 'highest':\n state_dict = self.aligners[-2].state_dict()\n self.aligners[-1].load_state_dict(state_dict)\n return self",
"def make_state():\r\n return State(name=\"\", cntBins=0, cntSimulation=0\r\n , intLane=0, valBucket = [], cntBucket = []\r\n , pathDirectionList = []\r\n , pathScoreList = []\r\n ) #returns a State objec\r",
"def __init__(self, states, params, rxnstring=None):\n\n self.states = states # State management\n self.params = params # Parameter management\n self.reactants = []\n self.products = []\n self.activators = []\n self.inhibitors = []\n self.mark = '--'\n\n if rxnstring: self.read_rxn_str(rxnstring)",
"def setStateMachine(self, sm: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLEvaluator_setStateMachine(self, sm)",
"def setStateMachine(self, sm: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLCoinEvaluator_setStateMachine(self, sm)",
"def make_sm(self):\n return smach.StateMachine(outcomes=['succeeded','aborted','preempted'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle an indented text block. Extend or override in subclasses. Recursively run the registered state machine for indented blocks (`self.indent_sm`). | def indent(self, match, context, next_state):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results | [
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent",
"def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # move to first line of selection, if present\n cur.setPosition(cur.selectionStart())\n self._control.setTextCursor(cur)\n spaces = self._get_leading_spaces()\n # calculate number of spaces neded to align/indent to 4-space multiple\n step = self._tab_width - (spaces % self._tab_width)\n\n # insertText shouldn't replace if selection is active\n cur.clearSelection()\n\n # indent all lines in selection (ir just current) by `step`\n for _ in range(num_newlines+1):\n # update underlying cursor for _get_line_start_pos\n self._control.setTextCursor(cur)\n # move to first non-ws char on line\n cur.setPosition(self._get_line_start_pos())\n if dedent:\n spaces = min(step, self._get_leading_spaces())\n safe_step = spaces % self._tab_width\n cur.movePosition(QtGui.QTextCursor.Right,\n QtGui.QTextCursor.KeepAnchor,\n min(spaces, safe_step if safe_step != 0\n else self._tab_width))\n cur.removeSelectedText()\n else:\n cur.insertText(' '*step)\n cur.movePosition(QtGui.QTextCursor.Down)\n\n # restore cursor\n self._control.setTextCursor(save_cur)",
"def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)",
"def _indent(self, level: int) -> Text:\n\n return self.indent * level",
"def indent(self, lvl=1):\n self.current_level += lvl\n assert self.current_level >= 0, \"Level of indentation cannot become negative\"\"\"",
"def indentation():\n try:\n indent()\n yield\n finally:\n unindent()",
"def add_text(self, text):\n if text.startswith(nl):\n text = text[1:]\n\n cls = ''\n prefix = ''\n is_code = False\n is_output = False\n interp_line = False\n after_blank = False # state 'after blank line'\n blank = False\n bullets = 0\n code_indent = 0\n output_indent = 0\n\n for line in text.split(nl):\n sline = line.strip()\n if sline.startswith('#'):\n continue\n\n # handle <ul> <li> ...\n if sline == '*':\n bullets = 1\n elif bullets == 1 and sline.startswith('*'):\n bullets = 2\n elif bullets == 2 and not sline.startswith('*'):\n bullets = 0\n self.commands.append( dict(cmd=\"text\", arg=\"</ul>\", indent=indent, cls=cls, prefix=prefix) )\n\n line = line.rstrip()\n blank = bool(not line)\n indent = len(line) - len(line.lstrip()) + 1\n\n if interp_typecmd and line.strip().startswith(\">>>\"):\n self.commands.append(dict(cmd=\"type\", arg=None))\n cls = \"code\"\n prefix = escape(\">>>\") + nbsp\n is_code = True\n interp_line = True\n # interp.prompt, space, 1 level of block indent\n code_indent = indent + 3+1\n output_indent = code_indent - 4\n\n # blank line; next line at code indent: still code; ELSE reset code\n # non-blank line; next line at code indent - 4: output\n\n # shorter indent than code should be means end of code block; ignore blank lines\n if not interp_line and indent < code_indent and not blank:\n is_code = False; cls = ''\n\n if not interp_line and after_blank and indent != code_indent and not blank:\n is_code = False; cls = ''\n\n if indent==output_indent and not interp_line:\n is_output = True; cls = \"output\"\n\n if is_output and indent < output_indent:\n is_output = False; cls = ''\n\n # ugly hack: force bigger indent on lines of code except for interp lines\n if is_code and not interp_line:\n indent += 4\n\n line = line.lstrip(\"> \")\n arg = escape(line)\n arg = arg.replace(space, nbsp).replace(\"--\", \"—\")\n if is_code or is_output:\n for name, fn, tag in images:\n arg = arg.replace(name+\"png\", fn)\n arg = arg.replace(fn, tag)\n\n if bullets == 1:\n self.commands.append( dict(cmd=\"text\", arg=\"<ul>\", indent=indent, cls=cls, prefix=prefix) )\n elif bullets == 2:\n arg = \"<li>%s</li>\" % arg.lstrip('*')\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n else:\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n\n prefix = ''\n interp_line = False\n after_blank = bool(not line.strip())",
"def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r",
"def indent(self, indent: int):\n self._column_offset += indent\n self._current_text.set_x_offset(self._column_offset)",
"def _indent(text):\r\n indented, lines = '', text.splitlines(True)\r\n for line in lines:\r\n indented += '..' + line\r\n return indented",
"def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))",
"def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3",
"def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"",
"def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)",
"def indented(self, message: str, added_depth: int = 0) -> str:\n depth = self.depth + added_depth\n return depth * 4 * ' ' + message",
"def render_text(self, indent: str = \" \") -> str:\n self.preprocess()\n return f\"{self._start()}{self._mid(indent)}{self._end()}\"",
"def _indent(self):\n if self._debug:\n self._debug += 1",
"def indenter(text_to_indent):\n temp = \"\"\n for line in json.dumps(text_to_indent, indent=2).split('\\n'):\n temp = temp + \"# %s\\n\" % line\n return temp.strip()",
"def dumped (text, level, indent=2):\n return indented (\"{\\n%s\\n}\" % indented (text, level+1, indent) or \"None\", level, indent) + \"\\n\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle a known-indent text block. Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. | def known_indent(self, match, context, next_state):
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results | [
"def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish",
"def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r",
"def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))",
"def _get_indent_segment(self) -> Optional[RawSegment]:\n indent = None\n for seg in reversed(self.segments):\n if seg.pos_marker and not seg.pos_marker.is_literal():\n # Skip any templated elements.\n # NOTE: It must _have_ a position marker at this\n # point however to take this route. A segment\n # without a position marker at all, is an edit\n # or insertion, and so should still be considered.\n continue\n elif seg.is_type(\"newline\"):\n return indent\n elif seg.is_type(\"whitespace\"):\n indent = seg\n elif \"\\n\" in (get_consumed_whitespace(seg) or \"\"):\n # Consumed whitespace case.\n # NOTE: In this situation, we're not looking for\n # separate newline and indent segments, we're\n # making the assumption that they'll be together\n # which I think is a safe one for now.\n return seg\n # i.e. if we never find a newline, it's not an indent.\n return None",
"def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent",
"def match_multiline(self, text, delimiter, in_state, style):\n\t\t# If inside triple-single quotes, start at 0\n\t\tif self.previousBlockState() == in_state:\n\t\t\tstart = 0\n\t\t\tadd = 0\n\t\t# Otherwise, look for the delimiter on this line\n\t\telse:\n\t\t\tstart = delimiter.indexIn(text)\n\t\t\t# Move past this match\n\t\t\tadd = delimiter.matchedLength()\n\n\t\t# As long as there's a delimiter match on this line...\n\t\twhile start >= 0:\n\t\t\t# Look for the ending delimiter\n\t\t\tend = delimiter.indexIn(text, start + add)\n\t\t\t# Ending delimiter on this line?\n\t\t\tif end >= add:\n\t\t\t\tlength = end - start + add + delimiter.matchedLength()\n\t\t\t\tself.setCurrentBlockState(0)\n\t\t\t# No; multi-line string\n\t\t\telse:\n\t\t\t\tself.setCurrentBlockState(in_state)\n\t\t\t\tlength = len(text) - start + add\n\t\t\t# Apply formatting\n\t\t\tself.setFormat(start, length, style)\n\t\t\t# Look for the next match\n\t\t\tstart = delimiter.indexIn(text, start + length)\n\n\t\t# Return True if still inside a multi-line string, False otherwise\n\t\tif self.currentBlockState() == in_state:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # move to first line of selection, if present\n cur.setPosition(cur.selectionStart())\n self._control.setTextCursor(cur)\n spaces = self._get_leading_spaces()\n # calculate number of spaces neded to align/indent to 4-space multiple\n step = self._tab_width - (spaces % self._tab_width)\n\n # insertText shouldn't replace if selection is active\n cur.clearSelection()\n\n # indent all lines in selection (ir just current) by `step`\n for _ in range(num_newlines+1):\n # update underlying cursor for _get_line_start_pos\n self._control.setTextCursor(cur)\n # move to first non-ws char on line\n cur.setPosition(self._get_line_start_pos())\n if dedent:\n spaces = min(step, self._get_leading_spaces())\n safe_step = spaces % self._tab_width\n cur.movePosition(QtGui.QTextCursor.Right,\n QtGui.QTextCursor.KeepAnchor,\n min(spaces, safe_step if safe_step != 0\n else self._tab_width))\n cur.removeSelectedText()\n else:\n cur.insertText(' '*step)\n cur.movePosition(QtGui.QTextCursor.Down)\n\n # restore cursor\n self._control.setTextCursor(save_cur)",
"def step(self):\n # Try matching (if we haven't already)\n if self.untried_match():\n token = self.match()\n if token is not None:\n return token\n\n # Try expanding.\n production = self.expand()\n if production is not None:\n return production\n\n # Try backtracking\n if self.backtrack():\n self._trace_backtrack(self._tree, self._frontier)\n return True\n\n # Nothing left to do.\n return None",
"def match_multiline(self, text, delimiter, in_state, style,\r\n hls=[], highlight_errors=lambda x: x, user_data=None):\r\n # If inside triple-single quotes, start at 0\r\n if self.previousBlockState() == in_state:\r\n start = 0\r\n add = 0\r\n # Otherwise, look for the delimiter on this line\r\n else:\r\n start = delimiter.indexIn(text)\r\n # Move past this match\r\n add = delimiter.matchedLength()\r\n\r\n # As long as there's a delimiter match on this line...\r\n while start >= 0:\r\n # Look for the ending delimiter\r\n end = delimiter.indexIn(text, start + add)\r\n # Ending delimiter on this line?\r\n if end >= add:\r\n length = end - start + add + delimiter.matchedLength()\r\n self.setCurrentBlockState(0)\r\n # No; multi-line string\r\n else:\r\n self.setCurrentBlockState(in_state)\r\n length = len(text) - start + add\r\n\r\n st_fmt = self.format(start)\r\n start_collides = [pos for pos in hls if pos[0] < start < pos[1]]\r\n\r\n # Apply formatting\r\n if ((st_fmt != STYLES['comment']) or\r\n ((st_fmt == STYLES['comment']) and\r\n (self.previousBlockState() != 0))) and \\\r\n (len(start_collides) == 0):\r\n style = highlight_errors(style, user_data)\r\n self.setFormat(start, length, style)\r\n else:\r\n self.setCurrentBlockState(0)\r\n # Look for the next match\r\n start = delimiter.indexIn(text, start + length)\r\n\r\n # Return True if still inside a multi-line string, False otherwise\r\n if self.currentBlockState() == in_state:\r\n return True\r\n else:\r\n return False",
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def read_multiline(self, line, f, indent):\n log.debug('Beginning multiline search at position %d in %s', f.tell(), self.fname)\n result = ''\n\n n = line.find(self.ms)\n if n >= 0:\n line = line[n + len(self.ms):]\n\n while line:\n if line[:indent].isspace() and len(line) > indent:\n line = line[indent:]\n\n if self.me in self.multi_re.sub('', line):\n result += ''.join(line.rsplit(self.me, 1))\n break\n\n result += line\n line = f.readline()\n else:\n raise ParseError('Unexpected EOF while parsing %s.' % self.fname)\n\n return result",
"def highlightBlock(self, text):\n\t\t# Do other syntax formatting\n\t\tfor expression, nth, format in self.rules:\n\t\t\tindex = expression.indexIn(text, 0)\n\n\t\t\twhile index >= 0:\n\t\t\t\t# We actually want the index of the nth match\n\t\t\t\tindex = expression.pos(nth)\n\t\t\t\tlength = len(expression.cap(nth))\n\t\t\t\tself.setFormat(index, length, format)\n\t\t\t\tindex = expression.indexIn(text, index + length)\n\n\t\tself.setCurrentBlockState(0)\n\n\t\t# Do multi-line strings\n\t\tin_multiline = self.match_multiline(text, *self.tri_single)\n\t\tif not in_multiline:\n\t\t\tin_multiline = self.match_multiline(text, *self.tri_double)",
"def test_first_line_indent(self):\n try:\n self._read_string(' Project indented')\n raise AssertionError('SyntaxError expected')\n except reader.SyntaxError:\n pass",
"def _parse_hit_match_block(self, hit_match_data):\n\n def match_is_valid(match):\n \"\"\"Return True if match is not a Consensus column (PRIVATE).\n\n It's not possible to distinguish a sequence line from a Consensus line with\n a regexp, so need to check the ID column.\n \"\"\"\n return match.group(1).strip() != \"Consensus\"\n\n while True:\n if not self.line.strip(): # blank lines indicate the end of a hit block\n return\n match = re.match(_RE_MATCH_BLOCK_QUERY_SEQ, self.line)\n if match and match_is_valid(match):\n hit_match_data[\"query_seq\"] += match.group(3).strip()\n if hit_match_data[\"query_start\"] is None:\n hit_match_data[\"query_start\"] = int(match.group(2))\n hit_match_data[\"query_end\"] = int(match.group(4))\n else:\n match = re.match(_RE_MATCH_BLOCK_HIT_SEQ, self.line)\n if match and match_is_valid(match):\n hit_match_data[\"hit_seq\"] += match.group(3).strip()\n if hit_match_data[\"hit_start\"] is None:\n hit_match_data[\"hit_start\"] = int(match.group(2))\n hit_match_data[\"hit_end\"] = int(match.group(4))\n self.line = self.handle.readline()",
"def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules :\n index = expression.indexIn(text, 0)\n \n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)",
"def indent(self, indent: int):\n self._column_offset += indent\n self._current_text.set_x_offset(self._column_offset)",
"def _indent(self, level: int) -> Text:\n\n return self.indent * level",
"def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)(([*\\-+>]+|\\w+\\)|\\w+\\.) +)', line)\n if list_match:\n sub_indent = indent + len(list_match.group(2))\n else:\n sub_indent = indent\n\n return indent, sub_indent",
"def highlightBlock(self, text):\n pos = matchLen = 0\n for matchNum in range(self.skipMatches + 1):\n pos += matchLen\n if self.searchText:\n pos = text.lower().find(self.searchText, pos)\n matchLen = len(self.searchText)\n else:\n match = self.regExpObj.search(text, pos)\n pos = match.start() if match else -1\n matchLen = len(match.group())\n if pos >= 0:\n self.setFormat(pos, matchLen, self.charFormat)",
"def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove items from the start of the list, without touching the parent. | def trim_start(self, n=1):
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n | [
"def removeFirst(self):\n\t\tself.head = self.head.after",
"def delete_beg(self):\n\n if self.head != None:\n\n # grab the node that comes after the head.\n aft_head = self.head.next_node\n\n # have the last node now point to that node\n self.end.next_node = aft_head\n\n # set the head property.\n self.head = aft_head\n\n else:\n raise ValueError(\"The list is empty\")",
"def remove_from_parents(self):\n pass",
"def remove_front(self) -> None:\n length = self.length()\n head = self.head\n cur = self.head.next\n # Checking for empty list. If list is empty an exception is raised.\n if length < 1:\n raise SLLException\n # Else, removing the node directly after the head.\n else:\n head.next = cur.next",
"def removeFromFront(self):\n newNext = self.head.getNext().getNext()\n # remove the first real node in the list\n self.head.setNext(newNext)\n self.size -= 1\n self._tailRemoveCheck()",
"def moveFirst(self):\n if self.parent:\n self.parent.childList.remove(self)\n self.parent.childList.insert(0, self)\n globalref.docRef.modified = True",
"def __delete_first_node(\n self\n ):\n list_is_size_1 = self.size() == 1\n\n if list_is_size_1:\n self.first_node = None\n self.last_node = None\n else:\n self.first_node = self.get_node(1)\n self.first_node.set_previous_node(None)",
"def chop(lst):\n del lst[0] # Removes the first element\n del lst[-1] # Removes the last element",
"def chop(lst):\r\n del lst[0] # Removes the first element\r\n del lst[-1] # Removes the last element\r",
"def removeFirst(self):\n self._removeFirst_animation()\n \n super(A_LinkedList, self).removeFirst()",
"def nodeMoveFirst(self):\n self.currentSelectionModel().sortSelection()\n selNodes = self.currentSelectionModel().selectedNodes()\n undo.ChildListUndo(self.model.undoList,\n [node.parent for node in selNodes])\n for node in reversed(selNodes):\n node.parent.childList.remove(node)\n node.parent.childList.insert(0, node)\n self.currentSelectionModel().selectNodes(selNodes, False)\n self.updateAll()",
"def remove_before_current():\n tltracks = mp.tracklist.get_tl_tracks()\n if len(tltracks) == 0:\n logger.debug(\"Tracklist empty. Aborting track removals.\")\n return\n\n # index of currently playing track\n current = tltracks[0]\n curridx = mp.tracklist.index(tlid=current.tlid)\n\n # remove tracks if necessary\n if curridx != 0:\n logger.warning(f\"Current track is at idx: {curridx}\"\n \"Removing all tracks before it.\")\n tracks = mp.tracklist.get_tracks()\n remuris = [t.uri for t in tracks[:curridx]]\n logger.debug(f\"Removing tracks: {remuris}\")\n remove_tracks(remuris)",
"def delete_front(self) -> None:\n if not self.head:\n raise IndexError(\"Deleting from an empty list\")\n\n current_node = self.head\n\n if current_node.next_ptr == current_node:\n self.head, self.length = None, 0\n else:\n while current_node.next_ptr != self.head:\n current_node = current_node.next_ptr\n\n current_node.next_ptr = self.head.next_ptr\n self.head = self.head.next_ptr\n\n self.length -= 1",
"def clear_items_sequential(self):\n pass",
"def removeFront(self):\n if self.items:\n return self.items.pop(0)\n else:\n raise Exception('can not remove from empty deque')",
"def remove_from_front(self):\n if len(self.orders) > 0:\n return self.orders.pop(0)\n else:\n return None",
"def deleteFront(self) -> bool:\n if len(self.list):\n del self.list[0]\n return True\n else:return False",
"def reset_position(self):\r\n eliminated = [pos.obj for pos in self.positions]\r\n self.positions = deque()\r\n return eliminated",
"def remove(self):\r\n if self.parent:\r\n for i, node in enumerate(self.parent.children):\r\n if node is self:\r\n self.parent.changed()\r\n del self.parent.children[i]\r\n self.parent = None\r\n return i",
"def delete_middle(self):\n\t\tif self.head is None:\n\t\t\traise ValueError(\"Cannot find an element in an empty list\")\n\n\t\tcurrent = self.head\n\t\tmid_index = 0\n\t\tcount = 0\n\t\taux = None\n\t\tmid = self.head\n\n\t\twhile current is not None:\n\t\t\tif mid_index < int(count/2):\n\t\t\t\taux = mid\n\t\t\t\tmid = mid.next\n\t\t\t\tmid_index += 1\n\t\t\tcount += 1\n\t\t\tcurrent = current.next\n\n\t\tif aux is None:\n\t\t\tself.head = self.head.next\n\t\telse:\n\t\t\taux.next = mid.next\n\n\t\tdel mid"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
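A minimal standalone sketch of the trim_start behaviour described above (the class and field names are illustrative stand-ins, not the real docutils view-list class): dropping items from the front of the view only advances the recorded offset into the parent; the parent's own data is untouched.

class MiniViewList:
    # Simplified stand-in: `data` holds the visible lines, `items` holds
    # per-line (source, offset) bookkeeping, and `parent_offset` records how
    # far this view has slid forward inside its parent.
    def __init__(self, data, parent=None):
        self.data = list(data)
        self.items = [("<sketch>", i) for i in range(len(data))]
        self.parent = parent
        self.parent_offset = 0

    def trim_start(self, n=1):
        if n > len(self.data):
            raise IndexError("can't trim %s items from a list of size %s"
                             % (n, len(self.data)))
        elif n < 0:
            raise IndexError("Trim size must be >= 0.")
        del self.data[:n]
        del self.items[:n]
        if self.parent:
            self.parent_offset += n

view = MiniViewList(["first", "second", "third"], parent=object())
view.trim_start(2)
assert view.data == ["third"]
assert view.parent_offset == 2  # the parent is untouched; only the offset moved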
Return source for index `i`. | def source(self, i):
return self.info(i)[0] | [
"def source(self, index=0):\n if not self._sources:\n self.get_data()\n try:\n sitename, url = self._sources[index]\n except TypeError:\n return self._sources[index]\n except IndexError:\n raise NotFoundError(\"No episode sources found.\")\n\n ext = get_extractor(sitename)(\n url, quality=self.quality, headers=self.headers)\n self._sources[index] = ext\n\n return ext",
"def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp",
"def __getitem__(self, i):\n return self._data[i]",
"def get_all_sources(item=None):",
"def __getitem__(self, i):\n if not isinstance(i, slice):\n raise ValueError(\"Only slices can be used.\")\n return self.prepareIterator(i.step, i.start, i.stop)",
"def sources(self) -> Iterator[Slot[ItemT]]:\n for slot in self._slots:\n if slot.is_source:\n yield slot",
"def iterSources(self):\n for row in self.iterDictQuery(\"%s ORDER BY name\" % self.sourceQuery):\n yield ThermSource(self, **row)",
"def get_source():",
"def __getitem__(self, index):\n return self.target[self.position + index]",
"def source_index(self):\n return os.path.join(self.data_directory, 'sources')",
"def get_example(self, i):\n key = str(i)\n if key not in self.cache:\n self.cache[key] = self._dataset[i]\n return self.cache[key]",
"def __getitem__(self, i):\n\n return self.documents[i]",
"def srcdocs(self, i=1):\n res = []\n db = self.srcdb(i=i)\n for did in db:\n res += [dict(db[did])]\n return res",
"def getSource(self, name: str) -> Optional[\"Source\"]:\r\n\r\n for source in self.currentScene.sources:\r\n if source.name == name:\r\n return source\r\n return None\r\n #TODO: Search in non-current scene\r",
"def get_source_by_name(self, name):\r\n sources = self.call(GetSourcesList())\r\n for source in sources.getSources():\r\n if source[\"name\"] == name:\r\n return source\r\n return None",
"def src_idx(self, src_id='', dest_id='', dest_ports=slice(None, None)):\n\n if src_id == '' and dest_id == '':\n src_id = self.A_id\n dest_id = self.B_id\n mask = self.src_mask(src_id, dest_id, dest_ports)\n return np.arange(self.N(src_id))[mask]",
"def get_source_id(idx):\n global tgas\n if tgas is None:\n from .cfg import TGASFILE\n tgas = pd.read_hdf(TGASFILE, 'df')\n\n return tgas.iloc[idx].source_id",
"def get_sources(self):\n\n self.sources = []\n cur = self.settings['conn'].cursor()\n cur.execute(\"SELECT id, name, fulltext, mediapath, memo, owner, date FROM source\")\n results = cur.fetchall()\n for r in results:\n guid = self.create_guid()\n suffix = \"txt\"\n if r[3] is not None:\n suffix = r[3].split('.')[-1]\n else:\n if '.' in r[1]:\n suffix = r[1].split('.')[-1]\n if suffix == 'transcribed':\n suffix = 'txt'\n filename = guid + '.' + suffix\n\n plaintext_filename = None\n if r[2] is not None:\n plaintext_filename = self.create_guid() + \".txt\"\n source = {'id': r[0], 'name': r[1], 'fulltext': r[2], 'mediapath': r[3],\n 'memo': r[4], 'owner': r[5], 'date': r[6].replace(' ', 'T'), 'guid': guid,\n 'filename': filename, 'plaintext_filename': plaintext_filename,\n 'external': None}\n if source['mediapath'] is not None:\n fileinfo = os.stat(self.settings['path'] + source['mediapath'])\n if fileinfo.st_size >= 2147483647:\n source['external'] = self.settings['directory']\n self.sources.append(source)",
"def source(self):\n ret = self._get_attr(\"source\")\n return IEventSource(ret)",
"def _source(self) -> Source:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return offset for index `i`. | def offset(self, i):
return self.info(i)[1] | [
"def getOffset(self, index: int) -> int:\n ...",
"def getMarkPosition(self, i: int) -> int:\n ...",
"def getEndPosition(self, i: int) -> int:\n ...",
"def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp",
"def offset_at_position(self):\n offset = 0\n for i, curr_line in enumerate(self.doc.iter_lines()):\n if i == self.line:\n break\n offset += len(curr_line)\n\n return offset + self.col",
"def findPosition(self,i): # TEST\n return self.abstract.findPosition(self.notes[i])",
"def ComputeOffset(self, ind: 'itkIndex2') -> \"long long\":\n return _itkImagePython.itkImageBase2_ComputeOffset(self, ind)",
"def ComputeOffset(self, ind: 'itkIndex3') -> \"long long\":\n return _itkImagePython.itkImageBase3_ComputeOffset(self, ind)",
"def calc_offset(self,path,i,chunk_sz):\n i=int(i)\n chunk_sz=int(chunk_sz)\n if os.path.isfile(path):\n return (path,i*chunk_sz)\n\n self.lock.acquire()\n self.check_key(path) #Don't know if it is THREAD SAFE\n self.lock.release()\n \n dic,other = self.cache[path]\n\n chunk_start = int(i)*int(chunk_sz)\n owner_ind = other.bisect_right(chunk_start)\n owner_key = other.iloc[owner_ind]\n file = other[owner_key]\n\n file_start=0\n if owner_ind!=0:\n file_start = other.iloc[owner_ind-1]\n\n return (file,chunk_start-file_start)",
"def __shapeIndex(self, i=None):\r\n shx = self.shx\r\n if not shx:\r\n return None\r\n if not self._offsets:\r\n # File length (16-bit word * 2 = bytes) - header length\r\n shx.seek(24)\r\n shxRecordLength = (unpack(\">i\", shx.read(4))[0] * 2) - 100\r\n numRecords = shxRecordLength / 8\r\n # Jump to the first record.\r\n shx.seek(100)\r\n for r in range(numRecords):\r\n # Offsets are 16-bit words just like the file length\r\n self._offsets.append(unpack(\">i\", shx.read(4))[0] * 2)\r\n shx.seek(shx.tell() + 4)\r\n if not i == None:\r\n return self._offsets[i]",
"def ComputeOffset(self, ind: 'itkIndex4') -> \"long long\":\n return _itkImagePython.itkImageBase4_ComputeOffset(self, ind)",
"def get_start_end_xy(self, i):\n direction_inc = [\n [-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]\n x_start = self.position[i][1]\n y_start = self.position[i][0]\n x_end = x_start + \\\n (len(self.words[i]) - 1) * direction_inc[self.direction[i]][1]\n y_end = y_start + \\\n (len(self.words[i]) - 1) * direction_inc[self.direction[i]][0]\n return (x_start, y_start), (x_end, y_end)",
"def _to_cursor_pos(self, index):\n return index + 1",
"def calc_sag_offset_idx(self):\n return self.offset_pnt-1",
"def min_idx(a, i):\n pass",
"def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1",
"def GetOrigin(self, i: 'unsigned int') -> \"double\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_GetOrigin(self, i)",
"def GetOffset(self):\n return self.st_pos",
"def image_id_at(self, i):\n return i",
"def _offset(self, tile_index: int) -> Tuple[int, int]:\n width_tiles = self.shape_in_tiles[1]\n row = int(tile_index / width_tiles)\n col = tile_index % width_tiles\n\n return row * self.spec.height, col * self.spec.width"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
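The source and offset records above both delegate to an info(i) helper that returns a per-line (source, offset) pair; a small sketch of that convention (names are illustrative, not the real docutils attributes):

# Each line carries a (source, offset) pair; source()/offset() unpack it.
items = [("spam.rst", 0), ("spam.rst", 1), ("eggs.rst", 0)]

def info(i):
    return items[i]

def source(i):
    return info(i)[0]

def offset(i):
    return info(i)[1]

assert source(2) == "eggs.rst"
assert offset(1) == 1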
Trim `length` characters off the beginning of each item, in-place, from index `start` to `end`. No whitespace-checking is done on the trimmed text. Does not affect slice parent. | def trim_left(self, length, start=0, end=sys.maxint):
self.data[start:end] = [line[length:]
for line in self.data[start:end]] | [
"def trim(self, start, end):\r\n self.ltrim(start, end)",
"def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoBaseList_truncate(self, length)",
"def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoChildList_truncate(self, length)",
"def trim(self, start: int = None, end: int = None) -> None:\n if start is not None:\n self._data = self._data.iloc[start:]\n if end is not None and end > 0:\n self._data = self._data.iloc[:-end]",
"def _trim_start_end(data: pd.DataFrame, start: int, end: int):\n start_idx = data.loc[:, \"start_locus\"].searchsorted(start)\n end_idx = data.loc[:, \"start_locus\"].searchsorted(end, side=\"left\")\n return data.iloc[start_idx:end_idx, :]",
"def crop(listtocrop, length, start = 0): \n croppedlist = []\n for row in listtocrop:\n croppedlist.append(row[start:length+start])\n \n \n return croppedlist",
"def slice(rec, start=0, end=0):\n\n # Nothing needs to be done\n if not (start or end):\n return rec\n\n # There is end but no start\n if end and not start:\n start = 1\n\n # There start but no end\n if start and not end:\n end = len(rec.seq)\n\n rec.seq = rec.seq[start - 1:end]\n rec.description = f\"{start}:{end} {rec.description}\"\n return rec",
"def ltrim(self, key, start, end):\r\n return self.execute_command(\"LTRIM\", key, start, end)",
"def truncate(vec, max_length, truncate_tail=True):\n if max_length is None:\n return vec\n if len(vec) <= max_length:\n return vec\n if truncate_tail:\n return vec[:max_length]\n else:\n return vec[-max_length:]",
"def trim(self, start=True, end=True):\n if not self.frames.valid.any():\n return\n start_index = self.get_first_frame_index() if start else 0\n end_index = self.get_last_frame_index() if end else (self.size - 1)\n delete = np.full(self.size, True)\n delete[start_index:end_index + 1] = False\n\n if delete.any():\n self.frames.delete_indices(delete)\n if self.dependents is not None:\n for dependent in self.dependents.values():\n dependent.for_frame = dependent.for_frame[~delete]\n\n self.reindex()\n log.debug(f\"Trimmed to {self.size} frames.\")",
"def cutseq(seq):\n rem = len(seq) % 3\n if rem != 0:\n return seq #[:-rem]\n else:\n return seq",
"def trim_from_start(self, trim_from_start):\n self._trim_from_start = trim_from_start",
"def trim_slice(lines, slice_tuple):\n\n def _empty(line):\n return not line or line.strip() == \">\"\n\n if not slice_tuple:\n return None\n\n slice_start, slice_end = slice_tuple\n\n if slice_start is None:\n slice_start = 0\n if slice_end is None:\n slice_end = len(lines)\n\n # Trim from beginning\n while slice_start < slice_end and _empty(lines[slice_start]):\n slice_start += 1\n\n # Trim from end\n while slice_end > slice_start and _empty(lines[slice_end - 1]):\n slice_end -= 1\n\n return (slice_start, slice_end)",
"def strip_range(start, end):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = \"\"\n for index, char in enumerate(func(*args, **kwargs)):\n if start <= index < end:\n result += DOT\n else:\n result += char\n return result\n return wrapper\n return decorator",
"def truncate(self, startindex: 'int const') -> \"void\":\n return _coin.SoLightPath_truncate(self, startindex)",
"def strip_range(start, end):\n\n def actual_decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n # replace characters\n new_str = []\n for i, letter in enumerate(kwargs[\"text\"]):\n if i in range(start, end):\n new_str.append(DOT)\n else:\n new_str.append(letter)\n kwargs[\"text\"] = \"\".join(new_str)\n return f(*args, **kwargs)\n\n return wrapper\n\n return actual_decorator",
"def truncate(self, length: 'int const', fit: 'int const'=0) -> \"void\":\n return _coin.SbPList_truncate(self, length, fit)",
"def trim_tokens_at_front(x,\n sequence_length,\n keys_to_trim=None,\n **unused_kwargs):\n\n for key in (keys_to_trim or sequence_length.keys()):\n if key in x:\n # trim tokens, leaving room for EOS which gets added later\n x[key] = x[key][-(sequence_length[key] - 1):]\n return x",
"def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoPath_truncate(self, length)",
"def truncate(self, start, end):\n note_count = 0\n track = self._pattern[0]\n idx = 0\n\n while idx < len(track):\n msg = track[idx]\n if note_count > end and not isinstance(msg, midi.EndOfTrackEvent):\n if isinstance(msg, midi.NoteOnEvent):\n off = self._get_note_off_event(msg.get_pitch(), track, idx)\n track.remove(msg)\n track.remove(off)\n elif not isinstance(msg, midi.NoteOffEvent):\n track.remove(msg)\n else:\n idx += 1\n elif isinstance(msg, midi.NoteOnEvent):\n if note_count < start:\n off = self._get_note_off_event(msg.get_pitch(), track, idx)\n track.remove(msg)\n track.remove(off)\n else:\n idx += 1\n note_count += 1\n else:\n idx += 1\n\n self.notes = self.get_notes()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
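trim_left reduces to a single slice assignment over the chosen sub-range; a quick illustration of the in-place effect, using an ordinary list rather than the real class:

# Drop a 4-character indent from lines 1..2 only, in place.
data = ["def f():", "    x = 1", "    return x", "print(f())"]
length, start, end = 4, 1, 3
data[start:end] = [line[length:] for line in data[start:end]]
assert data == ["def f():", "x = 1", "return x", "print(f())"]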
Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). | def get_text_block(self, start, flush_left=False):
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end] | [
"def _extract_block_from_next_pos(self, marker):\n block = ''\n if not self.oom.find_text(marker):\n return block\n\n line = self.oom.current()\n block += \"{}\\n\".format(line)\n for line in self.oom:\n if not line.startswith(' '):\n self.oom.back()\n break\n block += \"{}\\n\".format(line)\n return block",
"def get_block_indent(text):\n lines = text.split('\\n')\n cnt = []\n for i in lines:\n if i != '' and not i.isspace():\n cnt.append(get_indent(i))\n return min(cnt)",
"def one_paragraph_ragged_left():\n return dedent(\n \"\"\"\n Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n Integer condimentum, orci at auctor venenatis, dolor\n orci congue felis, sit amet luctus dolor est in felis.\n Class aptent taciti sociosqu ad litora torquent per conubia nostra, per\n inceptos himenaeos. Ut imperdiet ex sit amet lacinia condimentum.\"\"\"\n ).strip()",
"def _read_text_block(self, deadline=None):\n result = \"\"\n\n while True:\n line = _read_line(self.stdout_queue, deadline, \"utf-8\", \"replace\", False)\n\n if line.endswith(self.eof_marker):\n result += line[:-len(self.eof_marker)]\n break\n elif line.endswith('#EOF\\r\\n'):\n result += line[:-len('#EOF\\r\\n')]\n self.logger.warning('Got a CRLF-terminated #EOF - this is a driver bug.')\n break\n\n result += line\n\n return result",
"def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish",
"def continued_indentation(logical_line, tokens, indent_level, hang_closing,\r\n noqa, verbose):\r\n first_row = tokens[0][2][0]\r\n nrows = 1 + tokens[-1][2][0] - first_row\r\n if noqa or nrows == 1:\r\n return\r\n\r\n # indent_next tells us whether the next block is indented; assuming\r\n # that it is indented by 4 spaces, then we should not allow 4-space\r\n # indents on the final continuation line; in turn, some other\r\n # indents are allowed to have an extra 4 spaces.\r\n indent_next = logical_line.endswith(':')\r\n\r\n row = depth = 0\r\n # remember how many brackets were opened on each line\r\n parens = [0] * nrows\r\n # relative indents of physical lines\r\n rel_indent = [0] * nrows\r\n # for each depth, collect a list of opening rows\r\n open_rows = [[0]]\r\n # visual indents\r\n indent_chances = {}\r\n last_indent = tokens[0][2]\r\n # for each depth, memorize the visual indent column\r\n indent = [last_indent[1]]\r\n if verbose >= 3:\r\n print(\">>> \" + tokens[0][4].rstrip())\r\n\r\n for token_type, text, start, end, line in tokens:\r\n\r\n newline = row < start[0] - first_row\r\n if newline:\r\n row = start[0] - first_row\r\n newline = (not last_token_multiline and\r\n token_type not in (tokenize.NL, tokenize.NEWLINE))\r\n\r\n if newline:\r\n # this is the beginning of a continuation line.\r\n last_indent = start\r\n if verbose >= 3:\r\n print(\"... \" + line.rstrip())\r\n\r\n # record the initial indent.\r\n rel_indent[row] = expand_indent(line) - indent_level\r\n\r\n # identify closing bracket\r\n close_bracket = (token_type == tokenize.OP and text in ']})')\r\n\r\n # is the indent relative to an opening bracket line?\r\n valid_hang = 4 if (hang_closing or not close_bracket) else 0\r\n for open_row in reversed(open_rows[depth]):\r\n if rel_indent[row] == rel_indent[open_row] + valid_hang:\r\n break\r\n hang = rel_indent[row] - rel_indent[open_row]\r\n # is there any chance of visual indent?\r\n visual_indent = (not close_bracket and hang > 0 and\r\n indent_chances.get(start[1]))\r\n\r\n if close_bracket and indent[depth]:\r\n # closing bracket for visual indent\r\n if start[1] != indent[depth]:\r\n yield (start, \"E124 closing bracket does not match \"\r\n \"visual indentation\")\r\n elif close_bracket and not hang:\r\n # closing bracket matches indentation of opening bracket's line\r\n if hang_closing:\r\n yield start, \"E133 closing bracket is missing indentation\"\r\n elif indent[depth] and start[1] < indent[depth]:\r\n if visual_indent is not True:\r\n # visual indent is broken\r\n yield (start, \"E128 continuation line \"\r\n \"under-indented for visual indent\")\r\n elif hang == 4 or (indent_next and rel_indent[row] == 8):\r\n # hanging indent is verified\r\n if close_bracket and not hang_closing:\r\n yield (start, \"E123 closing bracket does not match \"\r\n \"indentation of opening bracket's line\")\r\n elif visual_indent is True:\r\n # visual indent is verified\r\n if not indent[depth]:\r\n indent[depth] = start[1]\r\n elif visual_indent in (text, str):\r\n # ignore token lined up with matching one from a previous line\r\n pass\r\n else:\r\n # indent is broken\r\n if hang <= 0:\r\n error = \"E122\", \"missing indentation or outdented\"\r\n elif indent[depth]:\r\n error = \"E127\", \"over-indented for visual indent\"\r\n elif hang % 4:\r\n error = \"E121\", \"indentation is not a multiple of four\"\r\n else:\r\n error = \"E126\", \"over-indented for hanging indent\"\r\n yield start, \"%s continuation line %s\" % error\r\n\r\n # look for visual indenting\r\n if 
(parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)\r\n and not indent[depth]):\r\n indent[depth] = start[1]\r\n indent_chances[start[1]] = True\r\n if verbose >= 4:\r\n print(\"bracket depth %s indent to %s\" % (depth, start[1]))\r\n # deal with implicit string concatenation\r\n elif (token_type in (tokenize.STRING, tokenize.COMMENT) or\r\n text in ('u', 'ur', 'b', 'br')):\r\n indent_chances[start[1]] = str\r\n # special case for the \"if\" statement because len(\"if (\") == 4\r\n elif not indent_chances and not row and not depth and text == 'if':\r\n indent_chances[end[1] + 1] = True\r\n\r\n # keep track of bracket depth\r\n if token_type == tokenize.OP:\r\n if text in '([{':\r\n depth += 1\r\n indent.append(0)\r\n if len(open_rows) == depth:\r\n open_rows.append([])\r\n open_rows[depth].append(row)\r\n parens[row] += 1\r\n if verbose >= 4:\r\n print(\"bracket depth %s seen, col %s, visual min = %s\" %\r\n (depth, start[1], indent[depth]))\r\n elif text in ')]}' and depth > 0:\r\n # parent indents should not be more than this one\r\n prev_indent = indent.pop() or last_indent[1]\r\n for d in range(depth):\r\n if indent[d] > prev_indent:\r\n indent[d] = 0\r\n for ind in list(indent_chances):\r\n if ind >= prev_indent:\r\n del indent_chances[ind]\r\n del open_rows[depth + 1:]\r\n depth -= 1\r\n if depth:\r\n indent_chances[indent[depth]] = True\r\n for idx in range(row, -1, -1):\r\n if parens[idx]:\r\n parens[idx] -= 1\r\n break\r\n assert len(indent) == depth + 1\r\n if start[1] not in indent_chances:\r\n # allow to line up tokens\r\n indent_chances[start[1]] = text\r\n\r\n last_token_multiline = (start[0] != end[0])\r\n\r\n if indent_next and expand_indent(line) == indent_level + 4:\r\n yield (last_indent, \"E125 continuation line does not distinguish \"\r\n \"itself from next logical line\")",
"def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"",
"def continued_indentation(logical_line, tokens, indent_level, hang_closing,\r\n noqa, verbose):\r\n first_row = tokens[0][2][0]\r\n nrows = 1 + tokens[-1][2][0] - first_row\r\n if noqa or nrows == 1:\r\n return\r\n\r\n # indent_next tells us whether the next block is indented; assuming\r\n # that it is indented by 4 spaces, then we should not allow 4-space\r\n # indents on the final continuation line; in turn, some other\r\n # indents are allowed to have an extra 4 spaces.\r\n indent_next = logical_line.endswith(':')\r\n\r\n row = depth = 0\r\n # remember how many brackets were opened on each line\r\n parens = [0] * nrows\r\n # relative indents of physical lines\r\n rel_indent = [0] * nrows\r\n # visual indents\r\n indent_chances = {}\r\n last_indent = tokens[0][2]\r\n indent = [last_indent[1]]\r\n if verbose >= 3:\r\n print(\">>> \" + tokens[0][4].rstrip())\r\n\r\n for token_type, text, start, end, line in tokens:\r\n\r\n newline = row < start[0] - first_row\r\n if newline:\r\n row = start[0] - first_row\r\n newline = (not last_token_multiline and\r\n token_type not in (tokenize.NL, tokenize.NEWLINE))\r\n\r\n if newline:\r\n # this is the beginning of a continuation line.\r\n last_indent = start\r\n if verbose >= 3:\r\n print(\"... \" + line.rstrip())\r\n\r\n # record the initial indent.\r\n rel_indent[row] = expand_indent(line) - indent_level\r\n\r\n if depth:\r\n # a bracket expression in a continuation line.\r\n # find the line that it was opened on\r\n for open_row in range(row - 1, -1, -1):\r\n if parens[open_row]:\r\n break\r\n else:\r\n # an unbracketed continuation line (ie, backslash)\r\n open_row = 0\r\n hang = rel_indent[row] - rel_indent[open_row]\r\n close_bracket = (token_type == tokenize.OP and text in ']})')\r\n visual_indent = (not close_bracket and hang > 0 and\r\n indent_chances.get(start[1]))\r\n\r\n if close_bracket and indent[depth]:\r\n # closing bracket for visual indent\r\n if start[1] != indent[depth]:\r\n yield (start, \"E124 closing bracket does not match \"\r\n \"visual indentation\")\r\n elif close_bracket and not hang:\r\n # closing bracket matches indentation of opening bracket's line\r\n if hang_closing:\r\n yield start, \"E133 closing bracket is missing indentation\"\r\n elif visual_indent is True:\r\n # visual indent is verified\r\n if not indent[depth]:\r\n indent[depth] = start[1]\r\n elif visual_indent in (text, str):\r\n # ignore token lined up with matching one from a previous line\r\n pass\r\n elif indent[depth] and start[1] < indent[depth]:\r\n # visual indent is broken\r\n yield (start, \"E128 continuation line \"\r\n \"under-indented for visual indent\")\r\n elif hang == 4 or (indent_next and rel_indent[row] == 8):\r\n # hanging indent is verified\r\n if close_bracket and not hang_closing:\r\n yield (start, \"E123 closing bracket does not match \"\r\n \"indentation of opening bracket's line\")\r\n else:\r\n # indent is broken\r\n if hang <= 0:\r\n error = \"E122\", \"missing indentation or outdented\"\r\n elif indent[depth]:\r\n error = \"E127\", \"over-indented for visual indent\"\r\n elif hang % 4:\r\n error = \"E121\", \"indentation is not a multiple of four\"\r\n else:\r\n error = \"E126\", \"over-indented for hanging indent\"\r\n yield start, \"%s continuation line %s\" % error\r\n\r\n # look for visual indenting\r\n if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)\r\n and not indent[depth]):\r\n indent[depth] = start[1]\r\n indent_chances[start[1]] = True\r\n if verbose >= 4:\r\n print(\"bracket depth %s indent to 
%s\" % (depth, start[1]))\r\n # deal with implicit string concatenation\r\n elif (token_type in (tokenize.STRING, tokenize.COMMENT) or\r\n text in ('u', 'ur', 'b', 'br')):\r\n indent_chances[start[1]] = str\r\n # special case for the \"if\" statement because len(\"if (\") == 4\r\n elif not indent_chances and not row and not depth and text == 'if':\r\n indent_chances[end[1] + 1] = True\r\n\r\n # keep track of bracket depth\r\n if token_type == tokenize.OP:\r\n if text in '([{':\r\n depth += 1\r\n indent.append(0)\r\n parens[row] += 1\r\n if verbose >= 4:\r\n print(\"bracket depth %s seen, col %s, visual min = %s\" %\r\n (depth, start[1], indent[depth]))\r\n elif text in ')]}' and depth > 0:\r\n # parent indents should not be more than this one\r\n prev_indent = indent.pop() or last_indent[1]\r\n for d in range(depth):\r\n if indent[d] > prev_indent:\r\n indent[d] = 0\r\n for ind in list(indent_chances):\r\n if ind >= prev_indent:\r\n del indent_chances[ind]\r\n depth -= 1\r\n if depth:\r\n indent_chances[indent[depth]] = True\r\n for idx in range(row, -1, -1):\r\n if parens[idx]:\r\n parens[idx] -= 1\r\n rel_indent[row] = rel_indent[idx]\r\n break\r\n assert len(indent) == depth + 1\r\n if start[1] not in indent_chances:\r\n # allow to line up tokens\r\n indent_chances[start[1]] = text\r\n\r\n last_token_multiline = (start[0] != end[0])\r\n\r\n if indent_next and expand_indent(line) == indent_level + 4:\r\n yield (last_indent, \"E125 continuation line does not distinguish \"\r\n \"itself from next logical line\")",
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)",
"def line_tokenize(text):\n current_line = []\n after_equals = False\n\n for block in tokenize(text):\n\n if block.startswith('\\n'):\n if len(current_line) > 1:\n if after_equals != False:\n current_line.append(' '.join(after_equals))\n after_equals = False\n yield current_line\n elif len(current_line) == 1 and len(current_line[0].strip()):\n yield current_line\n\n current_line = [block[1:]]\n\n elif after_equals != False:\n after_equals.append(block)\n\n else:\n if block == '=':\n after_equals = []\n current_line.append(block)\n elif len(block):\n current_line.append(block)",
"def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)",
"def test_dedent_too_much(one_paragraph_ragged_left):\n dedented_text = console.dedent(console.indent(one_paragraph_ragged_left, 2), num_spaces=4)\n assert dedented_text == one_paragraph_ragged_left",
"def test_hanging_indent(text):\n width = len(text) // 2 # Should force fill to three lines\n filled_text = console.fill(text, width=width, hanging=4)\n num_lines = filled_text.count(\"\\n\") + 1\n assert filled_text.count(\"\\n \") == 0 # 5 spaces indent\n assert filled_text.count(\"\\n \") == num_lines - 1 # 4 spaces indent",
"def split_by_newline(text, start=0):\r\n index = start\r\n while 1:\r\n new_index = text.find('\\n', index)\r\n if new_index == -1:\r\n yield (-1, text[index:])\r\n break\r\n yield (new_index + 1, text[index:new_index])\r\n index = new_index + 1",
"def delete_till_beginning_of_line(text):\n if text.rfind(\"\\n\") == -1:\n return ''\n return text[0:text.rfind(\"\\n\") + 1]",
"def _split_markdown_lines(markdown):\n block_fenced = False\n indent_fenced = False\n for line_number, line in enumerate(markdown.splitlines(True)):\n open_fence_this_iteration = False\n indent_fenced = line.startswith(\" \") or (indent_fenced and WHITE_SPACE_ONLY_PATTERN.match(line))\n if not block_fenced:\n if BLOCK_FENCE_START.match(line):\n open_fence_this_iteration = True\n block_fenced = True\n yield (line, block_fenced or indent_fenced, open_fence_this_iteration, line_number)\n if not open_fence_this_iteration and BLOCK_FENCE_END.match(line):\n block_fenced = False",
"def dedent(text, tabsize=8, skip_first_line=False):\n lines = text.splitlines(1)\n _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)\n return ''.join(lines)",
"def test_md027_good_block_quote_ordered_list_indented_code_block():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md027\",\n \"good_block_quote_ordered_list_indented_code_block.md\",\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )",
"def make_block(text, blocksize=60, spaces=False, newlines=False):\n if not spaces:\n # Simple block by chars.\n return (text[i:i + blocksize] for i in range(0, len(text), blocksize))\n if newlines:\n # Preserve newlines\n lines = []\n for line in text.split('\\n'):\n lines.extend(make_block(line, blocksize=blocksize, spaces=True))\n return lines\n\n # Wrap on spaces (ignores newlines)..\n words = text.split()\n lines = []\n curline = ''\n for word in words:\n possibleline = ' '.join((curline, word)) if curline else word\n\n if len(possibleline) > blocksize:\n lines.append(curline)\n curline = word\n else:\n curline = possibleline\n if curline:\n lines.append(curline)\n return lines"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
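A self-contained sketch of the get_text_block loop (the exception class is a local stand-in for the real UnexpectedIndentationError): collect lines until a blank line ends the block, optionally rejecting indented lines when flush_left is set.

class UnexpectedIndentation(Exception):
    pass

def get_text_block(data, start, flush_left=False):
    # Walk forward until a blank line (or the end of the data) and return the slice.
    end = start
    while end < len(data):
        line = data[end]
        if not line.strip():
            break
        if flush_left and line.startswith(" "):
            raise UnexpectedIndentation("indented line at index %d" % end)
        end += 1
    return data[start:end]

lines = ["para one", "still para one", "", "para two"]
assert get_text_block(lines, 0) == ["para one", "still para one"]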
Pad all double-width characters in self by appending `pad_char` to each. For East Asian language support. | def pad_double_width(self, pad_char):
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new) | [
"def padIt(str: unicode, padlen: int, endchar: int, padded: bool) -> unicode:\n ...",
"def _pad(string, length, char=None):\r\n \r\n if char == None:\r\n addchar = ' '\r\n else:\r\n addchar = char\r\n while len(string) < length:\r\n string += addchar\r\n return string",
"def padCharacters(self):\n #Find the longest word in the dataset\n\n # maxCharLen is the longest word\n maxCharLen = 0\n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n for sentence in data:\n for token in sentence['characters']:\n maxCharLen = max(maxCharLen, len(token))\n \n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n #Pad each other word with zeros\n for sentenceIdx in range(len(data)):\n for tokenIdx in range(len(data[sentenceIdx]['characters'])):\n token = data[sentenceIdx]['characters'][tokenIdx]\n data[sentenceIdx]['characters'][tokenIdx] = np.pad(token, (0,maxCharLen-len(token)), 'constant')\n \n self.maxCharLen = maxCharLen",
"def padCharacters(self):\n #Find the longest word in the dataset\n maxCharLen = 0\n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n for sentence in data:\n for token in sentence['characters']:\n maxCharLen = max(maxCharLen, len(token))\n \n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n #Pad each other word with zeros\n for sentenceIdx in range(len(data)):\n for tokenIdx in range(len(data[sentenceIdx]['characters'])):\n token = data[sentenceIdx]['characters'][tokenIdx]\n data[sentenceIdx]['characters'][tokenIdx] = np.pad(token, (0,maxCharLen-len(token)), 'constant')\n \n self.maxCharLen = maxCharLen",
"def pad(fingering, width):\n return ''.join(str(f).ljust(width) for f in str(fingering))",
"def pad(s: str) -> str:\n return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)",
"def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):\n if len(words) < padded_word_len:\n words += [[word_padding]] * (padded_word_len - len(words))\n elif len(words) > padded_word_len:\n words = words[:padded_word_len]\n else:\n pass\n words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]\n return words",
"def pad(text, length):\n return (' '*max(0, length-len(text)))+text",
"def right_pad(string, length, character):\r\n add_len = length - len(string)\r\n return f'{string}{character * add_len}'",
"def right_zfill(chars, size):\n for i in range(len(chars)):\n char = '{:<016d}'.format(chars[i])\n chars[i] = char\n return chars",
"def _pad_to(val, length=10, right_align=False):\n ret = str(val)\n if len(ret) < 10:\n padding = \" \"*(10-len(ret))\n\n if right_align:\n ret = padding + ret\n else:\n ret = ret + padding\n\n return ret",
"def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):\n if len(words) < padded_word_len:\n words += [[word_padding]] * (padded_word_len - len(words))\n words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]\n return words",
"def pad(self,\n length: int,\n pad_id: Optional[int] = 0,\n pad_type_id: Optional[int] = 0,\n pad_token: Optional[str] = \"[PAD]\",\n direction: Optional[str] = \"right\"):\n pass",
"def pad(self, suffix):",
"def pad_left(item, width, pad_char=\"0\"):\n return str(item).rjust(width,pad_char)",
"def sep ( char=\"-\", pad=20 ):\n print( char * pad )",
"def rjust(self, width: int, fillchar: str = ' ') -> BetterString:\r\n ret = self.string.rjust(int(width), str(fillchar))\r\n\r\n return BetterString(ret)",
"def pad_sequence(sequence, n, pad_left: bool = ..., pad_right: bool = ..., left_pad_symbol: Optional[Any] = ..., right_pad_symbol: Optional[Any] = ...):\n ...",
"def text_extend(text, width, padchar=\" \"):\n out = text.ljust(width, padchar)\n if len(out) > width:\n return \"...\"+out[(-1*width)+2:-1]\n return out",
"def fill_with_space_to_length(string_to_fill, new_length, align='left'):\n delta = new_length - len(string_to_fill)\n if delta > 0:\n if align == 'left':\n return string_to_fill + ' ' * delta\n return ' ' * delta + string_to_fill\n return string_to_fill"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
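The padding rule keys off unicodedata.east_asian_width, appending the pad character after every 'W'ide or 'F'ull-width character; a short standard-library-only sketch:

import unicodedata

def pad_double_width(line, pad_char):
    # Append pad_char after each East Asian wide/full-width character so that
    # downstream column arithmetic can treat every cell as single width.
    out = []
    for char in line:
        out.append(char)
        if unicodedata.east_asian_width(char) in "WF":
            out.append(pad_char)
    return "".join(out)

assert pad_double_width("ab\u6f22\u5b57", "*") == "ab\u6f22*\u5b57*"  # only the CJK chars get padded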
Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found. | def candidate_index(self, node):
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index | [
"def get_tag(self, candidate):\n return self.ordered.index(candidate)",
"def __index__(self, product, options=[]):\n cart_items = self.cart_serializable\n for i in range(len(cart_items)):\n if cart_items[i]['product_pk'] == product.pk:\n if sorted(cart_items[i]['option_pks']) == sorted([i.pk for i in options]):\n return i\n return -1",
"def find_index(self, path):\n for i in self.index:\n candidate = os.path.join(path, i)\n if os.path.isfile(candidate):\n return i, candidate\n return None, None",
"def get_version(self, index):\r\n for verneed, vernaux_iter in self.iter_versions():\r\n for vernaux in vernaux_iter:\r\n if vernaux['vna_other'] == index:\r\n return verneed, vernaux\r\n\r\n return None",
"def find(cls, promotion_id):\n try:\n document = cls.database[promotion_id]\n except KeyError:\n return None\n if '_rev' in document:\n return Promotion().deserialize(document)\n return None",
"def action_index(self, act_slot_response):\r\n for (i, action) in enumerate(self.feasible_actions):\r\n if act_slot_response == action:\r\n return i\r\n raise Exception(\"Action Index Not Found\")\r\n return None",
"def get_match(target, candidates, w2vmodel):\n # parse target string into a list of tokens\n new_s1 = get_token_list(target)\n scores = {candidates.index(s): pulp.value(word_mover_distance_probspec(new_s1, s, w2vmodel).objective) for\n s in\n candidates}\n return candidates[min(scores, key=scores.get)]",
"def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None",
"def index_by_chunk(self, chunk):\n for i, result in enumerate(self.result):\n if str(chunk).strip() == str(result.chunk).strip():\n return int(i)\n return None",
"def __call__(self, numbers: Sequence, target: Number) -> OptionalNumber:\r\n\t\t# iterate over the index, value pairings in the sequence\r\n\t\tfor index, number in enumerate(numbers):\r\n\t\t\t# if the current number is equal to the target\r\n\t\t\tif number == target:\r\n\t\t\t\t# return the index associated with the target\r\n\t\t\t\treturn index\r\n\t\t# target not found in the sequence, return None\r\n\t\treturn None",
"def get_index(sta, ver, svn, fix):\r\n for n, status, version, subver, hotfix in zip(range(len(version_n)), status_n, version_n, subver_n, hotfix_n):\r\n if sta == status and ver == version and svn == subver and fix == hotfix:\r\n return n\r\n return -1",
"def get_index(self, key):\r\n index = self.horner_hash(key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i ** 2) % self.table_size\r\n if self.hash_table[j] and self.hash_table[j].key == key:\r\n return j\r\n return None",
"def GetMatch(self, command, index):\n self.reason = ''\n token = command[index]\n found_close_match = False\n close_matches = 0\n matching_token = None\n for value in self.match:\n if value == token:\n return value\n if value.startswith(token):\n close_matches += 1\n if not found_close_match:\n # The \"closest\" is the first non-exact find if we don't\n # find an exact match.\n matching_token = value\n found_close_match = True\n if found_close_match and close_matches == 1:\n return matching_token\n\n self.reason = 'Must match one of: %s' % ','.join(self.match)\n return None",
"def find_candidate(num_list):\n\n\tif num_list is None or len(num_list) == 0:\n\t\treturn None\n\n\tmajority_index = 0\n\tcount = 1\n\n\tfor i in range(1, len(num_list)):\n\t\tif num_list[majority_index] == num_list[i]:\n\t\t\tcount += 1\n\t\telse:\n\t\t\tcount -= 1\n\n\t\tif count == 0:\n\t\t\tmajority_index = i\n\t\t\tcount = 1\n\n\treturn num_list[majority_index]",
"def find(target, items):\n for i in range(len(items)):\n if target == items[i]:\n return i\n return -1",
"def compute_closest_waypt_idx(self, desired_waypt_config, waypt_configs):\n # TODO: Potentially add linear and angular velocity here\n diff_pos_nk2 = desired_waypt_config.position_nk2() - waypt_configs.position_nk2()\n diff_heading_nk1 = angle_normalize(desired_waypt_config.heading_nk1().numpy() -\n waypt_configs.heading_nk1().numpy())\n diff = tf.concat([diff_pos_nk2, diff_heading_nk1], axis=2)\n idx = tf.argmin(tf.norm(diff, axis=2))\n return idx.numpy()[0]",
"def _getInformantBestPos(self,particle, swarm):\n best_fitness = sys.float_info.max\n best_pos = None\n for i in particle.informants:\n if best_fitness > swarm[i].fitness:\n best_fitness = swarm[i].fitness\n best_pos = swarm[i].position\n return best_pos",
"def get_candidate(self, id):\n return self.candidate_hash[id]\n #for c in self.candidates:\n # if c.id == id:\n # return c\n #return False",
"def __get_index_from_random_card(self):\n return self.cards.index(self.get_random_card())",
"def getCase(self,position):\n for i in range(len(self.cases)):\n if position in self.cases[i]:\n return i"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
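A simplified, docutils-free sketch of the promotion test in candidate_index: the candidate must be the first child that is not pre-bibliographic, it must also be the last child, and it must be a section (the predicates below are illustrative stand-ins for the real node classes):

def candidate_index(children, is_prebib, is_section):
    # First child that is not pre-bibliographic metadata, if any.
    index = next((i for i, child in enumerate(children) if not is_prebib(child)), None)
    if index is None or len(children) > index + 1 or not is_section(children[index]):
        return None, None
    return children[index], index

children = ["#meta", "#meta", "section:Intro"]
node, idx = candidate_index(children,
                            is_prebib=lambda c: c.startswith("#"),
                            is_section=lambda c: c.startswith("section:"))
assert (node, idx) == ("section:Intro", 2)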
Set document['title'] metadata title from the following sources, listed in order of priority: an existing title in the document metadata, the "title" setting, or the document's first section title. | def set_metadata(self):
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext() | [
"def title(self, title):\r\n doc.title = title",
"def set_title(self, value):\n return self._set_one_attribute(self.AttributeNames.TITLE, value)",
"def _write_title(self) -> None:\n self.doc.preamble.append(Command('title', self.json_summary[\"title\"]))\n self.doc.preamble.append(Command('author', f\"FastEstimator {fe.__version__}\"))\n self.doc.preamble.append(Command('date', NoEscape(r'\\today')))\n self.doc.append(NoEscape(r'\\maketitle'))",
"def set_title(self, new_title):\n\n\t\tself.title(new_title)",
"async def set_title(self, title: str):\n self.preview_embed.title = title",
"def updateTitle(self, newTitle):\n if self.title == None: \n self.title = newTitle",
"def create_title(self):\n self.data['Title'] = (self.data['Name']\n .str.extract(r' ([A-Za-z]+)\\.', expand=False)\n .replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer',\n 'Lady', 'Major', 'Rev', 'Sir', 'Dona'], 'Rare')\n .replace('Mlle', 'Miss')\n .replace('Mme', 'Mrs')\n .replace('Ms', 'Miss'))",
"def set_photo_title_from_template(\n photo: osxphotos.PhotoInfo, title_template: str, dry_run: bool\n):\n if not title_template:\n return\n\n # don't render None values\n render_options = RenderOptions(none_str=\"\")\n\n title_string, _ = photo.render_template(title_template, render_options)\n title_string = [ts for ts in title_string if ts]\n if not title_string:\n verbose(\n f\"No title returned from template, nothing to do: [bold]{title_template}\"\n )\n return\n\n if len(title_string) > 1:\n echo_error(\n f\"[error] Title template must return a single string: [bold]{title_string}\"\n )\n sys.exit(1)\n\n verbose(f\"Setting [i]title[/i] to [bold]{title_string[0]}\")\n if not dry_run:\n ps_photo = photoscript_photo(photo)\n ps_photo.title = title_string[0]",
"def save(self, *args, **kwargs):\n\t\tif self.title == None or len(self.title) == 0: self.title = str(self.doc)\n\t\tif self.title.rfind('/') != -1: self.title = self.title[self.title.rfind('/') + 1:]\n\t\tsuper(Document, self).save(*args, **kwargs)",
"def __build_title_stuff( self, data_dict ):\n title_info = etree.SubElement( self.mods, self.MODS+'titleInfo' )\n title = etree.SubElement( title_info, self.MODS+'title' )\n title.text = data_dict[ 'object_title' ] or 'No title'",
"def fetch_title( f ):\n return f.Info['/Title']",
"def add_titles(self, tag):\n self.title_rom = tag.get('data-romaji')\n self.title_en = tag.get('data-english')\n self.title_other = tag.get('data-alternate').split(',')",
"def set_title(self, title, **kwargs):\n self.fig.suptitle(t=title, **kwargs)",
"def page_title(self, page_title):\n\n self._page_title = page_title",
"def edit_title(self, new_title):\n self.title = new_title",
"def _write_title(self, **kwargs):\n\t\tif kwargs.get('write_title'):\n\t\t\tself.fig.suptitle(self.fig_title)\n\t\telse:\n\t\t\tpass",
"def title(self):\n if self._score.metadata is not None:\n return self._score.metadata.title\n return self._filename",
"def _xml_title(self):\n title = E.titles(E.title(self.title))\n\n return title",
"def get_title_text(doc_id):\n data=read_data(\"doc-data.json\")\n\n text = data.get(doc_id).get(\"Text\")\n\n title = data.get(doc_id).get(\"Title\")\n\n return title[0] + text",
"def meta_track_with_title(meta_track: MidiTrack, title: str) -> MidiTrack:\n result = meta_track.copy()\n title_msg = MetaMessage('track_name', name=title)\n\n # insert title message at the very beginning, it takes precedence over\n # any later title messages lolidk\n result.insert(0, title_msg)\n\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
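set_metadata applies a simple priority chain: keep an existing title, otherwise use the "title" setting, otherwise fall back to the document's first title node. A minimal sketch of that fallback order, with plain values standing in for docutils nodes:

def resolve_title(existing, settings_title, first_node_title):
    # Priority: existing metadata title > "title" setting > first title node.
    if existing is not None:
        return existing
    if settings_title is not None:
        return settings_title
    return first_node_title  # may be None if the document has no title node

assert resolve_title("Kept", "From settings", "From doc") == "Kept"
assert resolve_title(None, "From settings", "From doc") == "From settings"
assert resolve_title(None, None, "From doc") == "From doc"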
Mask the email address in `ref` and return a replacement node. `ref` is returned unchanged if it contains no email address. For email addresses such as "user@host", mask the address as "user at host" (text) to thwart simple email address harvesters (except for those listed in `non_masked_addresses`). If a PEP number (`pepno`) is given, return a reference including a default email subject. | def mask_email(ref, pepno=None):
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
if ref['refuri'][8:] in non_masked_addresses:
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref | [
"def mask_email(email: str) -> str:\n if email.count(\"@\") != 1:\n raise ValueError(\"Invalid email address, should have exactly one @\")\n address, domain = email.split(\"@\")\n if not address:\n raise ValueError(\"Invalid email address, address should not be empty\")\n if not domain:\n raise ValueError(\"Invalid email address, domain should not be empty\")\n domain_fore, _, domain_tld = domain.rpartition(\".\")\n if not domain_fore:\n raise ValueError(\"Invalid email address, cannot work out domain\")\n if not domain_tld:\n raise ValueError(\"Invalid email address, cannot work out domain tld\")\n return f\"{address[:2]}***@{domain_fore[:2]}***.{domain_tld}\"",
"def perturb_reference(self, reference: Reference, rng: Random) -> Reference:\n return replace(reference, output=Output(text=self.perturb(reference.output.text, rng)), tags=reference.tags)",
"def get_email(self, node, expression='.', *, error=True):\n\n matches = []\n # If the text would be split across multiple sub-tags.\n for match in node.xpath('{}//*[contains(text(), \"@\")]'.format(expression)):\n matches.append(match.text_content())\n # The text version is more likely to be correct, as it is more visible,\n # e.g. ca_bc has one `href` of `mailto:first.last.mla@leg.bc.ca`.\n for match in node.xpath('{}//a[contains(@href,\"mailto:\")]'.format(expression)):\n matches.append(unquote(match.attrib['href']))\n # If the node has no sub-tags.\n if not matches:\n for match in node.xpath('{}//text()[contains(., \"@\")]'.format(expression)):\n matches.append(match)\n if matches:\n for match in matches:\n match = email_re.search(match)\n if match:\n return match.group(1)\n if error:\n raise Exception('No email pattern in {}'.format(matches))\n elif error:\n raise Exception('No email node in {}'.format(etree.tostring(node)))",
"def quoteaddr(addr):\r\n m = (None, None)\r\n try:\r\n m = email.Utils.parseaddr(addr)[1]\r\n except AttributeError:\r\n pass\r\n if m == (None, None): # Indicates parse failure or AttributeError\r\n # something weird here.. punt -ddm\r\n return \"<%s>\" % addr\r\n elif m is None:\r\n # the sender wants an empty return address\r\n return \"<>\"\r\n else:\r\n return \"<%s>\" % m",
"def GetFullAddress(name):\n if \"@\" not in name:\n domain = ezmail.CONFIG.get(\"domain\")\n if domain:\n return \"%s@%s\" % (name, domain)\n else:\n return \"%s@%s\" % (name, ezmail._get_hostname())\n else:\n return name",
"def mask_ip_addr(addr, keep_last=True):\n tip = ''\n if isinstance(addr, str):\n tl = addr.split('.')\n for i in range(0, len(tl) - 1):\n tip += 'xxx.'\n if keep_last:\n tip += tl[len(tl) - 1]\n else:\n tip += 'xxx'\n return tip",
"def get_email_without_link(email):\n\tif not frappe.get_all(\"Email Account\", filters={\"enable_automatic_linking\": 1}):\n\t\treturn email\n\n\ttry:\n\t\t_email = email.split(\"@\")\n\t\temail_id = _email[0].split(\"+\", 1)[0]\n\t\temail_host = _email[1]\n\texcept IndexError:\n\t\treturn email\n\n\treturn f\"{email_id}@{email_host}\"",
"def replace_emails(text, replace_with=\"<EMAIL>\"):\n result = re.sub(EMAIL_REGEX, replace_with, text)\n return result",
"def maskedAccount(account):\n suffix = None\n if '@' in account:\n name, _, suffix = account.partition('@')\n else:\n name = account\n _len = len(name)\n if _len <= 3:\n return account\n plen = 2 if _len > 3 else 1\n name = '%s%s%s' % (name[:plen], '*' * (_len - 2 * plen), name[_len - plen:])\n return '%s@%s' % (name, suffix) if suffix else name",
"def _fix_email(self, email):\n result = {\n 'invalid': 0,\n 'cleaned': 0,\n 'unchecked': 0,\n 'removed': 0\n }\n if re.search(r'^\\s*$', email.text) is not None:\n result['removed'] = 1\n email.decompose()\n return result\n\n if re.search(r'%20', email['href']) is not None:\n result['cleaned'] = 1\n email['href'] = re.sub(r'%20', '', email['href'])\n\n address = email['href'][7:] # strip off the leading mailto:\n info = cache.get_default().get_email(address)\n\n if not info.is_valid:\n if info.reason == 'accepted_email':\n result['unchecked'] = 1\n email.insert(0, '*UNCHECKED*')\n else:\n result['invalid'] = 1\n email.insert(0, '*INVALID {:s}*'.format(info.reason))\n return result",
"def AS_newreference(self, ref):\n\t\tif isinstance(ref, GenericReference):\n\t\t\treturn ref.AS_resolve(Reference, self.AS_appdata)\n\t\telif isinstance(ref, aem.Query):\n\t\t\treturn Reference(self.AS_appdata, ref)\n\t\telif ref is None:\n\t\t\treturn Reference(self.AS_appdata, aem.app)\n\t\telse:\n\t\t\treturn Reference(self.AS_appdata, aem.customroot(ref))",
"def link_emails_to_crm(ticket, noop):\n if ticket.fields.resolution is not None:\n # don't handle closed tickets now\n return\n if not ticket.fields.description:\n # if the description is empty there are no emails to parse\n return\n\n crm = Hatchbuck(os.environ.get(\"HATCHBUCK_APIKEY\"), noop)\n # logging.debug(ticket.fields.description)\n\n def repl(match):\n \"\"\"\n match is a tuple and looks like one of:\n ('e@example.com', 'e@example.com', '')\n ('<e@example.com>', 'e@example.com', '')\n ('<e@example.com> (https://example.com...)',\n 'e@example.com', ' (https://example.com...)')\n ( full match that will be replaced;\n pure email only;\n rest of string with link if present)\n :param match: tuple\n :return: email with brackets and link to crm\n \"\"\"\n # logging.debug(\"%s %s %s\", match[0], match[1], match[2])\n profile = crm.search_email(match[1])\n if profile is None:\n # email not found in CRM\n return match[0] # don't create contacts at the moment\n # if ticket.fields.resolution is not None:\n # ticket closed, don't create contact and don't replace any link\n # return match[0]\n # profile = {'emails': {'address': match[1], 'type': 'Work'}}\n # if not noop:\n # profile = crm.create(profile)\n # else:\n # profile['contactUrl'] = \"https://example.com/\"\n return \"<\" + match[1] + \"> (\" + profile[\"contactUrl\"] + \")\"\n\n description = re.sub(\n r\"[<]?([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)[>]?\"\n r\"( \\(https:\\/\\/app.hatchbuck.com\\/\\/Contact\\/ContactDetail\\?eid=[a-zA-Z0-9]+\\))?\",\n repl,\n ticket.fields.description,\n )\n if description != ticket.fields.description:\n logging.warning(\"%s new description: %s\", ticket, description)\n if not noop:\n ticket.update(description=description)",
"def email_reply_to_address(self) -> ConfigNodePropertyString:\n return self._email_reply_to_address",
"def strip_mail_ext_address(mail, delimiters=None):\n\n if not delimiters:\n delimiters = settings.RECIPIENT_DELIMITERS\n\n (_orig_user, _domain) = mail.split('@', 1)\n for delimiter in delimiters:\n if delimiter in _orig_user:\n (_user, _ext) = _orig_user.split(delimiter, 1)\n return _user + '@' + _domain\n\n return mail",
"def rereference(edf: mne.io.RawArray, desired_ref: str, current_ref: str=None, pick_chans: list=None) -> Tuple[mne.io.RawArray, str]:\n if pick_chans is None:\n chans = edf.ch_names\n else:\n chans = pick_chans\n if current_ref == desired_ref:\n return edf, current_ref\n\n if desired_ref in ['linked_ear'] and 'M1' not in chans or 'M2' not in chans:\n warnings.warn('Trying to reference to linked ear, but missing M1 and M2 channels. EEG file will not be re-referenced', EEGWarning)\n return edf, current_ref\n\n\n if current_ref == 'contra_mastoid':\n to_reref = [ch for ch in chans if ch not in ['M1','M2']]\n left = [ch for ch in to_reref if len([n for n in ['1','3','5','7','9'] if n in ch])>0]\n right = [ch for ch in to_reref if len([n for n in ['2','4','6','8','10'] if n in ch])>0]\n if len(left) > 0 and 'M2' not in chans:\n warnings.warn(\n 'Trying to reference to left channels to M2 ear, but missing M2 channel. left channels cannot be unreferenced')\n left_ref = []\n left = []\n else:\n left_ref = ['M2'] * len(left)\n if len(right) > 0 and 'M1' not in chans:\n warnings.warn(\n 'Trying to reference to right channels to M1 ear, but missing M1 channel. right channels cannot be unreferenced')\n right_ref = []\n right = []\n else:\n right_ref = ['M1'] * len(right)\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n edf = mne.set_bipolar_reference(edf, left+right, left_ref+right_ref, drop_refs=False, verbose=False)\n edf = edf.drop_channels(left + right)\n edf.rename_channels({ch: ch.split('-')[0] for ch in edf.ch_names})\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n\n ref_type = desired_ref\n if desired_ref == 'contra_mastoid':\n if current_ref == 'linked_ear':\n edf = edf.apply_function(lambda x: -x, picks=['M1','M2'])\n edf, _ = mne.set_eeg_reference(edf, ref_channels=['M1', 'M2'], verbose=False)\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n to_reref = [ch for ch in chans if ch not in ['M1','M2']]\n left = [ch for ch in to_reref if len([n for n in ['1','3','5','7','9','z'] if n in ch])>0]\n right = [ch for ch in to_reref if len([n for n in ['2','4','6','8','10'] if n in ch])>0]\n if len(left) > 0 and 'M2' not in chans:\n warnings.warn(\n 'Trying to reference to left channels to M2 ear, but missing M2 channel. left channels will not be re-referenced')\n left_ref = []\n left = []\n ref_type = 'contra_right_only'\n else:\n left_ref = ['M2'] * len(left)\n if len(right) > 0 and 'M1' not in chans:\n warnings.warn(\n 'Trying to reference to right channels to M1 ear, but missing M1 channel. right channels will not be re-referenced')\n right_ref = []\n right = []\n ref_type = 'contra_left_only'\n else:\n right_ref = ['M1'] * len(right)\n edf = mne.set_bipolar_reference(edf, left+right, left_ref+right_ref, drop_refs=False, verbose=False)\n edf = edf.drop_channels(left + right)\n edf.rename_channels({ch:ch.split('-')[0] for ch in edf.ch_names})\n elif desired_ref == 'linked_ear':\n edf, _ = mne.set_eeg_reference(edf, ref_channels=['M1','M2'], verbose=False)\n else:\n edf, _ = mne.set_eeg_reference(edf, ref_channels=desired_ref, verbose=False)\n\n return edf, ref_type",
"def resolve_one_reference(key, val, fmt, meta):\n\n if key == \"Link\":\n text = stringify(val[1])\n target = val[2][0]\n m = re.match(r\"#(.+)$\", target)\n if m:\n # pandoc automatically makes labels for headings.\n label = m.group(1).lower()\n label = re.sub(r\"[^\\w-]+\", \"\", label) # Strip HTML entities\n text = re.sub(r\"_\", r\"\\_\", text) # Escape underscores in display text\n return RawInline(\"tex\", rf\"\\hyperref[{label}]{{{text}}}\")\n\n # Other elements will be returned unchanged.",
"def preProcess(email: str):\n # Make entire email to lower case\n email = email.lower()\n \n # Strip html tags (strings that look like <blah> where 'blah' does not\n # contain '<' or '>')... replace with a space\n email = re.sub('<[^<>]+>', ' ', email)\n\n # Replace any number with a string 'number'\n email = re.sub('[0-9]+', 'number', email)\n\n # Anything starting with http or https:// replaced with 'httpaddr'\n email = re.sub('(http|https)://[^\\s]*', 'httpaddr', email)\n\n # Strings with \"@\" in the middle are considered emails --> 'emailaddr'\n email = re.sub('[^\\s]+@[^\\s]+', 'emailaddr', email)\n\n # Replace $ with 'dollar'\n email = re.sub('[$]+' , 'dollar', email)\n\n return email",
"def fix_refuris(self, tree):\r\n fname = \"__\" + self.config.master_doc + \"__\"\r\n for refnode in tree.traverse(nodes.reference):\r\n if 'refuri' not in refnode:\r\n continue\r\n refuri = refnode['refuri']\r\n hashindex = refuri.find('#')\r\n if hashindex < 0:\r\n continue\r\n hashindex = refuri.find('#', hashindex + 1)\r\n if hashindex >= 0:\r\n refnode['refuri'] = fname + refuri[hashindex:]",
"def marking_ref(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"marking_ref\")",
"def apply_addr_mask(mask, val):\n b = [d for d in format(val, \"036b\")]\n for i, d in enumerate(mask):\n if d == \"X\":\n b[i] = \"X\"\n if d == \"1\":\n b[i] = \"1\"\n return gen_addrs(\"\".join(b))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign numbers to autonumbered footnotes. For labeled autonumbered footnotes, copy the number over to corresponding footnote references. | def number_footnotes(self, startnum):
for footnote in self.document.autofootnotes:
while True:
label = str(startnum)
startnum += 1
if label not in self.document.nameids:
break
footnote.insert(0, nodes.label('', label))
for name in footnote['names']:
for ref in self.document.footnote_refs.get(name, []):
ref += nodes.Text(label)
ref.delattr('refname')
assert len(footnote['ids']) == len(ref['ids']) == 1
ref['refid'] = footnote['ids'][0]
footnote.add_backref(ref['ids'][0])
self.document.note_refid(ref)
ref.resolved = 1
if not footnote['names'] and not footnote['dupnames']:
footnote['names'].append(label)
self.document.note_explicit_target(footnote, footnote)
self.autofootnote_labels.append(label)
return startnum | [
"def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes",
"def makeFootnoteRefId(self, id):\r\n return 'fnref:%s' % id",
"def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]",
"def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()",
"def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote",
"def set_n(self, temp, n):\n self.temp_dict[temp]['n'] = n",
"def set_name_notes(ibs, nid_list, notes_list):\n ibsfuncs.assert_lblannot_rowids_are_type(ibs, nid_list, ibs.lbltype_ids[constants.INDIVIDUAL_KEY])\n ibs.set_lblannot_notes(nid_list, notes_list)",
"def renumber(apps, schema_editor):\n # Issue = apps.get_model('issues', 'Issue')\n from apps.issues.models import Issue\n Issue.objects.renumber()",
"def write_note(note, data, current_point, counts_num):\n column = np.where(data[0] == note)[0]\n data[current_point:current_point + counts_num, column] = 1",
"def reindex(self):\n for idx, line in enumerate(self.line_map):\n line.index = idx\n if line.annotations:\n for x in line.annotations:\n x.line_num = idx",
"def add_motif_counts(self, list_of_counts):\n self.motif_counts = list_of_counts",
"def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset",
"def makeFootnoteId(self, id):\r\n return 'fn:%s' % id",
"def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1",
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def assign_family_tree_internal_IDs(family_tree):\n internal_id = 1\n for key in keys_sorted_by_weight(family_tree):\n node = family_tree[key]\n node['.id'] = internal_id\n internal_id += 1\n # be sure that the DEFAULT_NODE_KEY has the id DEFAULT_NODE_ID\n node = family_tree[DEFAULT_NODE_KEY]\n offset = node['.id']\n for node in family_tree.itervalues():\n node['.id'] += (DEFAULT_NODE_ID - offset)",
"def set_elems_number(self, elems_number):\n assert len(elems_number) == self.natoms\n self.elems = [elements.number.keys()[i] for i in elems_number]\n return",
"def apply_pseudocounts(cls, motif, pseudocounts):\n if pseudocounts is not None:\n if pseudocounts == \"jaspar\":\n pseudocounts = motifs.jaspar.calculate_pseudocounts(motif)\n motif.pseudocounts = pseudocounts"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
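The probing loop at the top of number_footnotes above simply searches for the next unused numeric name; a minimal standalone sketch of that step, with a plain set standing in for self.document.nameids (the helper name is made up for illustration):

def next_free_label(startnum, used_names):
    # Return (label, next_startnum), where label is the first str(n) not already used.
    while True:
        label = str(startnum)
        startnum += 1
        if label not in used_names:
            return label, startnum

# next_free_label(1, {'1', '2'}) == ('3', 4)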
Assign numbers to autonumbered footnote references. | def number_footnote_references(self, startnum):
i = 0
for ref in self.document.autofootnote_refs:
if ref.resolved or ref.hasattr('refid'):
continue
try:
label = self.autofootnote_labels[i]
except IndexError:
msg = self.document.reporter.error(
'Too many autonumbered footnote references: only %s '
'corresponding footnotes available.'
% len(self.autofootnote_labels), base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.autofootnote_refs[i:]:
if ref.resolved or ref.hasattr('refname'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
ref += nodes.Text(label)
id = self.document.nameids[label]
footnote = self.document.ids[id]
ref['refid'] = id
self.document.note_refid(ref)
assert len(ref['ids']) == 1
footnote.add_backref(ref['ids'][0])
ref.resolved = 1
i += 1 | [
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn",
"def makeFootnoteRefId(self, id):\r\n return 'fnref:%s' % id",
"def References(self, document):\n self.sequence = list(map(document.Reference, self.sequence))",
"def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset",
"def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote",
"def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes",
"def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]",
"def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()",
"def owner_references(self, owner_references):\n\n self._owner_references = owner_references",
"def set_n(self, temp, n):\n self.temp_dict[temp]['n'] = n",
"def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1",
"def renumber(apps, schema_editor):\n # Issue = apps.get_model('issues', 'Issue')\n from apps.issues.models import Issue\n Issue.objects.renumber()",
"def optSetRefNr(*args):\n return _optcc.optSetRefNr(*args)",
"def __mixing_references_on_n(self, list_refs):\n all = {x: [0, ''] for x in set.union(*map(set, list_refs))}\n for ref in list_refs:\n for word, origin in ref.items():\n all[word][0] += 1\n all[word][1] = origin\n return {word: origin for word, (count, origin) in all.items() if count >= self.mix}",
"def register_reference(target, count=1):",
"def makeFootnoteId(self, id):\r\n return 'fn:%s' % id",
"def update_reference(self, index, uri, text):\n el = self.xpath('./person/ref')[index]\n assert el.tag == 'ref' #check sanity\n el.set('target', uri)\n el.text = text\n return el",
"def set_name_notes(ibs, nid_list, notes_list):\n ibsfuncs.assert_lblannot_rowids_are_type(ibs, nid_list, ibs.lbltype_ids[constants.INDIVIDUAL_KEY])\n ibs.set_lblannot_notes(nid_list, notes_list)",
"def set_reference_index(self, value): # pragma: no cover\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add symbols indexes to "[*]"-style footnotes and references. | def symbolize_footnotes(self):
labels = []
for footnote in self.document.symbol_footnotes:
reps, index = divmod(self.document.symbol_footnote_start,
len(self.symbols))
labeltext = self.symbols[index] * (reps + 1)
labels.append(labeltext)
footnote.insert(0, nodes.label('', labeltext))
self.document.symbol_footnote_start += 1
self.document.set_id(footnote)
i = 0
for ref in self.document.symbol_footnote_refs:
try:
ref += nodes.Text(labels[i])
except IndexError:
msg = self.document.reporter.error(
'Too many symbol footnote references: only %s '
'corresponding footnotes available.' % len(labels),
base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.symbol_footnote_refs[i:]:
if ref.resolved or ref.hasattr('refid'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
footnote = self.document.symbol_footnotes[i]
assert len(footnote['ids']) == 1
ref['refid'] = footnote['ids'][0]
self.document.note_refid(ref)
footnote.add_backref(ref['ids'][0])
i += 1 | [
"def prepare_symbols(self):",
"def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn",
"def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]",
"def add_symbol(self):\r\n for j in range(len(self.atom)):\r\n self.ax.text(\r\n self.coord[j][0] + 0.1,\r\n self.coord[j][1] + 0.1,\r\n self.coord[j][2] + 0.1,\r\n f\"{self.atom[j]},{j}\",\r\n fontsize=9,\r\n )",
"def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes",
"def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()",
"def add_symbols(self, symbols):\n for symbol in symbols:\n self.add_symbol(symbol)",
"def show_refs(index):\n indent = \" : \"\n for ref, defn in index.links:\n print(format_ref(ref))\n if defn:\n print(indent, defn.format())\n for loc in index.locs[defn.id]:\n print(indent, format_def_with_location(defn, loc.location))\n else:\n print(indent, \"None\")\n continue",
"def _set_tag_indexes(self):\n self.opening_lexes = {}\n self.closing_lexes = {}\n for l in self.lexes:\n self.opening_lexes[l[0]] = l\n self.closing_lexes[l[1]] = l\n self.opening_sents = {}\n self.closing_sents = {}\n for s in self.sentences:\n self.opening_sents[s[0]] = s\n self.closing_sents[s[1]] = s",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def add_info(lines:list, infos:list, start_index:int):\n\tfor line, info in zip(lines,infos):\n\t\tend_index = start_index + len(info)\n\t\tline.extend(['']*(end_index-len(line)))\n\t\tline[start_index:end_index] = info",
"def generate_head2(head2, index, link_index):\n if head2[:2] == '##':\n head2 = head2[2:]\n \n return f'\\t{index}. [{head2}](#p{link_index})'",
"def testFootnotes(self, b, u):\n rx = re.compile(r'\\\\f \\+ [^\\\\][^f][^r]')\n if not rx.search(u) == None:\n print('Footnote without back reference in: ' + b)",
"def add_escapement_back_in_unit_ref(unit_name):\n escaped_text = \"\"\n for c in unit_name:\n if ( c == ESCAPE_SYM or is_boundary_sym(c) or is_comment_sym(c)\n or is_unit_ref_modifier_sym(c)):\n escaped_text += ESCAPE_SYM + c\n else:\n escaped_text += c\n return add_escapement_back_for_not_comments(escaped_text)",
"def OutputSymbolExtraLinks(symbol):\n desc = ''\n\n if False: # NEW FEATURE: needs configurability\n sstr = uri_escape(symbol)\n mstr = uri_escape(MODULE)\n desc += '''<ulink role=\"extralinks\" url=\"http://www.google.com/codesearch?q=%s\">code search</ulink>\n<ulink role=\"extralinks\" url=\"http://library.gnome.org/edit?module=%s&symbol=%s\">edit documentation</ulink>\n''' % (sstr, mstr, sstr)\n\n return desc",
"def OutputSymbolTraits(symbol):\n\n desc = ''\n\n if symbol in Since:\n link_id = \"api-index-\" + Since[symbol]\n desc += \"<para role=\\\"since\\\">Since: <link linkend=\\\"%s\\\">%s</link></para>\" % (link_id, Since[symbol])\n\n if symbol in StabilityLevel:\n stability = StabilityLevel[symbol]\n if stability in AnnotationDefinition:\n AnnotationsUsed[stability] = True\n stability = \"<acronym>%s</acronym>\" % stability\n desc += \"<para role=\\\"stability\\\">Stability Level: %s</para>\" % stability\n return desc",
"def glue_notes(notes, add_marks=True):\n all_notes=[]\n for n in notes:\n if add_marks:\n all_notes=all_notes+[350]+list(n)+[351] \n else:\n all_notes=all_notes+list(n)\n return np.array(all_notes)",
"def add_symbols(self, symbols: List[Symbol]):\n if len(symbols) == 0:\n return\n for symbol in symbols:\n self.add_symbol(symbol)",
"def add_all_refs(self, line):\n # find lone strings with no brackets\n p = re.compile(r'.*\\:\\s*([^\\s\\[\\]]+).*')\n self.add_ref_pattern(p, line)\n # find objects in one or more bracket sets with possible first token and comma\n p = re.compile(r'.*\\[(?:(.*),\\s*)?((?:\\[??[^\\[]*?))\\]')\n self.add_ref_pattern(p, line)\n p = re.compile(r'.*Optional\\[Union\\[([^,]+)')\n self.add_ref_pattern(p, line)\n return line",
"def list_symbols(self) -> str:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
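The label scheme in symbolize_footnotes above repeats a symbol once more for every full pass through the symbol list; a minimal standalone sketch, using an illustrative symbols tuple rather than the one docutils actually ships:

def symbol_label(n, symbols=('*', '\u2020', '\u2021', '\u00a7')):
    # n is the zero-based index of the symbol footnote within the document.
    reps, index = divmod(n, len(symbols))
    return symbols[index] * (reps + 1)

# symbol_label(0) == '*', symbol_label(4) == '**', symbol_label(5) == '\u2020\u2020'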
Link manually-labeled footnotes and citations to/from their references. | def resolve_footnotes_and_citations(self):
for footnote in self.document.footnotes:
for label in footnote['names']:
if label in self.document.footnote_refs:
reflist = self.document.footnote_refs[label]
self.resolve_references(footnote, reflist)
for citation in self.document.citations:
for label in citation['names']:
if label in self.document.citation_refs:
reflist = self.document.citation_refs[label]
self.resolve_references(citation, reflist) | [
"def _generate_biblio_ref_content(self, doc, out_buffer):\n out_buffer.write(\"\\nDocument contains the following Bibliography References:\\n\")\n\n for biblio_ref in doc.get_biblio_refs():\n out_buffer.write(\"- Reference to [{}]\\n\".format(biblio_ref.get_name()))",
"def result_nodes(\n self,\n document: \"docutils.nodes.document\",\n env: \"BuildEnvironment\",\n node: \"docutils.nodes.Element\",\n is_ref: bool,\n ) -> Tuple[List[\"docutils.nodes.Node\"], List[\"docutils.nodes.system_message\"]]:\n if not node.get(\"refdomain\"):\n assert node[\"reftype\"] == \"footcite\"\n node[\"refdomain\"] = \"footcite\"\n node[\"reftype\"] = \"p\"\n foot_domain = cast(\"BibtexFootDomain\", self.env.get_domain(\"footcite\"))\n keys = [key.strip() for key in self.target.split(\",\")] # type: ignore\n try:\n foot_bibliography = env.temp_data[\"bibtex_foot_bibliography\"]\n except KeyError:\n env.temp_data[\n \"bibtex_foot_bibliography\"\n ] = foot_bibliography = foot_domain.bibliography_header.deepcopy()\n foot_old_refs = env.temp_data.setdefault(\"bibtex_foot_old_refs\", set())\n foot_new_refs = env.temp_data.setdefault(\"bibtex_foot_new_refs\", set())\n style = find_plugin(\n \"pybtex.style.formatting\", self.config.bibtex_default_style\n )()\n references = []\n domain = cast(\"BibtexDomain\", self.env.get_domain(\"cite\"))\n # count only incremented at directive, see foot_directives run method\n footbibliography_count = env.temp_data.setdefault(\n \"bibtex_footbibliography_count\", 0\n )\n footcite_names = env.temp_data.setdefault(\"bibtex_footcite_names\", {})\n for key in keys:\n entry = domain.bibdata.data.entries.get(key)\n if entry is not None:\n formatted_entry = style.format_entry(label=\"\", entry=entry)\n if key not in (foot_old_refs | foot_new_refs):\n footnote = docutils.nodes.footnote(auto=1)\n # no automatic ids for footnotes: force non-empty template\n template: str = (\n env.app.config.bibtex_footcite_id\n if env.app.config.bibtex_footcite_id\n else \"footcite-{key}\"\n )\n raw_id = template.format(\n footbibliography_count=footbibliography_count + 1, key=entry.key\n )\n # format name with make_id for consistency with cite role\n name = make_id(raw_id)\n footnote[\"names\"] += [name]\n footcite_names[entry.key] = name\n footnote += domain.backend.paragraph(formatted_entry)\n document.note_autofootnote(footnote)\n document.note_explicit_target(footnote, footnote)\n node_text_transform(footnote)\n foot_bibliography += footnote\n foot_new_refs.add(key)\n references.append(\n (\n entry,\n formatted_entry,\n FootReferenceInfo(\n key=entry.key,\n refname=footcite_names[entry.key],\n document=document,\n ),\n )\n )\n else:\n logger.warning(\n 'could not find bibtex key \"%s\"' % key,\n location=(env.docname, self.lineno),\n type=\"bibtex\",\n subtype=\"key_not_found\",\n )\n ref_nodes = format_references(\n foot_domain.reference_style, node[\"reftype\"], references\n ).render(domain.backend)\n return ref_nodes, []",
"def add_pdf_references(text, document):\n document.get_pdf_url()\n\n def replace_docs(_):\n return '<a href=%s>(ver documento original)</a>' % document.get_pdf_url()\n\n text = re.sub('\\(ver documento original\\)', replace_docs, text)\n\n return text",
"def _resolve_references(self):\n self._log_msg(\"Processing inline citations\", level=3)\n for paragraph in self.parsed_xml.findall('/body/sec/p'):\n self._process_node_for_references(paragraph)",
"def test_anchorRef(self):\r\n listing = Element('a')\r\n listing.setAttribute('href', 'http://example.com/foo')\r\n self.spitter.visitNode(listing)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\footnote{http://example.com/foo}\")",
"def show_refs(index):\n indent = \" : \"\n for ref, defn in index.links:\n print(format_ref(ref))\n if defn:\n print(indent, defn.format())\n for loc in index.locs[defn.id]:\n print(indent, format_def_with_location(defn, loc.location))\n else:\n print(indent, \"None\")\n continue",
"def process_faqrefs(app, doctree):\n process_blocrefs_generic(\n app, doctree, bloc_name=\"faqref\", class_node=faqref_node)",
"def make_pdf_link(self):\n return",
"def references_to_markdown(references):\n\n pybtex_style = find_plugin('pybtex.style.formatting', 'plain')()\n pybtex_md_backend = find_plugin('pybtex.backends', 'markdown')\n pybtex_parser = Parser()\n\n # hack to not print labels (may remove this later)\n def write_entry(self, key, label, text):\n self.output(u'%s \\n' % text)\n pybtex_md_backend.write_entry = write_entry\n pybtex_md_backend = pybtex_md_backend()\n\n data = pybtex_parser.parse_stream(StringIO(references))\n data_formatted = pybtex_style.format_entries(data.entries.itervalues())\n output = StringIO()\n pybtex_md_backend.write_to_stream(data_formatted, output)\n\n # add blockquote style\n references_md = '> {}'.format(output.getvalue())\n references_md.replace('\\n', '\\n> ')\n\n return references_md",
"def resolve_one_reference(key, val, fmt, meta):\n\n if key == \"Link\":\n text = stringify(val[1])\n target = val[2][0]\n m = re.match(r\"#(.+)$\", target)\n if m:\n # pandoc automatically makes labels for headings.\n label = m.group(1).lower()\n label = re.sub(r\"[^\\w-]+\", \"\", label) # Strip HTML entities\n text = re.sub(r\"_\", r\"\\_\", text) # Escape underscores in display text\n return RawInline(\"tex\", rf\"\\hyperref[{label}]{{{text}}}\")\n\n # Other elements will be returned unchanged.",
"def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):\n # TODO Deal with xref so that they keep the proper path. Atm it'll just strip the path and leave only the id\n file_to_id_map = info['file_to_id_map']\n current_dir = cwd or os.path.dirname(src_file)\n cleaned_content = remove_conditional_content(content, info, tag=tag)\n links = LINKS_RE.finditer(cleaned_content)\n\n for link in links:\n link_text = link.group(0)\n link_file = link.group(1)\n link_anchor = link.group(2)\n link_title = link.group(3)\n\n if link_file is not None:\n fixed_link_file = link_file.replace(\".html\", \".adoc\")\n fixed_link_file_abs = os.path.abspath(os.path.join(current_dir, fixed_link_file))\n if fixed_link_file_abs in file_to_id_map:\n if fixed_link_file_abs.startswith(book_dir + os.sep) or fixed_link_file_abs == src_file:\n # We are dealing with a cross reference within the same book here\n if link_anchor is None:\n # Cross reference to the top of a topic, without an id being specified\n link_anchor = \"#\" + file_to_id_map[fixed_link_file_abs]\n\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n else:\n # We are dealing with a cross reference to another book here\n external_link = EXTERNAL_LINK_RE.search(link_file)\n book_dir_name = external_link.group(1)\n\n # Find the book name\n book_name = book_dir_name\n for book in info['data']:\n if check_node_distro_matches(book, info['distro']) and book['Dir'] == book_dir_name:\n book_name = book['Name']\n break\n\n fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)\n\n if link_anchor is None:\n fixed_link = \"link:\" + fixed_link_file + \"#\" + file_to_id_map[fixed_link_file_abs] + link_title\n else:\n fixed_link = \"link:\" + fixed_link_file + link_anchor + link_title\n else:\n # Cross reference or link that isn't in the docs suite\n fixed_link = link_text\n if EXTERNAL_LINK_RE.search(link_file) is not None:\n rel_src_file = src_file.replace(os.path.dirname(book_dir) + \"/\", \"\")\n has_errors = True\n log.error(\"ERROR (%s): \\\"%s\\\" appears to try to reference a file not included in the \\\"%s\\\" distro\", rel_src_file, link_text.replace(\"\\n\", \"\"), info['distro'])\n sys.exit(-1)\n else:\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n\n content = content.replace(link_text, fixed_link)\n\n return content",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def format_link(self, ind):",
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def _process_biblio_ref(self, item):\n a_ref = nodes.Reference()\n logger = logging.getLogger(self.__class__.__name__)\n\n a_ref.set_name(item[\"name\"])\n a_ref.set_title(item.get(\"title\", \"\"))\n a_ref.set_organization(item.get(\"organization\", \"\"))\n a_ref.set_category(item.get(\"category\", \"\"))\n a_ref.set_date(item.get(\"date\", \"\"))\n\n logger.debug(\"Processing Bibliography Reference: \\\"{}\\\"\".format(a_ref.get_name()))\n\n if \"hyperlink\" in item:\n if isinstance(item[\"hyperlink\"], list):\n for hyperlink_item in item[\"hyperlink\"]:\n a_ref.add_hyperlink(hyperlink_item)\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(hyperlink_item))\n else:\n a_ref.add_hyperlink(item[\"hyperlink\"])\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(item[\"hyperlink\"]))\n\n return a_ref",
"def reference_section(self):\n reference = self.study.get('reference', \"\")\n\n # Allow \"0001111\", \"PMID:0001111\", \"PMID: 0001111\"\n pmid = self.study.get('PMID', \"\").split(':')[-1].strip()\n\n if not (reference or pmid):\n return \"\"\n\n if pmid:\n pmid = 'http://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(pmid)\n return dedent(\n \"\"\"\n Reference\n ---------\n {0}\n\n {1}\n \"\"\").format(reference, pmid)",
"def process_link(\n self,\n env: BuildEnvironment,\n refnode: Element,\n has_explicit_target: bool,\n title: str,\n target: str,\n ) -> Tuple[str, str]:\n\n if has_explicit_target:\n note_path = target\n else:\n filename = ws_re.sub(\"_\", target).casefold()\n note_path = str(Path(\"/documents\", filename))\n\n return title, note_path",
"def _do_links(self, text):\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\n\n # `anchor_allowed_pos` is used to support img links inside\n # anchors, but not anchors inside anchors. An anchor's start\n # pos must be `>= anchor_allowed_pos`.\n anchor_allowed_pos = 0\n\n curr_pos = 0\n while True: # Handle the next link.\n # The next '[' is the start of:\n # - an inline anchor: [text](url \"title\")\n # - a reference anchor: [text][id]\n # - an inline img: \n # - a reference img: ![text][id]\n # - a footnote ref: [^id]\n # (Only if 'footnotes' extra enabled)\n # - a footnote defn: [^id]: ...\n # (Only if 'footnotes' extra enabled) These have already\n # been stripped in _strip_footnote_definitions() so no\n # need to watch for them.\n # - a link definition: [id]: url \"title\"\n # These have already been stripped in\n # _strip_link_definitions() so no need to watch for them.\n # - not markup: [...anything else...\n try:\n try:\n start_idx = text.index('[[', curr_pos)\n is_img=False\n except:\n start_idx = text.index('{{', curr_pos)\n is_img=True\n except ValueError:\n break\n\n text_length = len(text)\n\n # Find the matching closing ']]' or '}}'.\n bracket_depth = 0\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\n text_length)):\n ch = text[p:p+2]\n if ch in [']]', '}}']:\n bracket_depth -= 1\n if bracket_depth < 0:\n break\n elif ch in ['[[', '{{']:\n bracket_depth += 1\n else:\n # Closing bracket not found within sentinel length.\n # This isn't markup.\n curr_pos = start_idx + 1\n continue\n link_text = text[start_idx+2:p]\n\n # Now determine what this is by the remainder.\n p += 1\n if p == text_length:\n return text\n\n if is_img:\n\n ########## syntax: link ##############\n result_head = '![%s]' % link_text\n result = '%s(%s)' % (result_head, link_text)\n text = text[:start_idx] + result + text[p+1:]\n ########## syntax: link END ##############\n\n elif start_idx >= anchor_allowed_pos:\n\n if '|' in link_text:\n link_re=re.compile('(.+)\\\\|(.+)',re.X | re.M)\n else:\n link_re=re.compile('(:|\\\\+|\\\\b)(.+)',re.X | re.M)\n\n m1=link_re.match(link_text)\n if m1 == None:\n url = \"\"\n link = link_text\n else:\n url,link=m1.groups()\n\n ########## syntax: link ##############\n result_head = '[%s]' % link\n url=parseLink(link, url, self.file)\n result = '%s(%s)' % (result_head, url)\n text = text[:start_idx] + result + text[p+1:]\n ########## syntax: link END ##############\n else:\n # Anchor not allowed here.\n curr_pos = start_idx + 1\n continue\n\n\n return text",
"def _replace_references(self, references):\n el_references = self.get_root().xpath('./person/ref')\n for el_ref in el_references:\n el_ref.getparent().remove(el_ref)\n for url, text in references:\n self.add_reference(uri=url, text=text)",
"def update_links():\n hn_soup = get_hn_soup()\n hn_links = get_hn_links(hn_soup)\n store_links(hn_links)\n print ''\n for i in range(len(hn_links)):\n j = i+1\n print_link(j, hn_links[i][0], hn_links[i][1])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up a lexical analyzer for `code` in `language`. | def __init__(self, code, language, tokennames='short'):
self.code = code
self.language = language
self.tokennames = tokennames
self.lexer = None
# get lexical analyzer for `language`:
if language in ('', 'text') or tokennames == 'none':
return
if not with_pygments:
raise LexerError('Cannot analyze code. '
'Pygments package not found.')
try:
self.lexer = get_lexer_by_name(self.language)
except pygments.util.ClassNotFound:
raise LexerError('Cannot analyze code. '
'No Pygments lexer found for "%s".' % language) | [
"def get_lexems(code):\n\n g.clear()\n lexer()\n g.lexer.input(code.lower())\n result = list(g.lexer)\n return g.error_list, result",
"def __init__(self, lang='sl', type='standard'):\n if lang not in ['sl', 'hr', 'sr', 'bg', 'mk']:\n raise Exception(\"Reldi tokenizer is currently only allowed in Slovene, Croatian and Serbian pipelines.\")\n\n check_reldi()\n from classla.submodules.reldi_tokeniser import tokeniser\n self.nlp = tokeniser\n self.lang = lang\n self.type = type",
"def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n return [chr(ord(start) + i) + str(n)\n for i in range(0, 3)\n for n in range(1, 10)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n (FOR_ALL_SYMBOL, 2),\n (EXISTS_SYMBOL, 2),\n (RELATION_SYMBOL.format(1), 2), # unary-relation\n (RELATION_SYMBOL.format(2), 3), # binary-relation\n ]),\n predicates=make_symbols('p'),\n constants=make_symbols('a'),\n variables=make_symbols('x'),\n )",
"def get_lexer(self, language: str) -> Any:\n import pygments.lexers as lexers # type: ignore\n trace = 'coloring' in g.app.debug\n try:\n # #1520: always define lexer_language.\n lexer_name = 'python3' if language == 'python' else language\n lexer = lexers.get_lexer_by_name(lexer_name)\n except Exception:\n # One of the lexer's will not exist.\n # pylint: disable=no-member\n if trace and language not in self.unknown_languages:\n self.unknown_languages.append(language)\n g.trace(f\"\\nno pygments lexer for {language!r}. Using python 3 lexer\\n\")\n lexer = lexers.Python3Lexer()\n return lexer",
"def changeLexer(self, language: str) -> None:\n c = self.c\n wrapper = c.frame.body.wrapper\n w = wrapper.widget # A Qsci.QsciSintilla object.\n self.lexer = self.lexersDict.get(language, self.nullLexer) # type:ignore\n w.setLexer(self.lexer)",
"def set_lexer(self) -> Any:\n if self.language == 'patch':\n self.language = 'diff'\n key = f\"{self.language}:{id(self)}\"\n lexer = self.lexers_dict.get(key)\n if not lexer:\n lexer = self.get_lexer(self.language)\n lexer = self.patch_lexer(self.language, lexer)\n self.lexers_dict[key] = lexer\n return lexer",
"def lex(code, lexer):\n try:\n return lexer.get_tokens(code)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.lexer import RegexLexer\n if isinstance(lexer, type) and issubclass(lexer, RegexLexer):\n raise TypeError('lex() argument must be a lexer instance, '\n 'not a class')\n raise",
"def configure_lexer(self) -> None:\n # c = self.leo_c\n lexer = self\n # To do: use c.config setting.\n # pylint: disable=no-member\n font = QtGui.QFont(\"DejaVu Sans Mono\", 14)\n lexer.setFont(font)",
"def __init__(self, language, parent=None):\n super(PreferencesLexer, self).__init__(parent)\n \n # These default font families are taken from QScintilla\n if Globals.isWindowsPlatform():\n self.__defaultFontFamily = \"Courier New\"\n elif Globals.isMacPlatform():\n self.__defaultFontFamily = \"Courier\"\n else:\n self.__defaultFontFamily = \"Bitstream Vera Sans Mono\"\n \n # instantiate a lexer object for the given language\n import QScintilla.Lexers\n self.__lex = QScintilla.Lexers.getLexer(language)\n if self.__lex is None:\n raise PreferencesLexerLanguageError(language)\n \n # read the last stored values from preferences file\n self.__lex.readSettings(Preferences.Prefs.settings, \"Scintilla\")\n if self.__lex.hasSubstyles():\n self.__lex.loadSubstyles()",
"def analyze(app):\n # Normalize config values:\n source_paths = [app.config.js_source_path] if isinstance(app.config.js_source_path, str) else app.config.js_source_path\n abs_source_paths = [normpath(join(app.confdir, path)) for path in source_paths]\n root_for_relative_paths = root_or_fallback(\n normpath(join(app.confdir, app.config.root_for_relative_js_paths)) if app.config.root_for_relative_js_paths else None,\n abs_source_paths)\n\n # Pick analyzer:\n try:\n analyzer = {'javascript': JsAnalyzer,\n 'typescript': TsAnalyzer}[app.config.js_language]\n except KeyError:\n raise SphinxError('Unsupported value of js_language in config: %s' % app.config.js_language)\n\n # Analyze source code:\n app._sphinxjs_analyzer = analyzer.from_disk(abs_source_paths,\n app,\n root_for_relative_paths)",
"def create_test_language(name: str, code: str) -> Language:\r\n lang = Language(name=name, code=code)\r\n lang.full_clean()\r\n lang.save()\r\n return lang",
"def __init__(self, language):\n if language.lower() in self.languages_rev:\n self._language = language.lower()\n elif language.upper() in self.languages:\n self._language = self.languages[language.upper()]\n else:\n raise ValueError(\"No such language: %s\" % language)",
"def __init__(self, lexeme, token_type, line_num):\n self.type = token_type\n self.lexeme = lexeme\n self.lineNum = line_num\n self.lexicalError = None",
"def create_lexer(self):\n raise NotImplementedError()",
"def prepare_scan():\n\n # Start a new grammar.\n grammar = LexicalGrammar()\n\n # Regular context.\n query = grammar.add_rule('query')\n\n # Whitespace characters and comments (discarded).\n query.add_token(r'''\n SPACE: [\\s]+ | [#] [^\\0\\r\\n]*\n ''', is_junk=True)\n\n # A sequence of characters encloses in single quotes.\n query.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n query.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A number in exponential notation.\n query.add_token(r'''\n FLOAT: ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+\n ''')\n\n # A number with a decimal point.\n query.add_token(r'''\n DECIMAL:\n [0-9]+ [.] [0-9]* | [.] [0-9]+\n ''')\n\n # An unsigned integer number.\n query.add_token(r'''\n INTEGER:\n [0-9]+\n ''')\n\n # A sequence of alphanumeric characters (not starting with a digit).\n query.add_token(r'''\n NAME: [\\w]+\n ''')\n\n # Operators and punctuation characters. The token code coincides\n # with the token value.\n query.add_token(r'''\n SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] |\n [=][=] | [=] | [!][=][=] | [!][=] |\n [\\^] | [?] | [-][>] | [@] | [:][=] |\n [!] | [&] | [|] | [+] | [-] | [*] | [/] |\n [(] | [)] | [{] | [}] | [.] | [,] | [:] | [;] | [$]\n ''', is_symbol=True)\n\n # The `[` character starts an identity constructor.\n query.add_token(r'''\n LBRACKET:\n [\\[]\n ''', is_symbol=True, push='identity')\n\n # An unmatched `]`.\n query.add_token(r'''\n BAD_RBRACKET:\n [\\]]\n ''', error=\"cannot find a matching '['\")\n\n # The input end.\n query.add_token(r'''\n END: $\n ''', is_symbol=True, pop=1)\n\n # Identity constructor context.\n identity = grammar.add_rule('identity')\n\n # Whitespace characters (discarded).\n identity.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # Start of a nested label group.\n identity.add_token(r'''\n LBRACKET:\n [\\[] | [(]\n ''', is_symbol=True, push='identity')\n\n # End of a label group or the identity constructor.\n identity.add_token(r'''\n RBRACKET:\n [\\]] | [)]\n ''', is_symbol=True, pop=1)\n\n # Label separator.\n identity.add_token(r'''\n SYMBOL: [.]\n ''', is_symbol=True)\n\n # Unquoted sequence of alphanumeric characters and dashes.\n identity.add_token(r'''\n LABEL: [\\w-]+\n ''')\n\n # A sequence of characters encloses in single quotes.\n identity.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n identity.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A reference indicator.\n identity.add_token(r'''\n REFERENCE:\n [$]\n ''', is_symbol=True, push='name')\n\n # Unexpected end of input.\n identity.add_token(r'''\n END: $\n ''', error=\"cannot find a matching ']'\")\n\n # A context for an identifier following the `$` indicator\n # in an identity constructor. 
We need a separate rule because\n # `%NAME` and `%LABEL` productions intersect.\n name = grammar.add_rule('name')\n\n # Whitespace characters (discarded).\n name.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # An integer number; not expected here, but ensures that the following\n # `%NAME` production does not start with a digit.\n name.add_token(r'''\n INTEGER:\n [0-9]+\n ''', pop=1)\n\n # A sequence of alphanumeric characters (not starting with a digit).\n name.add_token(r'''\n NAME: [\\w]+\n ''', pop=1)\n\n # Anything else.\n name.add_token(r'''\n OTHER: ()\n ''', is_junk=True, pop=1)\n\n # Add a `%DIRSIG` token in front of `+` and `-` direction indicators\n # to distinguish them from addition/subtraction operators.\n grammar.add_signal('''\n DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` )\n ''')\n\n # Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from\n # being recognized as a division operator.\n grammar.add_signal('''\n PIPESIG:\n `/` `:`\n ''')\n\n # Add `%LHSSIG` in front of a left-hand side of an assignment expression.\n grammar.add_signal('''\n LHSSIG: `$`? %NAME ( `.` `$`? %NAME )*\n ( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? `)` )?\n `:=`\n ''')\n\n # Generate and return the scanner.\n return grammar()",
"def set_interpreter(self, name):\n self.lang.kill()\n\n try:\n self.lang=langtypes[name]()\n \n except ExecutableNotFoundError as e:\n\n print(e)\n\n self.lang = DummyInterpreter()\n\n s = \"Changing interpreted lanaguage to {}\".format(repr(self.lang))\n print(\"\\n\" + \"=\"*len(s))\n print(s)\n print(\"\\n\" + \"=\"*len(s))\n\n self.lang.start()\n\n return",
"def prepare_context(grammar=None, lexer=None, lkt_file=None,\n warning_set=default_warning_set,\n symbol_canonicalizer=None, show_property_logging=False,\n types_from_lkt=False, lkt_semantic_checks=False,\n case_insensitive: bool = False,\n version: Optional[str] = None,\n build_date: Optional[str] = None,\n standalone: bool = False,\n property_exceptions: Set[str] = set()):\n\n # Have a clean build directory\n if P.exists('build'):\n shutil.rmtree('build')\n os.mkdir('build')\n\n # Try to emit code\n ctx = CompileCtx(lang_name='Foo', short_name='foo', lexer=lexer,\n grammar=grammar,\n symbol_canonicalizer=symbol_canonicalizer,\n show_property_logging=show_property_logging,\n lkt_file=lkt_file,\n types_from_lkt=types_from_lkt,\n lkt_semantic_checks=lkt_semantic_checks,\n case_insensitive=case_insensitive,\n version=version,\n build_date=build_date,\n standalone=standalone,\n property_exceptions=property_exceptions)\n ctx.warnings = warning_set\n ctx.pretty_print = pretty_print\n\n return ctx",
"def lex(program_str):\n def lex_helper(program_str, acc):\n if program_str == \"\":\n return acc\n\n token, length = match_keywords(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n token, length = match_variable(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n token, length = match_int(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n if program_str[0] == DOUBLE_QUOTE:\n token, length = match_string(program_str, 0)\n return lex_helper(program_str[length:], acc + [token])\n\n if program_str[0] == SPACE:\n return lex_helper(program_str[1:], acc)\n\n raise TOKENIZATION_ERROR(\"Could not consume characters\")\n\n return lex_helper(preprocess(program_str), [])",
"def language_mode_grammar(self, grammar_file):\n self.__rg__.json_loads(grammar_file)\n '''learn word2vec embedding'''\n w2v_folder = os.path.join(self.__lm_folder__, 'word2vec')\n self.__rg__.train_word2vec_model_by_prompt(15, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(30, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(50, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(15, 0, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(30, 0, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(50, 0, w2v_folder)\n\n if not os.path.exists(self.__lm_folder__):\n os.makedirs(self.__lm_folder__)\n\n '''learn ngram language models'''\n sample_folder = os.path.join(self.__lm_folder__, 'samples')\n if not os.path.exists(sample_folder):\n os.makedirs(sample_folder)\n ngram_lm_folder = os.path.join(self.__lm_folder__, 'ngrams')\n if not os.path.exists(ngram_lm_folder):\n os.makedirs(ngram_lm_folder)\n self.__rg__.train_response_lm(self.__ngram_count_file__, sample_folder, ngram_lm_folder, 5)\n '''learn LDA model'''\n lda_folder = os.path.join(self.__lm_folder__, 'lda')\n if not os.path.exists(lda_folder):\n os.makedirs(lda_folder)\n self.__rg__.train_lda_model(lda_folder, rare_threshold=2, topic_count=50)\n '''learn ngram language models of error sentences'''\n read_grammar.train_grammatical_error_lm(self.__ngram_count_file__, self.__ge_folder__, 5)",
"def __init__(self, language=\"en-GB\", lang_dir=None):\n lang_dirs = [\"/usr/share/pico/lang/\", _LANG_DIR]\n if lang_dir:\n lang_dirs.insert(0, lang_dir)\n\n self.__e = None\n for ldir in lang_dirs:\n try:\n self.__e = ctts.engine_create(language_dir=ldir, language=language)\n except RuntimeError as ex:\n pass # Try next directory to find language...\n if self.__e:\n break\n\n if self.__e is None:\n raise RuntimeError(\"Could not instantiate TTS engine with language \" + language)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
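The Pygments lookup in the constructor above can be exercised on its own; a minimal sketch of the same lookup-or-fail pattern, assuming the pygments package is installed (the wrapper name is made up):

from pygments.lexers import get_lexer_by_name
import pygments.util

def find_lexer(language):
    # Return a Pygments lexer for `language`, or None if no lexer is registered for it.
    try:
        return get_lexer_by_name(language)
    except pygments.util.ClassNotFound:
        return None

# find_lexer('python') returns a PythonLexer instance; find_lexer('no-such-language') returns None.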
Merge subsequent tokens of the same token type. Also strip the final newline (added by pygments). | def merge(self, tokens):
tokens = iter(tokens)
        (lasttype, lastval) = next(tokens)
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value)
if lastval.endswith('\n'):
lastval = lastval[:-1]
if lastval:
yield(lasttype, lastval) | [
"def _MergeOrAddToken(self, text, token_type):\n if not text:\n return\n if (not self._tokens or\n self._tokens[-1][self.TOKEN_TYPE_INDEX] != token_type):\n self._tokens.append((token_type, text))\n elif self._tokens[-1][self.TOKEN_TYPE_INDEX] == Token.Markdown.Section:\n # A section header with no content.\n prv_text = self._tokens[-1][self.TOKEN_TEXT_INDEX]\n prv_indent = re.match('( *)', prv_text).group(1)\n new_indent = re.match('( *)', text).group(1)\n if prv_indent == new_indent:\n # Same indentation => discard the previous empty section.\n self._tokens[-1] = (token_type, text)\n else:\n # Insert newline to separate previous header from the new one.\n self._NewLine()\n self._tokens.append((token_type, text))\n else:\n self._tokens[-1] = (token_type,\n self._tokens[-1][self.TOKEN_TEXT_INDEX] + text)",
"def recombine_tokens(tokens):\n result = \"\"\n for type, value in tokens:\n if type == Token.Whitespace:\n result += \" \"\n if type == Token.NonbreakableWhitespace:\n result += \"~\"\n elif type == Token.Text:\n result += value\n elif type == Token.EscapedText:\n result += \"\\\\{}\".format(value)\n elif type == Token.Command:\n result += \"\\\\{}\".format(value)\n elif type == Token.InlineFormulaDelimiter:\n result += \"$\"\n elif type == Token.DisplayFormulaDelimiter:\n result += \"$$\"\n elif type == Token.CurlyBraketOpen:\n result += \"{\"\n elif type == Token.CurlyBraketClose:\n result += \"}\"\n elif type == Token.SquareBraketOpen:\n result += \"[\"\n elif type == Token.SquareBraketClose:\n result += \"]\"\n elif type == Token.DoubleNewLine:\n result += \"\\n\\n\"\n return result",
"def _merge_entities_with_whitespace_between(\n self,\n text: str,\n analyzer_results: List[RecognizerResult]\n ) -> List[RecognizerResult]:\n merged_results = []\n prev_result = None\n for result in analyzer_results:\n if prev_result is not None:\n if prev_result.entity_type == result.entity_type:\n if re.search(r'^( )+$', text[prev_result.end:result.start]):\n merged_results.remove(prev_result)\n result.start = prev_result.start\n merged_results.append(result)\n prev_result = result\n return merged_results",
"def tokenize(self):",
"def merge_text_nodes(self):\n ...",
"def _annotate_tokens(self, tokens):\n # Make a preliminary pass through the document, marking likely\n # sentence breaks, abbreviations, and ellipsis tokens.\n tokens = self._annotate_first_pass(tokens)\n\n # Make a second pass through the document, using token context\n # information to change our preliminary decisions about where\n # sentence breaks, abbreviations, and ellipsis occurs.\n tokens = self._annotate_second_pass(tokens)\n\n return tokens",
"def start_new_line(self) -> None:\n if self._unmatched_accumulator:\n assert self.unmatched_identifier is not None\n\n self.tokens[-1].append(\n Token(\n identifier=self.unmatched_identifier,\n content=''.join(self._unmatched_accumulator),\n position=self._unmatched_pos,\n lineno=self._unmatched_lineno))\n\n self._unmatched_accumulator = []\n\n self.tokens.append([])",
"def end_token(self) -> str:",
"def _tokenize_source_with_nl(source):\n if source[-1:] != \"\\n\":\n source += \"\\n\"\n nl_added = True\n else:\n nl_added = False\n return _tokenize_source(source), nl_added",
"def fix_empty_line(source, tokens):\n nb = 0\n for char in reversed(source):\n if char in (\" \", \"\\t\"):\n nb += 1\n else:\n break\n tokens[-1].string = source[-nb:]",
"def _get_tokenized_rep(self, field):\n return \" \".join([x.text for x in self._tokenizer.tokenize(field.strip())])",
"def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)",
"def right_truncations (tokens):\n while tokens:\n yield tokens\n tokens = tokens [1 :]",
"def untokenize(tokens) :\n if len(tokens)>0 and tokens and hasattr(tokens[0], '__iter__') :\n return [untokenize(t) for t in tokens]\n return \"\".join([\" \"+i if not i.startswith(\"'\") and i not in punctuation else i for i in tokens]).strip()",
"def join(self, tokens):\n return \" \".join(tokens)",
"def __rehydrate_blank_line(\n context: MarkdownTransformContext,\n current_token: MarkdownToken,\n previous_token: Optional[MarkdownToken],\n ) -> str:\n # if (\n # self.context.block_stack\n # and self.context.block_stack[-1].is_fenced_code_block\n # and (previous_token and previous_token.is_text)\n # ):\n # extra_newline_after_text_token = ParserHelper.newline_character\n # else:\n _ = previous_token, context\n extra_newline_after_text_token = \"\"\n\n current_blank_token = cast(BlankLineMarkdownToken, current_token)\n return f\"{extra_newline_after_text_token}{current_blank_token.extracted_whitespace}{ParserHelper.newline_character}\"",
"def emit(self, typ):\n # Check if we have some text in this chunk:\n if self._chunk_index > self._chunk_start:\n text = self._chunk[2][self._chunk_start:self._chunk_index]\n self.current_text.append(text)\n # Grab all pieces of text from start to here:\n val = \"\".join(self.current_text)\n location = self._start_loc\n assert location\n token = Token(typ, val, location)\n self.token_buffer.append(token)\n self._mark_start()",
"def _encode_and_add_eos(line, subtokenizer):\n return [tokenizer.CLS_ID] + subtokenizer.encode(line)",
"def handle_newline(self, token_type: int) -> None:\n assert self.processor is not None\n if token_type == tokenize.NEWLINE:\n self.run_logical_checks()\n self.processor.reset_blank_before()\n elif len(self.processor.tokens) == 1:\n # The physical line contains only this token.\n self.processor.visited_new_blank_line()\n self.processor.delete_first_token()\n else:\n self.run_logical_checks()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
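The run-length grouping performed by merge above can also be expressed with itertools.groupby from the standard library; a minimal sketch over made-up (ttype, value) pairs, leaving out the final-newline stripping:

from itertools import groupby

def merge_adjacent(tokens):
    # Join the values of consecutive pairs that share the same token type.
    for ttype, group in groupby(tokens, key=lambda pair: pair[0]):
        yield ttype, ''.join(value for _, value in group)

# list(merge_adjacent([('a', 'x'), ('a', 'y'), ('b', 'z')])) == [('a', 'xy'), ('b', 'z')]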
Parse self.code and yield "classified" tokens. | def __iter__(self):
if self.lexer is None:
yield ([], self.code)
return
tokens = pygments.lex(self.code, self.lexer)
for tokentype, value in self.merge(tokens):
if self.tokennames == 'long': # long CSS class args
classes = str(tokentype).lower().split('.')
else: # short CSS class args
classes = [_get_ttype_class(tokentype)]
classes = [cls for cls in classes if cls not in unstyled_tokens]
yield (classes, value) | [
"def itercodelines(self):\r\n codeline = CodeLine(0)\r\n for token in self.itertokens():\r\n codeline.append(token)\r\n if codeline.complete:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline\r\n codeline = CodeLine(codeline.end_row + 1)\r\n if codeline.string:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline",
"def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")",
"def parse(self, tokens):\n self.logger.debug(\"Parsing some nice C code!\")\n self.init_lexer(tokens)\n self.typedefs = set()\n cu = self.parse_translation_unit()\n self.logger.info(\"Parsing finished\")\n return cu",
"def tokenize(code: str):\n\n tk = _Tokenizer(code)\n tk.tokenize()\n\n return tk.tokens",
"def __handle_start_fenced_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n token_parts = [output_html]\n if (output_html.endswith(\"</ol>\") or output_html.endswith(\"</ul>\")) or (\n output_html and output_html[-1] != ParserHelper.newline_character\n ):\n token_parts.append(ParserHelper.newline_character)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n True,\n )\n token_parts.append(\"<pre><code\")\n if next_token.extracted_text:\n token_parts.extend([' class=\"language-', next_token.extracted_text, '\"'])\n token_parts.append(\">\")\n return \"\".join(token_parts)",
"def tokens(self):\n for t in self._ast.tokens:\n yield t",
"def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)",
"def _parse(self):\n logger.debug('Parsing file: %s', self.filename)\n self._context = []\n self._last_popped = None\n self.statement_pre_read = None\n self.sw = None\n while self.can_read():\n token = self.next_token()\n if token is None:\n continue\n if token.model is None:\n continue\n if self.find_context_top(cond=lambda x: x != token and x.isinstance(CodeBlock)) is None:\n # this token model has no parents, we must save it separately\n self._save_model(token.model)\n self.parsed = True",
"def tokenize_python(code):\n codeReader = BytesIO(code.encode('utf-8')).readline\n raw_tokens = tokenize.tokenize(codeReader)\n tokens = []\n last = None\n for token in raw_tokens:\n if token.type == 62: continue # this is an encoding token. Skip it.\n if last:\n # the python tokenizer doesn't always include whitespace\n # so when we detect whitespace is missing, we put it back in.\n # Uses the \"last\" token and checks for space between the end\n # and the start of the current token\n same_line = last.end[0] == token.start[0]\n same_pos = last.end[1] == token.start[1]\n is_start_of_line = token.start[1] == 0\n if not same_line and not is_start_of_line:\n whitespace = token.line[:token.start[1]]\n add_space(tokens, whitespace)\n elif same_line and not same_pos:\n whitespace = token.line[last.end[1]:token.start[1]]\n add_space(tokens, whitespace)\n tokens.append({\n 'text':token.string,\n 'type':get_token_type(token)\n })\n last = token\n return tokens",
"def get_all_tokens(self):\n word = \"\"\n begin_string = False\n i = 0\n\n while i < len(self.code):\n char = self.code[i]\n # Ignore white space\n if char in [' ', '\\t', '\\n'] and begin_string == False: \n i = i + 1 \n word = \"\" \n continue\n \n word = word + char\n if word in KEYWORDS and self.code[i + 1] in SYMBOLS + SKIPABLE:\n self.tokens.append(Token(\"keyword\", word))\n word = \"\"\n elif char == '\"' or begin_string: # Check for string\n if char == '\"':\n begin_string = not begin_string\n if not begin_string:\n self.tokens.append(Token(\"stringConstant\", word[1:-1]))\n word = \"\"\n elif word in SYMBOLS:\n self.tokens.append(Token(\"symbol\", word))\n word = \"\"\n elif self.code[i + 1] in SKIPABLE + SYMBOLS:\n if word.isdigit():\n self.tokens.append(Token(\"integerConstant\", word))\n else:\n self.tokens.append(Token(\"identifier\", word))\n word = \"\"\n i = i + 1",
"def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }",
"def get_tokens(code: str) -> List[Tuple[str, Union[str, int, float]]]:\n tokens.clear()\n parser = Lark(tokens_grammar, parser=\"lalr\", transformer=TestTransformer())\n try:\n parser.parse(code)\n except:\n tokens.append(('UNDEFINED_TOKEN',))\n return tokens",
"def tokenize(self):",
"def __handle_end_fenced_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n fenced_token = transform_state.actual_token_index - 1\n while not transform_state.actual_tokens[fenced_token].is_fenced_code_block:\n fenced_token -= 1\n\n # TODO can we store this in the begin so we don't have to compute it again?\n inner_tag_parts = [\"<code\"]\n if transform_state.actual_tokens[fenced_token].extracted_text:\n inner_tag_parts.extend(\n [\n ' class=\"language-',\n transform_state.actual_tokens[fenced_token].extracted_text,\n '\"',\n ]\n )\n inner_tag_parts.append(\">\")\n inner_tag = \"\".join(inner_tag_parts)\n\n POGGER.debug(f\"inner_tag>>:{inner_tag}:<<\")\n POGGER.debug(f\"output_html>>:{output_html}:<<\")\n POGGER.debug(\n f\"last_token>>:{transform_state.actual_tokens[transform_state.actual_token_index - 1]}:<<\"\n )\n\n token_parts = [output_html]\n if (\n not output_html.endswith(inner_tag)\n and output_html[-1] != ParserHelper.newline_character\n ):\n token_parts.append(ParserHelper.newline_character)\n POGGER.debug(\"#1\")\n elif (\n output_html[-1] == ParserHelper.newline_character\n and transform_state.last_token.is_text\n ):\n POGGER.debug(\"#2:$\", transform_state.last_token)\n if not (\n next_token.was_forced\n and transform_state.last_token.token_text.endswith(\"\\n\\x03\")\n ):\n token_parts.append(ParserHelper.newline_character)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n False,\n False,\n )\n token_parts.extend([\"</code></pre>\", ParserHelper.newline_character])\n return \"\".join(token_parts)",
"def parse_code(code: List[str]) -> List[Tuple[str, int]]:\n return [parse_line(line) for line in code]",
"def lex(code, lexer):\n try:\n return lexer.get_tokens(code)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.lexer import RegexLexer\n if isinstance(lexer, type) and issubclass(lexer, RegexLexer):\n raise TypeError('lex() argument must be a lexer instance, '\n 'not a class')\n raise",
"def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok",
"def parse(code, path=None):\n\n\tparser = Parser(code, path)\n\tast = parser.run()\n\treturn ast",
"def __handle_inline_code_span_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [\n output_html,\n \"<code>\",\n ParserHelper.resolve_all_from_text(next_token.span_text),\n \"</code>\",\n ]\n )",
"def parse(self, token_list):\n\n classname = \"\"\n tlist = None\n\n bracket_stack = []\n\n # consume everything till the first TokenSymbol\n p1 = 0\n p2 = 0\n\n while p1 < len(token_list) and not isinstance(token_list[p1], t.TokenSymbol):\n p1 += 1\n\n if not isinstance(token_list[p1+1], t.TokenFunctionBracket):\n return None\n\n if isinstance(token_list[p1+2], t.TokenString):\n classname = token_list[p1+2].value\n\n if not isinstance(token_list[p1+3], t.TokenComma):\n return None\n\n if not isinstance(token_list[p1+4], t.TokenBlockBracket):\n return None\n\n p1 = p1 + 4\n p2 = p1 + 1\n bracket_stack.append(token_list[p1].value)\n\n\n print 'entering loop', bracket_stack, p2\n\n while p2 < len(token_list) and bracket_stack:\n tok = token_list[p2]\n if isinstance(tok, t.TokenBlockBracket) and tok.value == '{':\n bracket_stack.append('{')\n elif isinstance(tok, t.TokenBlockBracket) and tok.value == '}':\n bracket_stack.pop()\n\n p2 += 1\n\n tlist = token_list[p1:p2]\n\n return Eclass(classname, tlist)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return unicode representation of `self.data`. Try ``unicode(self.data)``, catch `UnicodeError`, and if `self.data` is an Exception instance, work around its broken unicode conversion by rebuilding the message from the decoded exception arguments; otherwise decode with `self.encoding` and `self.decoding_errors`. | def __unicode__(self):
try:
u = unicode(self.data)
if isinstance(self.data, EnvironmentError):
u = u.replace(": u'", ": '") # normalize filename quoting
return u
except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors
if isinstance(self.data, EnvironmentError):
return u"[Errno %s] %s: '%s'" % (self.data.errno,
SafeString(self.data.strerror, self.encoding,
self.decoding_errors),
SafeString(self.data.filename, self.encoding,
self.decoding_errors))
if isinstance(self.data, Exception):
args = [unicode(SafeString(arg, self.encoding,
decoding_errors=self.decoding_errors))
for arg in self.data.args]
return u', '.join(args)
if isinstance(error, UnicodeDecodeError):
return unicode(self.data, self.encoding, self.decoding_errors)
raise | [
"def get_unicode(self,data, force=False):\n if isinstance(data, binary_type):\n return data.decode('utf-8')\n elif data is None:\n return ''\n elif force:\n return str(data)\n else:\n return data",
"def __unicode__(self):\n s = StringIO()\n self.WriteXML(s, EscapeCharData, root=True)\n return unicode(s.getvalue())",
"def decode(self, data): # pragma: no cover\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')",
"def __str__(self):\n\n if util.is_text_mime_type(self._mime_type):\n return str(self._data, encoding=self._text_encoding)\n\n else:\n raise TypeError(\"cannot convert non-text resource to str\")",
"def safe_unicode(self, obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)",
"def __bytes__(self):\n\n return bytes(self._data)",
"def __unicode__(self):\n\t\treturn unicode(self.asPyDict())",
"def serialize(self, data) -> str:\n pass",
"def render(self, data):\n separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS\n\n try:\n render = json.dumps(\n data, ensure_ascii=self.ensure_ascii, separators=separators\n )\n\n # Unicode symbols \\u2028 and \\u2029 are invisible in JSON and\n # make output are invalid. To avoid this situations, necessary\n # replace this symbols.\n # For more information read this article: http://goo.gl/ImC89E\n for wrong_symbol, expected in WRONG_UNICODE_SYMBOLS:\n render = render.replace(wrong_symbol, expected)\n\n render = bytes(render.encode('utf-8'))\n except Exception as exc:\n raise SerializerError(exc)\n return render",
"def to_unicode(self, content, mimetype=None, charset=None):\n if not charset:\n charset = self.get_charset(content, mimetype)\n return to_unicode(content, charset)",
"def _format_data(data):\n if data is None:\n data = ''\n elif not isinstance(data, string_types):\n data = json_dumps_or_string(data)\n return data",
"def attributeEscapingDoneOutside(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n return data",
"def __unicode__(self):\n if self._element.childNodes:\n rc = \"\"\n for node in self._element.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc = rc + node.data\n return rc\n return ''",
"def body_as_unicode(self):\n possible_encodings = (self._encoding, self.headers_encoding(), \\\n self._body_declared_encoding())\n dammit = UnicodeDammit(self.body, possible_encodings)\n self._body_inferred_encoding = dammit.originalEncoding\n if self._body_inferred_encoding in ('ascii', None):\n self._body_inferred_encoding = self._DEFAULT_ENCODING\n return dammit.unicode",
"def decode(self, data):\r\n if self.encoding and self.encoding.lower() == 'unicode':\r\n assert isinstance(data, unicode), (\r\n 'input encoding is \"unicode\" '\r\n 'but input is not a unicode object')\r\n if isinstance(data, unicode):\r\n # Accept unicode even if self.encoding != 'unicode'.\r\n return data\r\n if self.encoding:\r\n # We believe the user/application when the encoding is\r\n # explicitly given.\r\n encodings = [self.encoding]\r\n else:\r\n data_encoding = self.determine_encoding_from_data(data)\r\n if data_encoding:\r\n # If the data declares its encoding (explicitly or via a BOM),\r\n # we believe it.\r\n encodings = [data_encoding]\r\n else:\r\n # Apply heuristics only if no encoding is explicitly given and\r\n # no BOM found. Start with UTF-8, because that only matches\r\n # data that *IS* UTF-8:\r\n encodings = ['utf-8', 'latin-1']\r\n if locale_encoding:\r\n encodings.insert(1, locale_encoding)\r\n for enc in encodings:\r\n try:\r\n decoded = unicode(data, enc, self.error_handler)\r\n self.successful_encoding = enc\r\n # Return decoded, removing BOMs.\r\n return decoded.replace(u'\\ufeff', u'')\r\n except (UnicodeError, LookupError), err:\r\n error = err # in Python 3, the <exception instance> is\r\n # local to the except clause\r\n raise UnicodeError(\r\n 'Unable to decode input data. Tried the following encodings: '\r\n '%s.\\n(%s)' % (', '.join([repr(enc) for enc in encodings]),\r\n ErrorString(error)))",
"def __repr__(self):\n return 'u{}'.format(super(StringWrapper, self).__repr__())",
"def write(self, data):\r\n if self.stream is False:\r\n return\r\n if isinstance(data, Exception):\r\n data = unicode(SafeString(data, self.encoding,\r\n self.encoding_errors, self.decoding_errors))\r\n try:\r\n self.stream.write(data)\r\n except UnicodeEncodeError:\r\n self.stream.write(data.encode(self.encoding, self.encoding_errors))\r\n except TypeError: # in Python 3, stderr expects unicode\r\n if self.stream in (sys.stderr, sys.stdout):\r\n self.stream.buffer.write(data) # write bytes to raw stream\r\n else:\r\n self.stream.write(unicode(data, self.encoding,\r\n self.decoding_errors))",
"def __unicode__(self):\n if isinstance(self.item, YAMLNode):\n return str(self.item)\n elif isinstance(self.item, str):\n return self.item\n else:\n raise InvalidYAMLTypeConversion(\n self.item.__repr__(), \"unicode\"\n )",
"def getData(self):\n return bytes(self.rawData)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
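A minimal usage sketch for the method above, assuming Python 2 semantics and a SafeString-style constructor that takes the wrapped data plus the encoding/decoding settings the method reads; the error values are invented for illustration:

    # Python 2 sketch: wrap an EnvironmentError whose filename is non-ASCII bytes,
    # so unicode() still succeeds instead of raising UnicodeDecodeError.
    err = IOError(2, 'No such file or directory', 'caf\xc3\xa9.txt')
    safe = SafeString(err, encoding='utf-8', decoding_errors='replace')  # constructor assumed
    print unicode(safe)  # roughly: [Errno 2] No such file or directory: 'café.txt'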
Write `data` to self.stream. Ignore if self.stream is False. `data` can be a `string`, `unicode`, or `Exception` instance. | def write(self, data):
if self.stream is False:
return
if isinstance(data, Exception):
data = unicode(SafeString(data, self.encoding,
self.encoding_errors, self.decoding_errors))
try:
self.stream.write(data)
except UnicodeEncodeError:
self.stream.write(data.encode(self.encoding, self.encoding_errors))
except TypeError: # in Python 3, stderr expects unicode
if self.stream in (sys.stderr, sys.stdout):
self.stream.buffer.write(data) # write bytes to raw stream
else:
self.stream.write(unicode(data, self.encoding,
self.decoding_errors)) | [
"def write(self, data):\n if self.finished:\n raise SinkException(\"The AudioData is already finished writing.\")\n try:\n self.file.write(data)\n except ValueError:\n pass",
"def write(self, data):\n try:\n with open (self.filename, 'w') as file:\n dump(data, file)\n return True\n except Exception as e:\n return False",
"def send(self, data):\n ret = libvirtmod.virStreamSend(self._o, data)\n if ret == -1: raise libvirtError ('virStreamSend() failed')\n return ret",
"def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError as e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError) as err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data",
"def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError, e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError), err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data",
"def safe_write(self, data):\n\t\ttry:\n\t\t\tself.ser.write(data)\n\t\t\treturn\n\t\texcept serial.SerialTimeoutException:\n\t\t\tprint('SerialProtocol: write timeout, attempting reset..')\n\t\t\tprint('WARN: Serial write timed out, attempting reset')\n\t\t\tself.reset()\n\t\t\tprint('SerialProtocol: retrying send of {} bytes'.format(len(data)))\n\t\t\tself.ser.write(data)",
"def write_raw_file(self, data: bytes) -> None:\n pass",
"def write(self, data):\n try:\n # Hack to support unicode under Python 2.x\n if isinstance(data, str) or (sys.version_info < (3,) and isinstance(data, unicode)):\n data = data.encode('utf-8')\n\n self._device.write(data)\n\n except serial.SerialTimeoutException:\n pass\n\n except serial.SerialException as err:\n raise CommError('Error writing to device.', err)\n\n else:\n self.on_write(data=data)",
"def write(self,data):\r\n if not self.has_started:\r\n self.write_headers()\r\n self.has_started = True\r\n if self.is_chunked:\r\n self._write(hex(len(data))[2:])\r\n self._write(\"\\r\\n\")\r\n self._write(data)\r\n self._write(\"\\r\\n\")\r\n else:\r\n self._write(data)",
"def _writeSomeData(self, data):\n sent = self.transport._originalWriteSomeData(data)\n self.dataSentEvent(sent)\n return sent",
"def write(self, data, metadata):\n raise NotImplementedError",
"def write(self, data):\n self.logger.debug(data)",
"def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))",
"def write_bytes(self, data):\n # type-check for the buffer interface before truncating the file\n view = memoryview(data)\n with self.open(mode='wb') as f:\n return f.write(view)",
"def write(self, data, pack=struct.pack, eol=struct.pack('!b', 0)):\n send = self.send\n if data == 0:\n send(eol)\n else:\n for char in data:\n if sys.version_info[0] > 2:\n char = char.encode('utf-8')\n send(pack('!c', char))",
"def write(self, data, flushing=False):\n self._assert_mode(\"w-\")\n\n if \"a\" in self.mode:\n self.seek(0, Seek.end)\n\n if not isinstance(data, binary_type):\n if isinstance(data, bytearray):\n data = bytes(data)\n elif isinstance(data, text_type):\n data = data.encode(self.encoding, self.errors)\n\n statmsg, res = self._file.write(data, offset=self._ipp)\n\n if not statmsg.ok:\n self._raise_status(self.path, statmsg, \"writing\")\n\n self._ipp += len(data)\n self._size = max(self.size, self.tell())\n if flushing:\n self.flush()",
"def write_file(self, data) -> None:\n pass",
"def write(self, data):\r\n assert self.open, \"*%s not open, call begin() method before writing\" %\\\r\n UART[self.config][0]\r\n\r\n if (type(data) == float): data = int(data)\r\n if (type(data) == int): data = chr(data & 0xff)\r\n\r\n elif ((type(data) == list) or (type(data) == tuple)):\r\n bytes_written = 0\r\n for i in data:\r\n bytes_written += self.write(i) \r\n return bytes_written\r\n\r\n elif (type(data) != str):\r\n # Type not supported by write, e.g. dict; use prints().\r\n return 0\r\n\r\n written = self.ser_port.write(data)\r\n # Serial.write() returns None if no bits written, we want 0:\r\n return written if written else 0",
"def write(self, data):\n # PEP-3333 states:\n #\n # The server or gateway must transmit the yielded bytestrings to the\n # client in an unbuffered fashion, completing the transmission of\n # each bytestring before requesting another one.\n #\n # This write() method is used for the imperative and (indirectly) for\n # the more familiar iterable-of-bytestrings WSGI mechanism. It uses\n # C{blockingCallFromThread} to schedule writes. This allows exceptions\n # to propagate up from the underlying HTTP implementation. However,\n # that underlying implementation does not, as yet, provide any way to\n # know if the written data has been transmitted, so this method\n # violates the above part of PEP-3333.\n #\n # PEP-3333 also says that a server may:\n #\n # Use a different thread to ensure that the block continues to be\n # transmitted while the application produces the next block.\n #\n # Which suggests that this is actually compliant with PEP-3333,\n # because writes are done in the reactor thread.\n #\n # However, providing some back-pressure may nevertheless be a Good\n # Thing at some point in the future.\n\n def wsgiWrite(started):\n if not started:\n self._sendResponseHeaders()\n self.request.write(data)\n\n try:\n return blockingCallFromThread(\n self.reactor, wsgiWrite, self.started)\n finally:\n self.started = True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
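A minimal usage sketch for the write() behaviour above, assuming Python 2 and a hypothetical ErrorOutput wrapper that carries the .stream, .encoding, .encoding_errors and .decoding_errors attributes the method reads; its constructor here is an assumption:

    # Python 2 sketch; only write()'s semantics come from the code above.
    import sys
    err_out = ErrorOutput(sys.stderr, encoding='utf-8')   # hypothetical constructor
    err_out.write(u'ordinary unicode text\n')             # passed through unchanged
    err_out.write(IOError(2, 'missing', 'data.txt'))      # Exceptions are wrapped via SafeString first
    ErrorOutput(False).write(u'dropped\n')                 # a False stream silently ignores writes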
Close the error output stream. Ignored if the stream is `sys.stderr` or `sys.stdout` or has no close() method. | def close(self):
if self.stream in (sys.stdout, sys.stderr):
return
try:
self.stream.close()
except AttributeError:
pass | [
"def close_stream(self, input_stream):\n # type: (object) -> None\n # see https://docs.oracle.com/javase/7/docs/api/java/io/FilterInputStream.html#close()\n input_stream.close()",
"def close(self):\n self._output_fh.close()",
"def close(self):\n \n self.stream.close()",
"def close(self):\n self.process.stdout.close()\n self.process.stderr.close()\n self.running = False",
"def stop_stream(self):\n if self._stream is not None:\n self._stream.close()\n self._stream = None",
"def output_stream(stream, formatter: Formatter):\n global output\n\n # create output before opening the stream, so file outputs can prompt on existing output\n output = create_output(formatter)\n\n success_open = False\n for i in range(args.retry_open):\n try:\n stream_fd, prebuffer = open_stream(stream)\n success_open = True\n break\n except StreamError as err:\n log.error(f\"Try {i + 1}/{args.retry_open}: Could not open stream {stream} ({err})\")\n\n if not success_open:\n return console.exit(f\"Could not open stream {stream}, tried {args.retry_open} times, exiting\")\n\n try:\n output.open()\n except OSError as err:\n if isinstance(output, PlayerOutput):\n console.exit(f\"Failed to start player: {args.player} ({err})\")\n elif output.filename:\n console.exit(f\"Failed to open output: {output.filename} ({err})\")\n else:\n console.exit(f\"Failed to open output ({err}\")\n return\n\n try:\n with closing(output):\n log.debug(\"Writing stream to output\")\n show_progress = args.progress == \"force\" or args.progress == \"yes\" and sys.stderr.isatty()\n if args.force_progress:\n show_progress = True\n warnings.warn(\n \"The --force-progress option has been deprecated in favor of --progress=force\",\n StreamlinkDeprecationWarning,\n stacklevel=1,\n )\n # TODO: finally clean up the global variable mess and refactor the streamlink_cli package\n # noinspection PyUnboundLocalVariable\n stream_runner = StreamRunner(stream_fd, output, show_progress=show_progress)\n # noinspection PyUnboundLocalVariable\n stream_runner.run(prebuffer)\n except OSError as err:\n # TODO: refactor all console.exit() calls\n console.exit(str(err))\n\n return True",
"def _on_end_of_stream(self, input_stream):\n # By default, this function closes the output stream.\n self.output_stream.end_stream()",
"def convert_stream_closed_error(obj, exc):\n if exc.real_error is not None:\n # The stream was closed because of an underlying OS error\n exc = exc.real_error\n if ssl and isinstance(exc, ssl.SSLError):\n if \"UNKNOWN_CA\" in exc.reason:\n raise FatalCommClosedError(\n \"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc)\n )\n raise CommClosedError(\"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc))\n else:\n raise CommClosedError(\"in %s: %s\" % (obj, exc))",
"def output_closed(self):\n outread = self.stdout.readable() if self.stdout is not None else False\n errread = self.stderr.readable() if self.stderr is not None else False\n return not (outread or errread)",
"def set_stdout_stderr():\n\n class Writer(object):\n def write(self, msg):\n log.debug(msg)\n if verbose:\n chunk_send(msg)\n\n def flush(self):\n pass\n\n orig_stds = sys.stdout, sys.stderr\n w = Writer()\n sys.stdout = w\n sys.stderr = w\n\n def cleanup():\n \"\"\"\n Restores stdout and stderr\n \"\"\"\n sys.stdout = orig_stds[0]\n sys.stderr = orig_stds[1]\n client_sock.close()\n\n return cleanup",
"def stderr_pipe(self):\r\n return self.stderr(PIPE)",
"def close(self, err_str=None, config_rollback=True):\n logger.info(\"entering close()\")\n if err_str:\n print(err_str)\n if (\n self.use_shell\n and self.sshshell._chan is not None\n and not self.sshshell._chan.closed\n or not self.use_shell\n and self.sshshell._transport is not None\n and self.sshshell._transport.active\n ):\n if self.rm_remote_tmp:\n self.remote_cleanup()\n if config_rollback and self.command_list:\n self.limits_rollback()\n print(f\"\\r{pad_string('closing device connection')}\")\n self.sshshell.close()\n if self.hard_close:\n try:\n shutil.rmtree(self.local_tmpdir)\n except PermissionError:\n # windows can throw this error, silence it for now\n print(\n f\"{self.local_tmpdir} may still exist, please delete manually if so\"\n )\n raise os._exit(1)\n else:\n raise SystemExit(1)",
"def p2p_stream_close(self, **kwargs):\n endpoint = 'p2p/stream/close'\n args = []\n return self.client.get(endpoint, args, kwargs)",
"def retrieve_stderr():\n with closing(StringIO()) as sio, replace_stderr(sio):\n oldprint = builtins.print\n try:\n # Overriding stderr doesn't work with libraries, this ensures even\n # cached variables take this up. Well... it works.\n def newprint(*args, **kwargs):\n kwargs['file'] = sio\n oldprint(*args, **kwargs)\n\n builtins.print = newprint\n yield sio\n finally:\n builtins.print = oldprint",
"def close_active_stream(self):\n if self.stream is not None:\n if self.stream.is_active():\n self.stream.stop_stream()\n self.stream.close()\n self.stream = None\n self.start_time = 0.\n self.end_time = 0.",
"def __shutdown_streams(self):\n if self.temp_sensor is not None:\n logger.debug('Closing temp sensor.')\n self.temp_sensor.close()\n if self.humid_sensor is not None:\n logger.debug('Closing humid sensor.')\n self.humid_sensor.close()",
"def output_verbose(errors: List[Error], stream: TextIO) -> None:\n for err in errors:\n if err.lineno is not None:\n stream.write(\"{}:{}: {} ({}){}\".format(err.filename, err.lineno, err.description, err.identifier.value,\n os.linesep))\n else:\n stream.write(\"{}: {} ({}){}\".format(err.filename, err.description, err.identifier.value, os.linesep))",
"def close(self):\n if not self.sink:\n return\n LOGGER.info('Closing connection with result sink server.')\n # Reset to default logging level of test runner scripts.\n logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.DEBUG)\n self._session.close()",
"def redirect_stderr(new_stderr=None):\n if new_stderr is None:\n new_stderr = cStringIO.StringIO()\n old_stderr = sys.stderr\n sys.stderr = new_stderr\n try:\n yield new_stderr\n finally:\n sys.stderr = old_stderr",
"def send_error_response(self, text) -> None:\n self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': text})"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
append(child) -> element. Appends child and returns self if self is not full, or the first non-full parent otherwise. | def append(self, child):
assert not self.full()
self.children.append(child)
child.parent = self
node = self
while node.full():
node = node.parent
return node | [
"def add_child(self, e):\n if self.children == None:\n raise TypeError('this element cannot have a child: %r' % self)\n if isinstance(e, basestring) and self.children and isinstance(self.children[-1], basestring):\n self.children[-1] += e\n else:\n self.children.append(e)\n if isinstance(e, HTMLElement):\n e.parent = self\n return self",
"def add_or_fetch_child(self, element):\n\n child = self.child_by_element(element)\n if not child:\n child = Node(element, self)\n self._child_nodes.append(child)\n return child",
"def append_child(elt: Element, child: Element) -> Any:\n raise NotImplementedError",
"def appendOptionalChild(self, child):\n if child is not None:\n self.appendChild(child)",
"def append_to(self, parent: HTMLNode) -> HTMLNode:\n parent.append(self)\n\n return parent # for chaining",
"def append_child(self, child):\r\n child.parent = self\r\n self.children.append(child)\r\n self.changed()",
"def add_child(self, parent, child):\n if parent is None:\n raise NoParentError(\"Parent can't be None, use 'tree.add_root(child)' instead.\")\n elif child in self._nodes:\n if child in self.children_of(parent):\n logging.debug(\"[add_child] tried to add a child already in the tree, but parent matched -> kept already existing child node\")\n return self # the child is already there -> keep existing node\n else:\n logging.debug(\"[add_child] tried to add a child already in the tree, under another parent -> kept already existing child node and added it to the parents children\")\n self._node(parent).add_child_node(self._node(child)) # the child already exists -> keep existing child and add it to the parent's children\n return self\n else:\n try:\n parent_node = self._node(parent)\n except NotInTreeError:\n # parent is not in the tree, try to make it root.\n parent_node = self.add_root(parent) # raises MultipleRootsError if there is already a root\n\n # add the child\n child_node = self._create_node(parent=parent_node, data=child)\n parent_node.add_child_node(child_node)\n self._nodes[child] = child_node\n return self",
"def append_to(self, parent_element):\n\n xml = self.get_xml()\n parent_element.append(xml)",
"def add_child(self, child: 'Node'):\n if child in self.children:\n return\n self.children.append(child)\n child.set_parent(self)",
"def append(self, element: \"Element\") -> None:",
"def appendChild(self, item):\n self.children.append(item)",
"def add_child(self, child):\n if isinstance(child, list):\n seq = Sequence(child)\n seq.parent = self\n self.__children.append(seq)\n else:\n self.__children.append(child)\n child.parent = self",
"def add_child(self, child):\n raise NotImplementedError",
"def appendElement(self, e):\n eParent = e.parent\n if not eParent is None: \n eParent.removeElement(e) # Remove from current parent, if there is one.\n self._elements.append(e) # Possibly add to self again, will move it to the top of the element stack.\n e.setParent(self) # Set parent of element without calling this method again.\n if e.eId: # Store the element by unique element id, if it is defined.\n self._eIds[e.eId] = e\n # If this is a text box, then set self.lastTextBox\n if e.isTextBox:\n self.lastTextBox = e\n return len(self._elements)-1 # Answer the element index for e.",
"def test_auto_append():\n r = R()\n r.foo\n assert len(r._children_) == 1\n\n # this makes another foo child, not append to it\n r.foo.bar\n assert len(r._children_) == 2\n assert len(r._children_[1]._children_) == 1",
"def append(self, node: 'SoNode') -> \"void\":\n return _coin.SoChildList_append(self, node)",
"def add_child(self, element: DOMElement, index: int=None) -> 'DOMLayout':\n if index is None:\n index = len(self._children)\n\n self._children.insert(index, element)\n element._set_parent(self, index)\n element.add_observer(DOMEventType.RESIZE, self._on_child_resize)\n element.add_global_observer(self._on_child_event)\n\n self._rerender(resize=(True, True))\n return self",
"def addedChild(self, child):\n pass",
"def attach_to(child, parent, position=None):\n # this is essentially a shorthand function\n # NOTE notice the only difference in return value\n parent.add_child(child, position)\n return parent",
"def addChild(self, child, index=None):\n\n if self._children: # already have some children\n if index is None or index == len(self._children):\n n = self[-1]\n n.insertAfter(child)\n self.tail = child\n else:\n self[index].insertBefore(child)\n if index == 0:\n self.head = child\n else: # this is self's first child\n child.parent = self\n # child.user = self.user\n child.notebook = self.notebook\n self.head = self.tail = child\n if isinstance(child, Section):\n for node in child.nodes:\n node.notebook = self.notebook\n child.touchModified()\n self.touchModified()\n return child"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
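A minimal sketch of the append() contract above, assuming the element classes (mrow, mi, mn, msup) used by the parse_latex_math code later in this file, and assuming msup is declared to hold exactly two children so that it can become full:

    # Sketch: append() returns self while the element can still take children,
    # and climbs to the first non-full ancestor once it becomes full.
    row = mrow()                 # an mrow never fills up
    sup = msup()
    row.append(sup)
    node = sup.append(mi('x'))   # msup has one of two slots used -> returns sup
    node = node.append(mn('2'))  # msup is now full -> returns the enclosing row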
delete_child() -> child. Delete the last child and return it. | def delete_child(self):
child = self.children[-1]
del self.children[-1]
return child | [
"def delete_child(child_id):\n Child.objects(id=child_id).delete()\n return {'success': True, 'data': \"Data Deleted\"}, 200",
"def _delete(self, node):\n if self.num_children(node) == 2:\n raise ValueError('Position has two children')\n child = node._left if node._left else node._right # might be None\n if child is not None:\n child._parent = node._parent # child's grandparent becomes parent\n if node is self._root:\n self._root = child # child becomes root\n else:\n parent = node._parent\n if node is parent._left:\n parent._left = child\n else:\n parent._right = child\n self._size -= 1\n return node._element",
"def delete(self):\n parent = self.parent\n if not parent:\n return None\n parent.childList.remove(self)\n self.parent = None\n globalref.docRef.modified = True\n return parent",
"def remove_child(self, **attrs) -> Optional[SlashCommandChild]:\n child = utils.get(self._children, **attrs)\n if child:\n self._children.remove(child)\n\n return child",
"def DeleteChild(self, child):\n if self.IsEmpty():\n raise XMLUnknownChild(child.xmlname)\n factoryName = self._FindFactory(child.__class__)\n if factoryName:\n factory = getattr(self, factoryName)\n if isinstance(factory, MethodType):\n deleteFactory = getattr(self, \"Delete_\" + factoryName)\n deleteFactory(child)\n elif isinstance(factory, NoneType):\n raise XMLUnknownChild(child.xmlname)\n elif isinstance(factory, ListType):\n match = False\n for i in xrange(len(factory)):\n if factory[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del factory[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)\n elif factory is child:\n # Single allowable child is replaced with None\n child.DetachFromDocument()\n child.parent = None\n setattr(self, factoryName, None)\n else:\n raise TypeError\n else:\n match = False\n for i in xrange(len(self._children)):\n if self._children[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del self._children[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)",
"def last_child(self) -> None | \"Node\":\n ...",
"def getLastChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[-1]\n return None",
"def deleteLast(self):\n rv = self.last\n new_last = self.last.prev\n new_last.next = None;\n self.last = new_last\n return rv",
"def delete(self, value):\r\n deleted_node = self.search(value)\r\n if deleted_node == None:\r\n return 0\r\n deleted_node_original_color = deleted_node.color\r\n if deleted_node.right == self.null_node: #right child is null\r\n node_to_fix = deleted_node.left\r\n self.transplant(deleted_node, deleted_node.left)\r\n del deleted_node\r\n elif deleted_node.left == self.null_node: #left child is null\r\n node_to_fix = deleted_node.right\r\n self.transplant(deleted_node, deleted_node.right)\r\n del deleted_node\r\n else: #no child is null\r\n if deleted_node.right.left == self.null_node: #if the right child has no left child\r\n node_to_fix = deleted_node.right.right\r\n node_to_fix.parent = deleted_node.right\r\n deleted_node_original_color = node_to_fix.color\r\n self.transplant(deleted_node, deleted_node.right)\r\n deleted_node.left.parent = node_to_fix.parent\r\n node_to_fix.parent.left = deleted_node.left\r\n node_to_fix.parent.color = deleted_node.color\r\n del deleted_node\r\n else:\r\n trans_node = self.minium(deleted_node.right) #if the right child has left child\r\n deleted_node.key = trans_node.key\r\n deleted_node.satellite_data = trans_node.satellite_data\r\n node_to_fix = trans_node.right\r\n deleted_node_original_color = trans_node.color\r\n self.transplant(trans_node, trans_node.right)\r\n del trans_node\r\n\r\n if node_to_fix != self.null_node:\r\n node_to_fix.satellite_data = node_to_fix.left.satellite_data[0] + node_to_fix.right.satellite_data[0] + 1\r\n original_node_to_fix = node_to_fix\r\n while node_to_fix.parent != self.null_node:\r\n node_to_fix.parent.satellite_data[0] -= 1\r\n node_to_fix = node_to_fix.parent\r\n if deleted_node_original_color == \"BLACK\":\r\n self.delete_fixup(original_node_to_fix)",
"def moveLast(self):\n if self.parent:\n self.parent.childList.remove(self)\n self.parent.childList.append(self)\n globalref.docRef.modified = True",
"def delete_child(self, child: Union[NodeStat, 'DirectoryStat']):\n child_dirs, child_files = self._scan_result()\n\n if isinstance(child, DirectoryStat):\n rm_items = -child.total_items\n rm_size = -child.total_size\n child_dirs.remove(child)\n shutil.rmtree(str(child.path))\n elif isinstance(child, NodeStat):\n child.path.unlink()\n rm_items = -1\n rm_size = -child.size\n child_files.remove(child)\n else:\n raise TypeError(f\"The type {type(child)} is not supported!\")\n self.total_items += rm_items\n self.total_size += rm_size\n if self._on_stats_change is not None:\n self._on_stats_change(rm_items, rm_size, self.finished.is_set())",
"def delete_nth_node(self):\n nth_leaf = self.get_nth_node()\n if not nth_leaf:\n raise Exception(\"Can't delete empty heap\")\n # The last should be the right child.\n\n # if the n_th node is not the root :\n if nth_leaf is not self.root:\n if nth_leaf.parent.right_child:\n nth_leaf.parent.right_child = None\n else:\n nth_leaf.parent.left_child = None\n\n # update the nth\n self.n -= 1\n self.nth_binary_representing = format(self.n, \"b\")\n return nth_leaf.data",
"def remove(self):\r\n self.child = None",
"def delete_child(self, name):\n if name not in self.children:\n return False\n else:\n del self.children[name]",
"def _delete_max(self):\n assert not self.parent, 'self should be root.'\n\n if not self.right:\n # self is max, so delete self.\n self_left = self.left\n self._cut('left')\n return self_left\n\n grand = self\n parent = grand.right\n child = parent.right\n while child:\n grand = grand.right\n parent = parent.right\n child = child.right\n\n # parent is max, so delete parent.\n #\n # grand\n # \\\n # --- cut\n # \\\n # parent\n # / \\\n # cut --- \\\n # / \\\n # parent_left child(None)\n #\n parent_left = parent.left\n grand._cut('right')\n parent._cut('left')\n grand._connect('right', parent_left)\n return self",
"def test_delete_node_only_child(bst_long_branch_right):\n bst_long_branch_right.delete(2)\n assert bst_long_branch_right.root.right.data == 3",
"def __delete_last_node(\n self\n ):\n size_of_list = self.size()\n node_before_last_node = self.get_node(size_of_list - 2)\n node_before_last_node.set_next_node(None)\n self.last_node.set_previous_node(None)\n self.last_node = node_before_last_node",
"def remove_child_at(parent, position=None):\n if position is None:\n child = parent._children.pop()\n else:\n child = parent._children.pop(position)\n object.__setattr__(child, '_parent', None)\n\n # invalidate all ancestor nodes' length\n p = parent\n while p is not None:\n object.__setattr__(p, '_len', None)\n p = p._parent\n\n return child",
"def remove(self, child):\n try:\n if self.element == child.traversal_parent:\n self._remove_from_traversal_index(child)\n else:\n self._remove_from_index(child)\n self.list.remove(child)\n except:\n raise",
"def __delitem__(self, id):\n child = self[id]\n child.parent = None\n self.child_dict.pop(id)\n self.child_list.remove(child)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
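A minimal sketch of the pop-and-rewrap move this helper enables, assuming the same mrow/mi/msup element classes used by the parse_latex_math code later in this file; it mirrors that parser's '^' branch:

    # Sketch: take back the element just built and rewrap it under msup.
    row = mrow()
    row.append(mi('x'))
    base = row.delete_child()    # remove and return the last child
    sup = msup(base)             # rewrap it as the base of a superscript
    row.append(sup)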
close() -> parent. Close this element and return the first non-full parent. | def close(self):
parent = self.parent
while parent.full():
parent = parent.parent
return parent | [
"def end_child(self):\n if self.cur_child is not None and not self.cur_child.closed:\n self.cur_child.end()\n self.cur_child = None",
"def lastDescendant(self, inclClosed=False):\n item = self\n while True:\n if item.childList and (item.open or inclClosed):\n item = item.childList[-1]\n else:\n return item",
"def closer(x):\n x.close()",
"def closest_parent(self):\n # type: () -> Optional[Tag]\n parent = self.parent\n while parent:\n if parent.name in self.PARENT_TAGS:\n return parent\n parent = parent.parent\n return None # pragma: no cover",
"def remove_one_from_stack(self):\n stack = self.opened_inventorystack\n stack.reverse()\n for inventory in stack:\n if inventory.is_closable_by_escape():\n self.hide(inventory)\n return inventory",
"def get_parent(self) -> 'Element':\n\n return self._parent",
"def close_tags_to(self, elem_cp):\n while len(self._stack) and self._stack[-1] != elem_cp:\n self.end_tag()\n if len(self._stack):\n self.end_tag()",
"def parent(self):\n parent_elem = self.element_info.parent\n\n if parent_elem:\n return self.backend.generic_wrapper_class(parent_elem)\n else:\n return None",
"def test_parent(self):\n button = self.dlg.Alpha.find()\n self.assertEqual(button.parent(), self.dlg.find())",
"def delete(self):\n parent = self.parent\n if not parent:\n return None\n parent.childList.remove(self)\n self.parent = None\n globalref.docRef.modified = True\n return parent",
"def click_button_close(self):\n # AutoGen method click_link: None\n self.click_element(self.BUTTON_CLOSE)",
"def get_parent ( self ):\n return self.parent_ref.deref_safe()",
"def get_parent(self):\n return self.__return(self.node.parent())",
"def close(self, close_all=False):\n if close_all or self.parent is None:\n self.game.close_modal()\n else:\n self.parent.submodal = None",
"def get_parent(self, it):\n return self._parent_array[it]",
"def del_parent(self):\n self.parent = None",
"def getLastChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[-1]\n return None",
"def remove(self):\r\n if self.parent:\r\n for i, node in enumerate(self.parent.children):\r\n if node is self:\r\n self.parent.changed()\r\n del self.parent.children[i]\r\n self.parent = None\r\n return i",
"def _cur_close(self):\n open = self._prices.open[self._offset]\n rel_close = self._prices.close[self._offset] # so close is rel ?\n return open * (1.0 + rel_close)",
"def lastElementChild(self):\n try:\n return self.args[len(self.args) - 1]\n except Exception:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
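A minimal sketch of how close() pairs with the '{' and '}' handling in parse_latex_math below, assuming the mrow/mi element classes used there:

    # Sketch of group handling: '{' opens an mrow and descends into it,
    # '}' calls close() to climb back to the first ancestor that still accepts children.
    outer = mrow()
    inner = mrow()
    outer.append(inner)
    node = inner                 # as after reading '{'
    node = node.append(mi('a'))
    node = node.close()          # as after reading '}': back to the outer mrow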
parse_latex_math(string [,inline]) -> MathML tree. Returns a MathML tree parsed from string. inline=True is for inline math and inline=False is for displayed math. Within the parser, tree is the whole tree and node is the current element. | def parse_latex_math(string, inline=True):
# Normalize white-space:
string = ' '.join(string.split())
if inline:
node = mrow()
tree = math(node, inline=True)
else:
node = mtd()
tree = math(mtable(mtr(node)), inline=False)
while len(string) > 0:
n = len(string)
c = string[0]
skip = 1 # number of characters consumed
if n > 1:
c2 = string[1]
else:
c2 = ''
## print n, string, c, c2, node.__class__.__name__
if c == ' ':
pass
elif c == '\\':
if c2 in '{}':
node = node.append(mo(c2))
skip = 2
elif c2 == ' ':
node = node.append(mspace())
skip = 2
elif c2 == ',': # TODO: small space
node = node.append(mspace())
skip = 2
elif c2.isalpha():
# We have a LaTeX-name:
i = 2
while i < n and string[i].isalpha():
i += 1
name = string[1:i]
node, skip = handle_keyword(name, node, string[i:])
skip += i
elif c2 == '\\':
# End of a row:
entry = mtd()
row = mtr(entry)
node.close().close().append(row)
node = entry
skip = 2
else:
raise SyntaxError(ur'Syntax error: "%s%s"' % (c, c2))
elif c.isalpha():
node = node.append(mi(c))
elif c.isdigit():
node = node.append(mn(c))
elif c in "+-*/=()[]|<>,.!?':;@":
node = node.append(mo(c))
elif c == '_':
child = node.delete_child()
if isinstance(child, msup):
sub = msubsup(child.children, reversed=True)
elif isinstance(child, mo) and child.data in sumintprod:
sub = munder(child)
else:
sub = msub(child)
node.append(sub)
node = sub
elif c == '^':
child = node.delete_child()
if isinstance(child, msub):
sup = msubsup(child.children)
elif isinstance(child, mo) and child.data in sumintprod:
sup = mover(child)
elif (isinstance(child, munder) and
child.children[0].data in sumintprod):
sup = munderover(child.children)
else:
sup = msup(child)
node.append(sup)
node = sup
elif c == '{':
row = mrow()
node.append(row)
node = row
elif c == '}':
node = node.close()
elif c == '&':
entry = mtd()
node.close().append(entry)
node = entry
else:
raise SyntaxError(ur'Illegal character: "%s"' % c)
string = string[skip:]
return tree | [
"def parse_mathml(s):\n import xml.dom.minidom\n x = xml.dom.minidom.parseString(s)\n return parse_mathml_rhs(dom_child(x))",
"def compile_math(math):\n if isinstance(math, str):\n math = (\n math\n .replace('&&', 'and')\n .replace('||', 'or')\n .replace('^', '**')\n )\n\n model = evalidate.base_eval_model.clone()\n model.nodes.extend(VALID_MATH_EXPRESSION_NODES)\n model.allowed_functions.extend(MATHEMATICAL_FUNCTIONS.keys())\n\n math_node = evalidate.Expr(math, model=model)\n compiled_math = compile(math_node.node, '<math>', 'eval')\n return compiled_math",
"def build_tree(math_exp_string):\n if not validate_math_exp(math_exp_string):\n raise InvalidInput('Validation Error, one or more parenthesis are not closed properly')\n \n exp_list = filter_exp_list(math_exp_string)\n stack = Stack()\n current_node = Tree()\n\n for token in exp_list:\n\n if token == '(':\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n elif token == ')':\n if stack.size():\n current_node = stack.pop()\n\n elif token in operator_map.keys():\n if current_node.get_val():\n if current_node.get_val() == token:\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n else:\n parent = Tree(token)\n parent.update_child(current_node)\n parent.add_child()\n stack.push(parent)\n current_node = parent.get_newborn_child()\n else:\n current_node.set_val(token)\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n else:\n try:\n current_node.set_val(float(token))\n except ValueError, e:\n logging.info(e.message)\n current_node.set_val(token)\n current_node = stack.pop()\n\n return current_node",
"def parse_mathml_rhs(node, var_table=None, logger=None,\n number_post_processor=None, derivative_post_processor=None):\n def parsex(node):\n \"\"\"\n Parses a mathml expression.\n \"\"\"\n def chain(kind, node, unary=None):\n \"\"\"\n Parses operands for chained operators (for example plus, minus,\n times and division).\n\n The argument ``kind`` must be the myokit expression type being\n parsed, ``node`` is a DOM node and ``unary``, if given, should be\n the unary expression type (unary Plus or unary Minus).\n \"\"\"\n ops = []\n node = dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n n = len(ops)\n if n < 1:\n raise MathMLError('Operator needs at least one operand.')\n if n < 2:\n if unary:\n return unary(ops[0])\n else:\n raise MathMLError('Operator needs at least two operands')\n ex = kind(ops[0], ops[1])\n for i in xrange(2, n):\n ex = kind(ex, ops[i])\n return ex\n # Start parsing\n name = node.tagName\n if name == 'apply':\n # Brackets, can be ignored in an expression tree.\n return parsex(dom_child(node))\n elif name == 'ci':\n # Reference\n var = str(node.firstChild.data).strip()\n if var_table:\n try:\n var = var_table[var]\n except KeyError:\n logger.warn('Unable to resolve reference to <' + str(var)\n + '>.')\n return myokit.Name(var)\n elif name == 'diff':\n # Derivative\n # Check time variable\n bvar = dom_next(node, 'bvar')\n if derivative_post_processor:\n derivative_post_processor(parsex(dom_child(bvar, 'ci')))\n # Check degree, if given\n d = dom_child(bvar, 'degree')\n if d is not None:\n d = parsex(dom_child(d, 'cn')).eval()\n if not d == 1:\n raise MathMLError('Only derivatives of degree one are'\n ' supported.')\n # Create derivative and return\n x = dom_next(node, 'ci')\n if x is None:\n raise MathMLError('Derivative of an expression found: only'\n ' derivatives of variables are supported.')\n return myokit.Derivative(parsex(x))\n elif name == 'cn':\n # Number\n number = parse_mathml_number(node, logger)\n if number_post_processor:\n return number_post_processor(node, number)\n return number\n #\n # Algebra\n #\n elif name == 'plus':\n return chain(myokit.Plus, node, myokit.PrefixPlus)\n elif name == 'minus':\n return chain(myokit.Minus, node, myokit.PrefixMinus)\n elif name == 'times':\n return chain(myokit.Multiply, node)\n elif name == 'divide':\n return chain(myokit.Divide, node)\n #\n # Functions\n #\n elif name == 'exp':\n return myokit.Exp(parsex(dom_next(node)))\n elif name == 'ln':\n return myokit.Log(parsex(dom_next(node)))\n elif name == 'log':\n if dom_next(node).tagName != 'logbase':\n return myokit.Log10(parsex(dom_next(node)))\n else:\n return myokit.Log(\n parsex(dom_next(dom_next(node))),\n parsex(dom_child(dom_next(node))))\n elif name == 'root':\n # Check degree, if given\n next = dom_next(node)\n if next.tagName == 'degree':\n # Degree given, return x^(1/d) unless d is 2\n d = parsex(dom_child(next))\n x = parsex(dom_next(next))\n if d.is_literal() and d.eval() == 2:\n return myokit.Sqrt(x)\n return myokit.Power(x, myokit.Divide(myokit.Number(1), d))\n else:\n return myokit.Sqrt(parsex(next))\n elif name == 'power':\n n2 = dom_next(node)\n return myokit.Power(parsex(n2), parsex(dom_next(n2)))\n elif name == 'floor':\n return myokit.Floor(parsex(dom_next(node)))\n elif name == 'ceiling':\n return myokit.Ceil(parsex(dom_next(node)))\n elif name == 'abs':\n return myokit.Abs(parsex(dom_next(node)))\n elif name == 'quotient':\n n2 = dom_next(node)\n return myokit.Quotient(parsex(n2), parsex(dom_next(n2)))\n elif name 
== 'rem':\n n2 = dom_next(node)\n return myokit.Remainder(parsex(n2), parsex(dom_next(n2)))\n #\n # Trigonometry\n #\n elif name == 'sin':\n return myokit.Sin(parsex(dom_next(node)))\n elif name == 'cos':\n return myokit.Cos(parsex(dom_next(node)))\n elif name == 'tan':\n return myokit.Tan(parsex(dom_next(node)))\n elif name == 'arcsin':\n return myokit.ASin(parsex(dom_next(node)))\n elif name == 'arccos':\n return myokit.ACos(parsex(dom_next(node)))\n elif name == 'arctan':\n return myokit.ATan(parsex(dom_next(node)))\n #\n # Redundant trigonometry (CellML includes this)\n #\n elif name == 'csc':\n # Cosecant: csc(x) = 1 / sin(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Sin(parsex(dom_next(node))))\n elif name == 'sec':\n # Secant: sec(x) = 1 / cos(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Cos(parsex(dom_next(node))))\n elif name == 'cot':\n # Contangent: cot(x) = 1 / tan(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Tan(parsex(dom_next(node))))\n elif name == 'arccsc':\n # ArcCosecant: acsc(x) = asin(1/x)\n return myokit.ASin(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arcsec':\n # ArcSecant: asec(x) = acos(1/x)\n return myokit.ACos(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arccot':\n # ArcCotangent: acot(x) = atan(1/x)\n return myokit.ATan(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n #\n # Hyperbolic trigonometry (CellML again)\n #\n elif name == 'sinh':\n # Hyperbolic sine: sinh(x) = 0.5 * (e^x - e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'cosh':\n # Hyperbolic cosine: cosh(x) = 0.5 * (e^x + e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'tanh':\n # Hyperbolic tangent: tanh(x) = (e^2x - 1) / (e^2x + 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Minus(e2x, myokit.Number(1)),\n myokit.Plus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic trigonometry (CellML...)\n #\n elif name == 'arcsinh':\n # Inverse hyperbolic sine: asinh(x) = log(x + sqrt(1 + x*x))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Sqrt(myokit.Plus(\n myokit.Number(1), myokit.Multiply(x, x)))))\n elif name == 'arccosh':\n # Inverse hyperbolic cosine:\n # acosh(x) = log(x + sqrt(x + 1) * sqrt(x - 1))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Multiply(myokit.Sqrt(\n myokit.Plus(x, myokit.Number(1))), myokit.Sqrt(\n myokit.Minus(x, myokit.Number(1))))))\n elif name == 'arctanh':\n # Inverse hyperbolic tangent:\n # atanh(x) = 0.5 * (log(1 + x) - log(1 - x))\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), x)), myokit.Log(\n myokit.Minus(myokit.Number(1), x))))\n #\n # Hyperbolic redundant trigonometry (CellML...)\n #\n elif name == 'csch':\n # Hyperbolic cosecant: csch(x) = 2 / (exp(x) - exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'sech':\n # Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'coth':\n # Hyperbolic 
cotangent:\n # coth(x) = (exp(2*x) + 1) / (exp(2*x) - 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Plus(e2x, myokit.Number(1)),\n myokit.Minus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic redundant trigonometry (CellML has a lot to answer\n # for...)\n #\n elif name == 'arccsch':\n # Inverse hyperbolic cosecant:\n # arccsch(x) = log(sqrt(1 + 1/x^2) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Sqrt(myokit.Number(1),\n myokit.Power(xi, myokit.Number(2))), xi))\n elif name == 'arcsech':\n # Inverse hyperbolic secant:\n # arcsech(x) = log(sqrt(1/x - 1) * sqrt(1/x + 1) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Multiply(\n myokit.Sqrt(myokit.Minus(xi, myokit.Number(1))),\n myokit.Sqrt(myokit.Plus(xi, myokit.Number(1)))), xi))\n elif name == 'arccoth':\n # Inverse hyperbolic cotangent:\n # arccoth(x) = 0.5 * (log(1 + 1/x) - log(1 - 1/x))\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), xi)),\n myokit.Log(myokit.Minus(myokit.Number(1), xi))))\n #\n # Logic\n #\n elif name == 'and':\n return chain(myokit.And, node)\n elif name == 'or':\n return chain(myokit.Or, node)\n elif name == 'not':\n return chain(None, node, myokit.Not)\n elif name == 'eq' or name == 'equivalent':\n n2 = dom_next(node)\n return myokit.Equal(parsex(n2), parsex(dom_next(n2)))\n elif name == 'neq':\n n2 = dom_next(node)\n return myokit.NotEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'gt':\n n2 = dom_next(node)\n return myokit.More(parsex(n2), parsex(dom_next(n2)))\n elif name == 'lt':\n n2 = dom_next(node)\n return myokit.Less(parsex(n2), parsex(dom_next(n2)))\n elif name == 'geq':\n n2 = dom_next(node)\n return myokit.MoreEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'leq':\n n2 = dom_next(node)\n return myokit.LessEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'piecewise':\n # Piecewise contains at least one piece, optionally contains an\n # \"otherwise\". 
Syntax doesn't ensure this statement makes sense.\n conds = []\n funcs = []\n other = None\n piece = dom_child(node)\n while piece:\n if piece.tagName == 'otherwise':\n if other is None:\n other = parsex(dom_child(piece))\n elif logger:\n logger.warn('Multiple <otherwise> tags found in'\n ' <piecewise> statement.')\n elif piece.tagName == 'piece':\n n2 = dom_child(piece)\n funcs.append(parsex(n2))\n conds.append(parsex(dom_next(n2)))\n elif logger:\n logger.warn('Unexpected tag type in <piecewise>: '\n + '<' + piece.tagName + '>.')\n piece = dom_next(piece)\n if other is None:\n other = myokit.Number(0)\n # Create string of if statements\n args = []\n f = iter(funcs)\n for c in conds:\n args.append(c)\n args.append(f.next())\n args.append(other)\n return myokit.Piecewise(*args)\n #\n # Constants\n #\n elif name == 'pi':\n return myokit.Number('3.14159265358979323846')\n elif name == 'exponentiale':\n return myokit.Exp(myokit.Number(1))\n elif name == 'true':\n # This is corrent, even in Python True == 1 but not True == 2\n return myokit.Number(1)\n elif name == 'false':\n return myokit.Number(0)\n #\n # Unknown/unhandled elements\n #\n else:\n if logger:\n logger.warn('Unknown element: ' + name)\n ops = []\n node = dom_child(node) if dom_child(node) else dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n return myokit.UnsupportedFunction(name, ops)\n # Remove math node, if given\n if node.tagName == 'math':\n node = dom_child(node)\n #TODO: Check xmlns?\n return parsex(node)",
"def calc(mystring):\n return(evaltree(buildtree(tokenize(mystring))))",
"def is_mathml(self):\n return '<math ' in self.expr",
"def make_sympy(self, xml=None): # lint-amnesty, pylint: disable=too-many-statements\n\n if self.the_sympy:\n return self.the_sympy\n\n if xml is None:\t # root\n if not self.is_mathml():\n return my_sympify(self.expr)\n if self.is_presentation_mathml():\n cmml = None\n try:\n cmml = self.cmathml\n xml = etree.fromstring(str(cmml))\n except Exception as err:\n if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: # lint-amnesty, pylint: disable=unsupported-membership-test\n msg = \"Illegal math expression\"\n else:\n msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n else:\n xml = etree.fromstring(self.expr)\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n return self.the_sympy\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def op_plus(*args):\n return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]\n\n def op_times(*args):\n return reduce(operator.mul, args)\n\n def op_minus(*args):\n if len(args) == 1:\n return -args[0]\n if not len(args) == 2: # lint-amnesty, pylint: disable=unneeded-not\n raise Exception('minus given wrong number of arguments!')\n #return sympy.Add(args[0],-args[1])\n return args[0] - args[1]\n\n opdict = {\n 'plus': op_plus,\n 'divide': operator.div, # lint-amnesty, pylint: disable=no-member\n 'times': op_times,\n 'minus': op_minus,\n 'root': sympy.sqrt,\n 'power': sympy.Pow,\n 'sin': sympy.sin,\n 'cos': sympy.cos,\n 'tan': sympy.tan,\n 'cot': sympy.cot,\n 'sinh': sympy.sinh,\n 'cosh': sympy.cosh,\n 'coth': sympy.coth,\n 'tanh': sympy.tanh,\n 'asin': sympy.asin,\n 'acos': sympy.acos,\n 'atan': sympy.atan,\n 'atan2': sympy.atan2,\n 'acot': sympy.acot,\n 'asinh': sympy.asinh,\n 'acosh': sympy.acosh,\n 'atanh': sympy.atanh,\n 'acoth': sympy.acoth,\n 'exp': sympy.exp,\n 'log': sympy.log,\n 'ln': sympy.ln,\n }\n\n def parse_presentation_symbol(xml):\n \"\"\"\n Parse <msub>, <msup>, <mi>, and <mn>\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'msub':\n return '_'.join([parse_presentation_symbol(y) for y in xml])\n elif tag == 'msup':\n return '^'.join([parse_presentation_symbol(y) for y in xml])\n raise Exception('[parse_presentation_symbol] unknown tag %s' % tag)\n\n # parser tree for Content MathML\n tag = gettag(xml)\n\n # first do compound objects\n\n if tag == 'apply':\t\t# apply operator\n opstr = gettag(xml[0])\n if opstr in opdict:\n op = opdict[opstr] # pylint: disable=invalid-name\n args = [self.make_sympy(expr) for expr in xml[1:]]\n try:\n res = op(*args)\n except Exception as err:\n self.args = args # pylint: disable=attribute-defined-outside-init\n self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name\n raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) # lint-amnesty, pylint: disable=raise-missing-from\n return res\n else:\n raise Exception('[formula]: unknown operator tag %s' % (opstr))\n\n elif tag == 'list':\t\t# square bracket list\n if gettag(xml[0]) == 'matrix':\n return self.make_sympy(xml[0])\n else:\n return [self.make_sympy(expr) for expr in xml]\n\n elif tag == 'matrix':\n return sympy.Matrix([self.make_sympy(expr) for expr in xml])\n\n elif tag == 'vector':\n return [self.make_sympy(expr) for expr in xml]\n\n # atoms are below\n\n elif tag 
== 'cn':\t\t\t# number\n return sympy.sympify(xml.text)\n\n elif tag == 'ci':\t\t\t# variable (symbol)\n if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):\t # subscript or superscript\n usym = parse_presentation_symbol(xml[0])\n sym = sympy.Symbol(str(usym))\n else:\n usym = six.text_type(xml.text)\n if 'hat' in usym:\n sym = my_sympify(usym)\n else:\n if usym == 'i' and self.options is not None and 'imaginary' in self.options:\t # i = sqrt(-1)\n sym = sympy.I\n else:\n sym = sympy.Symbol(str(usym))\n return sym\n\n else:\t\t\t\t# unknown tag\n raise Exception('[formula] unknown tag %s' % tag)",
"def parse_arithmetic(reference_string):\n try:\n return _ast_eval(ast.parse(reference_string, mode=\"eval\").body)\n except (TypeError, SyntaxError, KeyError):\n return reference_string",
"def math_for_markdown(pelicanobj):\n\n try:\n pelicanobj.settings[\"MARKDOWN\"].setdefault(\"extensions\", []).append(\n MathExtension()\n )\n except Exception:\n sys.excepthook(*sys.exc_info())\n sys.stderr.write(\n \"\\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\\n\"\n )\n sys.stderr.flush()",
"def make_new_mathelement(TeX):\n mathmlelement = etree.Element('math')\n semantics = etree.Element('semantics')\n junk = etree.Element('mtext')\n junk.text = 'CLICKME'\n semantics.append(junk)\n annotation = etree.Element('annotation')\n annotation.attrib['encoding'] = 'TeX'\n annotation.text = TeX\n\n semantics.append(annotation)\n mathmlelement.append(semantics)\n return etree.tostring(mathmlelement)",
"def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):\n\n # Create the configuration for the markdown template\n config = {}\n config['mathjax_script'] = mathjax_script\n config['math_tag_class'] = 'math'\n config['auto_insert'] = mathjax_settings['auto_insert']\n\n # Instantiate markdown extension and append it to the current extensions\n try:\n if isinstance(pelicanobj.settings.get('MD_EXTENSIONS'), list): # pelican 3.6.3 and earlier\n pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))\n else:\n pelicanobj.settings['MARKDOWN'].setdefault('extensions', []).append(PelicanMathJaxExtension(config))\n except:\n sys.excepthook(*sys.exc_info())\n sys.stderr.write(\"\\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\\n\")\n sys.stderr.flush()",
"def parse(cls, xml_string, **parser_kwargs):\n\n xml_string = OOXMLtoLatexParser.change_xml_double_open_tag_to_left_arrow(xml_string)\n xml_string = OOXMLtoLatexParser._remove_self_closing_tags(xml_string)\n xml_to_latex_parser = cls(**parser_kwargs)\n\n if isinstance(xml_string, basestring):\n element = etree.fromstring(xml_string)\n sax.saxify(element, xml_to_latex_parser)\n return xml_to_latex_parser\n else:\n raise TypeError(\"xml string parameter must be str or unicode\")",
"def test_advanced_math(self):\n exp = \"m{(10+10)+10+10}\"\n self.assertEqual(self.engine.Process(exp), \"40\", \"adds complex nested math\")",
"def preprocess_pmathml(self, xml): # lint-amnesty, pylint: disable=too-many-statements\n\n if isinstance(xml, (str, six.text_type)):\n xml = etree.fromstring(xml)\t\t# TODO: wrap in try\n\n xml = self.fix_greek_in_mathml(xml)\t # convert greek utf letters to greek spelled out in ascii\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def fix_pmathml(xml):\n \"\"\"\n f and g are processed as functions by asciimathml, eg \"f-2\" turns\n into \"<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>\" this is\n really terrible for turning into cmathml. undo this here.\n \"\"\"\n for k in xml:\n tag = gettag(k)\n if tag == 'mrow':\n if len(k) == 2:\n if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':\n idx = xml.index(k)\n xml.insert(idx, deepcopy(k[0]))\t # drop the <mrow> container\n xml.insert(idx + 1, deepcopy(k[1]))\n xml.remove(k)\n fix_pmathml(k)\n\n fix_pmathml(xml)\n\n def fix_hat(xml):\n \"\"\"\n hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle\n this into <mi>hat(f)</mi> hat i also somtimes turned into\n <mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>\n \"\"\"\n for k in xml:\n tag = gettag(k)\n if tag == 'mover':\n if len(k) == 2:\n if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':\n newk = etree.Element('mi')\n newk.text = 'hat(%s)' % k[0].text\n xml.replace(k, newk)\n if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and \\\n gettag(k[1]) == 'mo' and str(k[1].text) == '^':\n newk = etree.Element('mi')\n newk.text = 'hat(%s)' % k[0][0].text\n xml.replace(k, newk)\n fix_hat(k)\n fix_hat(xml)\n\n def flatten_pmathml(xml):\n \"\"\"\n Give the text version of certain PMathML elements\n\n Sometimes MathML will be given with each letter separated (it\n doesn't know if its implicit multiplication or what). From an xml\n node, find the (text only) variable name it represents. So it takes\n <mrow>\n <mi>m</mi>\n <mi>a</mi>\n <mi>x</mi>\n </mrow>\n and returns 'max', for easier use later on.\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'mrow':\n return ''.join([flatten_pmathml(y) for y in xml])\n raise Exception('[flatten_pmathml] unknown tag %s' % tag)\n\n def fix_mathvariant(parent):\n \"\"\"\n Fix certain kinds of math variants\n\n Literally replace <mstyle mathvariant=\"script\"><mi>N</mi></mstyle>\n with 'scriptN'. There have been problems using script_N or script(N)\n \"\"\"\n for child in parent:\n if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script':\n newchild = etree.Element('mi')\n newchild.text = 'script%s' % flatten_pmathml(child[0])\n parent.replace(child, newchild)\n fix_mathvariant(child)\n fix_mathvariant(xml)\n\n # find \"tagged\" superscripts\n # they have the character \\u200b in the superscript\n # replace them with a__b so snuggle doesn't get confused\n def fix_superscripts(xml):\n \"\"\" Look for and replace sup elements with 'X__Y' or 'X_Y__Z'\n\n In the javascript, variables with '__X' in them had an invisible\n character inserted into the sup (to distinguish from powers)\n E.g. 
normal:\n <msubsup>\n <mi>a</mi>\n <mi>b</mi>\n <mi>c</mi>\n </msubsup>\n to be interpreted '(a_b)^c' (nothing done by this method)\n\n And modified:\n <msubsup>\n <mi>b</mi>\n <mi>x</mi>\n <mrow>\n <mo>​</mo>\n <mi>d</mi>\n </mrow>\n </msubsup>\n to be interpreted 'a_b__c'\n\n also:\n <msup>\n <mi>x</mi>\n <mrow>\n <mo>​</mo>\n <mi>B</mi>\n </mrow>\n </msup>\n to be 'x__B'\n \"\"\"\n for k in xml:\n tag = gettag(k)\n\n # match things like the last example--\n # the second item in msub is an mrow with the first\n # character equal to \\u200b\n if (\n tag == 'msup' and\n len(k) == 2 and gettag(k[1]) == 'mrow' and\n gettag(k[1][0]) == 'mo' and k[1][0].text == u'\\u200b' # whew\n ):\n\n # replace the msup with 'X__Y'\n k[1].remove(k[1][0])\n newk = etree.Element('mi')\n newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))\n xml.replace(k, newk)\n\n # match things like the middle example-\n # the third item in msubsup is an mrow with the first\n # character equal to \\u200b\n if (\n tag == 'msubsup' and\n len(k) == 3 and gettag(k[2]) == 'mrow' and\n gettag(k[2][0]) == 'mo' and k[2][0].text == u'\\u200b' # whew\n ):\n\n # replace the msubsup with 'X_Y__Z'\n k[2].remove(k[2][0])\n newk = etree.Element('mi')\n newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))\n xml.replace(k, newk)\n\n fix_superscripts(k)\n fix_superscripts(xml)\n\n def fix_msubsup(parent):\n \"\"\"\n Snuggle returns an error when it sees an <msubsup> replace such\n elements with an <msup>, except the first element is of\n the form a_b. I.e. map a_b^c => (a_b)^c\n \"\"\"\n for child in parent:\n # fix msubsup\n if gettag(child) == 'msubsup' and len(child) == 3:\n newchild = etree.Element('msup')\n newbase = etree.Element('mi')\n newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))\n newexp = child[2]\n newchild.append(newbase)\n newchild.append(newexp)\n parent.replace(child, newchild)\n\n fix_msubsup(child)\n fix_msubsup(xml)\n\n self.xml = xml # pylint: disable=attribute-defined-outside-init\n return self.xml",
"def md2html():\n if len(sys.argv) < 2:\n _usage_md2html()\n sys.exit(1)\n\n filename = sys.argv[1]\n if not filename.endswith('.md'):\n if os.path.isfile(filename + '.md'):\n filename += '.md'\n else:\n raise IOError('no file %s.md' % filename)\n # First make sure \\eqref survives the pandoc translation\n f = open(filename ,'r'); text = f.read(); f.close()\n text = text.replace('\\\\eqref{', 'EQREF{')\n f = open(filename ,'w'); f.write(text); f.close()\n\n # Translate to HTML and fix the MathJax things\n basename = filename[:-3]\n cmd = 'pandoc -f markdown -t html --mathjax -s -o %s.html %s.md' % \\\n (basename, basename)\n print cmd\n failure = os.system(cmd)\n if failure:\n print 'could not run\\n', cmd\n sys.exit(1)\n f = open('%s.html' % basename, 'r')\n text = f.read()\n f.close()\n # Add extra info\n pattern = r'(<script src=\".+?MathJax\\.js)'\n replacement = r\"\"\"\n<script type=\"text/x-mathjax-config\">\nMathJax.Hub.Config({\n TeX: {\n equationNumbers: { autoNumber: \"AMS\" },\n extensions: [\"AMSmath.js\", \"AMSsymbols.js\", \"autobold.js\"]\n }\n});\n</script>\n\\g<1>\"\"\"\n text = re.sub(pattern, replacement, text)\n text = text.replace('EQREF{', '\\\\eqref{')\n\n f = open('%s.html' % basename, 'w')\n f.write(text)\n f.close()\n print 'output in %s.html' % basename",
"def loads(string):\r\n tree = qasm_parser.parse(string)\r\n tree = QASMToIRTransformer().transform(tree)\r\n return tree",
"def render_md(self, ds):\n ds_no_math, math = remove_math(ds, '$')\n # We have to run `mathjax_editing.replace_math` on the text in code\n # blocks before passing it to Pygments (see `render_block_code`),\n # otherwise `replace_math` will be confused by the added syntax\n # highlighting `<span>`s and won't be able to splice in those blocks.\n self.math = math\n html = self.render(Document(ds_no_math))\n return replace_math(html, self.math)",
"def parse(line):\n\n document = Document()\n root = document.createElement('tree')\n current_element = root\n rest = line\n\n while True:\n element, separator, rest = parse_element(rest, document)\n\n if isinstance(current_element.lastChild, Text) and \\\n current_element.lastChild.data == '':\n current_element.removeChild(current_element.lastChild)\n\n current_element.appendChild(element)\n\n if rest is None:\n break\n\n if separator == '<':\n current_element = current_element.parentNode\n elif separator == '+':\n current_element = current_element\n elif separator == '>':\n current_element = element\n\n expand_multipliers(root)\n\n return root",
"def math_eval(str, other_modules=()):\n gdict = globals().copy()\n exec('import galsim', gdict)\n exec('import numpy', gdict)\n exec('import numpy as np', gdict)\n\n exec('import math', gdict)\n exec('import coord', gdict)\n for m in other_modules: # pragma: no cover (We don't use this.)\n exec('import ' + m, gdict)\n\n # A few other things that show up in reprs, so useful to import here.\n exec('from numpy import array, uint16, uint32, int16, int32, float32, float64, complex64, complex128, ndarray',\n gdict)\n exec('from astropy.units import Unit', gdict)\n\n return eval(str, gdict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return dictionary of Unicode character lists. For each of the `categories`, an item contains a list with all Unicode characters with `cp_min` <= codepoint <= `cp_max` that belong to the category. (The default values check every codepoint supported by Python.) | def unicode_charlists(categories, cp_min=0, cp_max=None):
# Determine highest code point with one of the given categories
# (may shorten the search time considerably if there are many
# categories with not too high characters):
if cp_max is None:
cp_max = max(x for x in xrange(sys.maxunicode + 1)
if unicodedata.category(unichr(x)) in categories)
# print cp_max # => 74867 for unicode_punctuation_categories
charlists = {}
for cat in categories:
charlists[cat] = [unichr(x) for x in xrange(cp_min, cp_max+1)
if unicodedata.category(unichr(x)) == cat]
return charlists | [
"def unicode_charlists(categories, cp_min=0, cp_max=None):\r\n # Determine highest code point with one of the given categories\r\n # (may shorten the search time considerably if there are many\r\n # categories with not too high characters):\r\n if cp_max is None:\r\n cp_max = max(x for x in range(sys.maxunicode + 1)\r\n if unicodedata.category(chr(x)) in categories)\r\n # print cp_max # => 74867 for unicode_punctuation_categories\r\n charlists = {}\r\n for cat in categories:\r\n charlists[cat] = [chr(x) for x in range(cp_min, cp_max+1)\r\n if unicodedata.category(chr(x)) == cat]\r\n return charlists",
"def generate_categories():\n # inspired by https://gist.github.com/anonymous/2204527\n code_points_ranges = []\n iso_15924_aliases = []\n categories = []\n\n match = re.compile(r'([0-9A-F]+)(?:\\.\\.([0-9A-F]+))?\\W+(\\w+)\\s*#\\s*(\\w+)',\n re.UNICODE)\n\n url = 'ftp://ftp.unicode.org/Public/UNIDATA/Scripts.txt'\n file = get(url)\n for line in file:\n p = re.findall(match, line)\n if p:\n code_point_range_from, code_point_range_to, alias, category = p[0]\n alias = u(alias.upper())\n category = u(category)\n if alias not in iso_15924_aliases:\n iso_15924_aliases.append(alias)\n if category not in categories:\n categories.append(category)\n code_points_ranges.append((\n int(code_point_range_from, 16),\n int(code_point_range_to or code_point_range_from, 16),\n iso_15924_aliases.index(alias), categories.index(category))\n )\n code_points_ranges.sort()\n\n categories_data = {\n 'iso_15924_aliases': iso_15924_aliases,\n 'categories': categories,\n 'code_points_ranges': code_points_ranges,\n }\n\n dump('categories.json', categories_data)",
"def build_categories():\n # Note that we're acting like we're going to grab all categories from\n # the dataset, though in practice we're going to limit it to COMMON and\n # LATIN entries.\n #\n # This is because right now, we only need it for Unicode confusable maps,\n # but as we implement more Unicode checks, we will likely need to fetch\n # and store more information. Given the complexity of this, it's better to\n # have a more future-proof, \"correct\" implementation up-front.\n LINE_RE = re.compile(\n r'^(?P<codepoint_from>[0-9A-F]+)'\n r'(?:\\.\\.(?P<codepoint_through>[0-9A-F]+))?'\n r'\\s*; (?P<alias>\\w+) # (?P<category>[\\w]+)',\n re.UNICODE)\n\n aliases = []\n alias_id_map = {}\n\n categories = []\n category_id_map = {}\n\n codepoint_ranges = {}\n\n for info in _load_data(CATEGORIES_URL, LINE_RE):\n alias = info['alias'].upper()\n\n if alias not in ('COMMON', 'LATIN'):\n continue\n\n if alias not in alias_id_map:\n alias_id_map[alias] = len(aliases)\n aliases.append(alias)\n\n category = info['category']\n\n if category not in category_id_map:\n category_id_map[category] = len(categories)\n categories.append(category)\n\n codepoint_from_s = info['codepoint_from']\n codepoint_through_s = info['codepoint_through'] or codepoint_from_s\n\n codepoint_from = int(codepoint_from_s, 16)\n codepoint_through = int(codepoint_through_s, 16)\n\n # Split into subtables. Key off from some prefix.\n prev_key = None\n cur_range = None\n\n # We need a quick way to look up Unicode codepoints, but it's too\n # expensive to maintain a mapping of every codepoint. So, instead\n # we have a 5-level tree.\n #\n # The first 4 levels are increasingly specific masks of starting\n # codepoint ranges, with the 5th level being the codepoints in that\n # range.\n #\n # Codepoint ranges are split up as needed to fit in the correct range.\n #\n # As an example, if we were storing category range 1F400..1F6D7\n # (RAT..ELEVATOR):\n #\n # 10000: {\n # 1F000: {\n # 1F400: {\n # 1F400: [1F400..1F409],\n # ...\n # 1F490: [1F490..1F499],\n # },\n # ..\n # 1F600: {\n # 1F600: [1F600..1F609],\n # ...\n # 1F6D0: [1F6D0..1F6D7],\n # }\n # }\n # }\n #\n # In practice, the leafs often have more than one range, particularly\n # for the lower codepoint ranges.\n #\n # This is easy to build and fast for lookup.\n for codepoint in range(codepoint_from, codepoint_through + 1):\n key = _make_codepoints_key_path(codepoint)\n\n if key != prev_key:\n if cur_range:\n codepoints = (\n codepoint_ranges\n .setdefault(prev_key[0], {})\n .setdefault(prev_key[1], {})\n .setdefault(prev_key[2], {})\n .setdefault(prev_key[3], [])\n )\n codepoints.append(cur_range)\n\n cur_range = (\n codepoint,\n codepoint,\n alias_id_map[alias],\n category_id_map[category],\n )\n else:\n cur_range = (\n cur_range[0],\n codepoint,\n cur_range[2],\n cur_range[3],\n )\n\n prev_key = key\n\n if prev_key:\n codepoints = (\n codepoint_ranges\n .setdefault(prev_key[0], {})\n .setdefault(prev_key[1], {})\n .setdefault(prev_key[2], {})\n .setdefault(prev_key[3], [])\n )\n codepoints.append(cur_range)\n\n categories_data.update({\n 'aliases': aliases,\n 'categories': categories,\n 'codepoints': codepoint_ranges,\n })",
"def category(chr: str) -> str:\n idx = ord(chr)\n start_keys = sorted(unicode_data_to_category_start.keys())\n insertion_point = bisect.bisect_left(start_keys, idx)\n if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:\n insertion_point -= 1\n key_start = start_keys[insertion_point]\n result_start = unicode_data_to_category_start[key_start]\n\n end_keys = sorted(unicode_data_to_category_end.keys())\n insertion_point = bisect.bisect_left(end_keys, idx)\n try:\n key_end = end_keys[insertion_point]\n result_end = unicode_data_to_category_end[key_end]\n\n if result_end != key_start:\n result_end = result_start\n key_end = key_start\n else:\n result_end = unicode_data_to_category_start[result_end]\n\n if key_start <= idx <= key_end and result_start == result_end:\n return result_start\n else:\n return \"Zzzz\"\n except IndexError:\n return \"Zzzz\"",
"def _build_public_opentype_categories(ufo: Font) -> dict[str, str]:\n from glyphsLib import glyphdata\n\n categories: dict[str, str] = {}\n category_key = GLYPHLIB_PREFIX + \"category\"\n subCategory_key = GLYPHLIB_PREFIX + \"subCategory\"\n\n # NOTE: We can generate the category even for glyphs that are not exported,\n # because entries don't have to exist in the final fonts.\n for glyph in ufo:\n glyph_name = glyph.name\n assert glyph_name is not None\n\n has_attaching_anchor = False\n for anchor in glyph.anchors:\n name = anchor.name\n if name and not name.startswith(\"_\"):\n has_attaching_anchor = True\n\n # First check glyph.lib for category/subCategory overrides. Otherwise,\n # use global values from GlyphData.\n glyphinfo = glyphdata.get_glyph(\n glyph_name, unicodes=[f\"{c:04X}\" for c in glyph.unicodes]\n )\n category = glyph.lib.get(category_key) or glyphinfo.category\n subCategory = glyph.lib.get(subCategory_key) or glyphinfo.subCategory\n\n if subCategory == \"Ligature\" and has_attaching_anchor:\n categories[glyph_name] = \"ligature\"\n elif category == \"Mark\" and (\n subCategory == \"Nonspacing\" or subCategory == \"Spacing Combining\"\n ):\n categories[glyph_name] = \"mark\"\n elif has_attaching_anchor:\n categories[glyph_name] = \"base\"\n\n return categories",
"def crime_category_breakdown():\n db_request = main_db_call()\n all_crimes = [item[0] for item in db_request]\n sub_offense = offense_counter(all_crimes)\n sub_pie = color_applicator(sub_offense)\n sub_dict = {}\n for i, thing in enumerate(sub_pie):\n for key, category in UPPER_DICT.items():\n if sub_pie[i][0] in category:\n sub_dict.setdefault(key, [])\n sub_dict[key].append(sub_pie[i])\n return sub_dict",
"def get_nuclei_in_range(zmin, zmax, amin, amax):\n\n nuc_list = []\n assert zmax >= zmin, \"zmax must be >= zmin\"\n assert amax >= amin, \"amax must be >= amin\"\n\n for z in range(zmin, zmax+1):\n element = PeriodicTable.lookup_Z(z)\n for a in range(amin, amax+1):\n name = f\"{element.abbreviation}{a}\"\n nuc_list.append(Nucleus(name))\n\n return nuc_list",
"def _build_unicode_property_table(unicode_range):\n table = {}\n p = None\n for i in range(*unicode_range):\n try:\n c = uchr(i)\n p = unicodedata.category(c)\n except:\n continue\n if p[0] not in table:\n table[p[0]] = {}\n if p[1] not in table[p[0]]:\n table[p[0]][p[1]] = []\n table[p[0]][p[1]].append(c)\n\n # Join as one string\n for k1, v1 in table.items():\n for k2, v2 in v1.items():\n v1[k2] = ''.join(v2)\n\n return table",
"def __get_cat_levels(self,data):\n levels = {}\n\n for v in self.categorical:\n ds = data[v].astype('category')\n levels[v] = ds[ds.notnull()].unique().categories.sort_values()\n\n return levels",
"def _convert_to_cmap_props(glyphs):\n return {\n ord(_g.char): _name\n for _name, _g in glyphs.items()\n # .notdef should not be mapped in cmap\n if _g.char and _name != '.notdef'\n }",
"def world_cups():\n return [(\"Germany\", 2006, \"Italy\"), (\"South-Africa\", 2010, \"Spain\"), (\"Brazil\", 2014, \"Germany\")]",
"def test_code_to_category_basic_worklist(self):\n # Basic Worklist Management, PS3.4 Annex K\n c2c = code_to_category\n\n assert c2c(0x0000) == \"Success\"\n for code in [0xA700, 0xA900]:\n assert c2c(code) == \"Failure\"\n for code in range(0xC000, 0xD000):\n assert c2c(code) == \"Failure\"\n assert c2c(0xFE00) == \"Cancel\"\n assert c2c(0xB000) == \"Warning\"\n for code in [0xFF00, 0xFF01]:\n assert c2c(code) == \"Pending\"",
"def _categoryMap (self):\n return self.__categoryMap",
"def get_categories():\n # feel free to modify this as you like. just make sure that\n # the category is a valid Yelp category:\n # https://blog.yelp.com/businesses/yelp_category_list/#section21\n categories = [\n 'mexican', 'chinese', 'pizza', 'italian', 'thai', 'japanese',\n 'vietnamese', 'asianfusion', 'ethiopian', 'korean', 'indpak',\n 'mideastern', 'tapas', 'pakistani', 'brazilian', 'filipino',\n 'african', 'greek', 'coffee', 'dessert'\n ]\n categories.sort()\n return categories",
"def characters(whitelist_categories=None, blacklist_categories=None,\n blacklist_characters=None, min_codepoint=None,\n max_codepoint=None):\n if (\n min_codepoint is not None and max_codepoint is not None and\n min_codepoint > max_codepoint\n ):\n raise InvalidArgument(\n 'Cannot have min_codepoint=%d > max_codepoint=%d ' % (\n min_codepoint, max_codepoint\n )\n )\n\n from hypothesis.searchstrategy.strings import OneCharStringStrategy\n return OneCharStringStrategy(whitelist_categories=whitelist_categories,\n blacklist_categories=blacklist_categories,\n blacklist_characters=blacklist_characters,\n min_codepoint=min_codepoint,\n max_codepoint=max_codepoint)",
"def utility_characterization_factors(self) -> dict[tuple[str, str], tuple[float, AbsoluteUnitsOfMeasure]]:\n return bst.HeatUtility.characterization_factors",
"def test_code_to_category_substance_admin(self):\n # Substance Administration Query, PS3.4 Annex V\n c2c = code_to_category\n\n assert c2c(0x0000) == \"Success\"\n for code in [0xA700, 0xA900]:\n assert c2c(code) == \"Failure\"\n for code in range(0xC000, 0xD000):\n assert c2c(code) == \"Failure\"\n assert c2c(0xFE00) == \"Cancel\"\n assert c2c(0xB000) == \"Warning\"\n for code in [0xFF00, 0xFF01]:\n assert c2c(code) == \"Pending\"",
"def _discretize_feature(\n feature: np.ndarray,\n category_map: List[Tuple[float, str]]\n):\n cat_feat = np.zeros(feature.shape[0]).astype(str)\n for lower_bound, category in category_map:\n cat_feat[feature >= lower_bound] = category\n return cat_feat",
"def gen_kmers(kmin, kmax, alphabet=\"ACGT\"):\r\n\r\n for n in xrange(kmin, kmax + 1):\r\n kmer_lis = [''.join(mer) for mer in prod(alphabet, repeat=n)]\r\n\r\n return kmer_lis"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
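For context, a minimal usage sketch of `unicode_charlists` from the row above. It assumes the Python 3 variant listed first among the negatives (chr/range instead of unichr/xrange) so that it runs on a current interpreter; the category codes and the `cp_min` value below are illustrative choices, not values from the row.

import sys
import unicodedata

def unicode_charlists(categories, cp_min=0, cp_max=None):
    # Python 3 port of the documented function (see the first negative above).
    if cp_max is None:
        cp_max = max(x for x in range(sys.maxunicode + 1)
                     if unicodedata.category(chr(x)) in categories)
    charlists = {}
    for cat in categories:
        charlists[cat] = [chr(x) for x in range(cp_min, cp_max + 1)
                          if unicodedata.category(chr(x)) == cat]
    return charlists

# 'Ps' = open punctuation, 'Pe' = close punctuation; cp_min=160 skips ASCII.
samples = unicode_charlists(['Ps', 'Pe'], cp_min=160)
print(len(samples['Ps']), len(samples['Pe']))  # counts depend on the Unicode version
print(samples['Ps'][:3])                       # first few opening punctuation characters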
Docutils punctuation category sample strings. Return list of sample strings for the categories "Open", "Close", "Delimiters" and "ClosingDelimiters" used in the `inline markup recognition rules`_. | def punctuation_samples():
# Lists with characters in Unicode punctuation character categories
cp_min = 160 # ASCII chars have special rules for backwards compatibility
ucharlists = unicode_charlists(unicode_punctuation_categories, cp_min)
# match opening/closing characters
# --------------------------------
# Rearange the lists to ensure matching characters at the same
# index position.
# low quotation marks are also used as closers (e.g. in Greek)
# move them to category Pi:
ucharlists['Ps'].remove(u'‚') # 201A SINGLE LOW-9 QUOTATION MARK
ucharlists['Ps'].remove(u'„') # 201E DOUBLE LOW-9 QUOTATION MARK
ucharlists['Pi'] += [u'‚', u'„']
ucharlists['Pi'].remove(u'‛') # 201B SINGLE HIGH-REVERSED-9 QUOTATION MARK
ucharlists['Pi'].remove(u'‟') # 201F DOUBLE HIGH-REVERSED-9 QUOTATION MARK
ucharlists['Pf'] += [u'‛', u'‟']
# 301F LOW DOUBLE PRIME QUOTATION MARK misses the opening pendant:
ucharlists['Ps'].insert(ucharlists['Pe'].index(u'\u301f'), u'\u301d')
# print u''.join(ucharlists['Ps']).encode('utf8')
# print u''.join(ucharlists['Pe']).encode('utf8')
# print u''.join(ucharlists['Pi']).encode('utf8')
# print u''.join(ucharlists['Pf']).encode('utf8')
# The Docutils character categories
# ---------------------------------
#
# The categorization of ASCII chars is non-standard to reduce both
# false positives and need for escaping. (see `inline markup recognition
# rules`_)
# matching, allowed before markup
openers = [re.escape('"\'(<[{')]
for cat in ('Ps', 'Pi', 'Pf'):
openers.extend(ucharlists[cat])
# matching, allowed after markup
closers = [re.escape('"\')>]}')]
for cat in ('Pe', 'Pf', 'Pi'):
closers.extend(ucharlists[cat])
# non-matching, allowed on both sides
delimiters = [re.escape('-/:')]
for cat in ('Pd', 'Po'):
delimiters.extend(ucharlists[cat])
# non-matching, after markup
closing_delimiters = [re.escape('.,;!?')]
# # Test open/close matching:
# for i in range(min(len(openers),len(closers))):
# print '%4d %s %s' % (i, openers[i].encode('utf8'),
# closers[i].encode('utf8'))
return [u''.join(chars)
for chars in (openers, closers, delimiters, closing_delimiters)] | [
"def _run_split_on_punctuation(self, text):\n\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if self._is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]",
"def test_all_punctuation_effectively_removed(self):\n text1 = 'THe, quick; (br-own): fox! \"jumped\" <> - -- [over] the\\ /lazy dog.'\n text2 = \"He shouted, 'Hello!'\"\n textlist = text_to_list(text1) + text_to_list(text2)\n for char in '-.,\\;\":_*!\\n' or \"'[]()/\":\n if char in textlist:\n return False",
"def filter_words_by_punctuation(self):\n list_ = []\n for word in self.words:\n if \"'\" in word:\n list_.append(word)\n return list_",
"def create_punctC_representation(lyrics): # based on 'Bleaching text: Abstract features for cross-lingual gender prediction', van der Goot et al. 2018\n\t\n\tpunctC_repr = \"\"\n\tfor sentence in lyrics.split('\\n'):\n\t\tsentence_repr = ''\n\t\tfor word in sentence.split():\n\t\t\tpunctC = \"\"\n\t\t\tfor char in word:\n\t\t\t\tif char not in string.punctuation:\n\t\t\t\t\tpunctC += 'W'\n\t\t\t\telse:\n\t\t\t\t\tpunctC += char\n\t\t\tpunctC = re.sub(\"W+\", \"W\", punctC) + ' '\n\t\t\tsentence_repr += punctC\n\t\tpunctC_repr += sentence_repr.rstrip() + '\\n'\n\t\t\n\treturn punctC_repr.rstrip()",
"def get_punctuation_frequency(self):\r\n \r\n punctuations = list(string.punctuation)\r\n #Dataframe to store the punctuation occurences only\r\n punc_occ = pd.DataFrame(columns=['punctuation', 'occurence'])\r\n #Loop for identifying rows with punctuations in the instance variable\r\n for i, row in self.char_occ.iterrows():\r\n if row['character'] in punctuations:\r\n temp_df = pd.DataFrame([[row['character'],row['occurence']]], \r\n columns=['punctuation', 'occurence'])\r\n punc_occ = punc_occ.append(temp_df, ignore_index=True)\r\n \r\n return punc_occ",
"def apply_nlp(category):\n if \" \" in category:\n if \" for \" in category:\n idx = category.find(\" for \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 5 :])\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"(\" in category:\n start = category.find(\"(\")\n end = category.find(\")\")\n outer = strip_article(category[:start] + \" \" + category[end + 1 :])\n inner = strip_article(category[start + 1 : end])\n return [outer, inner, *apply_nlp(outer), *apply_nlp(inner)]\n elif \" with \" in category:\n idx = category.find(\" with \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 6 :])\n return [prefix, suffix, *apply_nlp(prefix), *apply_nlp(suffix)]\n elif \" of \" in category:\n idx = category.find(\" of \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \" from \" in category:\n idx = category.find(\" from \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"&\" in category:\n categories = [strip_article(c) for c in category.split(\"&\")]\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" and \" in category or \",\" in category:\n categories = []\n while \" and \" in category or \",\" in category:\n and_idx = category.find(\" and \")\n comma_idx = category.find(\",\")\n if and_idx >= 0 and comma_idx >= 0:\n idx = min(and_idx, comma_idx)\n elif and_idx >= 0:\n idx = and_idx\n elif comma_idx >= 0:\n idx = comma_idx\n else:\n idx = -1\n if idx >= 0:\n categories.append(strip_article(category[:idx]))\n if category[idx] == \",\":\n category = category[idx + 1 :]\n else:\n category = category[idx + 5 :]\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category.strip().strip(\"()[]\")))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" or \" in category:\n categories = []\n while \" or \" in category:\n idx = category.find(\" or \")\n if idx >= 0:\n categories.append(strip_article(category[:idx]))\n category = category[idx + 4 :].strip().strip(\"()[]\")\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n else:\n categories = category.split()\n return [\" \".join(categories[-idx:]) for idx in range(len(categories) - 1, 0, -1)]\n else:\n return []",
"def get_punctuation_tokens(tokens):\n punct_tokens = []\n punct_tokens = punct_tokens + [term for term in tokens\n if term in string.punctuation]\n return punct_tokens",
"def categories(self, word):\n ...",
"def StopW_Punct():\r\n punctList = [\"!\",'\"',\"#\",\"$\",\"%\",\"&\",\"'\",\"(\",\")\",\"*\",\"+\",\",\",\"-\",\".\",\"/\",\":\",\";\",\"<\",\"=\",\">\",\"?\",\"@\",\"[\",\"{\",\"|\",\"}\",\"~\",\"^\",\"_\",\"]\",\"`\"]\r\n return punctList",
"def _split_on_punctuation(self, tokens: List[str]) -> List[str]:\n punctuation = []\n punctuation.extend(self.terminators)\n if self.allow_redirection:\n punctuation.extend(constants.REDIRECTION_CHARS)\n\n punctuated_tokens = []\n\n for cur_initial_token in tokens:\n\n # Save tokens up to 1 character in length or quoted tokens. No need to parse these.\n if len(cur_initial_token) <= 1 or cur_initial_token[0] in constants.QUOTES:\n punctuated_tokens.append(cur_initial_token)\n continue\n\n # Iterate over each character in this token\n cur_index = 0\n cur_char = cur_initial_token[cur_index]\n\n # Keep track of the token we are building\n new_token = ''\n\n while True:\n if cur_char not in punctuation:\n\n # Keep appending to new_token until we hit a punctuation char\n while cur_char not in punctuation:\n new_token += cur_char\n cur_index += 1\n if cur_index < len(cur_initial_token):\n cur_char = cur_initial_token[cur_index]\n else:\n break\n\n else:\n cur_punc = cur_char\n\n # Keep appending to new_token until we hit something other than cur_punc\n while cur_char == cur_punc:\n new_token += cur_char\n cur_index += 1\n if cur_index < len(cur_initial_token):\n cur_char = cur_initial_token[cur_index]\n else:\n break\n\n # Save the new token\n punctuated_tokens.append(new_token)\n new_token = ''\n\n # Check if we've viewed all characters\n if cur_index >= len(cur_initial_token):\n break\n\n return punctuated_tokens",
"def print_categories():\n category_list = ['60fps', 'amateur', 'anal', 'arab', 'asian', 'bbw(big busty women)', 'babe', 'babysitter',\n 'btscenes(behind the scenes)',\n 'bigass', 'bigdick', 'titslg(big tits)', 'bimale', 'blonde', 'bj(blowjob)', 'bondage', 'brazilian',\n 'british', 'brunette',\n 'bukkake', 'cartoon', 'casting', 'celeb', 'cc', 'college', 'comp(compilation)', 'cosplay',\n 'creampie', 'cuckold',\n 'cumshot', 'czech', 'described', 'dp', 'ebony', 'euro', 'exclusive', 'feet',\n 'femaleorgy(female orgasm)',\n 'fetish', 'fisting', 'french', 'funny', 'gangbang', 'gay', 'german', 'hd', 'handjob', 'hardcore',\n 'hentai',\n 'indian', 'interactive', 'interracial', 'italian', 'japanese', 'korean', 'latina', 'lesbian',\n 'milf', 'massage',\n 'masturbate', 'mature', 'musclemen', 'music', 'oldyoung', 'orgy', 'pov', 'parody', 'party', 'piss',\n 'popww(popular with women)', 'pornstar', 'public', 'pussylick', 'reality', 'redhead',\n 'rp(roleplay)',\n 'romantic', 'rough', 'russian', 'sfw(safe for work)', 'school', 'titssm(small tits)', 'smoking', 'solofemale',\n 'solomale',\n 'squirt', 'step(step fantasy)', 'strip(striptease)', 'tatwomen(tatooed women)', 'teen', '3some',\n 'toys',\n 'tmale(transmale)', 'twgirl(trans with girl)', 'twguy(trans with guy)', 'trans(transgender)',\n 'veramateurs(verified amateurs)', 'vercouples(verified couples)', 'vermodels(verified models)',\n 'vintage', 'vr(virtual reality)', 'webcam']\n print(category_list)",
"def sentence_punctuation():\n check50.run(\"python3 readability.py\").stdin(\"Congratulations! Today is your day. You're off to Great Places! You're off and away!\").stdout(\"Grade\\D+3\", \"Grade 3\\n\").exit(0)",
"def get_sample_categories(self):\n # TODO: cache these results since they change very rarely\n result = self.get(cc_urls['sample_categories'])\n return result['sample_categories']",
"def segment_by_punctuation(text: str):\n\treturn nltk.sent_tokenize(text)",
"def _split_punctuation(self, word):\n\n opening_puncts = []\n closing_puncts = []\n core_token = word\n\n (off1, off2, tok) = word\n\n while True:\n if not tok:\n break\n found_punc = punctuation_pattern.search(tok[0])\n if found_punc:\n opening_puncts.append((off1, off1 + 1, tok[0]))\n core_token = (off1 + 1, off2, tok[1:])\n off1 += 1\n tok = tok[1:]\n else:\n break\n \n while True:\n if not tok:\n break\n found_punc = punctuation_pattern.search(tok[-1])\n if found_punc:\n closing_puncts.append((off2 - 1, off2, tok[-1]))\n core_token = (off1, off2 - 1, tok[:-1])\n off2 += -1\n tok = tok[:-1]\n else:\n break\n\n # need to reverse because the closing punctuations were added from the\n # end\n closing_puncts.reverse()\n return opening_puncts, core_token, closing_puncts",
"def buildCategoryKeywords(categoryname):\n catKeywords = []\n catKeywords2 =[]\n \n catKeywords = re.findall('[A-Z]+[^A-Z ]*', categoryname)\n\n for word in catKeywords:\n noSpaceWord = word.replace(\" \", \"\")\n catKeywords2.append(noSpaceWord)\n \n return catKeywords2",
"def test_category_format_Category(self):\n data = [pywikibot.Category(self.site, 'Cat1'),\n pywikibot.Category(self.site, 'Cat2')]\n self.assertEqual(self.catresult,\n textlib.categoryFormat(data, self.site))",
"def test_category_format_bare(self):\n self.assertEqual(self.catresult,\n textlib.categoryFormat(['Cat1', 'Cat2'], self.site))",
"def test_valid_punctuation():\n assert rw('What did they say? Say what again!') == 'say'\n assert rw('I am... that am!') == 'am'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
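The punctuation_samples row above returns four sample strings (openers, closers, delimiters, closing delimiters). As a rough, hypothetical sketch of how such strings get consumed, the snippet below drops them into character classes for "what may precede a start-string" and "what may follow an end-string". Only the ASCII cores are used here as placeholders; the real return values also contain the Unicode Ps/Pi/Pf/Pe/Pd/Po characters collected above, and the actual inline markup recognition rules are more involved than this.

import re

# Placeholder values: the ASCII cores that punctuation_samples() starts from.
openers, closers = '"\'(<[{', '"\')>]}'
delimiters, closing_delimiters = '-/:', '.,;!?'

allowed_before = re.compile('[\\s%s%s]' % (re.escape(openers), re.escape(delimiters)))
allowed_after = re.compile('[\\s%s%s%s]' % (re.escape(closers),
                                            re.escape(delimiters),
                                            re.escape(closing_delimiters)))

print(bool(allowed_before.match('(')))  # True: '(' may precede inline markup
print(bool(allowed_after.match('!')))   # True: '!' may follow it
print(bool(allowed_before.match('a')))  # False: a letter may not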
Append the separator for table head. | def append_separator(self, separator):
self._rows.append([separator]) | [
"def add_divider(self):\n self.page += '<hr style=\"clear:both;\"/>\\n'",
"def set_separator(self) -> None:\n self.separator = len(self.lines)",
"def __writeSeparator(self, indent):\n self.__dev.write(\" \" * indent)\n self.__dev.write(\"<HR>\\n\")",
"def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCols + \"|}\\n\"",
"def print_row_separator(columns):\n print(\"--+\", end=\"\")\n for i in range(1, columns - 1):\n print(\"---+\", end=\"\")\n print(\"--\")",
"def getSeparator(self) -> str:\n ...",
"def test_custom_hor_split(self):\n tab = tabl.Tabl()\n tab.set_hor('~')\n string = tab.to_table([['a']])\n self.assertEqual('+~+\\n' + \\\n '|a|\\n' + \\\n '+~+\\n', string)",
"def InsertCellSeparator(self):\n #dlg = wx.TextEntryDialog(self, 'Enter code cell separator label text:',\n # 'Insert code cell separator', '')\n dlg = CellDialog(self, -1)\n\n if dlg.ShowModal() == wx.ID_OK:\n label = dlg.GetValue()\n\n #If not at the start of a line add \\n\n pos = self.GetCurrentPos()\n indent = self.GetColumn(pos)\n if indent!=0:\n self.InsertText(pos,'\\n')\n self.SetCurrentPos(pos+1)\n\n #add the separator\n pos = self.GetCurrentPos()\n line = self.LineFromPosition(pos)\n pos = self.PositionFromLine(line)\n self.InsertText(pos,label)\n\n #move to end of separator\n self.SetCurrentPos(pos+len(label))\n self.SetAnchor(pos+len(label))\n\n dlg.Destroy()",
"def _render_column_separator(self, type: int = BORDER_OUTSIDE) -> str:\n borders = self._style.border_chars\n\n return self._style.border_format.format(\n borders[1] if type == self.BORDER_OUTSIDE else borders[3]\n )",
"def append_header(self):\r\n # NOTE before everything\r\n # .TH title_upper section date source manual\r\n if self.header_written:\r\n return\r\n self.head.append(self.header())\r\n self.head.append(MACRO_DEF)\r\n self.header_written = 1",
"def _generate_top_border(self):\n self._generate_separator(left_char='╭', right_char='╮')",
"def add_menu_separator(self):\n if self._menu is None:\n self._create_menu()\n self._menu.addSeparator()",
"def _generate_header(self):\n margin_str = ' ' * self.column_margin\n top = '┌'\n headings = '│'\n heading_sep = '╞'\n row_sep = '├'\n self._bottom = '└'\n for i, col in enumerate(self.columns, start=1):\n top += ('─' * (col.width + 2 * self.column_margin)\n + ('┐' if i == len(self.columns) else '┬'))\n headings += margin_str + col.get_header_cell() + margin_str + '│'\n heading_sep += ('═' * (col.width + 2 * self.column_margin)\n + ('╡' if i == len(self.columns) else '╪'))\n row_sep += ('─' * (col.width + 2 * self.column_margin)\n + ('┤' if i == len(self.columns) else '┼'))\n self._bottom += ('─' * (col.width + 2 * self.column_margin)\n + ('┘' if i == len(self.columns) else '┴'))\n if self.title:\n self._text_lines.append(self.title)\n self._text_lines.append(top)\n if self.include_headings:\n self._text_lines.append(headings)\n self._text_lines.append(heading_sep)\n self._row_separator = row_sep if self.use_row_separators else None",
"def addSeparator(self, *args) -> \"adsk::core::Ptr< adsk::core::ListItem >\" :\n return _core.ListItems_addSeparator(self, *args)",
"def getSeparator(self):\r\n return '/'",
"def separator(self, menu):\n return menu.AppendSeparator()",
"def getSeparator(self):\n return '/'",
"def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header",
"def print_headers():\n print(\"symbol\\t count\\t price\\t\\t total\")\n print(\"-\" * 71)",
"def get_divider(source, tbl_filt_label, tbl_filt, *, page_break_before=False):\r\n debug = False\r\n filt_msg = lib.FiltLib.get_filt_msg(tbl_filt_label, tbl_filt)\r\n pagebreak = ' page-break-before: always ' if page_break_before else ''\r\n div = (f'\\n<br><br>\\n<hr style=\"clear: both; {pagebreak}\">\\n{source}'\r\n f'\\n<p>{filt_msg}</p>\\n{mg.VISUAL_DIVIDER_BEFORE_THIS}')\r\n if debug: print(div)\r\n return div"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return commented version of the passed text. | def comment(self, text):
return self.comment_begin(text)+'.\n' | [
"def get_comment_text():\n first = comment_start + len(lang.comment_start)\n return line[first:]",
"def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin metusd metusdo quamnunc utcras facilis nequen.\",\n \"Adipisci ent neque eger vehicula dis. Miquis auctorpr quamphas purusp phasel duifusce parturi. Ris liberoa ligula lacini risus nean. Arcualiq cubilia aenean nuncnunc ulum fringi uisque abitur rerit setiam. Nean miproin aliquet risusvi tempusp aliquete. Integer nequenu bulum ibulum laoree accumsan ellus mus odio uis. Amet curae ivamus congue aliquama liberofu que.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In justov volutpat mus habitas dapibusc nequenu volutp justo. Quam blandi tur maurisd egesta erossed morbi turpis risus tate. Lacusp facilis class vehicula varius iaculis setiam montes pharetra. Usce ecenas quispr naeos nec nibhphas lacinia roin. Abitur maurisma metusqui justop uscras llam enas. Magnaqu faucibus sduis arcualiq imperd teger egetlor teger.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Conseq tristiq enas duis sociosqu eduis enimsed tudin vel. Lus semnunc risusm nulla parturi atein at placerat. Tiam laut nibhnul turpisn vitaenul eleifen commodo euismo quat posuered. Egestas nullain justop maurisin purusp donec nas liberofu aptent. Nec aliquam tiam puruscra turpisp luctus proin. Lectusin turpisn usce orcivest nullam eget arcuduis tdonec min. Esent cursus vulput aenean bulum lacini congued pretiu. Portamor bulum tate isse llam cidunt estmae.\\n\\nSque leocras fusce nullap fusce convall laoreet nibhnull estsusp. Roin aliquet esent ctetur blandit etiam nequesed viverr. Nislqu sse orciduis lacusp in tasse gravida lla ullam. Itnunc id mauris rerit entum disse lacinia. Oin luctus velit musetiam onec potenti ipsump volutp. Tortor musetiam bibendum onec esent libero esque sim. Enas ras eclass placerat sedin risusut vulput enimdon montes. Rhoncus dolorma estsusp facilis etsed llaut esque cursus. Nisl ullamcor tincid llus nulla iaculis.\",\n ]\n return random.choice(comment_samples)",
"def comment(s):\n return '\\n'.join('// ' + line if line else '' for line in s.split('\\n'))",
"def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()",
"def comment(self, text):\n \n self.stream.write(\"# {}\\n\".format(text))",
"def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)",
"def get_comment(self):\n return str(self.gui.txt_comment.text())",
"def commentify(lang):\n plaintext = pyperclip.paste().split('\\n')\n\n if lang == 'python':\n comment = ['###\\n']\n char = ' # '\n end = '###\\n'\n\n else:\n comment = ['/*\\n']\n char = ' * '\n end = '*/\\n'\n\n for line in plaintext:\n comment.append(char + line + '\\n')\n\n comment.append(end)\n return ''.join(comment)",
"def _comment_format(self, path):\n _, extension = os.path.splitext(path)\n return '# {}\\n' if extension == '.py' else '<!-- {} -->'",
"def extract_comments(self, sid, text):\n pass",
"def remove_comment_lines_in_str(text_data):\n try:\n from StringIO import StringIO # python 2\n except ImportError:\n from io import StringIO # python 3\n\n newData = ''\n\n for line in StringIO(text_data).readlines():\n # rstrip() will keep the _indent but remove all white spaces including '\\n'\n stripped_line = line.strip()\n line = line.rstrip()\n # The Shebang line should survive. shouldn't she?\n if stripped_line.startswith(('#!', '# -*-')):\n newData += line + '\\n'\n # user wants to leave a comment\n elif stripped_line.startswith(('##', '!!')):\n newData += line.replace(stripped_line[0:2], stripped_line[:1], 1) + '\\n'\n # Also keep existing empty lines\n elif not stripped_line:\n newData += line + '\\n'\n # But remove lines that only contains comments\n elif stripped_line.startswith(('#', '!', 'REM')):\n pass\n else:\n # the comments after the code will remain.\n newData += line + '\\n'\n\n return newData",
"def add_escapement_back_for_not_comments(text):\n return text.replace(COMMENT_MARKER, ESCAPE_SYM+COMMENT_MARKER)",
"def ps1_comment(line: str) -> str:\n return f\"# {line}\"",
"def to_comment(comment):\n return '#' + re.sub(r'[^\\x00-\\xFF]', _esc,\n re.sub(r'\\n(?![#!])', '\\n#',\n re.sub(r'\\r\\n?', '\\n', comment)))",
"def format_comment(listed_game):\n\n comment = listed_game.comment\n\n if comment:\n return comment\n else:\n return \"\"",
"def remove_comments(tex):\n return re.sub(r'%(.+)\\n', r'', tex)",
"def _commentWrap(self, message, text, length):\n\n comment = message + \" \" + text\n\n return self._wrapText(comment, 0, \"* \", 70)",
"def commentLine(self, line):\n # if the line is non-empty\n if line:\n # mark it\n return self.leader + self.startBlock + ' ' + line + ' ' + self.endBlock\n # otherwise, just return the comment characters\n return line",
"def cmd_comment(line: str) -> str:\n return f\"@REM {line}\"",
"def rem_comment(line):\n return line.split(\"#\", 1)[0].rstrip()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure the last line in body is terminated by a newline. | def ensure_eol(self):
if len(self.body) > 0 and self.body[-1][-1] != '\n':
self.body.append('\n') | [
"def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'",
"def ensure_newline(self, n):\n assert n >= 0\n text = self._output.getvalue().rstrip('\\n')\n if not text:\n return\n self._output = StringIO()\n self._output.write(text)\n self._output.write('\\n' * n)\n text = self._output.getvalue()\n assert text[-n-1] != '\\n'\n assert text[-n:] == '\\n' * n",
"def end(self):\n line = '%s End' % self.tstamp\n if line not in self.lines:\n self.blank()\n self.lines.append(line)",
"def rfc6376_simple_body(body: bytes) -> bytes:\n # In DKIM simple body, an empty body becomes CRLF\n body = body or b\"\\r\\n\"\n while body.endswith(b\"\\r\\n\\r\\n\"):\n body = body[:-2]\n return body",
"def rfc5322_endings(data: bytes) -> bytes:\n # v\n # [^\\r]\\n -> [^\\r]\\r\\n\n # v\n # \\r[^\\n] -> \\r\\n[^\\n]\n # v\n # \\r$ -> \\r\\n$\n CR: int = 0x0D\n LF: int = 0x0A\n this: int\n prev: Optional[int] = None\n output: bytearray = bytearray()\n for this in data:\n if (this == LF) and (prev != CR):\n output.extend(b\"\\r\\n\")\n elif (prev == CR) and (this != LF):\n output.extend(b\"\\n\")\n output.append(this)\n else:\n output.append(this)\n prev = this\n if prev == CR:\n output.append(LF)\n return bytes(output)",
"def test_ends_newline(self):\r\n text = 'A line\\nAnother line\\nAnd a final one.\\n'\r\n expected_res = text.split('\\n')\r\n for res, expected in zip(split_by_newline(text), expected_res):\r\n self.assertEqual(res[1], expected)",
"def add_newline(self):\n if len(self.gem) == 0 or self.gem[-1] == '\\n':\n return\n self.gem += \"\\n\"",
"def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line",
"def handle_newline(self, token_type: int) -> None:\n assert self.processor is not None\n if token_type == tokenize.NEWLINE:\n self.run_logical_checks()\n self.processor.reset_blank_before()\n elif len(self.processor.tokens) == 1:\n # The physical line contains only this token.\n self.processor.visited_new_blank_line()\n self.processor.delete_first_token()\n else:\n self.run_logical_checks()",
"def append_newline(string):\n if len(string) > 0 and string[-1] != '\\n':\n string += \"\\n\"\n return string",
"def test_alternate_eol(self):\n if hasattr(self.s, 'xreadlines'): # test if it is our FileLike base class\n self.s.write(serial.to_bytes(\"no\\rno\\nyes\\r\\n\"))\n self.assertEqual(\n self.s.readline(eol=serial.to_bytes(\"\\r\\n\")),\n serial.to_bytes(\"no\\rno\\nyes\\r\\n\"))",
"def line_feed(self):\n self._stream.write(self._line_separator)\n self._is_new_line = True\n return self",
"def __get_line_ending(self, file_content):\r\n\r\n ending = LINE_ENDINGS.search(file_content)\r\n return \"\\r\" if ending is not None and ending.group(2) else \"\\n\"",
"def end(self):\n while self.position < len(self.document.characters) and self.document.characters[self.position] != '\\n':\n self.position += 1",
"def _end_of(self, output, lines):\n return '\\n'.join(output.split('\\n')[-lines:])",
"def _fixupEOL(self, doc):\n eolPref = self._globalPrefsvc.prefs.getStringPref(\"endOfLine\")\n try:\n eol = eollib.eolPref2eol[eolPref]\n except KeyError:\n # Be paranoid: stay with system default if pref value is bogus.\n log.exception(\"unexpected 'endOfLine' pref value: %r\", eolPref)\n eol = eollib.EOL_PLATFORM\n doc.existing_line_endings = eol\n doc.new_line_endings = eol\n doc.isDirty = 0",
"def testReadlinesWithFileWithoutNewLineAtEnd(self):\n test_file_path = self._GetTestFilePath(['bodyfile'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_path_spec = os_path_spec.OSPathSpec(location=test_file_path)\n file_object = path_spec_resolver.Resolver.OpenFileObject(\n test_path_spec, resolver_context=self._resolver_context)\n\n line_reader = line_reader_file.BinaryLineReader(file_object)\n\n lines = line_reader.readlines()\n\n self.assertEqual(len(lines), 25)",
"def idempotent_append_newline(string):\n if type(string) is not str:\n raise TypeError\n if string.endswith('\\n'):\n return string\n else:\n return string + '\\n'",
"def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
append header with .TH and .SH NAME | def append_header(self):
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.head.append(self.header())
self.head.append(MACRO_DEF)
self.header_written = 1 | [
"def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n file_sam = open(os.path.join(args.output_dir,'watsonAligned.out.sam'))\n print(file_sam)\n for line in file_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args",
"def _create_header(self):\r\n t = time.localtime() # get current time\r\n time_string = \"%d/%d/%d %d:%d:%d\" % (t.tm_mday,\r\n t.tm_mon,\r\n t.tm_year,\r\n t.tm_hour,\r\n t.tm_min,\r\n t.tm_sec)\r\n self.header_lines = \"\"\"#Filename = \"\"\" + self.filename + \"\"\" \r\n#Date Saved = \"\"\" + time_string + \"\"\"\r\nFileType = 111\r\nVersion = 7.11\r\n\r\n\"\"\"",
"def add_header(self, *args, **kwargs):\r\n self.header = True\r\n self.add_row(ypos=0, *args, **kwargs)",
"def add_header(self, drawing, header_type, value):\n drawing.header[header_type] = value",
"def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)",
"def real_header(self):\n\n header = \"n,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_usze_G,th_usze_G,l2_ufsze_G,th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)",
"def createHeading(self):\n\t\tfieldNames = ['Year','Month','State','District']\n\t\tfor i in range(1,43):\n\t\t\tfieldNames.append('col '+str(i))\n\t\twith open(self.filepath, 'w') as PMGSYFile:\n\t\t\tcsvWriter = csv.writer(PMGSYFile)\n\t\t\tcsvWriter.writerow(fieldNames)\n\t\tPMGSYFile.close()",
"def add_headers(sheet):\n row = ['URL', 'Title Text', 'Title Length', 'Bad Anchor Text', 'No Alt Text', 'H1 Tags', \"Reading Score\"]\n title_list = sheet.range('A1:G1')\n for n in range(len(row)):\n title_list[n].value = row[n]\n\n sheet.update_cells(title_list)",
"def tvp_writeheader( self ):\n dsw = self.dsw\n\n if self.verbosity and self.verbosity is not None:\n print (\"tvp_writeheader:header_name=%s, fieldnames='%s'\"\n % (dsw.header_name, repr(self.fieldnames)))\n # fieldnames is authoritative ordered list of colnames\n # because in future od_column_type will not be required to have\n # all columns\n with open(dsw.header_name, 'wb') as fh:\n for colname in self.fieldnames:\n # add logic to allow missing entry and use a default spec/type\n coltype = dsw.od_column_type[colname]\n line = \"%s\\t%s\\n\" % (colname, coltype)\n fh.write(line)",
"def writeRowHeaders(self):\n titles = [\"Rule Name\", \"NAT Type\", \"Src Zone\", \"Dst Zone\", \"Dst Interface\", \"Orig Src Address\", \"Orig Dst Address\", \"Service\", \"Src Translation\", \"Dst Translation\", \"Description\", \"Disabled\"]\n i = 0\n for title in titles:\n worksheet.write(0, i, title, bold)\n i += 1",
"def draw_header(self, stream, header):\n stream.writeln(header)\n stream.writeln('~' * len(header))\n stream.writeln()",
"def hvp_writeheader( self ):\n dsw = self.dsw\n columns_line = dsw.delimiter.join(self.fieldnames)\n if self.verbosity and self.verbosity is not None:\n print (\"hvp_writeheader:header_name=%s, Columns='%s'\"\n % (dsw.header_name,repr(columns_line)))\n\n with open(self.dsw.header_name, 'wb') as fh:\n fh.write(columns_line)",
"def append_header_row(self):\n if not self._exists:\n raise AssertionError(\"The data file has not been created. Use the 'Create Data File' keyword to create it.\")\n else:\n headers = self.row_definition.create_header_row()\n self._append(headers)",
"def add_to_header(self, header_f, outfilename, line_ann):\n\n of = open(outfilename, 'w')\n\n # getting the type of line that is being passed\n p = re.compile(\"^##(\\w+)=\")\n\n m1 = p.match(line_ann)\n type_ann = m1.group(1)\n\n line_seen = False\n with open(header_f) as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n m2 = p.match(line)\n if m2 is None:\n of.write(line+\"\\n\")\n continue\n type1_ann = m2.group(1)\n if type_ann == type1_ann and line_seen is False:\n line_seen = True\n of.write(line+\"\\n\"+line_ann+\"\\n\")\n continue\n else:\n of.write(line+\"\\n\")\n of.close()\n\n return outfilename",
"def createNewHeader(fileName, new_period):\n\n # Changes the new_period format to the one used in the files\n new_period = changeFormatTime(new_period)\n\n header = getHeader(fileName)\n\n header[INDEXPeriod] = new_period\n\n # Turns header into string, each line separated by commas. To understand the\n # use of commas, see outputStatus.writeServicesFile\n header = ','.join(header)\n\n # Deletes newlines\n header = header.replace('\\n', '')\n\n return header",
"def add(self, t, header, ignore_existing=False):\n t = sanitize_t(t)\n header_file = Path(\n self.header_folder / (t.strftime(\"%Y_%m_%d_%H_%M_%S%z\") + \".csv\")\n )\n\n # print(header_file)\n header = \"\\n\".join(header)\n if not header_file.exists() or ignore_existing:\n with open(header_file, \"w\") as fp:\n fp.write(header)\n self._logger.info(f\"Added {file} header file.\")\n else:\n raise FileExistsError(\n f\"File {file} already exists, pass \" \"ignore_existing=True to replace.\"\n )",
"def updateheader(self,data):\n # Update PRODuct TYPE keyword with step name, add history keyword\n data.setheadval('PRODTYPE',self.name,'Product Type')\n histmsg = 'Reduced: ' + self.name + ' v' + self.stepver + ' '\n histmsg += time.strftime('%Y-%m-%d_%H:%M:%S')\n data.setheadval('HISTORY',histmsg)\n # Add input parameters to history\n for p in [par[0] for par in self.paramlist]:\n histmsg = ' %s: %s=%s' % (self.name, p, self.getarg(p))\n data.setheadval('HISTORY',histmsg)\n # Update file name with .PipeStepName.fits\n data.filename = data.filenamebegin + self.procname.upper() + data.filenameend\n # Add config file name if available and not already present\n # in HISTORY\n try:\n # This may fail if config has no filename - in that case,\n # don't add the message.\n conffilename = '' + self.config.filename\n # New history message\n histmsg = 'CONFIG: %s' % conffilename\n # Check history for presence of the full message or possibly\n # a truncated version (eg. for long filenames in FITS headers)\n full_history = data.getheadval('HISTORY')\n if len(histmsg) > 72:\n shortmsg = histmsg[0:72]\n else:\n shortmsg = histmsg\n if histmsg not in full_history and shortmsg not in full_history:\n self.log.debug('Recording config file name %s' % conffilename)\n data.setheadval('HISTORY',histmsg)\n except TypeError:\n pass\n\n # Send log messages",
"def mkHeaders(phdr, events_header, extver=1):\n\n headers = [phdr]\n # This is a reference, not a copy. Keywords will be updated (in other\n # functions) in headers[1], and the output corrtag header as well as the\n # flt and counts headers will contain the updated values.\n headers.append(events_header)\n\n err_hdr = fits.Header()\n dq_hdr = fits.Header()\n err_hdr[\"extname\"] = (\"ERR\", \"extension name\")\n dq_hdr[\"extname\"] = (\"DQ\", \"extension name\")\n err_hdr[\"extver\"] = (extver, \"extension version number\")\n dq_hdr[\"extver\"] = (extver, \"extension version number\")\n if \"rootname\" in events_header:\n rootname = events_header[\"rootname\"]\n err_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n dq_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n if \"expname\" in events_header:\n expname = events_header[\"expname\"]\n err_hdr[\"expname\"] = (expname, \"exposure identifier\")\n dq_hdr[\"expname\"] = (expname, \"exposure identifier\")\n if \"ra_aper\" in events_header:\n err_hdr[\"ra_aper\"] = (events_header[\"ra_aper\"],\n \"RA of reference aperture center\")\n if \"dec_aper\" in events_header:\n err_hdr[\"dec_aper\"] = (events_header[\"dec_aper\"],\n \"Declination of reference aperture center\")\n if \"pa_aper\" in events_header:\n err_hdr[\"pa_aper\"] = (events_header[\"pa_aper\"],\n \"Position Angle of reference aperture center (de\")\n if \"dispaxis\" in events_header:\n err_hdr[\"dispaxis\"] = (events_header[\"dispaxis\"],\n \"dispersion axis; 1 = axis 1, 2 = axis 2, none\")\n if \"ngoodpix\" in events_header:\n err_hdr[\"ngoodpix\"] = (-999, \"number of good pixels\")\n if \"goodmean\" in events_header:\n err_hdr[\"goodmean\"] = (-999., \"mean value of good pixels\")\n if \"goodmax\" in events_header:\n err_hdr[\"goodmax\"] = (-999., \"maximum value of good pixels\")\n\n headers.append(err_hdr)\n headers.append(dq_hdr)\n\n return headers",
"def add_header(self, _name, _value, **_params):\r\n parts = []\r\n for k, v in _params.items():\r\n if v is None:\r\n parts.append(k.replace('_', '-'))\r\n else:\r\n parts.append(_formatparam(k.replace('_', '-'), v))\r\n if _value is not None:\r\n parts.insert(0, _value)\r\n self._headers.append((_name, SEMISPACE.join(parts)))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a fragment that has the given positional arguments as child nodes. | def __call__(self, *args):
return Fragment()(*args) | [
"def createDocumentFragment(*args):\n return DocumentFragment(*args)",
"def create_from_args(self, name, content=None, attributes=None, children=None):\n node = self.node_class(name, content, attributes, children)\n return [node]",
"def create_node(**kwargs):",
"def create_arguments(primary, pyfunction, call_node, scope):\n args = list(call_node.args)\n args.extend(call_node.keywords)\n called = call_node.func\n # XXX: Handle constructors\n if _is_method_call(primary, pyfunction) and \\\n isinstance(called, ast.Attribute):\n args.insert(0, called.value)\n return Arguments(args, scope)",
"def create_child(self,*names, **meta):\n \n for name in names: \n child = node(name, self, **meta)\n self.childs[name] = child",
"def setFragment(*args, **kwargs):\n \n pass",
"def create_body(self):\n for pos in STARTING_POS:\n self.add_fragment(pos)",
"def _(self, node: FunctionDef):\n args = []\n body = []\n # print(node.name)\n if len(node.func_args) > 0:\n args = self.visit_list(node.func_args)\n if len(node.body) > 0:\n body = self.visit_list(node.body)\n\n node_uid = uuid.uuid4()\n args_node = uuid.uuid4()\n body_node = uuid.uuid4()\n\n # include the Name node's id if we have it\n if isinstance(node.name, Name):\n label = \"Function: \" + str(node.name.name) + \" (id: \" + str(node.name.id) +\")\"\n # otherwise name is just str\n else:\n label = f\"Function: {node.name}\"\n self.G.add_node(node_uid, label=label)\n self.G.add_node(args_node, label=\"Arguments\")\n self.G.add_node(body_node, label=\"Body\")\n\n self.G.add_edge(node_uid, body_node)\n self.G.add_edge(node_uid, args_node)\n for n in args:\n self.G.add_edge(args_node, n)\n\n for n in body:\n self.G.add_edge(body_node, n)\n\n return node_uid",
"def initialize_fragment(self):\n # if self.ndim == 1:\n # shader_pointcoord = \".x\"\n # else:\n # shader_pointcoord = \"\"\n shader_pointcoord = \"\"\n fragment = \"\"\"\n out_color = texture%dD(tex_sampler, varying_tex_coords%s);\n \"\"\" % (self.ndim, shader_pointcoord)\n # print fragment\n self.add_fragment_main(fragment)",
"def createArgumentGraph(self, vn):\n \"\"\"\n argument_graph = self.copy()\n num_nodes_sn = self.number_of_nodes()\n num_nodes_vn = vn.number_of_nodes()\n\n for i in range(0, num_nodes_sn):\n meta_node = 'meta_' + str(i % num_nodes_vn)\n argument_graph.add_edge(i, meta_node, {'capacity': float('inf')})\n\n \"\"\"\n argument_graph = self.copy()\n num_areas = vn.number_of_nodes()\n for i in range(0, num_areas ):\n self.addMetaNode(argument_graph, vn, 'meta_'+str(i))\n return argument_graph",
"def fragment(self, space_left, fragment_msg):\n new_args = []\n key_length = 2 # 2bytes for size\n for i, arg in enumerate(self.args):\n if space_left >= key_length:\n space_left -= key_length\n\n if arg is not None:\n arg_length = len(arg)\n if space_left < arg_length:\n fragment_msg.args.append(arg[space_left:])\n new_args.append(arg[:space_left])\n space_left = 0\n else:\n new_args.append(arg)\n space_left -= arg_length\n if space_left <= key_length:\n # boundary for arg\n fragment_msg.args.append(\"\")\n else:\n new_args.append(\"\")\n else:\n for l in range(i, len(self.args)):\n fragment_msg.args.append(self.args[l])\n break\n\n self.args = new_args\n if space_left >= 0 and len(fragment_msg.args) == 0:\n # don't need to fragment any more\n return None\n else:\n self.flags = FlagsType.fragment\n fragment_msg.id = self.id\n return fragment_msg",
"def wrap_arguments(args=None):\r\n if args is None:\r\n args = []\r\n\r\n tags = []\r\n for name, value in args:\r\n tag = \"<{name}>{value}</{name}>\".format(\r\n name=name, value=escape(\"%s\" % value, {'\"': \""\"}))\r\n # % converts to unicode because we are using unicode literals.\r\n # Avoids use of 'unicode' function which does not exist in python 3\r\n tags.append(tag)\r\n\r\n xml = \"\".join(tags)\r\n return xml",
"def pack_args(self, args):\n\n return nest.pack_sequence_as(self._args, args)",
"def create_sub_child(root, node_name, text=None, **kwargs):\n sub_child = etree.SubElement(root, node_name, **kwargs)\n sub_child.text = text if text is not None else None\n return sub_child",
"def varargsexpr(args):\n # lazy import to break cycle\n from blaze.compute.varargs import register_varargs_arity\n args = tuple(args)\n register_varargs_arity(len(args))\n\n return VarArgsExpr(args)",
"def add_child_directive(self, name: str, parameters: Optional[List[str]] = None,\n position: Optional[int] = None) -> DualDirectiveNode:\n\n primary_new = self.primary.add_child_directive(name, parameters, position)\n secondary_new = self.secondary.add_child_directive(name, parameters, position)\n assertions.assertEqual(primary_new, secondary_new)\n return DualDirectiveNode(primary=primary_new, secondary=secondary_new)",
"def do_arguments(self, node):\n kind = self.kind(node)\n assert kind == 'arguments', kind\n args = [self.visit(z) for z in node.args]\n defaults = [self.visit(z) for z in node.defaults]\n args2 = []\n n_plain = len(args) - len(defaults)\n for i in range(len(node.args)):\n if i < n_plain:\n args2.append(args[i])\n else:\n args2.append('%s=%s' % (args[i], defaults[i - n_plain]))\n # Add the vararg and kwarg expressions.\n vararg = getattr(node, 'vararg', None)\n if vararg: args2.append('*' + self.visit(vararg))\n kwarg = getattr(node, 'kwarg', None)\n if kwarg: args2.append('**' + self.visit(kwarg))\n return ','.join(args2)",
"def _create_args(\n self, stmt: ParametrizedStatement\n ) -> tuple[list[ast.expr], list[ast.keyword]]:\n args: list[ast.expr] = []\n kwargs = []\n\n gen_callable: GenericCallableAccessibleObject = cast(\n GenericCallableAccessibleObject, stmt.accessible_object()\n )\n\n left_of_current: list[str] = []\n\n parameters = gen_callable.inferred_signature.signature.parameters\n\n for param_name, param in parameters.items():\n if param_name in stmt.args:\n # The variable that is passed in as an argument\n var = au.create_full_name(\n self._variable_names,\n self._module_aliases,\n stmt.args[param_name],\n True,\n )\n match param.kind:\n case Parameter.POSITIONAL_ONLY:\n args.append(var)\n case Parameter.POSITIONAL_OR_KEYWORD:\n # If a POSITIONAL_OR_KEYWORD parameter left of the current param\n # has a default, and we did not pass a value, we must pass the\n # current value by keyword, otherwise by position.\n if any(\n parameters[left].default is not Parameter.empty\n and left not in stmt.args\n for left in left_of_current\n ):\n kwargs.append(\n ast.keyword(\n arg=param_name,\n value=var,\n )\n )\n else:\n args.append(var)\n case Parameter.KEYWORD_ONLY:\n kwargs.append(\n ast.keyword(\n arg=param_name,\n value=var,\n )\n )\n case Parameter.VAR_POSITIONAL:\n # Append *args, if necessary.\n args.append(\n ast.Starred(\n value=var,\n ctx=ast.Load(),\n )\n )\n case Parameter.VAR_KEYWORD:\n # Append **kwargs, if necessary.\n kwargs.append(\n ast.keyword(\n arg=None,\n value=var,\n )\n )\n left_of_current.append(param_name)\n return args, kwargs",
"def create_child_container(self, *args, **kwargs):\n kwargs['parent'] = self\n return type(self)(*args, **kwargs)",
"def gen_child_def_node(cg, node, local_names):\n # Validate the type of the child\n load_name(cg, node.typename, local_names)\n with cg.try_squash_raise():\n cg.dup_top()\n load_helper(cg, 'validate_declarative')\n cg.rot_two() # base -> helper -> base\n cg.call_function(1) # base -> retval\n cg.pop_top() # base\n\n # Subclass the child class if needed\n store_types = (StorageExpr, AliasExpr, FuncDef)\n if any(isinstance(item, store_types) for item in node.body):\n cg.load_const(node.typename)\n cg.rot_two()\n cg.build_tuple(1)\n cg.build_map()\n cg.load_global('__name__')\n cg.load_const('__module__')\n cg.store_map() # name -> bases -> dict\n cg.build_class() # class\n\n # Build the declarative compiler node\n store_locals = should_store_locals(node)\n load_helper(cg, 'declarative_node')\n cg.rot_two()\n cg.load_const(node.identifier)\n cg.load_fast(SCOPE_KEY)\n cg.load_const(store_locals) # helper -> class -> identifier -> key -> bool\n cg.call_function(4) # node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new factory that is bound to the specified namespace. | def __getitem__(self, namespace):
return ElementFactory(namespace) | [
"def __getitem__(self, namespace):\n return ElementFactory(namespace)",
"def convert_namespace_to_factory(class_input):\r\n return decorate_class_methods(class_input, to_factory)",
"def get_factory(package):\r\n return functools.partial(get, package)",
"def __new__ (cls, *args, **kw):\n (uri,) = args\n if isinstance(uri, tuple):\n # Special handling to reconstruct absent namespaces.\n (variant, uid) = uri\n if cls.__SerializedVariantAbsent == variant:\n ns = cls.__AbsentNamespaceRegistry.get(uid)\n if ns is None:\n raise pyxb.UsageError('Unable to reconstruct instance of absent namespace')\n return ns\n raise pyxb.LogicError('Unrecognized serialized namespace variant %s uid %s' % (variant, uid))\n elif not (uri in cls.__Registry):\n instance = object.__new__(cls)\n # Do this one step of __init__ so we can do checks during unpickling\n instance.__uri = uri\n instance._reset()\n # Absent namespaces are not stored in the registry.\n if uri is None:\n cls.__AbsentNamespaces.add(instance)\n return instance\n cls.__Registry[uri] = instance\n return cls.__Registry[uri]",
"def test_init_namespace(store_session):\n\n _, session = store_session\n\n Person = session.get_class(surf.ns.FOAF.Person)\n surf.ns.register(nstest=\"http://example.com/ns#\")\n\n # namespace is an instance of Namespace\n p = Person(namespace=surf.ns.NSTEST)\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"\n\n # namespace is an instance of URIRef\n p = Person(namespace=URIRef(\"http://example.com/ns#\"))\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"\n\n # namespace is string\n p = Person(namespace=\"http://example.com/ns#\")\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"",
"def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory, pvc_factory):\n return namespacestore_factory_implementation(\n request, cld_mgr, mcg_obj, cloud_uls_factory, pvc_factory\n )",
"def factory(cls, *args, **kwargs):\n return Factory(cls).bind(*args, **kwargs)",
"def bind_namespace(g, prefix, namespace):\n ns = Namespace(namespace)\n g.namespace_manager.bind(prefix, ns, override=False)\n return ns",
"def convert_namespace_to_callable_factory(class_input):\r\n return decorate_class_methods(class_input, to_callable_factory)",
"def factory( self ):\n return self._factory",
"def getFactory(self):\n factory = ServerFactory()\n def protocol():\n proto = CredReceiver()\n proto.portal = Portal(\n self.loginSystem,\n [self.loginSystem,\n OneTimePadChecker(self._oneTimePads)])\n return proto\n factory.protocol = protocol\n return factory",
"def factory(cls, sitename: str):\n return cls.subclasses[sitename]",
"def getFactory(self):\n return self.factory",
"def getNamespaceFromName(*args, **kwargs):\n \n pass",
"def init_service(parent_device, service_root, service_type, namespace):\n try:\n service = service_map[service_type]\n except KeyError:\n service = Service\n\n return service(parent_device, service_root, service_type, namespace)",
"def create_namespaced_binding(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_binding\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_binding`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_binding`\")\n\n resource_path = '/api/v1/namespaces/{namespace}/bindings'.replace('{format}', 'json')\n method = 'POST'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1Binding',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def ns(self, name):\n if not isinstance(name, str):\n raise TypeError('`name` must be a string.')\n\n if not name:\n raise ValueError('`name` must not be blank.')\n\n return self._get_namespace_class()(\n self._client,\n f'{self._path}/{name}',\n )",
"def factoryAddress() -> address(Factory):\n return self.factory",
"def __new__(cls, *args, **kw):\n factory = object.__new__(cls)\n factory.__init__(*args, **kw)\n return cls.source_class(factory)",
"def returnFactory(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return Factory(func).bind(*args, **kwargs)\n return wrapper"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that every item on the stream is actually a markup event. | def _ensure(stream):
stream = iter(stream)
event = stream.next()
# Check whether the iterable is a real markup event stream by examining the
# first item it yields; if it's not we'll need to do some conversion
if type(event) is not tuple or len(event) != 3:
for event in chain([event], stream):
if hasattr(event, 'totuple'):
event = event.totuple()
else:
event = TEXT, unicode(event), (None, -1, -1)
yield event
return
# This looks like a markup event stream, so we'll just pass it through
# unchanged
yield event
for event in stream:
yield event | [
"def _ensure(stream):\r\n stream = iter(stream)\r\n event = next(stream)\r\n\r\n # Check whether the iterable is a real markup event stream by examining the\r\n # first item it yields; if it's not we'll need to do some conversion\r\n if type(event) is not tuple or len(event) != 3:\r\n for event in chain([event], stream):\r\n if hasattr(event, 'totuple'):\r\n event = event.totuple()\r\n else:\r\n event = TEXT, str(event), (None, -1, -1)\r\n yield event\r\n return\r\n\r\n # This looks like a markup event stream, so we'll just pass it through\r\n # unchanged\r\n yield event\r\n for event in stream:\r\n yield event",
"def test_accept_feed_advice(self):\n pass",
"def assert_no_events_published(self, event_type):\n for event in self.events:\n assert event['event_type'] != event_type",
"def test_item_timestamp_missing(testapp):\n\n stream = todatetime.process(\n testapp,\n [holocron.Item({\"content\": \"the Force is strong with this one\"})],\n todatetime=\"timestamp\",\n )\n\n assert isinstance(stream, collections.abc.Iterable)\n assert list(stream) == [\n holocron.Item({\"content\": \"the Force is strong with this one\"})\n ]",
"def is_normal_news_item(self):\n if self.text:\n return False\n else:\n return True",
"def test_parse_semantics(self):\n\n items = pulldom.parseString(SMALL_SAMPLE)\n evt, node = next(items)\n # Just check the node is a Document:\n self.assertTrue(hasattr(node, \"createElement\"))\n self.assertEqual(pulldom.START_DOCUMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"html\", node.tagName)\n self.assertEqual(2, len(node.attributes))\n self.assertEqual(node.attributes.getNamedItem(\"xmlns:xdc\").value,\n \"http://www.xml.com/books\")\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt) # Line break\n evt, node = next(items)\n # XXX - A comment should be reported here!\n # self.assertEqual(pulldom.COMMENT, evt)\n # Line break after swallowed comment:\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(\"title\", node.tagName)\n title_node = node\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n self.assertEqual(\"Introduction to XSL\", node.data)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"title\", node.tagName)\n self.assertTrue(title_node is node)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"p\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n # XXX No END_DOCUMENT item is ever obtained:\n #evt, node = next(items)\n #self.assertEqual(pulldom.END_DOCUMENT, evt)",
"def testInfiniteSendItem(self):\n self.item_generator = self.handler.readStream(self.bin_file)\n i = 0\n while True:\n self.item = self.handler.sendItem()\n if self.item == 'stream ended':\n self.assertTrue(self.item)\n break",
"def test_encode_and_parse_all(self):\n p = mido.Parser()\n for spec in mido.messages.get_message_specs():\n if spec.type == 'sysex_end':\n # This is considered a part of 'sysex_start'.\n continue\n\n msg = Message(spec.type)\n p.feed(msg.bytes())\n outmsg = p.get_message()\n self.assertTrue(outmsg is not True)\n self.assertTrue(outmsg.type == spec.type)",
"def test_template_tag(self):\n entries = Entry.objects.all()\n entries = get_published_entries(entries, 'en')\n self.assertEqual(len(entries), 1, msg=(\n 'Should return the entries that are published.'))",
"def check_for_event(self):\r\n a=self.read_chat(0)\r\n event=False\r\n finmes=\"\"\r\n next=False\r\n for m in a:\r\n if next==True:\r\n finmes=m\r\n break\r\n\r\n elif \"event\" in m:\r\n event=True\r\n next=True\r\n\r\n\r\n if event==True:\r\n finmes+=\" \"\r\n t1=finmes[finmes.find(\"Type\")+5:-1]\r\n\r\n self.write_to_chat(t1)\r\n\r\n t2=finmes[finmes.find(\"type\")+5:-1]\r\n self.write_to_chat(t2)\r\n\r\n for i in range(5):\r\n self.write_to_chat(t2)\r\n sleep(0.8)\r\n self.write_to_chat(t1)\r\n sleep(0.8)\r\n\r\n return True\r\n\r\n else:\r\n return False",
"def test_get_inbox_replier_events(self):\n pass",
"def check_events(self, event:Event):\n pass",
"def test_issue_104__ignore_exceptions(self):\n ical_str = \"\"\"\nBEGIN:VEVENT\nDTSTART:20140401T000000Z\nDTEND:20140401T010000Z\nDTSTAMP:20140401T000000Z\nSUMMARY:Broken Eevnt\nCLASS:PUBLIC\nSTATUS:CONFIRMED\nTRANSP:OPAQUE\nX\nEND:VEVENT\"\"\"\n event = icalendar.Calendar.from_ical(ical_str)\n self.assertTrue(isinstance(event, icalendar.Event))\n self.assertTrue(event.is_broken) # REMOVE FOR NEXT MAJOR RELEASE\n self.assertEqual(\n event.errors,\n [(None, \"Content line could not be parsed into parts: 'X': Invalid content line\")] # noqa\n )",
"def _process_event(self, operation, event):\n\n event_type, data, pos = event\n if event_type == START:\n tag, attrs = data\n\n # check how these tag should be diffed\n diff_type = Html5Definition.get_diff_type(tag)\n if diff_type == DiffBehaviour.skip:\n # diffing of this tag and its contents should be skipped\n # passthrough whole tag to the output\n self._passthrough(event)\n return True\n elif diff_type == DiffBehaviour.as_block:\n # diff this tag as one element, to do that go through all\n self._process_block(event)\n return True\n\n self.append(event)\n self._enter(data[0])\n elif event_type == END:\n self._leave(data)\n self.append(event)\n else:\n self.append(event)\n\n return True",
"def run_stream(self, stream, handler):\n for item in stream:\n if item is None:\n break\n handler(item)",
"def strip_eof_messages(messages):\n if not messages:\n return messages, False\n stripped_messages = [message for message in messages\n if not isinstance(message, EOFNotification)]\n found_eof = len(stripped_messages) != len(messages)\n return stripped_messages, found_eof",
"def process_events(self):\n pass",
"def mightRender(self, text):\r\n return True",
"def inject_meta_tags(self, stream, taglist):\n done = False\n meta_tag = None\n for ev, item in stream:\n if not done:\n if ev in (START, END):\n tag = self.tagname(item.tag)\n if meta_tag:\n if item.tag == meta_tag:\n if ev == START:\n for attributes in taglist:\n for attrib, value in item.items():\n attrib = attrib.lower()\n if attrib == 'content':\n continue\n if attrib not in attributes:\n break\n value = value.lower()\n if attributes[attrib] != value:\n break\n else:\n # that meta tag exists already\n attributes['content'] = None\n break\n else:\n for attributes in taglist:\n if attributes['content'] is None:\n continue\n meta_item = Element(meta_tag, **attributes)\n yield START, meta_item\n yield END, meta_item\n yield TEXT, '\\n'\n done = True\n elif tag == 'head' and ev == START:\n meta_tag = item.tag[:-4] + 'meta'\n yield ev, item",
"def testWriteEventBody(self):\n formatters_manager.FormattersManager.RegisterFormatter(\n L2TTestEventFormatter)\n\n event, event_data = containers_test_lib.CreateEventFromValues(\n self._TEST_EVENTS[0])\n\n event_tag = events.EventTag()\n event_tag.AddLabels(['Malware', 'Printed'])\n\n self._formatter.WriteEventBody(event, event_data, event_tag)\n\n expected_event_body = (\n '06/27/2012,18:17:01,UTC,M...,LOG,Syslog,Content Modification Time,-,'\n 'ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '\n 'closed for user root),Reporter <CRON> PID: 8442 '\n '(pam_unix(cron:session): session closed for user root),'\n '2,log/syslog.1,-,Malware Printed,test_parser,'\n 'a_binary_field: binary; my_number: 123; some_additional_foo: True\\n')\n\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n\n # Ensure that the only commas returned are the 16 delimiters.\n self.assertEqual(event_body.count(','), 16)\n\n formatters_manager.FormattersManager.DeregisterFormatter(\n L2TTestEventFormatter)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an item or slice of the attributes list. >>> attrs = Attrs([('href', ''), ('title', 'Foo')]) >>> attrs[1] ('title', 'Foo') | def __getitem__(self, i):
items = tuple.__getitem__(self, i)
if type(i) is slice:
return Attrs(items)
return items | [
"def __getitem__(self, index):\n return self.attribute_values[index]",
"def __getslice__(self, i, j):\r\n return Attrs(tuple.__getslice__(self, i, j))",
"def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attributes_struct\n if x['Name'] == attr]\n if len(attribute_struct) > 1:\n raise tdapi.TDException(\"Too many attributes with name {}\".format(attr))\n elif len(attribute_struct) == 0:\n return\n else:\n return attribute_struct[0]['Value']",
"def getAttrs(self, i):\n try:\n val = self._table[i]\n except IndexError, e:\n # raise e\n return None\n else:\n return val\n pass",
"def _get_attributes(measure, position=-1):\n if len(measure) and measure[position].tag == 'attributes':\n return measure[position]\n else:\n attributes = etree.Element('attributes')\n measure.insert(position, attributes)\n return attributes",
"def firstAttr(self, *attrs):\n for attr in attrs:\n value = getattr(self, attr, None)\n if value is not None:\n return value",
"def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm",
"def itemsByGroup(self, *args) -> \"std::vector< adsk::core::Ptr< adsk::core::Attribute >,std::allocator< adsk::core::Ptr< adsk::core::Attribute > > >\" :\n return _core.Attributes_itemsByGroup(self, *args)",
"def __getitem__(self, key):\n if isinteger(key):\n return self._list[key]\n\n if isinstance(key, slice):\n return type(self)(self._list[key])\n\n return tuple(xml[key] for xml in self)",
"def attributes(self, string, pos):\n\t\tfor (expr, attr) in self.attrs:\n\t\t\tmatch = re.search(expr, string)\n\t\t\tif match and match.start() == pos:\n\t\t\t\treturn attr\n\t\treturn 0",
"def _getAttrMultiple(self, data, **kwargs):\n returnData = []\n if not data:\n return returnData\n\n for i in data:\n result = self._getAttr(i, **kwargs)\n if result:\n returnData.append(result)\n return returnData",
"def get_next_attribute(self, attribute_list, records_df):\n if attribute_list:\n return attribute_list[0]\n else:\n return None",
"def get_attribute(self,attr):\n\t\tif (attr is None):\n\t\t\traise ValueError(\"You must specify an attribute\")\n\t\tif (attr not in self._Attributes):\n\t\t\traise ValueError(\"Attribute \" + attr + \" unrecognized\")\n\t\treturn self._Attributes[attr]",
"def get(iterable: Iterable[_T], **attrs: Any) -> _T | None:\n\n # global -> local\n _all = all\n attrget = attrgetter\n\n # Special case the single element call\n if len(attrs) == 1:\n k, v = attrs.popitem()\n pred = attrget(k.replace(\"__\", \".\"))\n for elem in iterable:\n if pred(elem) == v:\n return elem\n return None\n\n converted = [(attrget(attr.replace(\"__\", \".\")), value) for attr, value in attrs.items()]\n\n for elem in iterable:\n if _all(pred(elem) == value for pred, value in converted):\n return elem\n return None",
"def getAttr(self,attr):\n try: return self.__getattribute__(attr)\n\texcept: return None",
"def get_attr(self,n,attr):\n\t\tself.realopen(self.rohint)\t# make sure the database is open\n\t\ttry :\n\t\t\tret={}\n\t\t\tfor i in n:\n\t\t\t\td=loads(self.bdb.get(dumps(i,-1),txn=self.txn))\n\t\t\t\tif getattr(attr, '__iter__', False):\n\t\t\t\t\tret[i]={}\n\t\t\t\t\tfor a in attr:\n\t\t\t\t\t\tif a in d : ret[i][a]=d[a]\n\t\t\t\telse:\n\t\t\t\t\ttry: ret[i]=d[attr]\n\t\t\t\t\texcept: pass\n\t\t\treturn ret\n\t\texcept:\n\t\t\tif getattr(attr, '__iter__', False):\n\t\t\t\td=loads(self.bdb.get(dumps(n,-1),txn=self.txn))\n\t\t\t\tret={}\n\t\t\t\tfor a in attr:\n\t\t\t\t\tif a in d : ret[a]=d[a]\n\t\t\t\treturn ret\n\t\t\treturn loads(self.bdb.get(dumps(n,-1),txn=self.txn))[attr]",
"def get_attrs(expr):\n if isinstance(expr, Call):\n return expr.attrs\n if isinstance(expr, TupleGetItem):\n return get_attrs(expr.tuple_value)\n return {}",
"def get_attributes(start, *path):\n\n cursor = _get_cursor(start)\n\n (intermediateNode, _) = _traverse_path(cursor, path)\n if intermediateNode:\n # we encountered an array with variable (-1) indices.\n # this is only allowed when calling coda.fetch().\n raise ValueError(\"variable (-1) array indices are only allowed when calling coda.fetch()\")\n\n cursor_goto_attributes(cursor)\n\n result = _fetch_subtree(cursor)\n\n del cursor\n return result",
"def getAttrs(self):\n\t\treturn self._attributes",
"def get_attribute(self):\n data = self.data\n # Step 1 (skip chars)\n c = data.skip(skip1)\n assert c is None or len(c) == 1\n # Step 2\n if c in (b\">\", None):\n return None\n # Step 3\n attr_name = []\n attr_value = []\n # Step 4 attribute name\n while True:\n if c == b\"=\" and attr_name:\n break\n elif c in space_chars_bytes:\n # Step 6!\n c = data.skip()\n break\n elif c in (b\"/\", b\">\"):\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_name.append(c)\n # Step 5\n c = next(data)\n # Step 7\n if c != b\"=\":\n data.previous()\n return b\"\".join(attr_name), b\"\"\n # Step 8\n next(data)\n # Step 9\n c = data.skip()\n # Step 10\n if c in (b\"'\", b'\"'):\n # 10.1\n quote_char = c\n while True:\n # 10.2\n c = next(data)\n # 10.3\n if c == quote_char:\n next(data)\n return b\"\".join(attr_name), b\"\".join(attr_value)\n # 10.4\n else:\n attr_value.append(c)\n elif c == b\">\":\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_value.append(c)\n # Step 11\n while True:\n c = next(data)\n if c in spaces_angle_brackets:\n return b\"\".join(attr_name), b\"\".join(attr_value)\n elif c is None:\n return None\n else:\n attr_value.append(c)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a slice of the attributes list. >>> attrs = Attrs([('href', ''), ('title', 'Foo')]) | def __getslice__(self, i, j):
return Attrs(tuple.__getslice__(self, i, j)) | [
"def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]",
"def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm",
"def getAttrs(self):\n\t\treturn self._attributes",
"def itemsByGroup(self, *args) -> \"std::vector< adsk::core::Ptr< adsk::core::Attribute >,std::allocator< adsk::core::Ptr< adsk::core::Attribute > > >\" :\n return _core.Attributes_itemsByGroup(self, *args)",
"def _get_attributes(measure, position=-1):\n if len(measure) and measure[position].tag == 'attributes':\n return measure[position]\n else:\n attributes = etree.Element('attributes')\n measure.insert(position, attributes)\n return attributes",
"def _get_attrs_items(obj):\n attrs = getattr(obj.__class__, \"__attrs_attrs__\")\n attr_names = [a.name for a in attrs]\n return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]",
"def listAttributes(self):\n return list(self._attributes.keys())",
"def get_stripped_attributes(self):\n stripped = []\n for (tags, attrs) in self.getHtmlExclusions():\n if not tags:\n stripped.extend(attrs)\n return stripped",
"def iterAttrs(self):\n return iter(self.requested_attributes.values())",
"def get_attributes(html):\n\n for i, c in enumerate(html):\n if c == '>':\n if USE_BUFFER:\n html = buffer(html, 0, i)\n else:\n html = html[:i]\n break\n return dict((name.lower().strip(), value.strip('\\'\" ')) for (name, value) in attributes_regex.findall(html))",
"def getPredAttrs (self, predicates): \n counter = 0\n PredAttrList = []\n for item in predicates: \n if (counter) % 2 == 0: \n PredAttrList.append(item[0]) \n counter += 1 \n return PredAttrList",
"def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])",
"def getAttributes(self):\n title = self.getAttributeNode('title')\n if title is not None:\n return NamedNodeMap({'title':title})\n return NamedNodeMap()",
"def get_attrs(self):\n return self.ms.get_attrs()",
"def zip_with_attr(iterable, *attrs):\n\n return zip(\n iterable, *(tuple(xattr(item, attr) for item in iterable) for attr in attrs)\n )",
"def all_attributes (self):\n attrs = []\n for sup in self.super:\n sup_attrs = sup.all_attributes ()\n if len (sup_attrs) > 0:\n attrs.extend (sup_attrs)\n attrs.extend (self.attributes)\n return attrs",
"def parse_attrs(buf):\n attrs = []\n while buf:\n t = ord(buf[0])\n l = ord(buf[1])\n if l < 2:\n break\n d, buf = buf[2:l], buf[l:]\n attrs.append((t, d))\n return attrs",
"def Attributes(self) -> _n_5_t_17:",
"def _get_attributes_list(self, xpath, xml=None):\n if xml is None:\n xml = self.pom_data\n attrs = xml.findall(xpath)\n attrs = [attr.text for attr in attrs]\n return [attr.strip() for attr in attrs if attr and attr.strip()]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |