query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (sequence, lengths 19–20) | metadata (dict) |
---|---|---|---|
Return a new instance that contains the attributes in `attrs` in addition to any already existing attributes. Any attributes in the new set that have a value of `None` are removed. | def __or__(self, attrs):
remove = set([an for an, av in attrs if av is None])
replace = dict([(an, av) for an, av in attrs
if an in self and av is not None])
return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self
if sn not in remove] +
[(an, av) for an, av in attrs
if an not in self and an not in remove]) | [
"def copy(self):\n\t\tnewAttr = AtomAttributes(None, None, None, None, None, None)\n\t\tnewAttr.__dict__ = self.__dict__.copy()\n\t\treturn newAttr",
"def copy(self,**kwds):\n new_ds = copy.copy(self)\n def _find_set(kwd):\n val = kwds.get(kwd)\n if val is not None:\n setattr(new_ds,kwd,val) \n for attr in self.__attrs__: _find_set(attr) \n return(new_ds)",
"def _remove_attributes(attrs, remove_list):\n new_attrs = {}\n for attr in attrs.keys():\n if attr not in remove_list:\n new_attrs[attr] = attrs[attr]\n return new_attrs",
"def strip_attributes(self):\r\n original_attributes = set(self.inventory)\r\n keys = list(self.__dict__.keys())\r\n for att in keys:\r\n if att not in original_attributes:\r\n del(self.__dict__[att])",
"def possibly_init_attrs(self, attrs):\n for key, value in attrs.items():\n if not self.__dict__.has_key(key):\n setattr(self, key, value)",
"def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])",
"def strip_attrs(self):\n for tag in self.root.findAll(True):\n tag.attrs = [(attr, val) for attr, val in tag.attrs\n if attr in self.settings['valid_attrs']]",
"def clone_attributes():\n _clone_attributes(utils.get_sentiwordnet_groups(SENTIWORDNET_FILE))\n _clone_attributes(utils.get_e_lemma_groups(E_LEMMA_FILE))",
"def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml",
"def reset(self):\n for attribute in self._trained_attributes:\n setattr(self, attribute, None)\n return None",
"def set_attributes_all_required(instance, attrs, res):\r\n for attr in attrs:\r\n attr_val = res.get(attr)\r\n # all attributes are required\r\n if not attr_val:\r\n print(attr)\r\n abort(400)\r\n setattr(instance, attr, attr_val)\r\n return instance",
"def set_stripped_attributes(self, stripped):\n exclusions = [(tags, attrs) for (tags, attrs) in self.html_exclusions if tags]\n exclusions.append(((), tuple(stripped)))\n self.set_html_exclusions(exclusions)",
"def setattrs(self, attrs):\n for k, v in attrs:\n self.setattr(k, v)",
"def newFixedAtomSet(self, **attrlinks):\n return FixedAtomSet(self, **attrlinks)",
"def clean(self):\n attr = self.getAttributes()\n for a in attr:\n value = getattr(self, a)\n if value is None:\n # remove from header file? (easy)\n if a in self.header:\n del self.header[a]\n else:\n # remove from disk...\n # solve path from attribute name\n path = None\n for f in os.listdir(self._getDirectory()):\n if os.path.splitext(f)[0] == a: # we have found the right file\n path = os.path.join(self._getDirectory(), f)\n break\n if path is not None and os.path.exists(path):\n hdr, dat = hylite.io.matchHeader( path )\n if hdr is not None and os.path.exists(hdr):\n os.remove(hdr)\n if dat is not None and os.path.exists(dat) and os.path.isdir(dat): # nested HyCollection\n shutil.rmtree(dat)\n if os.path.exists(dat) and os.path.isfile(dat): # other data type\n os.remove(dat)\n # remove attribute\n delattr(self, a)",
"def copyAttributes(source, dest, skip_refs=True):\n for attr in source.attrs.keys():\n atval = source.attrs[attr]\n \"\"\"\n Don't copy references unless asked\n \"\"\"\n if isinstance(atval, h5py.Reference):\n if isinstance(atval, h5py.RegionReference) or skip_refs:\n continue\n elif isinstance(atval, h5py.RegionReference):\n \"\"\"\n Dereference old reference, get the appropriate data\n slice and create new reference.\n \"\"\"\n try:\n region = h5py.h5r.get_region(atval, source.id)\n\n start, end = region.get_select_bounds()\n ref_slice = []\n for i in range(len(start)):\n if start[i] == end[i]:\n ref_slice.append(start[i])\n else:\n ref_slice.append(slice(start[i], end[i]))\n except:\n warn('Could not create new region reference for {} in {}.'.format(attr, source.name))\n continue\n\n dest.attrs[attr] = dest.regionref[tuple(ref_slice)]\n continue\n else:\n dest.attrs[attr] = atval\n continue\n dest.attrs[attr] = atval\n if not skip_refs:\n try:\n copyRegionRefs(source, dest)\n except:\n print('Could not create new region reference for {} in {}.'.format(attr, source.name))\n\n return dest",
"def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr",
"def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)",
"def sanitize(self):\n if self._has_private_attribute():\n self.attributes = {k:v for (k,v) in self.attributes.items() if not k.startswith('__')}\n return self"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
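As a quick illustration of the merge semantics in the row above, here is a minimal usage sketch. It assumes these snippets come from the Genshi toolkit (so `Attrs` is importable from `genshi.core` as a tuple of name/value pairs); the attribute names and values are invented for the example.

```python
from genshi.core import Attrs  # assumption: the Attrs class shown above is Genshi's

existing = Attrs([('class', 'intro'), ('id', 'p1')])
# 'class' is replaced, 'id' is removed because its new value is None,
# and 'title' is appended because it was not present before.
merged = existing | [('class', 'lead'), ('id', None), ('title', 'Hi')]
print(list(merged))  # [('class', 'lead'), ('title', 'Hi')]
```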
Return a new instance with all attributes whose name is in `names` removed. | def __sub__(self, names):
if isinstance(names, basestring):
names = (names,)
return Attrs([(name, val) for name, val in self if name not in names]) | [
"def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])",
"def remove_attr(self, name):\n del self.attributes_dict[name]",
"def strip_attributes(self):\r\n original_attributes = set(self.inventory)\r\n keys = list(self.__dict__.keys())\r\n for att in keys:\r\n if att not in original_attributes:\r\n del(self.__dict__[att])",
"def deleteAttr(attribute=\"string\", name=\"string\"):\n pass",
"def _remove_attributes(attrs, remove_list):\n new_attrs = {}\n for attr in attrs.keys():\n if attr not in remove_list:\n new_attrs[attr] = attrs[attr]\n return new_attrs",
"def removeMembers(members):",
"def get_with_excluded_names(self, *names, **kwargs):\n order_by = kwargs.get('order_by')\n limit = kwargs.get('limit')\n\n cursor = db.cursor()\n cursor.execute(\n f\"SELECT * FROM {self.table} \"\n f\"WHERE name NOT IN ({', '.join('%s' for name in names)}) \"\n f\"{self._format_order_by(order_by)}\"\n f\"{self._format_limit(limit)}\",\n tuple(names),\n )\n results = [self.model(*row) for row in cursor]\n cursor.close()\n return results",
"def unselect(self, *colnames):\n for colname in self.colnames:\n if colname not in colnames:\n yield colname, self[colname].copy()",
"def del_component_instances(names):\n for name in names:\n del_component_instance(name)",
"def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)",
"def remove_members(self, members):\n members = [member.dn for member in pyadutils.generate_list(members)]\n return self.remove_from_attribute('member', members)",
"def remove_attribute(self, name):\n try:\n del self._attributes[name]\n except KeyError:\n pass",
"def without(self, *names):\n only_vars = {}\n\n for name in self.request_variables:\n if name not in names:\n only_vars[name] = self.request_variables.get(name)\n\n return only_vars",
"def _replace_names(self, names):\n el_namen = self.get_root().xpath('./person/persName')\n for el_naam in el_namen:\n el_naam.getparent().remove(el_naam)\n for name in names:\n self._add_a_name(name)",
"def __delattr__(self, name):\n # First check if is a valid DICOM name and if we have that data element\n tag = tag_for_name(name)\n if tag and tag in self:\n del self[tag]\n # If not a DICOM name (or we don't have it), check for regular instance name\n # can't do delete directly, that will call __delattr__ again!\n elif name in self.__dict__:\n del self.__dict__[name]\n # Not found, raise an error in same style as python does\n else:\n raise AttributeError, name",
"def clearing_cases(self, *names):\n\n return self._updating(lambda builder: builder.clear_cases(*names))",
"def remove_columns(self, column_names):\n if not hasattr(column_names, '__iter__'):\n raise TypeError('Column_names must be an iterable.')\n for name in column_names:\n if name not in self.column_names():\n raise KeyError('Cannot find column {}.'.format(name))\n return XStream(impl=self._impl.remove_columns(column_names))",
"def remove_private_attrs(mapping: Mapping) -> Mapping:\n cls = type(mapping)\n public_keys = [key for key in mapping if not key.startswith('_')]\n dict_ = {key: mapping[key] for key in public_keys}\n return cls(dict_)",
"def del_attribs(self, alist):\r\n if isinstance(alist, str):\r\n alist = (alist, )\r\n d = self.dict\r\n for a in alist:\r\n if a in d:\r\n del d[a]",
"def trimUnnamed(df):\n df_new = df.copy()\n if UNNAMED in df_new.columns:\n del df_new[UNNAMED]\n return df_new"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a `Markup` object which is the concatenation of the strings in the given sequence, where this `Markup` object is the separator between the joined elements. Any element in the sequence that is not a `Markup` instance is automatically escaped. | def join(self, seq, escape_quotes=True):
return Markup(unicode.join(self, [escape(item, quotes=escape_quotes)
for item in seq])) | [
"def html_join(sep, sequence):\n sep_safe = conditional_escape(sep)\n return mark_safe(sep_safe.join(conditional_escape(e) for e in sequence))",
"def join(self, iterable):\r\n result = ANSIString('')\r\n last_item = None\r\n for item in iterable:\r\n if last_item is not None:\r\n result += self\r\n result += item\r\n last_item = item\r\n return result",
"def make_seq_string(seq, container_chars = '[]'):\n string = '%s ' % container_chars[0]\n for elem in seq: string += str(elem) + ', '\n string = '%s %s' % (string[:-2], container_chars[1])\n return string",
"def join(self, iterable, formatter=lambda s, t: t.format(s),\n template=\"{}\"):\n # type: (Iterable, Callable, str) -> ww.s.StringWrapper\n formatted_iterable = (formatter(st, template) for st in iterable)\n return self.__class__(unicode.join(self, formatted_iterable))",
"def WrappedJoin(items, separator=..., width=...):\n ...",
"def join(sequence, join_char=''):\n return reduce(lambda x, y: x + join_char + y, sequence)",
"def commize(sequence):\n return ', '.join((str(elem) for elem in sequence))",
"def wrap_seq_for_applescript(seq):\n quoted = [f'\"{item}\"' for item in seq]\n joined = ', '.join(quoted)\n wrapped = '{' + joined + '}'\n return wrapped",
"def _concat(self, items):\n if items is None:\n return ''\n if isinstance(items, string_types):\n items = [items]\n return ','.join(items)",
"def and_join(lst: t.List[t.Any], sep: str = \", \") -> str:\n return f\"{sep.join(str(x) for x in lst[:-1])}{sep}and {lst[-1]}\"",
"def join_and(iterable, plural=False):\n\n # Remove any empty strings.\n values = list(filter(None, [str(value) for value in iterable]))\n\n if not values:\n return \"\"\n\n value = values.pop()\n\n if plural:\n return \" and \".join(filter(None, [\", \".join(values), value]))\n\n return \" and a \".join(filter(None, [\", a \".join(values), value]))",
"def _build_html(items, wrapping):\r\n return jinja2.Markup('\\n'.join((wrapping % item for item in items)))",
"def sjoin(iterable, sep=' ', tpl='%s'):\n return sep.join( [tpl % str(x) for x in iterable] )",
"def markupSeq(seq, ulPosList, boldPosList, annots = {}):\n annotStarts = {}\n annotEnds = defaultdict(set)\n for (start, end), aDict in annots.iteritems():\n annotStarts[start] = aDict\n aDict[\"end\"] = end\n\n ulStarts = set([x[0] for x in ulPosList])\n ulEnds = set([x[1] for x in ulPosList])\n boldStarts = set([x[0] for x in boldPosList])\n boldEnds = set([x[1] for x in boldPosList])\n ret = []\n openAnnots = defaultdict(int) # current number of open spans, per cssString\n openTags = set()\n for i, nucl in enumerate(seq):\n if i in annotEnds:\n for tagStr in annotEnds[i]:\n if tagStr in openAnnots:\n openAnnots[tagStr]-=1\n if openAnnots[tagStr]==0:\n ret.append(\"</span>\")\n del openAnnots[tagStr]\n\n if i in annotStarts:\n aDict = annotStarts[i]\n cssParts = []\n for key, val in aDict[\"css\"].iteritems():\n cssParts.append(\"%s:%s\" % (key, val))\n cssStr = \";\".join(cssParts)\n tagStr = \"<span style='%s'>\" % cssStr\n if not tagStr in openAnnots:\n ret.append(tagStr)\n openAnnots[tagStr]+=1\n annotEnds[aDict[\"end\"]].add(tagStr)\n\n if i in ulStarts:\n ret.append(\"<u>\")\n openTags.add(\"u\")\n if i in ulEnds:\n ret.append(\"</u>\")\n if \"u\" in openTags:\n openTags.remove(\"u\")\n if i in boldStarts:\n ret.append(\"<b>\")\n openTags.add(\"b\")\n if i in boldEnds:\n ret.append(\"</b>\")\n if \"strong\" in openTags:\n openTags.remove(\"b\")\n ret.append(nucl)\n if (i+1) % 80==0:\n ret.append(\"<br>\")\n for tag in openTags:\n ret.append(\"</%s>\" % tag)\n return \"\".join(ret)\n #return seq[:start]+\"<u>\"+seq[start:end]+\"</u>\"+seq[end:]",
"def item_join(items, sep=' '):\n return sep.join(items).strip()",
"def join_english(items: Iterable[Any], conj=\" and \"):\n items_list = list(items)\n if len(items_list) > 1:\n return \", \".join(str(v) for v in items_list[:-1]) + conj + str(items_list[-1])\n else:\n return \", \".join(str(v) for v in items_list)",
"def join(items):\n return ''.join(items)",
"def q_join(lst, sep=','):\r\n return sep.join(dquote(itm) for itm in lst)",
"def commaAndify(seq, comma=',', And='and'):\n L = list(seq)\n if len(L) == 0:\n return ''\n elif len(L) == 1:\n return ''.join(L) # We need this because it raises TypeError.\n elif len(L) == 2:\n L.insert(1, And)\n return ' '.join(L)\n else:\n L[-1] = '%s %s' % (And, L[-1])\n sep = '%s ' % comma\n return sep.join(L)",
"def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
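A brief sketch of the `join` behavior described above, assuming the `Markup` class is Genshi's (`genshi.core`); the joined strings are arbitrary examples.

```python
from genshi.core import Markup  # assumption: Markup as shown in the row above

separator = Markup('<br/>')
# Plain strings are escaped on the way in; existing Markup instances pass through.
result = separator.join(['AT&T', Markup('<b>bold</b>')])
print(result)  # AT&amp;T<br/><b>bold</b>
```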
Create a Markup instance from a string and escape special characters it may contain (<, >, & and \"). >>> escape('"1 < 2"') <Markup u'&#34;1 &lt; 2&#34;'> If the `quotes` parameter is set to `False`, the \" character is left as is. Escaping quotes is generally only required for strings that are to be used in attribute values. >>> escape('"1 < 2"', quotes=False) <Markup u'"1 &lt; 2"'> | def escape(cls, text, quotes=True):
if not text:
return cls()
if type(text) is cls:
return text
if hasattr(text, '__html__'):
return cls(text.__html__())
text = text.replace('&', '&amp;') \
.replace('<', '&lt;') \
.replace('>', '&gt;')
if quotes:
text = text.replace('"', '&#34;')
return cls(text) | [
"def escape_quotes(self, str): \n return str.replace(\"\\\"\", \"\\\\\\\"\")",
"def escape(t):\n return (t\n .replace(\""\", '@quot;')\n .replace(\"&\", \"@amp;\").replace(\"<\", \"@lt;\").replace(\">\", \"@gt;\")\n\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\n .replace(\"'\", \"'\").replace('\"', \""\")\n .replace(\"\\\\\", \"\\")\n\n .replace(\"@quot;\", '"')\n .replace(\"@amp;\", \"&\").replace(\"@lt;\", \"<\").replace(\"@gt;\", \">\")\n\n )",
"def html_escape(s):\n if s is None:\n return ''\n if hasattr(s, '__html__'):\n return s.__html__()\n if not isinstance(s, basestring):\n if hasattr(s, '__unicode__'):\n s = unicode(s)\n else:\n s = str(s)\n s = cgi.escape(s, True)\n if isinstance(s, unicode):\n s = s.encode('ascii', 'xmlcharrefreplace')\n return s",
"def _escape(self, s):\r\n return s",
"def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")",
"def escape_xml_string(s):\n return xml.sax.saxutils.escape(s)",
"def escape_html(s):\n return cgi.escape(s, quote = True)",
"def test_attr_escape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n <elem class=\"$myvar\"/>\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n <elem class=\""foo"\"/>\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))",
"def escape_string(self, s): # real signature unknown; restored from __doc__\n pass",
"def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'"'))\n return _write",
"def _quote_escape(item):\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)",
"def format_html(format_string, *args, **kwargs):\n args_safe = map(html.conditional_escape, args)\n kwargs_safe = dict([(k, html.conditional_escape(v)) for (k, v) in\n six.iteritems(kwargs)])\n return html.mark_safe(format_string.format(*args_safe, **kwargs_safe))",
"def escape( *args ):\n cmd = ''\n for s in args:\n if cmd: cmd += ' '\n if not s:\n cmd += '\"\"'\n else:\n cmd += pipes.quote(s)\n return cmd",
"def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&\")\r\n data = data.replace(\"<\", \"<\")\r\n data = data.replace(\">\", \">\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data",
"def test_escape_tags(self):\n simple_replace = ('#', '$', '%', '_', '{', '}', '&')\n for character in simple_replace:\n escaped_char = u'\\\\{}'.format(character)\n self.assertEqual(tags.latex_safe(character), escaped_char)\n\n self.assertEqual(tags.latex_safe('\\\\'), '\\\\textbackslash{}')\n self.assertEqual(tags.latex_safe('~'), '\\\\textasciitidle{}')\n self.assertEqual(tags.latex_safe('^'), '\\\\^{}')",
"def _QuoteString(s):\n single_quote_count = s.count('\\'')\n double_quote_count = s.count('\"')\n quote_delim = '\\'' if single_quote_count <= double_quote_count else '\"'\n # Apply escaping to the chosen quote character and the backslash.\n encoded = re.sub(r'([%s\\\\])' % quote_delim, r'\\\\\\1', s)\n return quote_delim + encoded + quote_delim",
"def _quote(s):\n return b\"'%s'\" % stringutil.escapestr(pycompat.bytestr(s))",
"def test_escape(fb, fb_secure):\n\n assert fb.escape('This has \"quotes\"') == 'This has \\\\\"quotes\\\\\"'\n assert fb.escape('This has a backslash \\\\') == 'This has a backslash \\\\\\\\'\n assert fb.escape('This has \\\\\"both\\\\\"') == 'This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"'",
"def UndoSafeForHTML(escaped_string):\n raw_string = escaped_string.replace('<', '<')\n raw_string = raw_string.replace('>', '>')\n raw_string = raw_string.replace('"', '\"')\n raw_string = raw_string.replace('&', '&')\n return raw_string"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reverse-escapes &, <, > and \" and returns a `unicode` object. >>> Markup('1 &lt; 2').unescape() u'1 < 2' | def unescape(self):
if not self:
return ''
return unicode(self).replace('&#34;', '"') \
.replace('&gt;', '>') \
.replace('&lt;', '<') \
.replace('&amp;', '&') | [
"def htmlunescape(value):\n\n retVal = value\n if value and isinstance(value, basestring):\n codes = ((\"<\", '<'), (\">\", '>'), (\""\", '\"'), (\" \", ' '), (\"&\", '&'), (\"'\", \"'\"))\n retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)\n try:\n retVal = re.sub(r\"&#x([^ ;]+);\", lambda match: unichr(int(match.group(1), 16)), retVal)\n except ValueError:\n pass\n return retVal",
"def unescape(text):\r\n\r\n if not text:\r\n return text\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == \"&#\":\r\n # character reference\r\n try:\r\n if text[:3] == \"&#x\":\r\n return unichr(int(text[3:-1], 16))\r\n else:\r\n return unichr(int(text[2:-1]))\r\n except ValueError:\r\n pass\r\n else:\r\n # named entity\r\n try:\r\n text = unichr(htmlentitydefs.name2codepoint[text[1: -1]])\r\n except KeyError:\r\n pass\r\n return text # leave as is\r\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def unescape_html(s):\n return HTMLParser.unescape.__func__(HTMLParser, s)",
"def html_unescape(text):\n rep = dict((re.escape(k), v) for k, v in HTML_UNESCAPE_TABLE.items())\n pattern = re.compile(\"|\".join(rep.keys()))\n return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)",
"def html_unquote(s, encoding=None):\r\n if isinstance(s, str):\r\n if s == '':\r\n # workaround re.sub('', '', u'') returning '' < 2.5.2\r\n # instead of u'' >= 2.5.2\r\n return u''\r\n s = s.decode(encoding or default_encoding)\r\n return _unquote_re.sub(_entity_subber, s)",
"def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)",
"def _html_decode(text):\n return text.replace('>', '>').replace('<', '<').replace('&', '&').replace('"', '\"')",
"def UndoSafeForHTML(escaped_string):\n raw_string = escaped_string.replace('<', '<')\n raw_string = raw_string.replace('>', '>')\n raw_string = raw_string.replace('"', '\"')\n raw_string = raw_string.replace('&', '&')\n return raw_string",
"def decode_html_entities(text):\n h = HTMLParser()\n return h.unescape(text)",
"def raw_unicode(raw):\r\n return raw.encode(\"utf8\").decode(\"unicode-escape\")",
"def __html2unicode(self, s):\n # First the digits:\n ents = set(html_entity_digit_re.findall(s))\n if len(ents) > 0:\n for ent in ents:\n entnum = ent[2:-1]\n try:\n entnum = int(entnum)\n s = s.replace(ent, unichr(entnum))\n except:\n pass\n # Now the alpha versions:\n ents = set(html_entity_alpha_re.findall(s))\n ents = filter((lambda x : x != amp), ents)\n for ent in ents:\n entname = ent[1:-1]\n try: \n s = s.replace(ent, unichr(htmlentitydefs.name2codepoint[entname]))\n except:\n pass \n s = s.replace(amp, \" and \")\n return s",
"def unicode_unquote(s):\n if s is not None:\n return urllib.unquote(s)",
"def convert_unicode_to_html(text):\n return html.escape(text).encode(\"ascii\", \"xmlcharrefreplace\").decode()",
"def escapeForContent(data):\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n data = data.replace(b'&', b'&'\n ).replace(b'<', b'<'\n ).replace(b'>', b'>')\n return data",
"def escape(t):\n return (t\n .replace(\""\", '@quot;')\n .replace(\"&\", \"@amp;\").replace(\"<\", \"@lt;\").replace(\">\", \"@gt;\")\n\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\n .replace(\"'\", \"'\").replace('\"', \""\")\n .replace(\"\\\\\", \"\\")\n\n .replace(\"@quot;\", '"')\n .replace(\"@amp;\", \"&\").replace(\"@lt;\", \"<\").replace(\"@gt;\", \">\")\n\n )",
"def test_unescape(fb, fb_secure):\n\n assert fb.unescape('This has \\\\\"quotes\\\\\"') == 'This has \"quotes\"'\n assert fb.unescape('This has a backslash \\\\\\\\') == 'This has a backslash \\\\'\n assert fb.unescape('This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"') == 'This has \\\\\"both\\\\\"'",
"def websafe(val):\r\n if val is None:\r\n return u''\r\n elif isinstance(val, str):\r\n val = val.decode('utf-8')\r\n elif not isinstance(val, unicode):\r\n val = unicode(val)\r\n\r\n return htmlquote(val)",
"def escape_html(html):\n #boileeeeeeerplate\n return unicode(html).replace('&', '&').replace('<', '<').replace('>', '>').replace('\"', '"').replace(\"'\", ''')",
"def udec(thing, encoding=None):\n if encoding is None:\n encoding = uenc_encoding\n\n if isinstance(thing, unicode):\n return thing\n\n try:\n return thing.decode(encoding)\n except UnicodeDecodeError:\n return repr(thing.decode('ISO-8859-1'))",
"def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&\")\r\n data = data.replace(\"<\", \"<\")\r\n data = data.replace(\">\", \">\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the `QName` instance. | def __new__(cls, qname):
if type(qname) is cls:
return qname
qname = qname.lstrip('{')
parts = qname.split('}', 1)
if len(parts) > 1:
self = unicode.__new__(cls, '{%s' % qname)
self.namespace, self.localname = map(unicode, parts)
else:
self = unicode.__new__(cls, qname)
self.namespace, self.localname = None, unicode(qname)
return self | [
"def __new__(cls, qname):\r\n if type(qname) is cls:\r\n return qname\r\n\r\n qname = qname.lstrip('{')\r\n parts = qname.split('}', 1)\r\n if len(parts) > 1:\r\n self = str.__new__(cls, '{%s' % qname)\r\n self.namespace, self.localname = list(map(str, parts))\r\n else:\r\n self = str.__new__(cls, qname)\r\n self.namespace, self.localname = None, str(qname)\r\n return self",
"def createElementNS(namespaceURI, qualifiedName, options=None):\n from domonic.html import tag, tag_init\n el = type(qualifiedName, (tag, Element), {'name': qualifiedName, '__init__': tag_init})\n el.namespaceURI = namespaceURI\n return el()",
"def fromClarkName(self, p_str, QXmlNamePool): # real signature unknown; restored from __doc__\n return QXmlName",
"def createDocumentType(self, qualifiedName, publicId, systemId):\n return DocumentType(qualifiedName, publicId, systemId)\n # d = DocumentType()\n # d.name = qualifiedName\n # d.publicId = publicId\n # d.systemId = systemId\n # return d\n # pass",
"def qstrvec_t_create(*args) -> \"PyObject *\":\n return _ida_pro.qstrvec_t_create(*args)",
"def create(cls) -> str:\n return f'create_{cls.__name__.lower()}'",
"def create_namespace(self):\n ns = self.project.create_namespace()\n ns['task'] = self\n return ns",
"def create_namespace(self):\n name = 'namespace-{random_string}'.format(random_string=random_str(5))\n\n namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=name))\n\n self.core_api.create_namespace(namespace)\n\n logger.info(\"Creating namespace: %s\", name)\n\n # save all namespaces created with this backend\n self.managed_namespaces.append(name)\n\n # wait for namespace to be ready\n Probe(timeout=30, pause=5, expected_retval=True,\n fnc=self._namespace_ready, namespace=name).run()\n\n return name",
"def _makeelement(self, name):\n return self.__class__(name)",
"def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None):\n pass",
"def namespace_create(self, name, size=None, password=None, public=True):\n self.state.check('status', 'running', 'ok')\n if self._namespace_exists_update_delete(name):\n raise ValueError('Namespace {} already exists'.format(name))\n self.data['namespaces'].append({'name': name, 'size': size, 'password': password, 'public': public})\n self._zerodb_sal.deploy()",
"def prefix(self, QXmlNamePool): # real signature unknown; restored from __doc__\n return \"\"",
"def __createXMLElement (name, descr = None, attrs = {}, nsmap = {}):\n\n element = etree.Element(name, attrs, nsmap=nsmap)\n \n if descr != None:\n for match in regex.finditer(descr):\n descr = descr[:match.start()] + \"?\" + descr[match.end():]\n element.text= descr\n\n return (element)",
"def _expand_qname(self, qname):\n if type(qname) is not rt.URIRef:\n raise TypeError(\"Cannot expand qname of type {}, must be URIRef\"\n .format(type(qname)))\n for ns in self.graph.namespaces():\n if ns[0] == qname.split(':')[0]:\n return rt.URIRef(\"%s%s\" % (ns[1], qname.split(':')[-1]))\n return qname",
"def instance(origin, copy, identifier):\n newInstance = ObName()\n newInstance.origin = origin\n newInstance.copy = copy\n newInstance.identifier = identifier\n return newInstance",
"def attributeNodeNS(self, QString, QString_1): # real signature unknown; restored from __doc__\r\n return QDomAttr",
"def create_queue(self, queue: Queue, address: Address, durable: bool = True):",
"def create_queue(self, queue: Queue, address: Address, durable: bool=True):\n pass",
"def declare_exchange_name(self, exchange_name=None, exchange_space_id=''):\n # get xntype and translate @TODO should we just consolidate these to be the same?\n typemap = { 'XN_SERVICE':'service', 'XN_PROCESS':'process', 'XN_QUEUE':'queue' }\n if not exchange_name.xn_type in typemap:\n raise BadRequest(\"Unknown exchange name type: %s\" % exchange_name.xn_type)\n\n xntype = typemap[exchange_name.xn_type]\n\n exchange_space = self.read_exchange_space(exchange_space_id)\n exchange_name_id,rev = self.clients.resource_registry.create(exchange_name)\n\n aid = self.clients.resource_registry.create_association(exchange_space_id, PRED.hasExchangeName, exchange_name_id)\n\n # call container API\n xs = exchange.ExchangeSpace(self.container.ex_manager, exchange_space.name)\n self.container.ex_manager._create_xn(xntype, exchange_name.name, xs, use_ems=False)\n\n return exchange_name_id #QUestion - is this the correct canonical name?"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
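A short sketch of how the qname parsing above behaves, again assuming Genshi's `QName` from `genshi.core`; the namespace URI is only an example value.

```python
from genshi.core import QName  # assumption: QName as shown in the row above

q = QName('{http://www.w3.org/1999/xhtml}div')
print(q)            # {http://www.w3.org/1999/xhtml}div
print(q.namespace)  # http://www.w3.org/1999/xhtml
print(q.localname)  # div

# Without a '}' separator the whole string becomes the local name.
print(QName('div').namespace)  # None
```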
Determine whether the given css property declaration is to be considered safe for inclusion in the output. | def is_safe_css(self, propname, value):
if propname not in self.safe_css:
return False
if propname.startswith('margin') and '-' in value:
# Negative margins can be used for phishing
return False
return True | [
"def is_property_allowed(prop):\n return self.allowed_styles is None or \\\n prop.lower() in self.allowed_styles",
"def _isprop(self, attr: str) -> bool:\n\n return isinstance(attr, property)",
"def _is_property(self,key):\n return bool(re.match(database.RE_KIMID, key))",
"def _HasProperty(step, prop):\n try:\n step.getProperty(prop)\n return True\n # pylint: disable=W0702\n except:\n return False",
"def explicit_no_additional_properties(self) -> bool:\n return bool(\n (self.properties or self.pattern_properties)\n and self.no_additional_properties\n and not self.additional_properties\n )",
"def _has_prop_on_mol_block(block, prop_key):\n\n if prop_key not in expected_props:\n raise ValueError('%s is not a supported property type.', prop_key)\n has_prop = False\n for line in block:\n if line.strip() == ('> <%s>' % prop_key):\n has_prop = True\n return has_prop",
"def _check_style(style):\n\t\treturn style in plt.style.available",
"def check_minimal_propset():\n\n unset_properties = []\n for node, val in REQUIRED_PROPS.iteritems():\n for param in val:\n if CONFIG[node][param] is None:\n unset_properties.append(\"%s.%s\" % (node, param))\n\n if len(unset_properties) > 0:\n print(\"[ERROR] There is not enough information to proceed.\"\n \"Please define these properties: %s\" % unset_properties)\n return False\n else:\n return True",
"def check_properties(self):\n matches = const.regex['properties'].findall(self.data)\n if matches:\n for _, x in enumerate(matches):\n self.properties[x[0].lower()] = x[1]",
"def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]:\n src_line = source_line.strip()\n cstyle_start = '/*' in src_line\n cstyle_end = '*/' in src_line\n return cstyle_start, cstyle_end",
"def decorated_with_property(node: astroid.FunctionDef) -> bool:\n if not node.decorators:\n return False\n for decorator in node.decorators.nodes:\n try:\n if _is_property_decorator(decorator):\n return True\n except astroid.InferenceError:\n pass\n return False",
"def isProperty(self,uid):\n return( self.id2node[uid].group==\"Property\" )",
"def __validateKeys(self, style_dict):\n\n invalidKeys = [key for key in style_dict.keys()\n if key not in STYLE_PROPERTIES]\n if len(invalidKeys) > 0:\n raise InvalidPropertyError(str(invalidKeys))",
"def is_required_property(self) -> bool:\n return self.parent and self.property_name in self.parent.required_properties",
"def _is_valid_element(self, element):\n # pylint: disable=no-self-use\n\n return element.get_tag_name() in AccessibleCSSImplementation.VALID_TAGS",
"def sanitize_css(self, text):\r\n decls = []\r\n text = self._strip_css_comments(self._replace_unicode_escapes(text))\r\n for decl in text.split(';'):\r\n decl = decl.strip()\r\n if not decl:\r\n continue\r\n try:\r\n propname, value = decl.split(':', 1)\r\n except ValueError:\r\n continue\r\n if not self.is_safe_css(propname.strip().lower(), value.strip()):\r\n continue\r\n is_evil = False\r\n if self._EXPRESSION_SEARCH(value):\r\n is_evil = True\r\n for match in self._URL_FINDITER(value):\r\n if not self.is_safe_uri(match.group(1)):\r\n is_evil = True\r\n break\r\n if not is_evil:\r\n decls.append(decl.strip())\r\n return decls",
"def is_code_prop(prop):\n return prop.startswith('co_')",
"def check_css(css):\r\n # Using 'encoding' adds a CSSCharsetRule\r\n rule = css.stylesheet.rules[-1]\r\n assert rule.selector.as_css() == 'h1::before'\r\n content, background = rule.declarations\r\n\r\n assert content.name == 'content'\r\n string, = content.value\r\n assert string.value == 'I løvë Unicode'\r\n\r\n assert background.name == 'background-image'\r\n url_value, = background.value\r\n assert url_value.type == 'URI'\r\n url = urljoin(css.base_url, url_value.value)\r\n assert url.startswith('file:')\r\n assert url.endswith('weasyprint/tests/resources/pattern.png')",
"def has(self, prop: P, quiet: bool = False) -> bool:\n prop_name = self._prop_name(prop, quiet=quiet)\n return prop_name in self._properties"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine whether the given URI is to be considered safe for inclusion in the output. The default implementation checks whether the scheme of the URI is in the set of allowed URIs (`safe_schemes`). >>> sanitizer = HTMLSanitizer() | def is_safe_uri(self, uri):
if '#' in uri:
uri = uri.split('#', 1)[0] # Strip out the fragment identifier
if ':' not in uri:
return True # This is a relative URI
chars = [char for char in uri.split(':', 1)[0] if char.isalnum()]
return ''.join(chars).lower() in self.safe_schemes | [
"def safe_uri(uri):\n path, query, frag = split_path(uri)\n safe = True\n for part in (path, query, frag):\n safe = safe and safe_chars_regex.search(part)\n return safe",
"def _ManifestUrlHasSecureScheme(self):\n secure_schemes = (\n \"file\",\n \"https\",\n \"ssh\",\n \"persistent-https\",\n \"sso\",\n \"rpc\",\n )\n parse_results = urllib.parse.urlparse(self._manifest_url)\n return parse_results.scheme in secure_schemes",
"def is_safe_url(url, host):\r\n if not url:\r\n return False\r\n\r\n parsed = urllib.parse.urlparse(url)\r\n\r\n return ((not parsed.netloc or parsed.netloc == host) and\r\n (not parsed.scheme or parsed.scheme in [\"http\", \"https\"]))",
"def checkUri(uri):\n # Must replace spaces\n space = ' '\n newUri = uri.replace(' ', '%20')\n return newUri",
"def MakeUrllibSafe(uriRef):\r\n # IDN support requires decoding any percent-encoded octets in the\r\n # host part (if it's a reg-name) of the authority component, and when\r\n # doing DNS lookups, applying IDNA encoding to that string first.\r\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\r\n # modules accept Unicode strings and apply IDNA encoding automatically\r\n # where necessary. However, urllib.urlopen() has not yet been updated\r\n # to do the same; it raises an exception if you give it a Unicode\r\n # string, and does no conversion on non-Unicode strings, meaning you\r\n # have to give it an IDNA string yourself. We will only support it on\r\n # Python 2.3 and up.\r\n #\r\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\r\n if isinstance(uriRef, unicode):\r\n try:\r\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\r\n except UnicodeError:\r\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\r\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\r\n if auth and auth.find('@') > -1:\r\n userinfo, hostport = auth.split('@')\r\n else:\r\n userinfo = None\r\n hostport = auth\r\n if hostport and hostport.find(':') > -1:\r\n host, port = hostport.split(':')\r\n else:\r\n host = hostport\r\n port = None\r\n if host and REG_NAME_HOST_PATTERN.match(host):\r\n # percent-encoded hostnames will always fail DNS lookups\r\n host = urllib.unquote(host) #PercentDecode(host)\r\n # IDNA-encode if possible.\r\n # We shouldn't do this for schemes that don't need DNS lookup,\r\n # but are there any (that you'd be calling urlopen for)?\r\n if sys.version_info[0:2] >= (2, 3):\r\n if isinstance(host, str):\r\n host = host.decode('utf-8')\r\n host = host.encode('idna')\r\n # reassemble the authority with the new hostname\r\n # (percent-decoded, and possibly IDNA-encoded)\r\n auth = ''\r\n if userinfo:\r\n auth += userinfo + '@'\r\n auth += host\r\n if port:\r\n auth += ':' + port\r\n\r\n # On Windows, ensure that '|', not ':', is used in a drivespec.\r\n if os.name == 'nt' and scheme == 'file':\r\n path = path.replace(':', '|', 1)\r\n\r\n # Note that we drop fragment, if any. See RFC 3986 sec. 3.5.\r\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\r\n\r\n return uri",
"def MakeUrllibSafe(uriRef):\n # IDN support requires decoding any percent-encoded octets in the\n # host part (if it's a reg-name) of the authority component, and when\n # doing DNS lookups, applying IDNA encoding to that string first.\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\n # modules accept Unicode strings and apply IDNA encoding automatically\n # where necessary. However, urllib.urlopen() has not yet been updated\n # to do the same; it raises an exception if you give it a Unicode\n # string, and does no conversion on non-Unicode strings, meaning you\n # have to give it an IDNA string yourself. We will only support it on\n # Python 2.3 and up.\n #\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\n if isinstance(uriRef, unicode):\n try:\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\n except UnicodeError:\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\n if auth and auth.find('@') > -1:\n userinfo, hostport = auth.split('@')\n else:\n userinfo = None\n hostport = auth\n if hostport and hostport.find(':') > -1:\n host, port = hostport.split(':')\n else:\n host = hostport\n port = None\n if host and REG_NAME_HOST_PATTERN.match(host):\n # percent-encoded hostnames will always fail DNS lookups\n host = urllib.unquote(host) #PercentDecode(host)\n # IDNA-encode if possible.\n # We shouldn't do this for schemes that don't need DNS lookup,\n # but are there any (that you'd be calling urlopen for)?\n if sys.version_info[0:2] >= (2, 3):\n if isinstance(host, str):\n host = host.decode('utf-8')\n host = host.encode('idna')\n # reassemble the authority with the new hostname\n # (percent-decoded, and possibly IDNA-encoded)\n auth = ''\n if userinfo:\n auth += userinfo + '@'\n auth += host\n if port:\n auth += ':' + port\n\n # On Windows, ensure that '|', not ':', is used in a drivespec.\n if os.name == 'nt' and scheme == 'file':\n path = path.replace(':', '|', 1)\n\n # Note that we drop fragment, if any. See RFC 3986 sec. 3.5.\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\n\n return uri",
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc",
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc",
"def is_legacy_signed_url_valid(user, url):\n parsed = urlsplit(url)\n params = MultiDict(parse_qs(parsed.query))\n try:\n signature = params.pop('token')\n except KeyError:\n return False\n\n url = urlunsplit((\n '',\n '',\n parsed.path,\n urlencode(list(params.lists()), doseq=True),\n parsed.fragment\n ))\n signer = Signer(user.signing_secret, salt='url-signing')\n return signer.verify_signature(url.encode(), signature)",
"def Sanitize(Content): # for your protection\n \n ### strip any illegal HTML\n Content = re.sub(r\"(?is)<.+?>\", HTMLChecker, Content)\n\n ### validate any links\n Content = re.sub(r'(?is)(<A .*?HREF=\")(.+?)(\".*?>)', LinkChecker, Content)\n \n ### then escape any funky characters\n ### TODO: is this really neccesary for the database?\n \n # Content = re.escape(Content)\n\n return Content",
"def is_uri(val: str = None) -> bool:\n is_valid = False\n validator = validators.Validator().allow_schemes(\n \"http\", \"https\", \"ftp\"\n ).require_presence_of(\n \"scheme\", \"host\"\n ).check_validity_of(\n \"scheme\", \"host\", \"path\"\n )\n uri = uri_reference(val)\n try:\n validator.validate(uri)\n is_valid = True\n except (InvalidComponentsError, MissingComponentError, UnpermittedComponentError) as ex:\n logger.debug(ex)\n return is_valid",
"def default_validation(url):\n return bool(urlparse(url).scheme)",
"def validate_uri(self, uri):\n logging.debug(\"Validating URL %s\" % uri)\n\n # Return None in error case. This is 'null' in final output.\n try:\n if not validators.url(uri):\n uri = None\n except validators.utils.ValidationFailure:\n logging.error(\"Invalid URL %s\" % uri)\n uri = None\n return uri",
"def _sanitizeURL(self, couchURL):\n return couchURL",
"def es_url_valida(url_):\n url_parseado = urlparse.urlparse(url_)\n return all([url_parseado.scheme, url_parseado.netloc])",
"def has_valid_scheme(uri: ParseResult) -> bool:\n scheme = uri.scheme\n return scheme == 'ws' or scheme == 'warp'",
"def test_check_uri(self):\n # OK\n self.assertTrue(SiteService.check_uri(\"localhost:12345\"))\n self.assertTrue(SiteService.check_uri(\"www.google.com:12345\"))\n self.assertTrue(SiteService.check_uri(\"127.0.0.1:12345\"))\n # Missing Port\n self.assertFalse(SiteService.check_uri(\"localhost:\"))\n # Missing seperator\n self.assertFalse(SiteService.check_uri(\"localhost\"))\n self.assertFalse(SiteService.check_uri(\"localhost12345\"))\n self.assertFalse(SiteService.check_uri(\"localhost@12345\"))\n # Starts with invalid char\n self.assertFalse(SiteService.check_uri(\"_localhost:12345\"))\n self.assertFalse(SiteService.check_uri(\".localhost:12345\"))\n # Non-numeric port\n self.assertFalse(SiteService.check_uri(\"localhost:bah\"))",
"def URL_CANONICALIZER(self):\n return util.UrlCanonicalizer(\n domain=self.gr_source.DOMAIN,\n headers=util.REQUEST_HEADERS)",
"def validateURL(url):",
"def is_uri(uri):\n scheme, netloc, path, params, query, fragment = urlparse(uri)\n if scheme and netloc and path:\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
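The URI check above is easiest to see with a few inputs. This is a minimal sketch assuming Genshi's `HTMLSanitizer` from `genshi.filters.html`, whose default `safe_schemes` set (not shown in this row) includes common schemes such as http and https.

```python
from genshi.filters.html import HTMLSanitizer  # assumption: the sanitizer class named in the query above

sanitizer = HTMLSanitizer()
print(sanitizer.is_safe_uri('http://example.org/page'))             # True
print(sanitizer.is_safe_uri('#fragment-only'))                      # True (relative URI)
print(sanitizer.is_safe_uri('javascript:alert(document.cookie)'))   # False
```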
Remove potentially dangerous property declarations from CSS code. In particular, properties using the CSS ``url()`` function with a scheme that is not considered safe are removed. | def sanitize_css(self, text):
decls = []
text = self._strip_css_comments(self._replace_unicode_escapes(text))
for decl in text.split(';'):
decl = decl.strip()
if not decl:
continue
try:
propname, value = decl.split(':', 1)
except ValueError:
continue
if not self.is_safe_css(propname.strip().lower(), value.strip()):
continue
is_evil = False
if self._EXPRESSION_SEARCH(value):
is_evil = True
for match in self._URL_FINDITER(value):
if not self.is_safe_uri(match.group(1)):
is_evil = True
break
if not is_evil:
decls.append(decl.strip())
return decls | [
"def minify_properties(src):\n min_re = re.compile(r\"(^|[^\\\\](?:\\\\\\\\)*)#.*$\", re.M)\n src = min_re.sub(r\"\\1\", src)\n src = re.sub(r\"\\n+\", r\"\\n\", src)\n return src",
"def remove_urls(document):\n return re.sub(r'https?://(www\\.)?[-\\w@:%.\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-\\w@:%_\\+.~#?&/=;]*)', '', document)",
"def ClearExternalCssStyle(matchobj):\n\treturn ''",
"def repair_broken_urls(line):\n def _chop_spaces_in_url_match(m):\n \"\"\"Suppresses spaces in a matched URL.\"\"\"\n return m.group(1).replace(\" \", \"\")\n for ptn in re_list_url_repair_patterns:\n line = ptn.sub(_chop_spaces_in_url_match, line)\n return line",
"def clean_code_prop(prop):\n if is_code_prop(prop) is False:\n return prop\n return prop.replace('co_', '')",
"def clean(tweet):\n clean = re.sub(r'https?:\\/\\/\\w+(\\.\\w+)*(:\\w+)?(/[A-Za-z0-9-_\\.]*)* ?', '', tweet)\n clean = re.sub(r'#', '', clean)\n clean = re.sub(r'!', '', clean)\n clean = re.sub(r'\\.\\.\\.', '', clean)\n clean = re.sub(r',', '', clean)\n return clean",
"def ClearExternalCss(matchobj):\n\treturn ''",
"def preprocess_declarations(base_url, declarations):\r\n def validation_error(level, reason):\r\n getattr(LOGGER, level)(\r\n 'Ignored `%s: %s` at %i:%i, %s.',\r\n declaration.name, declaration.value.as_css(),\r\n declaration.line, declaration.column, reason)\r\n\r\n for declaration in declarations:\r\n name = declaration.name\r\n\r\n if name in PREFIXED and not name.startswith(PREFIX):\r\n validation_error(\r\n 'warning',\r\n 'the property is experimental or non-standard, use '\r\n + PREFIX + name)\r\n continue\r\n\r\n if name in NOT_PRINT_MEDIA:\r\n validation_error(\r\n 'info', 'the property does not apply for the print media')\r\n continue\r\n\r\n if name.startswith(PREFIX):\r\n unprefixed_name = name[len(PREFIX):]\r\n if unprefixed_name in UNPREFIXED:\r\n validation_error(\r\n 'warning',\r\n 'the property was unprefixed, use ' + unprefixed_name)\r\n continue\r\n if unprefixed_name in PREFIXED:\r\n name = unprefixed_name\r\n\r\n expander_ = EXPANDERS.get(name, validate_non_shorthand)\r\n tokens = remove_whitespace(declaration.value)\r\n try:\r\n # Use list() to consume generators now and catch any error.\r\n result = list(expander_(base_url, name, tokens))\r\n except InvalidValues as exc:\r\n validation_error(\r\n 'warning',\r\n exc.args[0] if exc.args and exc.args[0] else 'invalid value')\r\n continue\r\n\r\n priority = declaration.priority\r\n for long_name, value in result:\r\n yield long_name.replace('-', '_'), value, priority",
"def remove_http(self):\n\n if re.search(\"https\",self.get_url()):\n self.set_url(self.get_url().replace('https://',''))\n elif re.search(\"http\",self.get_url()):\n self.set_url(self.get_url().replace('http://',''))\n elif re.search(\"ftp\",self.get_url()):\n self.set_url(self.get_url().replace('ftp://',''))\n else:\n pass",
"def archive_css(self, css_string, base_url):\n # It would be nice to do this with a proper CSS parser, but all the\n # ones I've tried are missing modern CSS features, e.g. ignore URIs in\n # a @font-face rule.\n for match in re.finditer(r'url\\((?P<url>[^\\)]+)\\)', css_string):\n resource_url = match.group('url')\n resource_url = resource_url.strip('\"').strip(\"'\")\n\n # Something to do with SVG resources that are identified elsewhere\n # in the stylesheet\n resource_url = unquote_plus(resource_url)\n if resource_url.startswith('#'):\n continue\n\n # Any existing data: URIs are already self-contained and don't\n # need changing.\n if resource_url.startswith('data:'):\n continue\n\n # Determine the media type for the data: URI\n resource_url = urljoin(base_url, resource_url)\n data = self._get_base64_encode(resource_url)\n if data is not None:\n css_string = css_string.replace(match.group('url'), data)\n\n return css_string",
"def sanitize_source(src):\n src_lines = src.splitlines(True)\n for i, line in enumerate(src_lines[:2]):\n if _CODING_PATTERN.match(line):\n src_lines[i] = re.sub('#.*$', '# (removed coding)', line)\n return ''.join(src_lines)",
"def fix_uris(self, host_url):\n ret_val = copy.deepcopy(self)\n ret_val.uri = uris.ATTRIBUTE_DEF_URI_STR % (host_url, ret_val.uri)\n return ret_val",
"def _remove_file_scheme(path):\n path = _file_utils.remove_prefix(path, \"file://\")\n path = _file_utils.remove_prefix(path, \"file:\")\n\n return path",
"def clean_url(url) -> str:\n if 'http' not in url:\n return f'http://{url}'\n return url",
"def strip_protocol(path: str) -> str:\n return re.sub(r\"https?:\\/\\/\", \"\", path)",
"def remove_URLs(section_content):\n\n # remove URL with regexp\n section_content = re.sub(r'http\\S+', '', section_content)\n section_content = re.sub(r'www\\S+', '', section_content)\n section_content = re.sub(r'mailto\\S+', '', section_content)\n\n # remove multiple consecutive spaces\n section_content = re.sub(' +', ' ', section_content)\n\n return section_content",
"def restore_boxmodelhack(css):\n return re.sub('___PSEUDOCLASSBMH___', '\"\\\\\"}\\\\\"\"', css)",
"def __cleanUrl(self, url):\n cleanurl = QUrl(url)\n if cleanurl.password():\n # don't save the password in the history\n cleanurl.setPassword(\"\")\n if cleanurl.host():\n # convert host to lower case\n cleanurl.setHost(url.host().lower())\n \n return cleanurl",
"def sanitize_property(self, property):\n\n if property and not property.startswith('.'):\n property = '.%s' % property\n return property"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
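To round this off, a hedged sketch of how the CSS sanitization above might be exercised, assuming Genshi's `HTMLSanitizer` with its default `safe_css` set; the declarations are invented and the exact survivors depend on which property names that set allows.

```python
from genshi.filters.html import HTMLSanitizer  # assumption: sanitize_css lives on this class

sanitizer = HTMLSanitizer()
css = 'color: red; margin-left: -100px; background: url(javascript:alert(1))'
# Declarations whose property name is not in safe_css, negative margins, and
# url()/expression() values with unsafe schemes are dropped; the rest come back
# as a list of declaration strings.
print(sanitizer.sanitize_css(css))
```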
Translate any localizable strings in the given stream. This function shouldn't be called directly. Instead, an instance of the `Translator` class should be registered as a filter with the `Template` or the `TemplateLoader`, or applied as a regular stream filter. If used as a template filter, it should be inserted in front of all the default filters. | def __call__(self, stream, ctxt=None, translate_text=True,
translate_attrs=True):
ignore_tags = self.ignore_tags
include_attrs = self.include_attrs
skip = 0
xml_lang = XML_NAMESPACE['lang']
if not self.extract_text:
translate_text = False
translate_attrs = False
if type(self.translate) is FunctionType:
gettext = self.translate
if ctxt:
ctxt['_i18n.gettext'] = gettext
else:
if IS_PYTHON2:
gettext = self.translate.ugettext
ngettext = self.translate.ungettext
else:
gettext = self.translate.gettext
ngettext = self.translate.ngettext
try:
if IS_PYTHON2:
dgettext = self.translate.dugettext
dngettext = self.translate.dungettext
else:
dgettext = self.translate.dgettext
dngettext = self.translate.dngettext
except AttributeError:
dgettext = lambda _, y: gettext(y)
dngettext = lambda _, s, p, n: ngettext(s, p, n)
if ctxt:
ctxt['_i18n.gettext'] = gettext
ctxt['_i18n.ngettext'] = ngettext
ctxt['_i18n.dgettext'] = dgettext
ctxt['_i18n.dngettext'] = dngettext
if ctxt and ctxt.get('_i18n.domain'):
# TODO: This can cause infinite recursion if dgettext is defined
# via the AttributeError case above!
gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)
for kind, data, pos in stream:
# skip chunks that should not be localized
if skip:
if kind is START:
skip += 1
elif kind is END:
skip -= 1
yield kind, data, pos
continue
# handle different events that can be localized
if kind is START:
tag, attrs = data
if tag in self.ignore_tags or \
isinstance(attrs.get(xml_lang), basestring):
skip += 1
yield kind, data, pos
continue
new_attrs = []
changed = False
for name, value in attrs:
newval = value
if isinstance(value, basestring):
if translate_attrs and name in include_attrs:
newval = gettext(value)
else:
newval = list(
self(_ensure(value), ctxt, translate_text=False)
)
if newval != value:
value = newval
changed = True
new_attrs.append((name, value))
if changed:
attrs = Attrs(new_attrs)
yield kind, (tag, attrs), pos
elif translate_text and kind is TEXT:
text = data.strip()
if text:
data = data.replace(text, unicode(gettext(text)))
yield kind, data, pos
elif kind is SUB:
directives, substream = data
current_domain = None
for idx, directive in enumerate(directives):
# Organize directives to make everything work
# FIXME: There's got to be a better way to do this!
if isinstance(directive, DomainDirective):
# Grab current domain and update context
current_domain = directive.domain
ctxt.push({'_i18n.domain': current_domain})
# Put domain directive as the first one in order to
# update context before any other directives evaluation
directives.insert(0, directives.pop(idx))
# If this is an i18n directive, no need to translate text
# nodes here
is_i18n_directive = any([
isinstance(d, ExtractableI18NDirective)
for d in directives
])
substream = list(self(substream, ctxt,
translate_text=not is_i18n_directive,
translate_attrs=translate_attrs))
yield kind, (directives, substream), pos
if current_domain:
ctxt.pop()
else:
yield kind, data, pos | [
"def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), str):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, str):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, str(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos",
"def translator(string, list=bool, defaultOptions=\"string\", filter=bool, optionsScript=bool, extension=bool, fileCompression=\"string\", objectType=bool, readSupport=bool, loaded=bool, defaultFileRule=bool, writeSupport=bool):\n pass",
"def translate(input_str, lang_source, lang_target):\n pass",
"def _visit_translation(self, s):\r\n return s",
"def translate(self, texts):\n inputs = self._preprocess(texts)\n\n outputs = self._translate_fn(**inputs)\n return self._postprocess(outputs)",
"def _get_user_filtered_source_strings(resources, users, language, *args, **kwargs):\r\n return Translation.objects.user_translated_strings(resources, language, users)",
"def _apply_translations(self, translations, text):\r\n regex = hash_regex()\r\n return regex.sub(\r\n lambda m: translations.get(m.group(0), m.group(0)), text\r\n )",
"def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)",
"def test_apply_translations(self):\r\n hash_normal = '1' * 32 + '_tr'\r\n hash_plural = '2' * 32 + '_pl_0'\r\n text = '%s %s' % (hash_normal, hash_plural)\r\n translations = {\r\n hash_normal: 'normal',\r\n hash_plural: 'plural',\r\n }\r\n compiler = PluralCompiler(resource=None)\r\n res = compiler._apply_translations(translations, text)\r\n self.assertEquals(res, 'normal plural')",
"def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'",
"def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,\r\n search_text=True, comment_stack=None):\r\n if not self.extract_text:\r\n search_text = False\r\n if comment_stack is None:\r\n comment_stack = []\r\n skip = 0\r\n\r\n xml_lang = XML_NAMESPACE['lang']\r\n\r\n for kind, data, pos in stream:\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n if kind is END:\r\n skip -= 1\r\n\r\n if kind is START and not skip:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n continue\r\n\r\n for message in self._extract_attrs((kind, data, pos),\r\n gettext_functions,\r\n search_text=search_text):\r\n yield message\r\n\r\n elif not skip and search_text and kind is TEXT:\r\n text = data.strip()\r\n if text and [ch for ch in text if ch.isalpha()]:\r\n yield pos[1], None, text, comment_stack[-1:]\r\n\r\n elif kind is EXPR or kind is EXEC:\r\n for funcname, strings in extract_from_code(data,\r\n gettext_functions):\r\n # XXX: Do we need to grab i18n:comment from comment_stack ???\r\n yield pos[1], funcname, strings, []\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n in_comment = False\r\n\r\n for idx, directive in enumerate(directives):\r\n # Do a first loop to see if there's a comment directive\r\n # If there is update context and pop it from directives\r\n if isinstance(directive, CommentDirective):\r\n in_comment = True\r\n comment_stack.append(directive.comment)\r\n if len(directives) == 1:\r\n # in case we're in the presence of something like:\r\n # <p i18n:comment=\"foo\">Foo</p>\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n directives.pop(idx)\r\n elif not isinstance(directive, I18NDirective):\r\n # Remove all other non i18n directives from the process\r\n directives.pop(idx)\r\n\r\n if not directives and not in_comment:\r\n # Extract content if there's no directives because\r\n # strip was pop'ed and not because comment was pop'ed.\r\n # Extraction in this case has been taken care of.\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip):\r\n yield message\r\n\r\n for directive in directives:\r\n if isinstance(directive, ExtractableI18NDirective):\r\n for message in directive.extract(self,\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n else:\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n\r\n if in_comment:\r\n comment_stack.pop()",
"def translate_file(self, fname):\n po = polib.pofile(fname)\n\n # FIXME - This might be a bit goofy\n po.metadata['Language'] = \",\".join(self.pipeline_spec)\n po.metadata['Plural-Forms'] = 'nplurals=2; plural= n != 1'\n po.metadata['Content-Type'] = 'text/plain; charset=UTF-8'\n count = 0\n for entry in po:\n if entry.msgid_plural:\n entry.msgstr_plural[0] = self.translate_string(\n entry.msgid)\n entry.msgstr_plural[1] = self.translate_string(\n entry.msgid_plural)\n else:\n entry.msgstr = self.translate_string(entry.msgid)\n\n if 'fuzzy' in entry.flags:\n entry.flags.remove('fuzzy') # clear the fuzzy flag\n count += 1\n\n po.save()\n return '{0}: Translated {1} messages.'.format(fname, count)",
"def user_translated_strings(self, resources, language, users):\r\n source_language = get_source_language(resources)\r\n user_translated_se_ids = frozenset(self.filter(\r\n language=language, rule=5,\r\n user__id__in=users,\r\n resource__in=resources\r\n ).values_list('source_entity_id', flat=True))\r\n # Add resource_id as well to reduce the search space\r\n # by taking advantage of the indexes in resource and language\r\n return self.filter(\r\n resource__in=resources,\r\n source_entity__id__in=user_translated_se_ids,\r\n language=source_language, rule=5,\r\n )",
"def hook_StreamString(state, level, format_ea, str_ea):\n DeepManticore(state).api_stream_string(level, format_ea, str_ea)",
"def translated_source_strings(self, resources, language):\r\n source_language = get_source_language(resources)\r\n translated_se_ids = frozenset(self.filter(\r\n resource__in=resources, language=language, rule=5\r\n ).values_list('source_entity_id', flat=True))\r\n # Add resource_id as well to reduce the search space\r\n # by taking advantage of the indexes in resource and language\r\n return self.filter(\r\n resource__in=resources,\r\n source_entity__id__in=translated_se_ids,\r\n language=source_language, rule=5\r\n )",
"def getStringsStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamStrings:\n ...",
"def translate(self, string, regex=re.compile(r'%\\((\\w+)\\)s')):\r\n substream = None\r\n\r\n def yield_parts(string):\r\n for idx, part in enumerate(regex.split(string)):\r\n if idx % 2:\r\n yield self.values[part]\r\n elif part:\r\n yield (TEXT,\r\n part.replace('\\[', '[').replace('\\]', ']'),\r\n (None, -1, -1)\r\n )\r\n\r\n parts = parse_msg(string)\r\n parts_counter = {}\r\n for order, string in parts:\r\n parts_counter.setdefault(order, []).append(None)\r\n\r\n while parts:\r\n order, string = parts.pop(0)\r\n events = self.events[order].pop(0)\r\n parts_counter[order].pop()\r\n\r\n for event in events:\r\n if event[0] is SUB_START:\r\n substream = []\r\n elif event[0] is SUB_END:\r\n # Yield a substream which might have directives to be\r\n # applied to it (after translation events)\r\n yield SUB, (self.subdirectives[order], substream), event[2]\r\n substream = None\r\n elif event[0] is TEXT:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is START:\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is END:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n elif event[0] is EXPR:\r\n # These are handled on the strings itself\r\n continue\r\n else:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event",
"async def translate(self, ctx, *, message: commands.clean_content):\n\n loop = self.bot.loop\n\n try:\n ret = await loop.run_in_executor(None, self.trans.translate, message)\n except Exception as e:\n return await ctx.send(f'An error occurred: {e.__class__.__name__}: {e}')\n\n embed = discord.Embed(title='Translated', colour=0x4284F3)\n src = googletrans.LANGUAGES.get(ret.src, '(auto-detected)').title()\n dest = googletrans.LANGUAGES.get(ret.dest, 'Unknown').title()\n embed.add_field(name=f'From {src}', value=ret.origin, inline=False)\n embed.add_field(name=f'To {dest}', value=ret.text, inline=False)\n await ctx.send(embed=embed)",
"def translate_or_register( cls, source, language ):\n if source:\n source = unicode( source )\n translation = cls.translate( source, language )\n if not translation:\n session = Session()\n query = session.query( cls )\n translation = query.filter_by( source = source, \n language = language ).first()\n if not translation:\n if ( source, language ) not in cls._cache:\n registered_translation = Translation( source = source, \n language = language )\n cls._cache[( source, language )] = source\n session.flush( [registered_translation] )\n logger.debug( 'registed %s with id %s' % ( source, registered_translation.id ) )\n return source\n return translation\n return ''"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
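The entries above all revolve around Genshi's `Translator` being applied as a callable filter over a markup event stream. Below is a minimal, hedged sketch of that usage; the template text, the `CATALOGUE` dictionary and the `fake_gettext` helper are illustrative assumptions rather than part of any record, and the expected output is approximate.

# Sketch: driving a Translator as a stream filter. CATALOGUE and
# fake_gettext are made-up stand-ins for a real translation catalogue.
from genshi.filters import Translator
from genshi.template import MarkupTemplate

CATALOGUE = {'Hello, world!': 'Hallo, Welt!'}

def fake_gettext(message):
    # Fall back to the original message when no translation exists.
    return CATALOGUE.get(message, message)

template = MarkupTemplate('<p>Hello, world!</p>')
translator = Translator(fake_gettext)
# Piping the generated stream through the filter localizes TEXT events
# (and, by default, translatable attributes such as title and alt).
stream = template.generate() | translator
print(stream.render('xhtml'))   # roughly: <p>Hallo, Welt!</p>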
Extract localizable strings from the given template stream. For every string found, this function yields a ``(lineno, function, message, comments)`` tuple. | def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,
search_text=True, comment_stack=None):
if not self.extract_text:
search_text = False
if comment_stack is None:
comment_stack = []
skip = 0
xml_lang = XML_NAMESPACE['lang']
for kind, data, pos in stream:
if skip:
if kind is START:
skip += 1
if kind is END:
skip -= 1
if kind is START and not skip:
tag, attrs = data
if tag in self.ignore_tags or \
isinstance(attrs.get(xml_lang), basestring):
skip += 1
continue
for message in self._extract_attrs((kind, data, pos),
gettext_functions,
search_text=search_text):
yield message
elif not skip and search_text and kind is TEXT:
text = data.strip()
if text and [ch for ch in text if ch.isalpha()]:
yield pos[1], None, text, comment_stack[-1:]
elif kind is EXPR or kind is EXEC:
for funcname, strings in extract_from_code(data,
gettext_functions):
# XXX: Do we need to grab i18n:comment from comment_stack ???
yield pos[1], funcname, strings, []
elif kind is SUB:
directives, substream = data
in_comment = False
for idx, directive in enumerate(directives):
# Do a first loop to see if there's a comment directive
# If there is update context and pop it from directives
if isinstance(directive, CommentDirective):
in_comment = True
comment_stack.append(directive.comment)
if len(directives) == 1:
# in case we're in the presence of something like:
# <p i18n:comment="foo">Foo</p>
for message in self.extract(
substream, gettext_functions,
search_text=search_text and not skip,
comment_stack=comment_stack):
yield message
directives.pop(idx)
elif not isinstance(directive, I18NDirective):
# Remove all other non i18n directives from the process
directives.pop(idx)
if not directives and not in_comment:
# Extract content if there's no directives because
# strip was pop'ed and not because comment was pop'ed.
# Extraction in this case has been taken care of.
for message in self.extract(
substream, gettext_functions,
search_text=search_text and not skip):
yield message
for directive in directives:
if isinstance(directive, ExtractableI18NDirective):
for message in directive.extract(self,
substream, gettext_functions,
search_text=search_text and not skip,
comment_stack=comment_stack):
yield message
else:
for message in self.extract(
substream, gettext_functions,
search_text=search_text and not skip,
comment_stack=comment_stack):
yield message
if in_comment:
comment_stack.pop() | [
"def extract(fileobj, keywords, comment_tags, options):\n encoding = options.get('encoding', 'utf-8')\n\n original_position = fileobj.tell()\n\n text = fileobj.read().decode(encoding)\n\n if django.VERSION[:2] >= (1, 9):\n tokens = Lexer(text).tokenize()\n else:\n tokens = Lexer(text, None).tokenize()\n\n vars = [token.token_type != TOKEN_TEXT for token in tokens]\n\n could_be_django = any(list(vars))\n\n if could_be_django:\n fileobj.seek(original_position)\n iterator = extract_django(fileobj, keywords, comment_tags, options)\n for lineno, funcname, message, comments in iterator:\n yield lineno, funcname, message, comments\n else:\n # Underscore template extraction\n comments = []\n\n fileobj.seek(original_position)\n\n for lineno, line in enumerate(fileobj, 1):\n funcname = None\n\n stream = TokenStream.from_tuple_iter(tokenize(line, underscore.rules))\n while not stream.eof:\n if stream.current.type == 'gettext_begin':\n stream.expect('gettext_begin')\n funcname = stream.expect('func_name').value\n args, kwargs = parse_arguments(stream, 'gettext_end')\n\n strings = []\n\n for arg, argtype in args:\n if argtype == 'func_string_arg':\n strings.append(force_text(arg))\n else:\n strings.append(None)\n\n for arg in kwargs:\n strings.append(None)\n\n if len(strings) == 1:\n strings = strings[0]\n else:\n strings = tuple(strings)\n\n yield lineno, funcname, strings, []\n\n stream.next()",
"def extract_from_code(code, gettext_functions):\r\n def _walk(node):\r\n if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \\\r\n and node.func.id in gettext_functions:\r\n strings = []\r\n def _add(arg):\r\n if isinstance(arg, _ast.Str) and isinstance(arg.s, str):\r\n strings.append(arg.s)\r\n elif isinstance(arg, _ast.Str):\r\n strings.append(str(arg.s, 'utf-8'))\r\n elif arg:\r\n strings.append(None)\r\n [_add(arg) for arg in node.args]\r\n _add(node.starargs)\r\n _add(node.kwargs)\r\n if len(strings) == 1:\r\n strings = strings[0]\r\n else:\r\n strings = tuple(strings)\r\n yield node.func.id, strings\r\n elif node._fields:\r\n children = []\r\n for field in node._fields:\r\n child = getattr(node, field, None)\r\n if isinstance(child, list):\r\n for elem in child:\r\n children.append(elem)\r\n elif isinstance(child, _ast.AST):\r\n children.append(child)\r\n for child in children:\r\n for funcname, strings in _walk(child):\r\n yield funcname, strings\r\n return _walk(code.ast)",
"def extract_from_code(code, gettext_functions):\r\n def _walk(node):\r\n if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \\\r\n and node.func.id in gettext_functions:\r\n strings = []\r\n def _add(arg):\r\n if isinstance(arg, _ast.Str) and isinstance(arg.s, unicode):\r\n strings.append(arg.s)\r\n elif isinstance(arg, _ast.Str):\r\n strings.append(unicode(arg.s, 'utf-8'))\r\n elif arg:\r\n strings.append(None)\r\n [_add(arg) for arg in node.args]\r\n _add(node.starargs)\r\n _add(node.kwargs)\r\n if len(strings) == 1:\r\n strings = strings[0]\r\n else:\r\n strings = tuple(strings)\r\n yield node.func.id, strings\r\n elif node._fields:\r\n children = []\r\n for field in node._fields:\r\n child = getattr(node, field, None)\r\n if isinstance(child, list):\r\n for elem in child:\r\n children.append(elem)\r\n elif isinstance(child, _ast.AST):\r\n children.append(child)\r\n for child in children:\r\n for funcname, strings in _walk(child):\r\n yield funcname, strings\r\n return _walk(code.ast)",
"def translate(self, string, regex=re.compile(r'%\\((\\w+)\\)s')):\r\n substream = None\r\n\r\n def yield_parts(string):\r\n for idx, part in enumerate(regex.split(string)):\r\n if idx % 2:\r\n yield self.values[part]\r\n elif part:\r\n yield (TEXT,\r\n part.replace('\\[', '[').replace('\\]', ']'),\r\n (None, -1, -1)\r\n )\r\n\r\n parts = parse_msg(string)\r\n parts_counter = {}\r\n for order, string in parts:\r\n parts_counter.setdefault(order, []).append(None)\r\n\r\n while parts:\r\n order, string = parts.pop(0)\r\n events = self.events[order].pop(0)\r\n parts_counter[order].pop()\r\n\r\n for event in events:\r\n if event[0] is SUB_START:\r\n substream = []\r\n elif event[0] is SUB_END:\r\n # Yield a substream which might have directives to be\r\n # applied to it (after translation events)\r\n yield SUB, (self.subdirectives[order], substream), event[2]\r\n substream = None\r\n elif event[0] is TEXT:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is START:\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is END:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n elif event[0] is EXPR:\r\n # These are handled on the strings itself\r\n continue\r\n else:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event",
"def find_iters(template_string):\n\n # {{% match + any number of spaces + whatever + any number of spaces + %}}\n pattern = re.compile('{{%(.*?)\\s+.*\\s+%}}')\n tags = re.findall(pattern, template_string)\n \n return tags",
"def _get_all_source_strings(resources, *args, **kwargs):\r\n return Translation.objects.source_strings(resources)",
"def _get_user_filtered_source_strings(resources, users, language, *args, **kwargs):\r\n return Translation.objects.user_translated_strings(resources, language, users)",
"def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)",
"def process(\n self,\n *,\n in_str: str,\n fname: str,\n config: Optional[FluffConfig] = None,\n formatter=None,\n ) -> Tuple[Optional[TemplatedFile], list]:\n return TemplatedFile(in_str, fname=fname), []",
"def extract(path):\n# --------------------------------------------------------------------\n body = []\n func = \"\"\n brief = \"\"\n seenfunction = False\n seenpercent = False\n\n for l in open(path):\n\n # Remove whitespace and newline\n line = l.strip().lstrip()\n\n if line.startswith('%'): seenpercent = True\n if line.startswith('function'):\n seenfunction = True\n continue\n if not line.startswith('%'):\n if (seenfunction and seenpercent) or not seenfunction:\n break\n else:\n continue\n\n # remove leading `%' character\n line = line[1:] #\n body.append('%s\\n' % line)\n\n # Extract header from body\n if len(body) > 0:\n head = body[0]\n body = body[1:]\n match = re.match(r\"^\\s*(\\w+)\\s*(\\S.*)\\n$\", head)\n func = match.group(1)\n brief = match.group(2)\n\n return (body, func, brief)",
"def getStringsStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamStrings:\n ...",
"def extract_placeholders(template):\n return re.findall(r'{(.*?)}', template)",
"def _get_untranslated_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.untranslated_source_strings(resources, language)",
"def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), str):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, str):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, str(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos",
"def _parse(self, is_source=False, lang_rules=None):\r\n # entries is a dictionary with the entry keys in the file\r\n entries = defaultdict(list)\r\n\r\n template = u''\r\n for line in self._iter_by_line(self.content):\r\n if self._should_skip(line) :\r\n template += line + \"\\n\"\r\n continue\r\n key, value = self._get_elements(line)\r\n if '[' in key:\r\n # this is a translation\r\n # find the language of it\r\n # Skip the template\r\n actual_key = key[:key.find('[')]\r\n locale = self._get_locale(key)\r\n lang_code = self._get_lang_code(locale)\r\n if lang_code == \"x-test\":\r\n template += line + \"\\n\"\r\n continue\r\n try:\r\n lang = Language.objects.by_code_or_alias(lang_code)\r\n except Language.DoesNotExist, e:\r\n msg = _(\"Unknown language specified: %s\" % lang_code)\r\n logger.warning(msg)\r\n raise DesktopParseError(msg)\r\n else:\r\n lang = False # Use False to mark source string\r\n actual_key = key\r\n template += line + \"\\n\"\r\n\r\n if actual_key not in self.localized_keys:\r\n # Translate only standard localestring keys\r\n continue\r\n entries[actual_key].append((value, lang))\r\n\r\n context = \"\"\r\n template += '\\n# Translations\\n'\r\n\r\n for key, value in entries.iteritems():\r\n for translation, language in value:\r\n if is_source and language:\r\n # Skip other languages when parsing a source file\r\n continue\r\n elif not is_source and language != self.language:\r\n # Skip other languages than the one the parsing is for\r\n continue\r\n self._add_translation_string(key, translation, context=context)\r\n\r\n return template",
"def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, basestring):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, unicode(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos",
"def _get_reviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.reviewed_source_strings(resources, language)",
"def _get_unreviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.unreviewed_source_strings(resources, language)",
"def get_lexicon_file_lines(lexicon: Lexicon) -> Generator[str, None, None]:\n for word in lexicon.words:\n string = word.string.replace(\"\\t\", \" \").replace(\n \"\\n\", \" \"\n ) # sanitize for tabs and newlines in the word\n yield f\"{string}\\t{word.score}\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
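A short usage sketch for the `extract` method documented in the record above. It assumes the parsed template exposes its event stream as `template.stream` (the attribute Genshi's own Babel extraction hook reads); the template source and the printing are illustrative only.

from genshi.filters import Translator
from genshi.template import MarkupTemplate

SOURCE = '''<html xmlns:py="http://genshi.edgewall.org/">
  <head><title>Example</title></head>
  <body><p>${_("Hello, world!")}</p></body>
</html>'''

template = MarkupTemplate(SOURCE)
translator = Translator()
# Each message is a (lineno, function, message, comments) tuple; function
# is None for plain text nodes and e.g. '_' or 'ngettext' for strings
# found inside embedded Python expressions.
for lineno, funcname, message, comments in translator.extract(template.stream):
    print(lineno, funcname, message, comments)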
Convenience function to register the `Translator` filter and the related directives with the given template. | def setup(self, template):
template.filters.insert(0, self)
if hasattr(template, 'add_directives'):
template.add_directives(Translator.NAMESPACE, self) | [
"def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)",
"def templateFilter(func):\n jinja2_env.filters[func.__name__] = func",
"def register_pre_resources_template(self, template):\n pass",
"def test_can_use_imported_templatetags(self):\n template = (\"{% load cachet i18n %}{% cachet %}\"\n \"{% get_current_language as lang %}{{ lang }}\"\n \"{% endcachet %}\")\n translation.activate('en')\n rendered = self.render_template(template)\n self.assertEqual(rendered, 'en')",
"def add_plim_renderer(config, extension, mako_settings_prefix='mako.', preprocessor='plim.preprocessor'):\r\n renderer_factory = MakoRendererFactory()\r\n config.add_renderer(extension, renderer_factory)\r\n\r\n def register():\r\n settings = copy.copy(config.registry.settings)\r\n settings['{prefix}preprocessor'.format(prefix=mako_settings_prefix)] = preprocessor\r\n\r\n opts = parse_options_from_settings(settings, mako_settings_prefix, config.maybe_dotted)\r\n lookup = PkgResourceTemplateLookup(**opts)\r\n\r\n renderer_factory.lookup = lookup\r\n\r\n # read about config.action() at\r\n # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/extconfig.html#using-config-action-in-a-directive\r\n config.action(('plim-renderer', extension), register)",
"def register(f):\n if f.__name__ in template_functions:\n raise KeyError('Template function %s already registered' % (f.__name__,))\n template_functions[f.__name__] = f\n return f",
"def register_type_pre_resources_template(cls, project, template):\n pass",
"def configure_template_filters(app):\r\n app.jinja_env.filters['format_date'] = format_date\r\n app.jinja_env.filters['time_since'] = time_since\r\n app.jinja_env.filters['older_than_one_month'] = older_than_one_month\r\n app.jinja_env.filters['time_left_to'] = time_left_to\r\n app.jinja_env.filters['is_online'] = is_online\r\n app.jinja_env.filters['crop_title'] = crop_title\r\n app.jinja_env.filters['quote'] = quote",
"def __init__(self):\n self.template_files = {\n 'CCDA': CCDA_TPL_FILENAME,\n 'FHIR-XML': FHIR_TPL_FILENAME,\n 'FHIR-JSON': FHIR_TPL_FILENAME\n }\n self.environment = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATES_DIR))\n\n # load filters defined in custom_filters\n for a in dir(custom_filters):\n if isinstance(custom_filters.__dict__.get(a), types.FunctionType):\n self.environment.filters[a] = custom_filters.__dict__.get(a)\n\n self.templates = {}\n for key in self.template_files:\n self.templates[key] = self.environment.get_template(self.template_files[key])",
"def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)",
"def addSyntheticTemplate(self, templates, class_id) -> retval:\n ...",
"def register_filter(self, filter, function):\n if filter in self.filters:\n self.filters[filter].append(function)\n else:\n self.filters[filter] = [ function ]",
"def add_template(self, template, label, units='counts'):\n\n if units == 'flux':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a flux template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template *= self.exposure_map\n\n if units == 'PS':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a PS template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template /= self.exposure_map/np.mean(self.exposure_map)\n self.templates_dict.update({label: template})\n self.templates.append(template)",
"def includeme(config): # pragma: no cover\n config.add_renderer('.pt', zpt.renderer_factory)\n config.add_renderer('.txt', text.renderer_factory)\n config.include('.localization')",
"def register(mgr):\n mgr.set_lang_info(\"Less\",\n silvercity_lexer=LessLexer(),\n buf_class=LessBuffer,\n langintel_class=LessLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"SCSS\",\n silvercity_lexer=SCSSLexer(),\n buf_class=SCSSBuffer,\n langintel_class=SCSSLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"Sass\",\n silvercity_lexer=SassLexer(),\n buf_class=SassBuffer,\n langintel_class=SassLangIntel,\n is_cpln_lang=True)",
"def register_template_extensions(\n cls,\n exts_fn: Callable[[CompileCtx], Dict[str, Any]]\n ) -> None:\n assert not cls._template_extensions_frozen\n CompileCtx._template_extensions_fns.append(exts_fn)",
"def create_translator(self, *args):\r\n translator_class = self.translator_class\r\n return translator_class(*args)",
"def render_template(text, **context_args):\r\n template = Template(\"{% load bootstrap3 %}\" + text)\r\n if not 'form' in context_args:\r\n context_args['form'] = ExpenseFilterForm()\r\n return template.render(Context(context_args))",
"def sub_template(template,template_tag,substitution):\n\n template = template.replace(template_tag,substitution)\n return template",
"def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
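The `setup` convenience method above mirrors the `on_template_loaded` callback that appears among the entries. Below is a hedged sketch of the same wiring; the identity `ugettext` function and the template text stand in for whatever the host application actually supplies.

from genshi.filters import Translator
from genshi.template import MarkupTemplate

def ugettext(message):
    return message   # identity "translation", just for the sketch

template = MarkupTemplate('<p>Hi</p>')
translator = Translator(ugettext)
# setup() prepends the filter to template.filters and, where supported,
# registers the i18n: directives so i18n:msg / i18n:domain work.
translator.setup(template)
print(template.generate().render('xhtml'))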
Interpolate the given message translation with the events in the buffer and return the translated stream. | def translate(self, string, regex=re.compile(r'%\((\w+)\)s')):
substream = None
def yield_parts(string):
for idx, part in enumerate(regex.split(string)):
if idx % 2:
yield self.values[part]
elif part:
yield (TEXT,
part.replace('\[', '[').replace('\]', ']'),
(None, -1, -1)
)
parts = parse_msg(string)
parts_counter = {}
for order, string in parts:
parts_counter.setdefault(order, []).append(None)
while parts:
order, string = parts.pop(0)
events = self.events[order].pop(0)
parts_counter[order].pop()
for event in events:
if event[0] is SUB_START:
substream = []
elif event[0] is SUB_END:
# Yield a substream which might have directives to be
# applied to it (after translation events)
yield SUB, (self.subdirectives[order], substream), event[2]
substream = None
elif event[0] is TEXT:
if string:
for part in yield_parts(string):
if substream is not None:
substream.append(part)
else:
yield part
# String handled, reset it
string = None
elif event[0] is START:
if substream is not None:
substream.append(event)
else:
yield event
if string:
for part in yield_parts(string):
if substream is not None:
substream.append(part)
else:
yield part
# String handled, reset it
string = None
elif event[0] is END:
if string:
for part in yield_parts(string):
if substream is not None:
substream.append(part)
else:
yield part
# String handled, reset it
string = None
if substream is not None:
substream.append(event)
else:
yield event
elif event[0] is EXPR:
# These are handled on the strings itself
continue
else:
if string:
for part in yield_parts(string):
if substream is not None:
substream.append(part)
else:
yield part
# String handled, reset it
string = None
if substream is not None:
substream.append(event)
else:
yield event | [
"def interpolate(stream):\n\n current_index = None\n values_buffer = []\n\n for (t,v) in stream:\n if t:\n if current_index is not None and values_buffer:\n delta = (v - current_index) / len(values_buffer)\n for v2 in values_buffer:\n yield (current_index, v2)\n current_index += delta\n values_buffer = []\n current_index = v\n\n else:\n values_buffer.append(v)",
"def resolve_interpolates(self, s, context):\n interps = interpolate.findall(s)\n\n for i in interps:\n s = interpolate.sub(context.get(i), s, 1)\n\n return s",
"async def translate(self, ctx, *, message: commands.clean_content):\n\n loop = self.bot.loop\n\n try:\n ret = await loop.run_in_executor(None, self.trans.translate, message)\n except Exception as e:\n return await ctx.send(f'An error occurred: {e.__class__.__name__}: {e}')\n\n embed = discord.Embed(title='Translated', colour=0x4284F3)\n src = googletrans.LANGUAGES.get(ret.src, '(auto-detected)').title()\n dest = googletrans.LANGUAGES.get(ret.dest, 'Unknown').title()\n embed.add_field(name=f'From {src}', value=ret.origin, inline=False)\n embed.add_field(name=f'To {dest}', value=ret.text, inline=False)\n await ctx.send(embed=embed)",
"def interpolate(self, translated=None):\n if translated is None:\n translated = self.default\n\n # NB: this function should never never lose the *original\n # identity* of a non-``None`` but empty ``default`` value it\n # is provided. If (translated == default) , it should return the\n # *orignal* default, not a derivation. See the comment below in\n # ChameleonTranslate.\n\n if self.mapping and translated:\n def replace(match):\n whole, param1, param2 = match.groups()\n return text_type(self.mapping.get(param1 or param2, whole))\n translated = _interp_regex.sub(replace, translated)\n\n return translated",
"def _transform_buffer(self, buffer):\n raise NotImplementedError()",
"def interpolate(self, times, proj=PlateCarree()) -> np.ndarray:\n if proj not in self.interpolator:\n self.interpolator[proj] = interp1d(\n np.stack(t.to_pydatetime().timestamp() for t in self.timestamp),\n proj.transform_points(\n PlateCarree(), *np.stack(self.coords).T\n ).T,\n )\n return PlateCarree().transform_points(\n proj, *self.interpolator[proj](times)\n )",
"def interpolate(self, index, viewloc):\n prev = self.eventlist[index]\n post = self.eventlist[index+1]\n v_i = (post.space() - prev.space())/(post.t - prev.t)\n #quadratic formula components\n a = np.sum(v_i**2)-self.univ.lightspeed**2\n b = (2 * np.sum(v_i*(prev.space() - prev.t*v_i - viewloc.space())) +\n 2 * viewloc.t * self.univ.lightspeed**2)\n c = (np.sum((prev.space() - prev.t*v_i - viewloc.space())**2) -\n (viewloc.t * self.univ.lightspeed)**2)\n pos = (-1*b + np.sqrt(b**2 - 4*a*c))/(2*a)\n neg = (-1*b - np.sqrt(b**2 - 4*a*c))/(2*a)\n #use correct component for time, then use time to find spatial location \n inter_t = (pos if prev.t<=pos<=post.t else neg)\n inter_x = list((inter_t-prev.t)*v_i+prev.space())\n while len(inter_x) < 3:\n inter_x.append(None) #fixing dimentionality \n self.timestamps[inter_t] = index #viewer might dictionary-lookup inter_t later\n \n return Event(Location(inter_t,inter_x[0],inter_x[1],inter_x[2]),prev.phystype,prev.descrip) #should really interpolate description too....",
"def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, basestring):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, unicode(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos",
"def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), str):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, str):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, str(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos",
"def translate(motion, v, local=False):\n return transform(motion, conversions.p2T(v), local)",
"def _interpolate(self, i_buffers, count, step, offset):\n\n iterators = []\n new_elements = []\n compare_key = None\n if self._key:\n compare_key = lambda x: self._key(x[0])\n for buffer_elem in i_buffers:\n iterators.append(buffer_elem.sized_iterator())\n\n # Python 3 `heapq.merge` support key comparison and returns an iterator and\n # does not pull the data into memory all at once. Python 2 does not\n # support comparison on its `heapq.merge` api, so we use the itertools\n # which takes the `key` function for comparison and creates an iterator\n # from it.\n if sys.version_info[0] < 3:\n sorted_elem = iter(\n sorted(\n itertools.chain.from_iterable(iterators),\n key=compare_key,\n reverse=self._reverse))\n else:\n sorted_elem = heapq.merge(\n *iterators, key=compare_key, reverse=self._reverse)\n\n weighted_element = next(sorted_elem)\n current = weighted_element[1]\n j = 0\n while j < count:\n target = j * step + offset\n j = j + 1\n try:\n while current <= target:\n weighted_element = next(sorted_elem)\n current = current + weighted_element[1]\n except StopIteration:\n pass\n new_elements.append(weighted_element[0])\n return new_elements",
"def linear_interp(val, lo, hi):\n return (1 - val) * lo + val * hi",
"def interpolate(self, x):\n # Cache interpolant to avoid overhead\n if not hasattr(self, \"_interpolant\"):\n self._create_interpolant()\n return self._interpolant(x)",
"def _external_substitution(seq, trajectory):\n # Assign proper type\n seq = ETC.cast(seq)\n\n # Initialize ETC to 0\n etc = 0\n\n # Iterate over the given substitution table and substitute\n for step in trajectory[1:]: # Skip first entry, not a substitution step\n\n pair = step.get(\"window\")\n\n # Substitute only if the sequence is atleast 2 symbols long\n if len(seq) > 1 and _check_pair(tuple(pair), seq):\n\n # Cython function call\n seq = ETC.cast(core.substitute_pairs(seq, pair, max(seq) + 1))\n etc += 1\n\n # If sequence has been fully compressed, stop\n else:\n break\n\n # Return both etc as well as the sequence, whatever is left of it\n return etc, seq",
"def _visit_translation(self, s):\r\n return s",
"def stream(self, embeddings, message):\n\n offset, batch = 0, 1000\n with tqdm(total=embeddings.count(), desc=message) as progress:\n for offset in range(0, embeddings.count(), batch):\n for result in embeddings.search(\n f\"select id, text, tags, source, sourceid from txtai limit {batch} offset {offset}\"\n ):\n yield (result[\"id\"], result, None)\n\n progress.update(batch)",
"def translate(f, shift):\n return GenericTranslation(f, 'pos', shift)",
"def _interp_temporal(a):\n\n import scipy.interpolate as si\n\n i, j, old_time, data, new_time, kind = a\n f = si.interp1d(old_time, data, kind = kind)\n new_data = f(new_time)\n\n return i, j, new_data",
"def interpolate(self, t):\n\n # Standard bounds checks\n first_update_t,first_update_pos = self[0]\n if t <= first_update_t: return first_update_pos\n\n last_update_t,last_update_pos = self[-1]\n if t >= last_update_t: return last_update_pos\n\n # Otherwise, find the right pair of updates\n\n # Start the search on the first element where t >= timestamp to satisfy t >= prev_t\n start_idx = bisect.bisect_left(self.timestamps(), t) - 1\n assert start_idx >= 0\n\n prev_t,prev_pos = self[start_idx]\n for idx in range(start_idx+1,len(self)):\n cur_t,cur_pos = self[idx]\n if t >= prev_t and t < cur_t:\n alpha = float(t - prev_t) / float(cur_t - prev_t)\n return vec3.add(vec3.scale(cur_pos, alpha), vec3.scale(prev_pos, (1.0 - alpha)))\n\n prev_t,prev_pos = cur_t,cur_pos\n\n print t, self.timestamps(), start_idx"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
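The interpolation in `translate` above hinges on the fact that `re.split` with a single capturing group alternates literal text and captured parameter names, which is exactly what `yield_parts` exploits. A standalone sketch of that splitting trick follows; the message string and the `values` mapping are invented for illustration.

import re

regex = re.compile(r'%\((\w+)\)s')
values = {'num': '3', 'name': 'Jane'}          # illustrative substitutions

translated = 'Hallo %(name)s, du hast %(num)s Nachrichten'
parts = []
for idx, part in enumerate(regex.split(translated)):
    if idx % 2:          # odd indices are the captured parameter names
        parts.append(values[part])
    elif part:           # even indices are literal text; skip empty chunks
        parts.append(part)
print(''.join(parts))    # Hallo Jane, du hast 3 Nachrichten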
Extract strings from Python bytecode. >>> from genshi.template.eval import Expression >>> expr = Expression('_("Hello")') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('_', u'Hello')] >>> expr = Expression('ngettext("You have %(num)s item", ' ... '"You have %(num)s items", num)') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('ngettext', (u'You have %(num)s item', u'You have %(num)s items', None))] | def extract_from_code(code, gettext_functions):
def _walk(node):
if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \
and node.func.id in gettext_functions:
strings = []
def _add(arg):
if isinstance(arg, _ast.Str) and isinstance(arg.s, unicode):
strings.append(arg.s)
elif isinstance(arg, _ast.Str):
strings.append(unicode(arg.s, 'utf-8'))
elif arg:
strings.append(None)
[_add(arg) for arg in node.args]
_add(node.starargs)
_add(node.kwargs)
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.func.id, strings
elif node._fields:
children = []
for field in node._fields:
child = getattr(node, field, None)
if isinstance(child, list):
for elem in child:
children.append(elem)
elif isinstance(child, _ast.AST):
children.append(child)
for child in children:
for funcname, strings in _walk(child):
yield funcname, strings
return _walk(code.ast) | [
"def extract_from_code(code, gettext_functions):\r\n def _walk(node):\r\n if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \\\r\n and node.func.id in gettext_functions:\r\n strings = []\r\n def _add(arg):\r\n if isinstance(arg, _ast.Str) and isinstance(arg.s, str):\r\n strings.append(arg.s)\r\n elif isinstance(arg, _ast.Str):\r\n strings.append(str(arg.s, 'utf-8'))\r\n elif arg:\r\n strings.append(None)\r\n [_add(arg) for arg in node.args]\r\n _add(node.starargs)\r\n _add(node.kwargs)\r\n if len(strings) == 1:\r\n strings = strings[0]\r\n else:\r\n strings = tuple(strings)\r\n yield node.func.id, strings\r\n elif node._fields:\r\n children = []\r\n for field in node._fields:\r\n child = getattr(node, field, None)\r\n if isinstance(child, list):\r\n for elem in child:\r\n children.append(elem)\r\n elif isinstance(child, _ast.AST):\r\n children.append(child)\r\n for child in children:\r\n for funcname, strings in _walk(child):\r\n yield funcname, strings\r\n return _walk(code.ast)",
"def get_functions(text, startswith='def '):\n return get_definition(text, startswith)",
"def extract_function_code(code_chunk):\n # Remove the function definition line\n #print(code_chunk)\n function_code = re.sub(r'^\\s*def .+\\n', '', code_chunk)\n # Split the function code by triple \"s into a function chunks variable\n function_chunks = re.split(r'\\\"\\\"\\\"', function_code)\n # If the first chunk contains anything besides newlines and whitespace, return the function_code unchanged\n if not re.match(r'^\\s*$', function_chunks[0]):\n print(function_chunks[0])\n return function_code\n #print(function_code)\n # Remove the first docstring\n function_code = re.sub(r'\"\"\".*?\"\"\"', '', function_code, 1, flags=re.DOTALL)\n #function_code = re.sub(r'\\):\\n*\\s*\"\"\".*?\"\"\"', '\\):\\n', function_code, flags=re.DOTALL)\n\n #print(function_code)\n return function_code",
"def extract(self, *args) -> \"simpleline_t *\":\n return _ida_pro.strvec_t_extract(self, *args)",
"def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,\r\n search_text=True, comment_stack=None):\r\n if not self.extract_text:\r\n search_text = False\r\n if comment_stack is None:\r\n comment_stack = []\r\n skip = 0\r\n\r\n xml_lang = XML_NAMESPACE['lang']\r\n\r\n for kind, data, pos in stream:\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n if kind is END:\r\n skip -= 1\r\n\r\n if kind is START and not skip:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n continue\r\n\r\n for message in self._extract_attrs((kind, data, pos),\r\n gettext_functions,\r\n search_text=search_text):\r\n yield message\r\n\r\n elif not skip and search_text and kind is TEXT:\r\n text = data.strip()\r\n if text and [ch for ch in text if ch.isalpha()]:\r\n yield pos[1], None, text, comment_stack[-1:]\r\n\r\n elif kind is EXPR or kind is EXEC:\r\n for funcname, strings in extract_from_code(data,\r\n gettext_functions):\r\n # XXX: Do we need to grab i18n:comment from comment_stack ???\r\n yield pos[1], funcname, strings, []\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n in_comment = False\r\n\r\n for idx, directive in enumerate(directives):\r\n # Do a first loop to see if there's a comment directive\r\n # If there is update context and pop it from directives\r\n if isinstance(directive, CommentDirective):\r\n in_comment = True\r\n comment_stack.append(directive.comment)\r\n if len(directives) == 1:\r\n # in case we're in the presence of something like:\r\n # <p i18n:comment=\"foo\">Foo</p>\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n directives.pop(idx)\r\n elif not isinstance(directive, I18NDirective):\r\n # Remove all other non i18n directives from the process\r\n directives.pop(idx)\r\n\r\n if not directives and not in_comment:\r\n # Extract content if there's no directives because\r\n # strip was pop'ed and not because comment was pop'ed.\r\n # Extraction in this case has been taken care of.\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip):\r\n yield message\r\n\r\n for directive in directives:\r\n if isinstance(directive, ExtractableI18NDirective):\r\n for message in directive.extract(self,\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n else:\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n\r\n if in_comment:\r\n comment_stack.pop()",
"def initFunctionsFromText(self, text):",
"def _code_str_to_source_list(self, code):\n source_list = [\"{}\\n\".format(s) for s in code.split(\"\\n\")]\n source_list[-1] = source_list[-1].rstrip(\"\\n\")\n return source_list",
"def strings_in_code_files(code_files: List[str]) -> List[LocalizedString]:\n\n strings: List[LocalizedString] = []\n\n for file_path in code_files:\n strings += strings_in_code_file(file_path)\n\n return strings",
"def process_source_text(self, source_text):\n return source_text",
"def extract(path):\n# --------------------------------------------------------------------\n body = []\n func = \"\"\n brief = \"\"\n seenfunction = False\n seenpercent = False\n\n for l in open(path):\n\n # Remove whitespace and newline\n line = l.strip().lstrip()\n\n if line.startswith('%'): seenpercent = True\n if line.startswith('function'):\n seenfunction = True\n continue\n if not line.startswith('%'):\n if (seenfunction and seenpercent) or not seenfunction:\n break\n else:\n continue\n\n # remove leading `%' character\n line = line[1:] #\n body.append('%s\\n' % line)\n\n # Extract header from body\n if len(body) > 0:\n head = body[0]\n body = body[1:]\n match = re.match(r\"^\\s*(\\w+)\\s*(\\S.*)\\n$\", head)\n func = match.group(1)\n brief = match.group(2)\n\n return (body, func, brief)",
"def translateStrings(language_code):\n from translate_admin import translateAdminStrings\n from translate_frontend import translateFrontendStrings\n from translate_help import translateHelpStrings\n from translate_login import translateLoginStrings\n\n translateAdminStrings(language_code)\n translateFrontendStrings(language_code)\n translateHelpStrings(language_code)\n translateLoginStrings(language_code)",
"def get_function_instructions(self, _ea):\n\t\tinstr = []\n\t\tif (_ea != BADADDR):\n\t\t\tinstr_matrix = self.get_function_disasm(_ea)\n\t\t\tfor line in instr_matrix:\n\t\t\t\tinstr.append(line[0])\n\t\treturn instr",
"def extract(fileobj, keywords, comment_tags, options):\n encoding = options.get('encoding', 'utf-8')\n\n original_position = fileobj.tell()\n\n text = fileobj.read().decode(encoding)\n\n if django.VERSION[:2] >= (1, 9):\n tokens = Lexer(text).tokenize()\n else:\n tokens = Lexer(text, None).tokenize()\n\n vars = [token.token_type != TOKEN_TEXT for token in tokens]\n\n could_be_django = any(list(vars))\n\n if could_be_django:\n fileobj.seek(original_position)\n iterator = extract_django(fileobj, keywords, comment_tags, options)\n for lineno, funcname, message, comments in iterator:\n yield lineno, funcname, message, comments\n else:\n # Underscore template extraction\n comments = []\n\n fileobj.seek(original_position)\n\n for lineno, line in enumerate(fileobj, 1):\n funcname = None\n\n stream = TokenStream.from_tuple_iter(tokenize(line, underscore.rules))\n while not stream.eof:\n if stream.current.type == 'gettext_begin':\n stream.expect('gettext_begin')\n funcname = stream.expect('func_name').value\n args, kwargs = parse_arguments(stream, 'gettext_end')\n\n strings = []\n\n for arg, argtype in args:\n if argtype == 'func_string_arg':\n strings.append(force_text(arg))\n else:\n strings.append(None)\n\n for arg in kwargs:\n strings.append(None)\n\n if len(strings) == 1:\n strings = strings[0]\n else:\n strings = tuple(strings)\n\n yield lineno, funcname, strings, []\n\n stream.next()",
"def extract(self, source: str, **kwargs):\n raise NotImplementedError",
"def _get_unreviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.unreviewed_source_strings(resources, language)",
"def extract_text(self):\n text = pypdfUnicode(\"\")\n content = self[\"/Contents\"].getObject()\n\n if not isinstance(content, ContentStream):\n content = ContentStream(content, self.pdf)\n\n # Note: we check all strings are TextStringObjects. ByteStringObjects\n # are strings where the byte->string encoding was unknown, so adding\n # them to the text here would be gibberish.\n for operands, operator in content.operations:\n if operator == by_(\"Tj\"):\n _text = operands[0]\n\n if isinstance(_text, TextStringObject):\n text += _text\n text += \"\\n\"\n elif operator == by_(\"T*\"):\n text += \"\\n\"\n elif operator == by_(\"'\"):\n text += \"\\n\"\n _text = operands[0]\n if isinstance(_text, TextStringObject):\n text += operands[0]\n elif operator == by_('\"'):\n _text = operands[2]\n if isinstance(_text, TextStringObject):\n text += \"\\n\"\n text += _text\n elif operator == by_(\"TJ\"):\n for i__ in operands[0]:\n if isinstance(i__, TextStringObject):\n text += i__\n text += \"\\n\"\n\n return text",
"def get_inverted_code(self, code_list):\n name_code, name_arg = code_list[-2]\n if name_code == LOAD_NAME and len(code_list) == 3:\n handler = self.get_name_handler()\n new_code = code_list[:-2]\n new_code.extend([\n (LOAD_CONST, handler),\n (LOAD_NAME, '_[expr]'),\n (LOAD_NAME, '_[obj]'),\n (LOAD_NAME, '_[name]'),\n (LOAD_CONST, name_arg),\n (LOAD_NAME, '_[new]'),\n (CALL_FUNCTION, 0x0005),\n (RETURN_VALUE, None),\n ])\n return new_code",
"def get_functions_dictionary():\n return {\n 'tfidf': extract_tf_idf,\n 'post_length': extract_post_length,\n 'topics': extract_topics,\n 'screamer': extract_screamer,\n 'words': extract_meaningful_words_existence,\n 'off_dis': extract_distance_from_offensive,\n 'not_off_dis': extract_distance_from_not_offensive,\n 'wmd_off': extract_wmd_offensive,\n 'wmd_not_off': extract_wmd_not_offensive,\n 'dis_avg_vec': extract_distance_from_avg_vector\n }",
"def parse_code(code: List[str]) -> List[Tuple[str, int]]:\n return [parse_line(line) for line in code]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simplify a marked stream. | def _simplify(stream, with_attrs=False):
def _generate():
for mark, (kind, data, pos) in stream:
if kind is START:
if with_attrs:
data = (unicode(data[0]), dict((unicode(k), v)
for k, v in data[1]))
else:
data = unicode(data[0])
elif kind is END:
data = unicode(data)
elif kind is ATTR:
kind = ATTR
data = dict((unicode(k), v) for k, v in data[1])
yield mark, kind, data
return list(_generate()) | [
"def _simplify(stream, with_attrs=False):\r\n def _generate():\r\n for mark, (kind, data, pos) in stream:\r\n if kind is START:\r\n if with_attrs:\r\n data = (str(data[0]), dict((str(k), v)\r\n for k, v in data[1]))\r\n else:\r\n data = str(data[0])\r\n elif kind is END:\r\n data = str(data)\r\n elif kind is ATTR:\r\n kind = ATTR\r\n data = dict((str(k), v) for k, v in data[1])\r\n yield mark, kind, data\r\n return list(_generate())",
"def strip_changes(self, source, stream):\r\n raise NotImplementedError()",
"def gentle_simplify(self):\n return self.apply_to_content(operator.methodcaller('gentle_simplify'))",
"def _substitute_stream_ ( klass ) :\n index = klass.find('>>')\n while -1 != index :\n klass = klass.replace('>>','> >')\n index = klass.find( '>>' )\n index = klass.find(' ')\n while -1 != index :\n klass = klass.replace(' ',' ')\n index = klass.find( ' ' )\n return klass",
"def filter_stream(fn, s):\n if s.empty:\n return s\n\n def compute_rest():\n return filter_stream(fn, s.rest)\n if fn(s.first):\n return Stream(s.first, compute_rest)\n return compute_rest()",
"def format_stream(self, stream, format):\n filter_text = format.filter\n indent, wrap = format.indent, format.wrap\n if indent is not None:\n indent_lines = format.indent_lines\n lstrip_blanks = format.lstrip_blanks\n rstrip_blanks = format.rstrip_blanks\n lstrip_lines = format.lstrip_lines\n min_level, max_level = format.min_level, format.max_level\n indent_level = []\n new_line = False\n if wrap is not None:\n wrap_lines = format.wrap_lines\n indent_width, new_offset = format.indent_width, format.new_offset\n offset = 0\n formatted = 0\n text = last_char = ''\n for ev, item in stream:\n if ev == TEXT:\n text += item\n else:\n if ev in (START, END):\n tag = item.tag\n if not formatted:\n text = filter_text(text, last_char)\n if indent is None:\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n else:\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n if wrap is not None:\n text = wrap_lines(text, wrap, offset,\n indent_width(level*indent))\n if '\\n' in text:\n indent_level[-1] = True\n if new_line:\n if lstrip_blanks(text)[:1] != '\\n':\n text = '\\n' + lstrip_blanks(text)\n offset = 0\n new_line = False\n if tag == Comment or not self.is_inline(tag):\n if ev == START:\n if indent_level:\n if rstrip_blanks(text)[-1:] != '\\n':\n text = rstrip_blanks(text) + '\\n'\n text = indent_lines(text, level*indent)\n indent_level[-1] = True\n elif text:\n text = lstrip_lines(text)\n if tag != Comment \\\n and not self.is_formatted(tag):\n indent_level.append(False)\n else:\n if indent_level:\n if indent_level.pop():\n if rstrip_blanks(text)[-1:] == '\\n':\n text = rstrip_blanks(text)[:-1]\n text = indent_lines(text,\n level*indent)\n text = rstrip_blanks(text) + '\\n'\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n text += level*indent\n elif text:\n text = lstrip_lines(text)\n new_line = True\n elif text:\n if level > 0:\n text = indent_lines(text, level*indent)\n else:\n text = lstrip_lines(text)\n if tag == Comment or self.is_formatted(tag):\n if ev == START:\n formatted += 1\n elif formatted:\n formatted -= 1\n new_line = True\n yield TEXT, text\n if wrap is not None:\n offset = new_offset(text, offset)\n last_char = text[-1:]\n text = ''\n yield ev, item\n if text:\n if not formatted:\n text = filter_text(text, last_char)\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n if indent is None:\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n else:\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n if wrap is not None:\n text = wrap_lines(text, wrap, offset,\n indent_width(level*indent))\n if rstrip_blanks(text)[-1:] == '\\n':\n text = text[:-1]\n text = indent_lines(text, level*indent)\n yield TEXT, text",
"def filter_stream(fn, s):\r\n def compute_rest():\r\n return filter_stream(fn, s.rest)\r\n if s is Stream.empty:\r\n return 'There is not matched value in this stream.'\r\n elif fn(s.first):\r\n return Stream(s.first, compute_rest) # compute the rest stream whenever .rest method is called.\r\n else:\r\n return compute_rest() # This will compute the rest stream immediately.\r",
"def toggle_marked(self) -> None:\n self.show_marked = not self.show_marked\n self._refilter()",
"def flatland_filter(stream, context):\n return Stream(FlatlandFilter()(stream, context))",
"def simplify(self):\n #c = 0\n simp_sentences = []\n for s in self.sentences:\n\n #print \"Original: \" + s\n \n simp_sentences.append(self.transformation(s, ''))\n\n ## for demonstration purposes only. remove the prints later\n #print \"Simplified: \",\n #print simp_sentences[c]\n #c+=1\n\n #print \n return simp_sentences",
"def encode_stream(cls, stream):\n stream.seek(0)\n return cls.encode_string(stream.read())",
"def minify_html_from_file (stream: IOBase):\n\n with HTMLMinifier() as minifier:\n minifier.feed(stream.read())\n return minifier.get_result()",
"def map_stream(fn, s):\r\n def compute_rest():\r\n return map_stream(fn, s.rest)\r\n if s is Stream.empty:\r\n return s\r\n return Stream(fn(s.first), compute_rest)",
"def writable(stream):",
"def transform_incoming(self, son, collection):\n if self.will_copy():\n return SON(son)\n return son",
"def _ensure(stream):\r\n stream = iter(stream)\r\n event = next(stream)\r\n\r\n # Check whether the iterable is a real markup event stream by examining the\r\n # first item it yields; if it's not we'll need to do some conversion\r\n if type(event) is not tuple or len(event) != 3:\r\n for event in chain([event], stream):\r\n if hasattr(event, 'totuple'):\r\n event = event.totuple()\r\n else:\r\n event = TEXT, str(event), (None, -1, -1)\r\n yield event\r\n return\r\n\r\n # This looks like a markup event stream, so we'll just pass it through\r\n # unchanged\r\n yield event\r\n for event in stream:\r\n yield event",
"def b4kencode_stream(stream, style='s', width=None):\n tw = SimpleTextWrap(width)\n data = stream.read(_BUFFER_4k_e)\n while data:\n yield ''.join(tw.write(b4kencode(data, style)))\n data = stream.read(_BUFFER_4k_e)",
"def test_expand_fragments():\n template = \"\"\"<div xmlns:py=\"http://purl.org/kid/ns#\"\n py:replace=\"stream\" />\"\"\"\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\">\n <div py:for=\"i in range(3)\">\n <p>Hello World #$i</p>\n </div>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <div>\n <p>Hello World #0</p>\n </div><div>\n <p>Hello World #1</p>\n </div><div>\n <p>Hello World #2</p>\n </div>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\" py:for=\"i in range(3)\">\n <p>Hello World #$i</p>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <p>Hello World #0</p>\n </div><div>\n <p>Hello World #1</p>\n </div><div>\n <p>Hello World #2</p>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\">\n <div py:strip=\"True\">\n <p>Hello World</p>\n </div>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <p>Hello World</p>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\" py:strip=\"True\">\n <p>Hello World</p>\n </div>\"\"\")\n s = t.serialize(fragment=True).strip()\n expected = \"\"\"<p>Hello World</p>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True).strip()\n assert s2 == s",
"def parse_stream_raw(self, stream, debug=False):\n tokens = tokenize.generate_tokens(stream.readline)\n return self.parse_tokens(tokens, debug)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply the transform filter to the marked stream. | def __call__(self, stream, keep_marks=False):
transforms = self._mark(stream)
for link in self.transforms:
transforms = link(transforms)
if not keep_marks:
transforms = self._unmark(transforms)
return Stream(transforms,
serializer=getattr(stream, 'serializer', None)) | [
"def apply_transform(self, transform, include_scatter=False):\n self._transformed_events = self._transform(transform, include_scatter=include_scatter)\n self._include_scatter_option = include_scatter\n self.transform = transform",
"def flatland_filter(stream, context):\n return Stream(FlatlandFilter()(stream, context))",
"def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)",
"def transform(self, input_stream):\n input_stream.initialize()\n self.output_stream.initialize()\n class_name = self.__class__.__qualname__\n try:\n logging.debug('%s transformer beginning.', class_name)\n self.on_begin()\n logging.debug('%s transformation started.', class_name)\n self._transform(input_stream)\n except Exception:\n # TODO(markdr): Get multi-process error reporting to play nicer.\n logging.exception('%s ran into an exception.', class_name)\n raise\n finally:\n logging.debug('%s transformation ended.', class_name)\n self.on_end()\n logging.debug('%s finished.', class_name)",
"def transform_block_filters(self, usage_info, block_structure):\n raise NotImplementedError",
"def transform(self, tf):\n pass",
"def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset",
"def transform(self, dataset, params={}):\n raise NotImplementedError()",
"def transform(self, data):\n\t\t\n\t\tfor t in self.transformer_list:\n\t\t\tdata = t.transform(data)\n\t\t\t\n\t\treturn data",
"def apply(self, transform: Callable[[Any], Any]) -> \"InitialCondition\":\n transforms = self.transforms.copy()\n transforms.append(transform)\n return type(self)(self.name, transforms=transforms)",
"def transform(self):\n self._prepare()\n self._cluster()\n self._store()\n self.logger.debug('Transformation is done...')",
"def transform(func):\n WalkoffTag.transform.tag(func)\n return func",
"def _apply_transformation_on_transformed_link(self, item, transformation):\n if getattr(item, \"current_transformation\"):\n relative_transformation = transformation * item.current_transformation.inverse()\n else:\n relative_transformation = transformation\n for native_geometry in item.native_geometry or []:\n self.transform(native_geometry, relative_transformation)\n item.current_transformation = transformation",
"def transform_corpus(self, transformation):\n self.docs = self.transformation[self.docs]\n transformed_model = transformation(self.docs)\n self.transformation = transformed_model\n return",
"def transform(node, ctx, config=None):\n return AnfTransformer(ctx, config).visit(node)",
"def filter_stream(fn, s):\r\n def compute_rest():\r\n return filter_stream(fn, s.rest)\r\n if s is Stream.empty:\r\n return 'There is not matched value in this stream.'\r\n elif fn(s.first):\r\n return Stream(s.first, compute_rest) # compute the rest stream whenever .rest method is called.\r\n else:\r\n return compute_rest() # This will compute the rest stream immediately.\r",
"def transform_only(self, x_data):\n for (_, transformer) in self.steps[:-1]:\n x_data = transformer.transform(x_data)\n return x_data",
"def transform_incoming(self, son, collection):\n if self.will_copy():\n return SON(son)\n return son",
"def filter(self, stack) -> None:\n low_pass = partial(self.low_pass, sigma=self.sigma)\n stack.image.apply(low_pass, is_volume=self.is_volume, verbose=self.verbose)\n\n # apply to aux dict too:\n for auxiliary_image in stack.auxiliary_images.values():\n auxiliary_image.apply(low_pass, is_volume=self.is_volume)",
"def filter_stream(fn, s):\n if s.empty:\n return s\n\n def compute_rest():\n return filter_stream(fn, s.rest)\n if fn(s.first):\n return Stream(s.first, compute_rest)\n return compute_rest()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invert selection so that marked events become unmarked, and vice versa. Specifically, all marks are converted to null marks, and all null marks are converted to OUTSIDE marks. >>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf8') >>> print(html | Transformer('//em').invert().trace()) ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0))) ('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6))) (None, ('START', (QName('em'), Attrs()), (None, 1, 11))) (None, ('TEXT', u'test', (None, 1, 15))) (None, ('END', QName('em'), (None, 1, 19))) ('OUTSIDE', ('TEXT', u' text', (None, 1, 24))) ('OUTSIDE', ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body> | def invert(self):
return self.apply(InvertTransformation()) | [
"def inverse(transformer, inverse='identity', inverse_dropped='nan'):\n if isinstance(transformer, TransformerExtensions):\n transformer.inverse = inverse\n return transformer\n\n return TransformerExtensions(\n transformer,\n inverse=inverse,\n inverse_dropped=inverse_dropped\n )",
"def inverse(self):\n return Transform(self.m_inv, self.m)",
"def invert(self):\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = self.ref(col).invert()\n return self.copy(op=TransformNode(self, exprs))",
"def invert(self):\n d = det(self.a, self.b, self.d, self.e)\n return affine(self.e/d, -self.b/d,\n det(self.b, self.c, self.e, self.f)/d,\n -self.d/d, self.a/d,\n -det(self.a, self.c, self.d, self.f)/d)",
"def inverse(self):\n return _almathswig.Transform_inverse(self)",
"def invert_transform(trans):\n return Transform(trans['to'], trans['from'], linalg.inv(trans['trans']))",
"def invert(self) -> 'BaseFlow':\n return InverseFlow(self)",
"def invert(self) -> \"SbDPRotation &\":\n return _coin.SbDPRotation_invert(self)",
"def invert(self):\n inverse = self.copy()\n inverse.pixels = ~self.pixels\n return inverse",
"def invert(self):\n self.image = ImageOps.invert(self.image).convert(o.device_mode)\n self.display_if_interactive()",
"def invert_in_place(self) -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_invert_in_place(self)",
"def invert(self):\n new_tree = binary_tree()\n new_tree.val = self.val\n if self.left:\n new_tree.right = self.left.invert()\n if self.right:\n new_tree.left = self.right.invert()\n\n return new_tree",
"def set_invert(self, flag):\n if self._invert != flag:\n self._invert = flag\n self.Modified()",
"def inverse(self):\n return self._inverse",
"def invertQTransform(tr):\n try:\n det = tr.determinant()\n detr = 1.0 / det # let singular matrices raise ZeroDivisionError\n inv = tr.adjoint()\n inv *= detr\n return inv\n except ZeroDivisionError:\n return _pinv_fallback(tr)",
"def invert_coverage(self):\n if self.inst is None: \n warnings.warn(\"experiment.invert_coverage(): called with experiment.inst == None.\")\n return\n \n invert = self.params[PARAM_INVERT]\n if invert is None:\n #Don't invert if no setting is saved.\n do_invert = False\n else:\n do_invert = invert.invert\n \n self._lock_qspace_displayed.acquire()\n if do_invert:\n #Ok, we invert, and account for the sphere that fits in the box\n self.qspace_displayed = 1.0*(self.qspace == 0) * (self.inst.qspace_radius < self.inst.qlim)\n else:\n #Or we don't\n self.qspace_displayed = self.qspace.copy()\n self._lock_qspace_displayed.release()\n \n #Continue processing\n self.slice_coverage()",
"def setInverted(self, state):\n collapsed = self.isCollapsed()\n self._inverted = state\n if self.isCollapsible():\n self.setCollapsed(collapsed)",
"def get_inverse(self):\n return MotionDiffeo(topology=self.topology_s,\n lvel=(-self.lvel),\n avel=(-self.avel),\n interval=self.interval)",
"def inverse(self): \n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n self._inverse = AutomorphismField(self._vmodule, name=inv_name, \n latex_name=inv_latex_name)\n for dom, rst in self._restrictions.iteritems():\n self._inverse._restrictions[dom] = rst.inverse()\n return self._inverse",
"def getInverted(self) -> bool:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap selection in an element. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print(html | Transformer('.//em').wrap('strong')) <html><head><title>Some Title</title></head><body>Some <strong><em>body</em></strong> text.</body></html> | def wrap(self, element):
return self.apply(WrapTransformation(element)) | [
"def wrap(text, open_tag, close_tag):\n return ''.join((open_tag, text, close_tag, ))",
"def wrap(self, text: str) -> str:\n return \"\\n\".join(textwrap.wrap(\n text, self.width,\n initial_indent=self.prefix + self.initial_indent,\n subsequent_indent=self.prefix + self.subsequent_indent\n ))",
"def __enclose_in_html_tag(self, elem, tag):\n\n return tag + elem.strip() + self.__create_closing_html_tag(tag)",
"def optwrap(text):\n\t\t\t#if not BODY_WIDTH:\n\t\t\tif 1:\n\t\t\t\treturn text\n\n\t\t\tassert wrap, \"Requires Python 2.3.\"\n\t\t\tresult = ''\n\t\t\tnewlines = 0\n\t\t\tfor para in text.split(\"\\n\"):\n\t\t\t\tif len(para) > 0:\n\t\t\t\t\tif para[0] != ' ' and para[0] != '-' and para[0] != '*':\n\t\t\t\t\t\tfor line in wrap(para, BODY_WIDTH):\n\t\t\t\t\t\t\tresult += line + \"\\n\"\n\t\t\t\t\t\tresult += \"\\n\"\n\t\t\t\t\t\tnewlines = 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not onlywhite(para):\n\t\t\t\t\t\t\tresult += para + \"\\n\"\n\t\t\t\t\t\t\tnewlines = 1\n\t\t\t\telse:\n\t\t\t\t\tif newlines < 2:\n\t\t\t\t\t\tresult += \"\\n\"\n\t\t\t\t\t\tnewlines += 1\n\t\t\treturn result",
"def render(elt: Element) -> HTML:\n raise NotImplementedError",
"def __convert_first_level_tags(self, chunk, tag):\n\n html_tag = self.first_level_tags[tag]\n if html_tag == '<blockquote>':\n for index, line in enumerate(chunk):\n line = line + '<br>'\n chunk[index] = line\n\n chunk = list(map(lambda elem: elem[len(tag):], chunk))\n if html_tag in ('<ul>', '<ol>'):\n chunk = [\n self.__enclose_in_html_tag(elem, '<li>') for elem in chunk\n ]\n chunk[0] = html_tag + chunk[0]\n chunk[-1] = chunk[-1] + self.__create_closing_html_tag(html_tag)\n return chunk",
"def wrap(text, maxlen=76, wrapstr=\" \"):\n\n assert \"\\n\" not in text\n return wrapstr + wrapstr.join([text[0 + i:maxlen + i]\n for i in range(0, len(text), maxlen)])",
"def __handle_start_emphasis_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [output_html, \"<em>\" if next_token.emphasis_length == 1 else \"<strong>\"]\n )",
"def Wrap( self, fn, wrapFn ):\n def Wrapped( *args ):\n return wrapFn( *fn( *args ) )\n return Wrapped",
"def wrap_text(code_edit, key):\n if not key:\n return\n try:\n key_in, key_out = key_lookup[key]\n except KeyError:\n return\n\n textCursor = code_edit.textCursor()\n\n text = textCursor.selectedText()\n if text:\n text = key_in + text + key_out\n else:\n text = key\n textCursor.insertText(text)",
"def insert_before_element_by_text(soup: BeautifulSoup, text_element: str, insert_html: str) -> None:\n for target in soup.find_all(text=text_element):\n target: Tag\n target.string.insert_before(BeautifulSoup(insert_html, 'html.parser'))",
"def find_wrapper(element):\n raise NotImplementedError()",
"def __apply_leading_text(cls, output_html, transform_state):\n\n output_html = (\n f\"{output_html}{ParserHelper.newline_character}{transform_state.add_leading_text}\"\n if output_html and output_html[-1] != ParserHelper.newline_character\n else f\"{output_html}{transform_state.add_leading_text}\"\n )\n transform_state.transform_stack.append(output_html)\n return \"\"",
"def text_word_wrap(text, width):\n return textwrap.wrap(text, width)",
"def wrap(self, text, width=None, **kwargs):\n width = self.width if width is None else width\n lines = []\n for line in text.splitlines():\n lines.extend(\n (_linewrap for _linewrap in SequenceTextWrapper(\n width=width, term=self, **kwargs).wrap(text))\n if line.strip() else (u'',))\n\n return lines",
"def chunk_type_wrap(chunk_type, chunk):\n return '<%s>%s</%s>' % (chunk_type, '<br>'.join(chunk), chunk_type)",
"def _build_html(items, wrapping):\r\n return jinja2.Markup('\\n'.join((wrapping % item for item in items)))",
"def add_children(soup: BeautifulSoup, css_selector: str, child_html: str, wrap_tag: str,\n wrap_attrs: Dict[str, str]) -> None:\n for target in soup.select(css_selector):\n wrap_tag = soup.new_tag(wrap_tag)\n # child_tag.string = child_text\n for key, value in wrap_attrs.items():\n setattr(wrap_tag, key, value)\n target: Tag\n wrap_tag.append(BeautifulSoup(child_html, 'html.parser'))\n target.append(wrap_tag)",
"def includechain(parser, tocken):\r\n class WrapperIncludeNode(Node):\r\n def __init__(self, objInclude):\r\n self.objInclude = objInclude\r\n\r\n def template(self, context):\r\n return self.objInclude.template.resolve(context)\r\n\r\n def settemplate(self, value):\r\n self.objInclude.template.filters = []\r\n self.objInclude.template.tocken = value\r\n self.objInclude.template.var = SafeText(value)\r\n\r\n def render(self, context):\r\n # trabajamos con objInclude.template que es el nombre\r\n xtemplate, modif1 = templateCTX.separatemplateModif(self.template(context))\r\n xtemplate, prefix1 = templateCTX.separatemplatePREFIJO(xtemplate)\r\n aplic, prefix2, modif2 = templateCTX.dameAPLICPREFIXMODIF(context)\r\n modif = modif1 if modif1 else modif2\r\n prefix = prefix1 if prefix1 else prefix2\r\n milist = templateCTX.damechainTemplate(aplic, prefix, xtemplate, modif)\r\n\r\n for mitemp in milist:\r\n try:\r\n self.settemplate(mitemp)\r\n return self.objInclude.render(context)\r\n except template.base.TemplateDoesNotExist:\r\n pass\r\n raise template.base.TemplateDoesNotExist(\"Template %s no encontrada en chain\" % self.template(context))\r\n\r\n # HACEMOS COMO SI LLAMARAMOS A BASE Y WRAPEAMOS\r\n return WrapperIncludeNode(template.loader_tags.do_include(parser, tocken))",
"def html_surround(phrase, tag=\"strong\"):\n\n return \"<\" + tag + \">\" + str(phrase) + \"</\" + tag + \">\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy selection into buffer. The buffer is replaced by each contiguous selection before being passed to the next transformation. If accumulate=True, further selections will be appended to the buffer rather than replacing it. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print(html | Transformer('head/title/text()').copy(buffer) ... .end().select('body').prepend(tag.h1(buffer))) <html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some <em>body</em> text.</body></html> This example illustrates that only a single contiguous selection will be buffered. | def copy(self, buffer, accumulate=False):
return self.apply(CopyTransformation(buffer, accumulate)) | [
"def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n obj.ViewObject.Visibility = False\n # try to add it at same tree location\n obj_parent = find_Parent(obj)\n if obj_parent:\n obj_parent.addObject(obj_new)\n\n #\n\n App.ActiveDocument.recompute()",
"def copy(self):\n return TransformNode(self.input[0], self.exprs, self.keep_index)",
"def _copy_iterator(self):\n self.__udiff, iterator_copy = tee(self.__udiff)\n return iterator_copy",
"def copy(self):\n self.focus()\n self.dispatch('Copy')\n return self",
"def copy(self, select, position):\n if select == []:\n self.status.update('No row selected to copy !')\n return\n select_iid = self.select_to_iid(select)\n self.clipboard = []\n for iid in select_iid:\n data_dict = self.Widget.set(iid) # get row data\n self.clipboard.append([data_dict[column] for column in self.header])",
"def editCopy(self):\n splitter = self.activeWindow.rightTabs.currentWidget()\n if splitter == self.activeWindow.outputSplitter:\n for view in splitter.children():\n try:\n if view.hasSelectedText():\n view.copy()\n return\n except AttributeError:\n pass\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.copy()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()",
"def copy(self):\n self.lazycopy = True\n p = page(self.begin, self.end)\n p.lazycopy = True\n p.tree = self.tree\n p.lookup = self.lookup\n return p",
"def copy(self):\n return UnionNode(self.input)",
"def concat_from_buffer(self, clear=True, ignore_index=False):\n\t\t_buffer = self._buffer\n\t\tif _buffer:\n\t\t\tif self.empty:\n\t\t\t\tself.append_bar(_buffer[0])\n\t\t\t\t_new_frame = pd.concat([self, pd.DataFrame(\n\t\t\t\t\t\t\t\t\t\tdata = _buffer[1:], \n\t\t\t\t\t\t\t\t\t\tindex = range(1,len(_buffer)))],\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\n\t\t\telse:\n\t\t\t\tdf = pd.DataFrame(_buffer)\n\t\t\t\tnew_frame = pd.concat([self, pd.DataFrame(_buffer)],\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\n\t\t\tself.__init__(data = new_frame)\n\t\t\tif clear:\n\t\t\t\tself._buffer = []\n\t\telse:\n\t\t\tpass",
"def concat_from_buffer(self, clear=True, ignore_index=False):\r\n\t\t_buffer = self._buffer\r\n\t\tif _buffer:\r\n\t\t\tif self.empty:\r\n\t\t\t\tself.append_bar(_buffer[0])\r\n\t\t\t\t_new_frame = pd.concat([self, pd.DataFrame(\r\n\t\t\t\t\t\t\t\t\t\tdata = _buffer[1:], \r\n\t\t\t\t\t\t\t\t\t\tindex = range(1,len(_buffer)))],\r\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\r\n\t\t\telse:\r\n\t\t\t\tdf = pd.DataFrame(_buffer)\r\n\t\t\t\tnew_frame = pd.concat([self, pd.DataFrame(_buffer)],\r\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\r\n\t\t\tself.__init__(data = new_frame)\r\n\t\t\tif clear:\r\n\t\t\t\tself._buffer = []\r\n\t\telse:\r\n\t\t\tpass",
"def maintained_selection():\n\n previous_selection = cmds.ls(selection=True)\n try:\n yield\n finally:\n if previous_selection:\n cmds.select(previous_selection,\n replace=True,\n noExpand=True)\n else:\n cmds.select(deselect=True,\n noExpand=True)",
"def select(self): \n if self.l_buffer:\n selectList = []\n # Need to dig down through the items\n for item in self.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n tmpFactory = cgmBuffer(item)\n selectList.extend(tmpFactory.l_buffer)\n \n for item in tmpFactory.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n subTmpFactory = cgmBuffer(item) \n selectList.extend(subTmpFactory.l_buffer)\n \n else:\n selectList.append(item)\n \n mc.select(selectList)\n return\n \n log.warning(\"'%s' has no data\"%(self.mNode)) \n return False",
"def NETRCopy(self):\n for buf, nodes in self._picked_nodes.items():\n buf.copy(nodes)\n self._copied_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.redraw_if_highlight_outdated()",
"def replace(self):\n if not self.buffer.get_has_selection():\n self.next()\n else:\n if self.matches != None:\n self.buffer.begin_user_action()\n \n start_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][0])\n end_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][1])\n \n self.buffer.delete(start_iter, end_iter)\n \n self.buffer.insert(start_iter, self.entry_replace.get_text())\n self.matches_num -= 1\n \n self.__get_environment()\n #begin = self.buffer.get_start_iter()\n #end = self.buffer.get_end_iter()\n \n #begin, end = self.view.get_line_iters()\n #self.__get_matches(self.entry.get_text(), begin, end)\n self.next()\n #if self.matches != None:\n if self.matches_num != 0:\n if self.index > (self.matches_num - 1):\n self.index = 0\n \n try:\n start_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][0])\n end_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][1])\n \n self.buffer.select_range(start_iter, end_iter)\n except IndexError:\n cursor_iter = self.buffer.get_iter_at_mark(self.cursor_mark)\n self.buffer.place_cursor(cursor_iter)\n else:\n self.view.scroll_to_iter(start_iter, 0, True)\n self.buffer.end_user_action()\n else:\n cursor_iter = self.buffer.get_iter_at_mark(self.cursor_mark)\n self.buffer.place_cursor(cursor_iter)",
"def gen_memcpy(self, dst, src, count):\n # Destination pointer:\n yield instructions.Lea(rdi, dst)\n\n # Source pointer:\n yield instructions.Lea(rsi, src)\n\n yield instructions.MovImm(rcx, count) # Byte count\n yield instructions.Rep()\n yield RegisterUseDef(uses=(rcx,))\n yield instructions.Movsb()\n yield RegisterUseDef(uses=(rdi, rsi))\n\n # for x in\n # Memcopy action!\n # yield mov(rdi, arg)\n # yield mov(rsi, arg_loc)\n # yield mov(rcx, arg_loc.size)\n # yield rep()\n # yield movsb()\n # raise NotImplementedError()",
"def NETRCopy(self):\n for buf, nodes in self._picked_nodes.items():\n buf.copy(nodes)\n self._copied_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.refresh_outdated_highlight()",
"def apply_buffer(self):\n print(\"Processing Sentence buffers...\")\n self.df['buffered_story_sentence_index'] = self.df.progress_apply(self.__series_wrapper_apply_buffer, axis=1)",
"def copy(self, bfrom):\n _ldns.ldns_buffer_copy(self, bfrom)\n #parameters: ldns_buffer *, ldns_buffer *,\n #retvals: ",
"def copy(self):\n # seq length will be provided when copying, no need to pass\n return CyclerParams(sequence=self.sequence, mutation_probability=self.mutation_probability)",
"def copy(self):\n\n # Get the bounds using the top left and bottom right selected cells\n indexes = self.selectionModel().selection().indexes()\n rows = [ix.row() for ix in indexes]\n cols = [ix.column() for ix in indexes]\n\n df = self.pgdf.dataframe.iloc[min(rows): max(rows) + 1, min(cols): max(cols) + 1]\n\n # Special case for single-cell copy since df.to_clipboard appends extra newline\n if df.shape == (1, 1):\n clipboard = QtWidgets.QApplication.instance().clipboard()\n value = str(df.iloc[0, 0])\n clipboard.setText(value)\n else:\n # If I try to use Pyperclip without starting new thread large selections give access denied error\n threading.Thread(target=lambda df: df.to_clipboard(index=False, header=False), args=(df,)).start()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy selection into buffer and remove the selection from the stream. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print(html | Transformer('.//em/text()').cut(buffer) ... .end().select('.//em').after(tag.h1(buffer))) <html><head><title>Some Title</title></head><body>Some <em/><h1>body</h1> text.</body></html> Specifying accumulate=True appends all selected intervals onto the buffer. Combining this with the .buffer() operation allows us to operate on all copied events rather than per segment. See the documentation on buffer() for more information. | def cut(self, buffer, accumulate=False):
return self.apply(CutTransformation(buffer, accumulate)) | [
"def cut(self):\n self.focus()\n self.dispatch('Cut')\n return self",
"def editCut(self):\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.cut()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()\n self.nodeDelete()",
"def cut(self):\n # get the current signal selection interval\n self._edition_action(self.editionSignalProcessor.cut, CutAction)\n self.graph()",
"def _clear_temporary_buffer(self):\n # Select and remove all text below the input buffer.\n cursor = self._get_prompt_cursor()\n prompt = self._continuation_prompt.lstrip()\n if(self._temp_buffer_filled):\n self._temp_buffer_filled = False\n while cursor.movePosition(QtGui.QTextCursor.NextBlock):\n temp_cursor = QtGui.QTextCursor(cursor)\n temp_cursor.select(QtGui.QTextCursor.BlockUnderCursor)\n text = temp_cursor.selection().toPlainText().lstrip()\n if not text.startswith(prompt):\n break\n else:\n # We've reached the end of the input buffer and no text follows.\n return\n cursor.movePosition(QtGui.QTextCursor.Left) # Grab the newline.\n cursor.movePosition(QtGui.QTextCursor.End,\n QtGui.QTextCursor.KeepAnchor)\n cursor.removeSelectedText()\n\n # After doing this, we have no choice but to clear the undo/redo\n # history. Otherwise, the text is not \"temporary\" at all, because it\n # can be recalled with undo/redo. Unfortunately, Qt does not expose\n # fine-grained control to the undo/redo system.\n if self._control.isUndoRedoEnabled():\n self._control.setUndoRedoEnabled(False)\n self._control.setUndoRedoEnabled(True)",
"def empty(doc, selection, selectmode=None):\n beg = selection[0][0]\n return Selection(Interval(beg, beg))",
"def NETRCut(self):\n for buf, nodes in self._picked_nodes.items():\n buf.cut(nodes)\n self._cut_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.refresh_outdated_highlight()",
"def NETRCut(self):\n for buf, nodes in self._picked_nodes.items():\n buf.cut(nodes)\n self._cut_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.redraw_if_highlight_outdated()",
"def clear(self):\r\n self.prepare()\r\n self.buffer[:] = [self.dtext]\r\n self.firstwrite = 1",
"def cut(self, loc):\n \n val = self.body.pop()\n while val != loc:\n val = self.body.pop()",
"def cut_copy_paste_del_sel_event(self, event):\n\n if event.GetId() == wx.ID_CUT or wx.ID_COPY or wx.ID_PASTE or wx.ID_DELETE or wx.ID_SELECTALL:\n self.cut_copy_paste_del_sel_action(event)\n else:\n event.Skip()",
"def cut(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n clip.setText(content.text())\n self.tableWidget.setItem(row, col, QTableWidgetItem(str()))\n self.isChanged = True\n else:\n pass",
"def __cutAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)\n self.chatEdit.clear()",
"def texCutContext(*args, **kwargs):\n\n pass",
"def select(self): \n if self.l_buffer:\n selectList = []\n # Need to dig down through the items\n for item in self.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n tmpFactory = cgmBuffer(item)\n selectList.extend(tmpFactory.l_buffer)\n \n for item in tmpFactory.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n subTmpFactory = cgmBuffer(item) \n selectList.extend(subTmpFactory.l_buffer)\n \n else:\n selectList.append(item)\n \n mc.select(selectList)\n return\n \n log.warning(\"'%s' has no data\"%(self.mNode)) \n return False",
"def cut_to_clipboard(self, widget, data=None):\n\t\t#print \"Copying text\"\n\t\tbuff = self._get_buffer()\n\t\tbuff.cut_clipboard(self.clipboard, True)",
"def cut(self, matches):\n\t\t# TODO move these lines out of here\n\t\tread = matches[0].read\n\n\t\tif __debug__:\n\t\t\told_length = len(read.sequence)\n\t\tassert matches\n\t\tif self.trim:\n\t\t\t# The last match contains a copy of the read it was matched to.\n\t\t\t# No iteration is necessary.\n\t\t\tread = matches[-1].adapter.trimmed(matches[-1])\n\n\t\t\t# if an adapter was found, then the read should now be shorter\n\t\t\tassert len(read.sequence) < old_length\n\n\t\t\tif self.mask_adapter:\n\t\t\t\t# add N from last modification\n\t\t\t\tmasked_sequence = matches[-1].adapter.trimmed(matches[-1]).sequence\n\t\t\t\tfor match in sorted(matches, reverse=True):\n\t\t\t\t\tns = 'N' * (len(match.read.sequence) -\n\t\t\t\t\t\t\t\tlen(match.adapter.trimmed(match).sequence))\n\t\t\t\t\t# add N depending on match position\n\t\t\t\t\tif match.front:\n\t\t\t\t\t\tmasked_sequence = ns + masked_sequence\n\t\t\t\t\telse:\n\t\t\t\t\t\tmasked_sequence += ns\n\t\t\t\t# set masked sequence as sequence with original quality\n\t\t\t\tread.sequence = masked_sequence\n\t\t\t\tread.qualities = matches[0].read.qualities\n\t\t\t\tread.trimmed = True\n\n\t\t\t\tassert len(read.sequence) == old_length\n\n\t\tself.reads_matched += 1 # TODO move to filter class\n\n\t\tif self.rest_writer:\n\t\t\tself.rest_writer.write(matches[-1])\n\n\t\treturn read",
"def remove_selected(self):\n idx = 0\n for i in list(self.selection):\n idx = self.index(i)\n self.remove(i)\n new = max(0, (idx - 1))\n if len(self) > new:\n self.selection.add(self[new])",
"def cut(self, nodes):\n with self.history.command_context(\"cut\"):\n clipboard = self.copy(nodes)\n\n # Delete nodes\n self.delete_nodes(nodes)\n\n return clipboard",
"def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n obj.ViewObject.Visibility = False\n # try to add it at same tree location\n obj_parent = find_Parent(obj)\n if obj_parent:\n obj_parent.addObject(obj_new)\n\n #\n\n App.ActiveDocument.recompute()",
"def to_cut(self):\n from lhotse.cut import MonoCut, MultiCut\n\n cls = MonoCut if self.num_channels == 1 else MultiCut\n return cls(\n id=self.id,\n start=0.0,\n duration=self.duration,\n channel=self.channel_ids[0] if self.num_channels == 1 else self.channel_ids,\n recording=self,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies a function to the ``data`` element of events of ``kind`` in the selection. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print(html | Transformer('head/title').map(unicode.upper, TEXT)) <html><head><title>SOME TITLE</title></head><body>Some <em>body</em> text.</body></html> | def map(self, function, kind):
return self.apply(MapTransformation(function, kind)) | [
"def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.kind,\n ' '.join(\n t.spelling for t in node.get_tokens())\n ),\n file=sys.stderr\n )",
"def transform(self, data):\n\t\t\n\t\tfor t in self.transformer_list:\n\t\t\tdata = t.transform(data)\n\t\t\t\n\t\treturn data",
"def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset",
"def _apply_chapter(self,\n chapter: 'Chapter',\n data: Union['Dataset', 'Book']) -> 'Chapter':\n for technique in chapter.techniques:\n data = technique.apply(data = data)\n setattr(chapter, 'data', data)\n return chapter",
"def pipe(data, *funcs):\n for func in funcs:\n data = func(data)\n return data",
"def transform(func):\n WalkoffTag.transform.tag(func)\n return func",
"def __process_data(self, data):\n return self.mlda.transform(data)",
"def apply_to_collection(\n data: Any,\n dtype: Union[type, tuple],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, tuple]] = None,\n **kwargs: Any,\n) -> Any:\n elem_type = type(data)\n\n # Breaking condition\n if isinstance(data, dtype) and (\n wrong_dtype is None or not isinstance(data, wrong_dtype)\n ):\n return function(data, *args, **kwargs)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n return elem_type(\n {\n k: apply_to_collection(v, dtype, function, *args, **kwargs)\n for k, v in data.items()\n }\n )\n\n if isinstance(data, tuple) and hasattr(data, \"_fields\"): # named tuple\n return elem_type(\n *(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data)\n )\n\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type(\n [apply_to_collection(d, dtype, function, *args, **kwargs) for d in data]\n )\n\n # data is neither of dtype, nor a collection\n return data",
"def map(f,data):\n for item in data:\n yield f(item)",
"def transform(self, dataset, params={}):\n raise NotImplementedError()",
"def translate(data, taxfiledirect, mode='species'):\n t = makeTrans(mode, taxfiledirect)\n for i in range(len(data['category'])):\n data['category'][i] = changeCategory(t, data['hierarchy'][i])\n return data",
"def process(self, instance: _Traversable):\n try:\n return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)\n except AttributeError as e:\n raise RuntimeError(\n 'This visitor does not support {}'.format(type(instance))\n ) from e",
"def apply_transform_to_type(self, typedef):\n for iconv in self.transform:\n if not iconv.original_datatype:\n iconv.set_original_datatype(typedef)\n typedef = iconv.transformed_datatype\n return typedef",
"def execute_sequence(data, rule):\n for func in rule:\n for index, value in enumerate(data):\n func_name = \"_\".join([\"fun\", func])\n # run function by name and redefine value of data element\n data[index] = getattr(functions, func_name)(value)\n return data",
"def map(self, col_name: str, func):\n self._validate_col_name(col_name)\n self.data_table[col_name] = [func(x) for x in self.data_table[col_name]]",
"def apply(self, fn, dtype):\n if not callable(fn):\n raise TypeError('Input must be a function.')\n if not type(dtype) is type:\n raise TypeError('Dtype must be a type')\n\n return XStream(impl=self._impl.apply(fn, dtype))",
"def process(context, boundTo, data):",
"def apply_to_fig_text(fig: mpl.figure.Figure, fn: Callable[[str], str]):\n for text in fig.findobj(match=plt.Text):\n text.set_text(fn(text.get_text()))",
"def pipeline_each(data, fns):\n\tfrom functools import reduce\n\treturn reduce(lambda a, x: list(map(x, a)), fns, data)",
"def _map_write_functions(self, data: pd.DataFrame) -> accepted_methods:\n function_map = {\n \"parquet\": data.to_parquet,\n \"csv\": data.to_csv,\n \"xls\": data.to_excel,\n \"xlsx\": data.to_excel,\n \"dat\": data.to_csv,\n \"data\": data.to_csv\n }\n return function_map.get(self.path.file_type)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generator that parses the HTML source, yielding markup events. | def parse(self):
def _generate():
if self.encoding:
reader = codecs.getreader(self.encoding)
source = reader(self.source)
else:
source = self.source
try:
bufsize = 4 * 1024 # 4K
done = False
while 1:
while not done and len(self._queue) == 0:
data = source.read(bufsize)
if not data: # end of data
self.close()
done = True
else:
if not isinstance(data, unicode):
raise UnicodeError("source returned bytes, but no encoding specified")
self.feed(data)
for kind, data, pos in self._queue:
yield kind, data, pos
self._queue = []
if done:
open_tags = self._open_tags
open_tags.reverse()
for tag in open_tags:
yield END, QName(tag), pos
break
except html.HTMLParseError, e:
msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)
raise ParseError(msg, self.filename, e.lineno, e.offset)
return Stream(_generate()).filter(_coalesce) | [
"def parse(self):\r\n def _generate():\r\n if self.encoding:\r\n reader = codecs.getreader(self.encoding)\r\n source = reader(self.source)\r\n else:\r\n source = self.source\r\n try:\r\n bufsize = 4 * 1024 # 4K\r\n done = False\r\n while 1:\r\n while not done and len(self._queue) == 0:\r\n data = source.read(bufsize)\r\n if not data: # end of data\r\n self.close()\r\n done = True\r\n else:\r\n if not isinstance(data, str):\r\n raise UnicodeError(\"source returned bytes, but no encoding specified\")\r\n self.feed(data)\r\n for kind, data, pos in self._queue:\r\n yield kind, data, pos\r\n self._queue = []\r\n if done:\r\n open_tags = self._open_tags\r\n open_tags.reverse()\r\n for tag in open_tags:\r\n yield END, QName(tag), pos\r\n break\r\n except html.HTMLParseError as e:\r\n msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)\r\n raise ParseError(msg, self.filename, e.lineno, e.offset)\r\n return Stream(_generate()).filter(_coalesce)",
"def _parse_html_source( self, htmlsource ):\n try:\n parser = Parser()\n parser.feed( htmlsource )\n parser.close()\n return parser.tags, parser.url\n except: return None, None",
"def event_generator(self):\n with open(self.event_file, \"r\") as events:\n _ = events.readline() # skip header\n for event in events:\n yield self.process_event(event)\n # generator needs to be indefinite\n while True:\n yield [0],[0]",
"def parse(self, data):\n self.reset()\n try:\n self.feed(data)\n except HTMLParseError:\n pass\n self.finish()\n return self.items()",
"def events():\n for el in _list_events():\n yield Event(el)",
"def test_parse_semantics(self):\n\n items = pulldom.parseString(SMALL_SAMPLE)\n evt, node = next(items)\n # Just check the node is a Document:\n self.assertTrue(hasattr(node, \"createElement\"))\n self.assertEqual(pulldom.START_DOCUMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"html\", node.tagName)\n self.assertEqual(2, len(node.attributes))\n self.assertEqual(node.attributes.getNamedItem(\"xmlns:xdc\").value,\n \"http://www.xml.com/books\")\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt) # Line break\n evt, node = next(items)\n # XXX - A comment should be reported here!\n # self.assertEqual(pulldom.COMMENT, evt)\n # Line break after swallowed comment:\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(\"title\", node.tagName)\n title_node = node\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n self.assertEqual(\"Introduction to XSL\", node.data)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"title\", node.tagName)\n self.assertTrue(title_node is node)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"p\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n # XXX No END_DOCUMENT item is ever obtained:\n #evt, node = next(items)\n #self.assertEqual(pulldom.END_DOCUMENT, evt)",
"def __parse_all(self):\n try:\n self.__log.debug(_('Seeking file until header is found'))\n self.__find_file_header()\n self.__log.debug(_('Parsing all entries'))\n while True:\n yield self.__parse_one()\n except StartTokenNotFoundError:\n self.__log.debug(_('Finished parsing entries'))\n return",
"def blog_entry(html):\n blog_entry_pattern = r'<span class=\"date\">(.*)</span>\\s*<a href=\"(.*)\" target=\"_blank\" class=\"list-title\">(.*)</a>'\n for m_obj in re.finditer(blog_entry_pattern, html):\n log.debug('(Master) Producing blog entries {} {} {}'\n .format(m_obj.group(1), m_obj.group(2), m_obj.group(3)))\n yield m_obj.group(1), m_obj.group(2), m_obj.group(3)",
"def parse(self, source: Any) -> Any:\n if isinstance(source, etree.ElementTree):\n source = source.getroot()\n\n if isinstance(source, etree.Element):\n ctx = iterwalk(source, {})\n elif self.parser.config.process_xinclude:\n root = etree.parse(source).getroot() # nosec\n base_url = get_base_url(self.parser.config.base_url, source)\n loader = functools.partial(xinclude_loader, base_url=base_url)\n\n xinclude.include(root, loader=loader)\n ctx = iterwalk(root, {})\n else:\n ctx = etree.iterparse(source, EVENTS) # nosec\n\n return self.process_context(ctx)",
"def parse(self, beautiful_html):\n return beautiful_html",
"def processHTML(html, url=\"\"):\n # Decide here what you want to do with the content\n return",
"def parse(self):\n reader_args = (self.filename,\n self.fs,\n self.header,\n self.max_lines,\n self.field_pre_filter,\n self.record_pre_filter)\n\n with Reader(*reader_args) as reader:\n for nr, record in enumerate(reader, 1): # line numbers start from 1\n record = self.record_func(nr, self._parse_fields(record))\n if self.record_post_filter(nr, record):\n yield record",
"def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)",
"def __iter__(self):\n\n # case of the first page\n start_time = time.time()\n page = HTMLPage(self.seed_url)\n self.last_crawl_duration = time.time() - start_time\n self.pages_crawled.add(seed_url)\n self.domains_crawled.add(extract_domains_from_url(seed_url)[1])\n self.update_pages_to_be_crawled(page)\n yield page \n \n # all the other pages\n while (self.pages_to_be_crawled and\n len(self.pages_crawled) < self.max_crawled_pages):\n url = self.pages_to_be_crawled.pop()\n start_time = time.time()\n page = HTMLPage(url)\n self.last_crawl_duration = time.time() - start_time\n self.pages_crawled.add(url)\n self.domains_crawled.add(extract_domains_from_url(url)[1])\n self.update_pages_to_be_crawled(page)\n yield page\n raise StopIteration",
"def parse(self, response):\n\t\tlogging.info('started scraping {}'.format(response.url))\n\t\tpage = json.loads(response.text)['pagecontent']\n\t\tlinks = Selector(text=page).css(\"div.col-xs-12>a::attr(href)\").getall()\n\t\tlogging.info('finished scraping'.format(response.url))\n\t\tif len(links) == self.per_page:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telif response.meta['num'] == self.num_of_pages:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telse:\n\t\t\tlogging.warning('the chosen selector did not find all the links \\\nwhich are on the page {}'.format(response.url))\n\t\t\traise CloseSpider(\"not all the links were found on the page {}. The\\\n selector has to be changed\".format(response.url))",
"def get_items(self):\n # Use `iterparse`, it's more efficient, specially for big files\n context = ElementTree.iterparse(self.source, events=(\"start\", \"end\"))\n context = iter(context)\n event, root = context.next()\n for event, item in context:\n if item.tag == self.item_tag_name and event == \"end\":\n yield item\n # Releases the item from memory\n item.clear()\n root.clear()",
"def _html_splitlines(lines):\n open_tag_re = re.compile(r'<(\\w+)\\s.*?[^/]?>')\n close_tag_re = re.compile(r'</(\\w+)>')\n open_tags = []\n for line in lines:\n # Reopen tags still open from the previous line\n for tag in open_tags:\n line = tag.group(0) + line\n open_tags = []\n\n # Find all tags opened on this line\n for tag in open_tag_re.finditer(line):\n open_tags.append(tag)\n\n # Find all tags closed on this line\n for ctag in close_tag_re.finditer(line):\n for otag in open_tags:\n if otag.group(1) == ctag.group(1):\n open_tags.remove(otag)\n break\n\n # Close all tags still open at the end of line, they'll get reopened at\n # the beginning of the next line\n for tag in open_tags:\n line += '</%s>' % tag.group(1)\n\n yield line",
"def parse(self, source):\n\n rt, title, title_pic, markdown = libparser.parse(source)\n\n if rt == -1:\n raise SeparatorNotFound\n elif rt == -2:\n raise PostTitleNotFound\n\n # change to unicode\n title, title_pic, markdown = map(to_unicode, (title, title_pic,\n markdown))\n\n # render to html\n html = self.markdown.render(markdown)\n summary = self.markdown.render(markdown[:200])\n\n return {\n 'title': title,\n 'markdown': markdown,\n 'html': html,\n 'summary': summary,\n 'title_pic': title_pic\n }",
"def iter_contents(self):\n return\n yield"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a serializer object for the given method. | def get_serializer(method='xml', **kwargs):
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs) | [
"def get_serializer(method='xml', **kwargs):\r\n if isinstance(method, str):\r\n method = {'xml': XMLSerializer,\r\n 'xhtml': XHTMLSerializer,\r\n 'html': HTMLSerializer,\r\n 'text': TextSerializer}[method.lower()]\r\n return method(**kwargs)",
"def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()",
"def get_serializer(self, format):\n creator = self.serializer_format_dict.get(format.upper())\n if not creator:\n raise ValueError(format)\n\n return creator()",
"def get_serializer_class(self):\n \n if self.action == 'list':\n return FooSerializer\n elif self.action == 'retrieve':\n return FooSerializer\n elif self.action == 'create':\n return FooSerializer\n return FooSerializer",
"def get_serializer_class(self):\n if self.action in {\"create\", \"update\"}:\n return TransactionSerializer\n return TransactionSummarySerializer",
"def _get_serializer(self, model, serializer):\n app_lbl = getattr(model, \"_meta\").app_label\n package = apps.get_app_config(app_lbl).module\n\n if \".\" in serializer: # pragma: no cover\n module, serializer = serializer.split(\".\", 1)\n\n else:\n module = \"serializers\"\n\n module = import_module(\".\".join((package.__name__, module)))\n return getattr(module, serializer)",
"def get_pagination_serializer(self, page):\n class SerializerClass(self.pagination_serializer_class):\n class Meta:\n object_serializer_class = self.get_serializer_class()\n\n pagination_serializer_class = SerializerClass\n context = self.get_serializer_context()\n return pagination_serializer_class(instance=page, context=context)",
"def get_pagination_serializer(self, page=None):\n class SerializerClass(self.pagination_serializer_class):\n class Meta:\n object_serializer_class = self.get_serializer_class()\n\n pagination_serializer_class = SerializerClass\n context = self.get_serializer_context()\n return pagination_serializer_class(instance=page, context=context)",
"def get_serializer_class(self):\n if self.action == \"retrieve\":\n return QuizAnalyticsSerializer\n return super().get_serializer_class()",
"def get_model_serializer(model_class):\n serializer = {\n DiscoveredPackage: DiscoveredPackageSerializer,\n CodebaseResource: CodebaseResourceSerializer,\n }.get(model_class, None)\n\n if not serializer:\n raise LookupError(f\"No Serializer found for {model_class}\")\n\n return serializer",
"def get_serializer_class(self):\n if self.action == \"list\":\n return BalanceSummarySerializer\n elif self.action in {\"create\", \"update\"}:\n return BalanceSerializer\n return BalanceDetailSerializer",
"def get_serializer_class(self):\n if self.action in {'list', 'create'}:\n return BalanceSheetSummarySerializer\n return BalanceSheetDetailSerializer",
"def get_serializer_class(self):\n if self.action == \"retrieve\":\n return VideoAnalyticsSerializer\n return super().get_serializer_class()",
"def new(cls, method=\"json\", **kwargs):\n assert method in (\"json\", \"yaml\"), \"Unknown method.\"\n new_ = cls.__new__(cls)\n if method == \"json\":\n new_.from_JSON(**kwargs)\n elif method == \"yaml\":\n new_.from_YAML(**kwargs)\n return new_",
"def serializer_factory(model, serializer_class=serializers.ModelSerializer, attrs=None, meta=None):\n attrs = attrs or {}\n meta = meta or {}\n meta.setdefault(\"model\", model)\n attrs.setdefault(\"Meta\", type(str(\"Meta\"), (object,), meta))\n return type(str(\"%sSerializer\" % model.__name__), (serializer_class,), attrs)",
"def get_serializer_class(self):\n if self.action == \"retrieve\":\n return CourseAnalyticsSerializer\n return super().get_serializer_class()",
"def get_serializer_class(self):\n if self.request.auth and self.request.user.is_active:\n serializer = self.serializer_class\n else:\n serializer = UserPartialSerializer\n\n return serializer",
"def get_serializer_class(self):\n if self.request.method == \"POST\":\n return UserCreationSerializer\n else: \n return UserPublicOnlySerializer",
"def get_serializer_class(self):\n if self.action == \"retrieve\":\n return AssignmentAnalyticsSerializer\n return super().get_serializer_class()",
"def _method(self, verb):\n base_method = super(CustomApiClient, self)._method(verb)\n\n def method(*args, **kwargs):\n kwargs.update(\n {\n \"group\": self.group,\n \"version\": self.version,\n \"plural\": self.plural,\n }\n )\n\n # Convert body to_dict if it's a CustomObject as\n # `python-kubernetes` want a dict or a specific objects with\n # some attributes like `openapi_types`, `attributes_map`, ...\n if isinstance(kwargs.get(\"body\"), CustomObject):\n kwargs[\"body\"] = kwargs[\"body\"].to_dict()\n\n result = base_method(*args, **kwargs)\n\n # TODO: do we have a result for `delete` methods?\n return CustomObject(result)\n\n method.__doc__ = \"{verb} a {kind} {scope} object.\".format(\n verb=verb.capitalize(),\n kind=\"{s.group}/{s.version}/{s.kind}\".format(s=self),\n scope=self.scope.value,\n )\n\n return method"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the HTML serializer. | def __init__(self, doctype=None, strip_whitespace=True, cache=True):
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache | [
"def __init__(self, encoding= 'latin-1'):\n html.parser.HTMLParser.__init__(self)\n self._reset()\n self.encoding = encoding",
"def init_renderers(cls):",
"def __init__(self, template):\n self.template = template\n \n with open(template) as f:\n logging.info(\"HTMLExport has opened the file {}\".format(template))\n self.text = f.read()",
"def __init__(self, temboo_session):\n super(HTMLEscape, self).__init__(temboo_session, '/Library/Utilities/Encoding/HTMLEscape')",
"def __init__(self, page_content):\n self.soup = BeautifulSoup(page_content, \"html.parser\")",
"def __init__(self):\n\n self.nodes = {}\n self.rendered = False",
"def __init__(self, arch):\n HTMLParser.__init__(self)\n self.arch_link = None\n self.links = []\n self._match = '_%s-' % arch",
"def __init__(self):\n try:\n HTMLParser.__init__(self)\n\n self.good_data = True\n\n self.title = None\n self.in_title = False\n self.is_same_month = False\n\n self.in_tbody = False\n self.in_abbr = False\n self.in_td = False\n\n self.year = None\n self.month = None\n\n self.tr_column_count = 3\n\n self.weather = {}\n self.weather_current_key = None\n\n self.temps_list = []\n\n except Exception as error:\n print(f\"WeatherScrapper::__init__::{error}\")",
"def test_initialization(self):\n translator = rest.CustomHTMLTranslator(self.doc)\n self.assertEqual(translator.initial_header_level, 2)\n self.assertEqual(translator.head, [])\n self.assertEqual(translator.meta, [])\n self.assertEqual(translator.head_prefix, ['', '', '', '', ''])\n self.assertEqual(translator.body_prefix, [])\n self.assertEqual(translator.body_suffix, [])\n self.assertEqual(translator.stylesheet, [])\n self.assertEqual(translator.generator, (''))",
"def __init__(self, **kwargs: Any) -> None:\n template_loader = jinja2.FileSystemLoader(searchpath=str(pathlib.Path(__file__).parent / \"jinja\" / \"templates\"))\n self.template_parser = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True,\n autoescape=True,\n **kwargs,\n )",
"def __init__(\n self,\n html_parser,\n css_parser,\n configure,\n symbol_file_name=None\n ):\n\n helper.require_not_none(html_parser, css_parser, configure)\n helper.require_valid_type(html_parser, HTMLDOMParser)\n helper.require_valid_type(css_parser, StyleSheetParser)\n helper.require_valid_type(configure, Configure)\n helper.require_valid_type(symbol_file_name, str)\n\n self.html_parser = html_parser\n self.css_parser = css_parser\n self.configure = configure\n self._set_symbols(symbol_file_name, configure)",
"def html(self):\n if not self._html:\n self._html = parse(self.input_doc, self.options.get('url'))\n\n return self._html",
"def __init__(self, engine, indexer):\n template_t.__init__(self, engine, indexer)\n\n self.list_indent_per_level=4\n self.m_contents = ''\n\n # The list of generated cross references to avoid duplication\n self.m_cross_references = {}",
"def _init(self):\n self.stylesheet = self._get_stylesheet()\n self._register_fonts()",
"def __init__(self, *args, **kwargs):\n\n super(ElementForm, self).__init__(*args, **kwargs)\n\n # Set the form fields based on the model object\n if kwargs.has_key('instance'):\n initial_values = []\n for lang in settings.LANGUAGES:\n html = getattr(kwargs['instance'],'html_%s' % lang[0])\n if html == None:\n html = getattr(kwargs['instance'],'html_%s' % settings.LANGUAGES[0][0])\n soup = BeautifulSoup(html)\n initial_values.append(soup.label.text)\n\n self.initial['question'] = initial_values",
"def __init__(self, link):\n # validate args\n if not isinstance(link, Link) or not link.is_valid:\n raise ArgumentError('HTMLPage class should be instantiated with ' \\\n 'a valid Link object. Got this: %s' %link)\n self.link = link\n\n # fetch the actual webpage\n response = requests.get(link.url)\n self.status_code = response.status_code\n self.html_content = response.text.encode('utf8')\n self.text_content = HTMLUtils.html_to_text(self.html_content)\n self.encoding = response.encoding\n\n # fetch all child links\n self.child_links = self._get_all_links()",
"def __init__(self):\n this = _coin.new_ScXMLAnchorElt()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, code):\n code = str(code)\n if os.path.isfile(code):\n with open(code, 'r') as markup:\n data = markup.read()\n code = data\n self._get_issue_info(code)\n # DOCTYPE fix for Ismaili Insight newsletter\n code = re.sub(\n r'<!DOCTYPE HTML PUBLIC “-//W3C//DTD HTML 4\\.01 Transitional//EN” “http://www\\.w3\\.org/TR/html4/loose\\.dtd”>', # noqa\n '<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">',\n code, flags=re.I)\n self._data = bs4.BeautifulSoup(code, 'html5lib')",
"def __init__(self, widget_id):\n Renderer.__init__(self)\n\n self.widget_id = widget_id\n self.name = None\n self.description = None\n self.creation_date = None\n\n self.load_data(self.widget_id)",
"def __init__(self):\n this = _coin.new_ScXMLDocument()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests if two node tests are equal. | def nodes_equal(node1, node2):
if type(node1) is not type(node2):
return False
if type(node1) == LocalNameTest:
return node1.name == node2.name
return True | [
"def test_node_eq(self):\n node1 = ts.Node('a', 2)\n assert node1 == copy.copy(node1)\n assert node1 != ts.Node('b', 2)\n assert node1 != ts.Node('a', 3)\n assert node1 != ts.Node('a', 2, ts.Node('a', 1))\n assert ts.Node('a', 2, ts.Node('a', 1)) != node1\n node1.left = ts.Node('a', 1)\n assert ts.Node('a', 2, ts.Node('a', 1)) == node1\n assert not node1 != node1",
"def isEqualToNode(self, other):\n is_lower = self.nodeName.lower() == other.nodeName.lower()\n same_name = self.namespace == other.namespace\n same_attrs = self.attributes == other.attributes\n is_equal = Node.isEqualToNode(self, other)\n return all([is_lower, same_name, same_attrs, is_equal])",
"def test_compare(self):\n node = DOMNode(1, 'div', 'value', {}, 1, '')\n self.assertEqual(node, node)\n\n node_1 = DOMNode(1, 'div1', 'value', {}, 1, '')\n self.assertNotEqual(node, node_1)\n\n node_2 = DOMNode(2, 'div', 'value', { \"abbc\": \"bbcc\" }, 1, '')\n self.assertNotEqual(node, node_1)\n\n node_3 = DOMNode(3, 'div', 'value', { \"abbc\": \"bbcc\" }, 1, '')\n self.assertNotEqual(node, node_3)\n self.assertEqual(node_2, node_3)",
"def _assert_text_node_equal(expected, actual):\n assert expected.value == actual.value",
"def __eq__(self, other):\n if self.nodes == other.nodes:\n return True\n else:\n return False",
"def _assert_tag_node_equal(expected, actual):\n assert_wikicode_equal(expected.tag, actual.tag)\n if expected.contents is not None:\n assert_wikicode_equal(expected.contents, actual.contents)\n else:\n assert actual.contents is None\n length = len(expected.attributes)\n assert length == len(actual.attributes)\n for i in range(length):\n exp_attr = expected.attributes[i]\n act_attr = actual.attributes[i]\n assert_wikicode_equal(exp_attr.name, act_attr.name)\n if exp_attr.value is not None:\n assert_wikicode_equal(exp_attr.value, act_attr.value)\n assert exp_attr.quotes == act_attr.quotes\n else:\n assert act_attr.value is None\n assert exp_attr.pad_first == act_attr.pad_first\n assert exp_attr.pad_before_eq == act_attr.pad_before_eq\n assert exp_attr.pad_after_eq == act_attr.pad_after_eq\n assert expected.wiki_markup == actual.wiki_markup\n assert expected.self_closing is actual.self_closing\n assert expected.invalid is actual.invalid\n assert expected.implicit is actual.implicit\n assert expected.padding == actual.padding\n assert_wikicode_equal(expected.closing_tag, actual.closing_tag)",
"def are_different(node1, node2):\r\n different_children = True\r\n for child1 in node1.children:\r\n for child2 in node2.children:\r\n if child1.md5 == child2.md5:\r\n different_children = False\r\n\r\n return different_children",
"def test_node_methods(self):\n\n metrics = set()\n\n node = Node('uuid-server-hardware', metrics)\n\n self.assertTrue(node.__eq__(node))\n self.assertFalse(node.__eq__(None))\n self.assertFalse(node is Node('other_node', metrics))\n self.assertEquals(node.__hash__(), node.__hash__())\n self.assertEquals(node.__repr__(), node.__repr__())",
"def isEqualToNode(self, other):\n if len(self.childNodes) != len(other.childNodes):\n return False\n\n for a, b in zip(self.childNodes, other.childNodes):\n if not a.isEqualToNode(b):\n return False\n\n return True",
"def _nodes_are_equivalent(G, node_a, node_b, max_history):\n return G.node[node_a][\"label\"] == G.node[node_b][\"label\"] and (\n _outgoing_edges_are_similar(G, node_a, node_b) or\n _incoming_edges(G, node_a) == _incoming_edges(G, node_b) or\n _fingerprint_node(G, node_a, max_history) == _fingerprint_node(G, node_b, max_history))",
"def test_cmp(self):\n nodes = self.TreeNode\n self.assertEqual(cmp(nodes['a'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['b'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['a'], nodes['b']), 0)",
"def _assert_wikilink_node_equal(expected, actual):\n assert_wikicode_equal(expected.title, actual.title)\n if expected.text is not None:\n assert_wikicode_equal(expected.text, actual.text)\n else:\n assert actual.text is None",
"def _assert_html_entity_node_equal(expected, actual):\n assert expected.value == actual.value\n assert expected.named is actual.named\n assert expected.hexadecimal is actual.hexadecimal\n assert expected.hex_char == actual.hex_char",
"def _assert_template_node_equal(expected, actual):\n assert_wikicode_equal(expected.name, actual.name)\n length = len(expected.params)\n assert length == len(actual.params)\n for i in range(length):\n exp_param = expected.params[i]\n act_param = actual.params[i]\n assert_wikicode_equal(exp_param.name, act_param.name)\n assert_wikicode_equal(exp_param.value, act_param.value)\n assert exp_param.showkey is act_param.showkey",
"def test_edge_match(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"T\")\n self.assertTrue(e1.matches(e2))",
"def test_eq_other_peer(self):\n uri = 'netstring://192.168.0.1:9999'\n version = get_version()\n last_seen = 123\n contact1 = PeerNode(PUBLIC_KEY, version, uri, last_seen)\n contact2 = PeerNode(PUBLIC_KEY, version, uri, last_seen)\n self.assertTrue(contact1 == contact2)",
"def __eq__(self, other):\n if isinstance(other, Node):\n return self.id == other.id\n else:\n return False",
"def _compare_ast(self, left_ast, right_ast):\n self.assertEqual(lmast.dump(left_ast), lmast.dump(right_ast))",
"def __ne__(self, node2):\n\t\t#return self._element == node2._element and self._name == node2._name\n\t\treturn not self == node2"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert value to a scalar. If a single-element Attrs() object is passed, the value of the single attribute will be returned. | def as_scalar(value):
if isinstance(value, Attrs):
assert len(value) == 1
return value[0][1]
else:
return value | [
"def to_scalar(self, v):\n if v is None:\n return v\n else:\n return v.asnumpy().item()",
"def extractValue(self, model, item):\n return getattr(item, self.attribute.attrname)",
"def Value(self):\n if self.IsNull:\n return None\n elif self.IsBool:\n return self.AsBool\n elif self.IsInt:\n return self.AsInt\n elif self.IsFloat:\n return self.AsFloat\n elif self.IsString:\n return self.AsString\n elif self.IsKey:\n return self.AsKey\n elif self.IsBlob:\n return self.AsBlob\n elif self.IsMap:\n return self.AsMap.Value\n elif self.IsVector:\n return self.AsVector.Value\n elif self.IsTypedVector:\n return self.AsTypedVector.Value\n elif self.IsFixedTypedVector:\n return self.AsFixedTypedVector.Value\n else:\n raise TypeError('cannot convert %r to value' % self)",
"def first_attr_value(self, attrname):\n if not self.members:\n return None\n return getattr(self.members[0], attrname)",
"def extract_scalar(obj, dtype, to_numpy=True):\n val = dtype(obj.value)\n return val if to_numpy else (val, dtype)",
"def get_scalar(self, name):\n return self._entries[\"Scalar\"][name].get_content() if name in self._entries[\"Scalar\"] else None",
"def attr_1(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.attr1\", self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)",
"def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attributes_struct\n if x['Name'] == attr]\n if len(attribute_struct) > 1:\n raise tdapi.TDException(\"Too many attributes with name {}\".format(attr))\n elif len(attribute_struct) == 0:\n return\n else:\n return attribute_struct[0]['Value']",
"def get_value_for(self, instance):\n value = super(ValueMapFullTextAttr, self).get_value_for(instance)\n # handle error if value_map doesn't have mapping for the given value\n return self.value_map.get(value, None)",
"def get_value(self, agent_id: str, scene_ts: int, attribute: str) -> float:\n raise NotImplementedError()",
"def GetScalar(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetScalar(self)",
"def scalar_value(self, matrix):\r\n return matrix[0, 0]",
"def get_value(self):\n return self.node.value()",
"def get_attribute_value(self, attribute_name):\n return self.attributes[attribute_name]",
"def convert_value(self):\n field = self.output_field\n internal_type = field.get_internal_type()\n if internal_type == \"FloatField\":\n return (\n lambda value, expression, connection: None\n if value is None\n else float(value)\n )\n elif internal_type.endswith(\"IntegerField\"):\n return (\n lambda value, expression, connection: None\n if value is None\n else int(value)\n )\n elif internal_type == \"DecimalField\":\n return (\n lambda value, expression, connection: None\n if value is None\n else Decimal(value)\n )\n return self._convert_value_noop",
"def getBasicAttribute(self, name):\n return getattr(self, \"_\" + name + \"_value_\").getValue()",
"def get_value(self, d):\n try:\n return d['marginal_carbon']['value']\n except (KeyError, TypeError):\n return None",
"def _get_standardized_value(self, value):\n if value is None:\n return None\n\n if isinstance(value, list):\n\n # If the list contains MiniFieldStorage objects then loop\n # through and get the values.\n if any(isinstance(storage_obj, MiniFieldStorage) for storage_obj in value):\n values = [storage_obj.value for storage_obj in value]\n\n # TODO: This needs to be removed in 2.2. A breaking change but\n # this code will result in inconsistent values\n # If there is only 1 element in the list then return the only value in the list\n if len(values) == 1:\n return values[0]\n return values\n\n return value\n\n if isinstance(value, (str, int, dict)):\n return value\n\n if not value.filename:\n return value.value\n\n if value.filename:\n return value\n\n return False",
"def scalar_data(self):\n return self._scalar_data",
"def touch(attribute, attr_value, is_wnva):\n # ====================================================================#\n # Detect Empty Attribute Values\n if attr_value is None or len(str(attr_value)) < 1:\n return None\n # ====================================================================#\n # STR? Search or Create Attribute by Code\n if isinstance(attribute, str):\n attribute = AttributesHelper.touch(attribute, is_wnva)\n if attribute is None:\n Framework.log().error(\"An Error Occurred while Loading Attribute\")\n return None\n # ====================================================================#\n # Search for Value in Attribute\n values = attribute.value_ids.filtered(lambda r: r.name.lower() == attr_value.lower())\n if len(values) > 0:\n return values[0]\n # ====================================================================#\n # Crate New Value for Attribute\n return ValuesHelper.create(attribute, attr_value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply the given directives to the stream. | def _apply_directives(stream, directives, ctxt, vars):
if directives:
stream = directives[0](iter(stream), directives[1:], ctxt, **vars)
return stream | [
"def directives(self, directives):\n\n self._directives = directives",
"def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp",
"def move_directives(directives, op):\n try:\n cupsd_lines = op.readlines(CUPSD_CONF)\n except IOError as error:\n raise IOError(error)\n\n lines_to_move = []\n for line in cupsd_lines:\n for name in directives:\n if line.lstrip().startswith(name):\n lines_to_move.append(line)\n cupsd_lines[cupsd_lines.index(line)] = ''\n\n op.write(CUPSD_CONF, 'w', cupsd_lines)\n\n if lines_to_move:\n op.write(CUPSFILES_CONF, 'a',\n '\\n# added by Leapp\\n{}'.format(''.join(lines_to_move)))",
"def get_directives(textlines):\n skiplines = 0\n given_directives = dict()\n\n # Parse textlines for directives\n for line in textlines:\n d = re.search(DIRECTIVE_IDENT_STR + r'(.*)', line)\n if not d:\n # All directives are at the top of the document. Halt when no more\n # directives are encountered.\n break\n\n directive = d.groups()[0]\n arg = None\n if DIRECTIVE_ARG_SEPARATOR in directive:\n directive, arg = directive.split(DIRECTIVE_ARG_SEPARATOR, 1)\n\n if directive not in DIRECTIVES:\n msg = '\"{}\" is not a valid directive'.format(directive)\n raise ParserError(msg)\n\n skiplines += 1\n\n expected_type = DIRECTIVES[directive]\n if expected_type is None:\n # If directive takes no args, treat it as a switch then move on to\n # the next directive\n given_directives[directive] = True\n continue\n \n # If directive takes args, do typechecking\n if arg is None:\n msg = ('The directive \"{}\" requires an argument but none was '\n 'provided (syntax: \"{}{}{}argument\" without the '\n 'surrounding doublequotes)'\n ).format(DIRECTIVE_IDENT_STR, directive,\n DIRECTIVE_ARG_SEPARATOR)\n raise ParserError(msg)\n \n # Cast the given argument (which is a str) to the expected_type\n try:\n given_directives[directive] = expected_type(arg)\n except:\n msg = ('bad argument type to directive {} (expected {}, got {})'\n ).format(directive, expected_type, str(arg))\n raise ParserError(msg)\n\n # \n if not given_directives.get('ROOT_SNIP_ID', None):\n msg = ('You must declare the \"ROOT_SNIP_ID\" directive. This will be '\n 'the snip_id assigned to your root snippet.')\n raise ParserError(msg)\n\n # Handle directives dependencies/implications\n if given_directives.get('OVERWRITE_DB_SNIP_IDS', None):\n if not given_directives.get('REF_NUMS_ARE_SNIP_IDS', None):\n msg = ('The \"OVERWRITE_DB_SNIP_IDS\" directive requires the '\n '\"REF_NUMS_ARE_SNIP_IDS\" to be declared as well.')\n raise ParserError(msg)\n\n # Return directives and the textlines with directive lines pruned out\n return textlines[skiplines:], given_directives",
"def read_stream(self, stream):\n self.reset()\n for token in stream:\n self._buf.append(token)",
"def parse_stream_raw(self, stream, debug=False):\n tokens = tokenize.generate_tokens(stream.readline)\n return self.parse_tokens(tokens, debug)",
"def _update_default_directives(self, **dirs):\n self.directives = {}\n self.directives.update(dirs)",
"def _parse_directives(\n directives_ast: Optional[List[dict]]\n) -> List[\"DirectiveNode\"]:\n if directives_ast:\n return [_parse_directive(directive) for directive in directives_ast]\n return []",
"def process(self, lines):\n for line in lines:\n self._process_line(line)",
"def follow_file(self, path, use_stderr=False, period=0.1):\n stream = self.stderr if use_stderr else self.stdout\n if stream is None:\n raise RuntimeError('Cannot follow file outside decoration context')\n with stream.mux.follow_file(path, stream.decorator, period=period):\n yield",
"def apply(self, options):\n # Iterate through the token possibilities\n tok_list_count = len(options.allowed)\n for i in range(0, tok_list_count):\n tok_list = options.allowed[i]\n for tok in tok_list:\n # If the possibility is a number and matches the argument, check whether the encountered\n # data was all inside the likely range.\n if tok.kind == DsToken.KIND_NUMBER and tok.text in self.directives:\n if (options.num_ranges[i][0] >= self.likely_range[0] and\n options.num_ranges[i][1] <= self.likely_range[1]):\n # Positive reinforcement\n tok.score += self.pos_score\n else:\n # Negative reinforcement\n tok.score += self.neg_score",
"def pos_tag_io(self):\n UTF8Reader = codecs.getreader('utf8')\n input_stream = UTF8Reader(sys.stdin)\n UTF8Writer = codecs.getwriter('utf8')\n output_stream = UTF8Writer(sys.stdout)\n\n for line in input_stream:\n for w in self.tagger.tag(word_tokenize(line.strip())):\n output_stream.write(w[0])\n output_stream.write(\"\\t\")\n output_stream.write(w[1])\n output_stream.write(\"\\n\")\n output_stream.write(\"\\n\")",
"def filter_stream(stream, die, filters=None):\n\n if filters is None:\n filters = FILTERS\n\n # I only loosely understand how to manipulate\n # file descriptors. Useful resources:\n #\n # - https://linuxmeerkat.wordpress.com/2011/12/02/\\\n # file-descriptors-explained/\n # - https://stackoverflow.com/a/24277852\n # - https://stackoverflow.com/a/17954769\n # - https://stackoverflow.com/a/10759061\n\n # Redirect the stream into a pipe,\n # and filter the pipe output\n fd = stream.fileno()\n oldfd = os.dup(fd)\n piper, pipew = os.pipe()\n os.dup2(pipew, fd)\n os.close(pipew)\n\n fin = os.fdopen(piper, 'r')\n fout = os.fdopen(oldfd, 'w')\n\n # Use a queue to pass lines from\n # the input stream to the output\n # stream.\n q = queue.Queue()\n\n # Use a Barrier to synchronise the\n # read, write, and calling threads\n alive = threading.Barrier(3)\n\n # The read thread runs forever,\n # just putting lines in the queue.\n def read_loop():\n alive.wait()\n while True:\n line = fin.readline()\n if line == '':\n break\n q.put(line)\n\n\n def testline(line):\n for pat in filters:\n\n if isinstance(pat, tuple): pat, skip = pat\n else: skip = 1\n\n if re.search(pat, line):\n return skip\n return 0\n\n # The write thread runs until both\n # of the following are true:\n #\n # - there are no lines in the queue\n # - the die event has been set\n def write_loop():\n skip = 0\n alive.wait()\n while True:\n try:\n line = q.get(timeout=0.25)\n except queue.Empty:\n if die.is_set(): break\n else: continue\n\n if skip > 0:\n skip -= 1\n continue\n\n skip = testline(line) - 1\n\n if skip < 0:\n fout.write(line)\n fout.flush()\n\n # Restore the original stream\n try:\n os.close(fd)\n os.close(piper)\n os.dup2(oldfd, fd)\n os.close(oldfd)\n except Exception:\n pass\n\n rt = threading.Thread(target=read_loop, daemon=True)\n wt = threading.Thread(target=write_loop, daemon=True)\n rt.start()\n wt.start()\n\n return rt, wt, alive",
"def _simplify(stream, with_attrs=False):\r\n def _generate():\r\n for mark, (kind, data, pos) in stream:\r\n if kind is START:\r\n if with_attrs:\r\n data = (unicode(data[0]), dict((unicode(k), v)\r\n for k, v in data[1]))\r\n else:\r\n data = unicode(data[0])\r\n elif kind is END:\r\n data = unicode(data)\r\n elif kind is ATTR:\r\n kind = ATTR\r\n data = dict((unicode(k), v) for k, v in data[1])\r\n yield mark, kind, data\r\n return list(_generate())",
"def _substitute_stream_ ( klass ) :\n index = klass.find('>>')\n while -1 != index :\n klass = klass.replace('>>','> >')\n index = klass.find( '>>' )\n index = klass.find(' ')\n while -1 != index :\n klass = klass.replace(' ',' ')\n index = klass.find( ' ' )\n return klass",
"def processLines(self, lines):\n\n for line in lines:\n if len(line) == 0:\n continue\n\n if line[-1] == \"\\r\":\n line = line[:-1]\n\n # Automatically make P10 protocols have their lines parsed\n # differently\n lineobj = IRCLine(line, self.protocol.p10)\n\n #debug output\n if self.config[\"etc\"][\"debug\"]:\n self.log(line, \"<<<\")\n\n if lineobj.verb == \"ERROR\":\n #If ERROR is sent, it's already fatal.\n raise IOError\n\n #Handle server commands\n try:\n for impl in self.s2scommands[lineobj.verb]:\n try:\n impl(cod, lineobj)\n except KeyError as e:\n continue\n except Exception as e:\n if not self.config[\"etc\"][\"production\"]:\n self.servicesLog(\"%s %s %s\" %(type(e), e.message, lineobj))\n traceback.print_exc(file=sys.stdout)\n continue\n except KeyError:\n pass",
"def fast_forward(self,removed_instructions):\n for instruction in removed_instructions: \n for group in instruction[\"groups\"]: \n if group.get(\"transfer\"):\n fromLocs = []\n toLocs = []\n volumes = []\n changeSettings = []\n for transfer in group[\"transfer\"]:\n pp.pprint(transfer)\n fromLocs.append(transfer[\"from\"].pop(\"locName\"))\n toLocs.append(transfer[\"to\"].pop(\"locName\"))\n volumes.append(transfer.pop(\"volume\"))\n changeSettings.append(transfer)\n self.protocol.add_transfer_to_stream(fromLocs,toLocs,volumes,changeSettings) \n elif group.get(\"mix\"):\n mixLocs = []\n volumes = []\n changeSettings = []\n for mix in group[\"mix\"]:\n pp.pprint(mix)\n mixLocs.append(mix.pop(\"locName\"))\n volumes.append(mix.pop(\"volume\"))\n changeSettings.append(mix)\n self.protocol.add_mix_to_stream(mixLocs,volumes,changeSettings)\n elif group.get(\"run\"):\n # cycler\n name = group[\"run\"].pop(\"name\")\n changeSettings = group[\"run\"] \n self.protocol.add_cycler_group(name,changeSettings)\n if self.protocol.instruction_stream[\"cmds\"]:\n self.protocol.end_stream()",
"def pipe(data, *funcs):\n for func in funcs:\n data = func(data)\n return data",
"def emit(self, ctx, modules, fd):\n return",
"def _mux(docs: list, process_stdin: IO, q: queue.Queue):\n for i, doc in enumerate(docs):\n count = 0\n sents = doc.strip().split('\\n')\n for line in sents:\n line = line + '\\n'\n process_stdin.write(line.encode('utf-8'))\n count += 1\n q.put((i, count))\n q.put(None) #poison\n process_stdin.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute the given `Suite` object. | def _exec_suite(suite, ctxt, vars=None):
if vars:
ctxt.push(vars)
ctxt.push({})
suite.execute(ctxt)
if vars:
top = ctxt.pop()
ctxt.pop()
ctxt.frames[0].update(top) | [
"def run_suite(self, suite, **kwargs):\n return PyunitConsumer(\n verbosity=self.verbosity,\n failfast=self.failfast,\n ).run(suite)",
"def run_suite(self, suite, **kwargs):\n options = {\n 'verbosity': getattr(settings, 'TEST_OUTPUT_VERBOSE', False),\n 'descriptions': getattr(settings, 'TEST_OUTPUT_DESCRIPTIONS', False),\n 'output': getattr(settings, 'TEST_OUTPUT_DIR', '.'),\n 'outsuffix': 'result', # No date-based file suffix\n }\n return xmlrunner.XMLTestRunner(**options).run(suite)",
"def handle_suite(suite):\n suite_name = suite.getAttribute(\"name\")\n cases = suite.getElementsByTagName(\"testcase\")\n for case in cases:\n handle_testcase(case, suite_name)",
"def execute_random_suite(project):\n test_name = TestUtils.random_string()\n tests = [test_name]\n for t in tests:\n TestUtils.create_test(project, name=t)\n suite_name = TestUtils.random_string()\n TestUtils.create_suite(project, name=suite_name, tests=tests)\n execution = TestUtils.execute_suite(project, suite_name)\n execution['tests'] = tests\n return execution",
"def enterSuite(self, suite, result=None):\n try:\n return self._announceSuiteStart(suite, result=result)\n finally:\n self.level += 1",
"def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:\n raise NotImplementedError()",
"def run_simulation(self):\n\n # create appropriate object\n simulation = self.all_sims[self.testcase](self.testcase, self.params.paramfile, self.root,\n self.plots, self.movies)\n\n simulation.run_simulation()\n self.finishedTestcase()",
"def summariseSuiteResult(self, suite):",
"def run(self, obj, env):\n if not obj.module_begin(self):\n obj.module_skip(self, None)\n return\n with self.context():\n try:\n for test in self.load(env):\n test.run(obj)\n except TestException, ex:\n if not ex.module:\n raise\n if ex.skip:\n obj.module_skip(self, ex.get())\n else:\n obj.module_fail(self, ex.get())\n except KeyboardInterrupt:\n raise\n except:\n reason = traceback.format_exc()\n obj.module_fail(self, reason)\n else:\n obj.module_pass(self)",
"def run_tests(self):\n self.load_tests()\n for name in self.runtests:\n self.logger.info(\"running %s\" % name)\n try:\n self.tests[name].module.run(self)\n except Exception, e:\n self.logger.error(\"ERR: %s\" % e)",
"def run(self):\n logging.info(\"Running benchmark suite...\")\n for benchmark in self._benchmarks:\n result = self.run_method(benchmark)\n print(result)\n if self._table is None:\n self._table = Table([result])\n else:\n self._table.update([result])\n self.write_results()\n self.host_results()",
"def run(self, result=None):\n self.install_fixtures()\n super(TestCase, self).run(result)",
"def __ExecuteSummarize(self):\n\n # If no results file is specified, use a default value.\n if len(self.__arguments) == 0:\n results_path = \"results.qmr\"\n else:\n results_path = self.__arguments[0]\n\n # The remaining arguments, if any, are test and suite IDs.\n id_arguments = self.__arguments[1:]\n # Are there any?\n if len(id_arguments) > 0:\n filter = 1\n # Expand arguments into test IDs.\n try:\n test_ids, suite_ids \\\n = self.GetDatabase().ExpandIds(id_arguments)\n except (qm.test.database.NoSuchTestError,\n qm.test.database.NoSuchSuiteError), exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n except ValueError, exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n else:\n # No IDs specified. Show all test and resource results.\n # Don't show any results by test suite though.\n filter = 0\n suite_ids = []\n\n # Get an iterator over the results.\n try:\n results = base.load_results(open(results_path, \"rb\"),\n self.GetDatabase())\n except (IOError, xml.sax.SAXException), exception:\n raise QMException, \\\n qm.error(\"invalid results file\",\n path=results_path,\n problem=str(exception))\n\n any_unexpected_outcomes = 0\n\n # Compute the list of result streams to which output should be\n # written. Results path only used for HTML/NexTest\n streams = self.__GetResultStreams(results_path)\n \n # Send the annotations through.\n for s in streams:\n s.WriteAllAnnotations(results.GetAnnotations())\n\n # Get the expected outcomes.\n outcomes = self.__GetExpectedOutcomes()\n\n # Our filtering function. Should use itertools.ifilter, once\n # we can depend on having Python 2.3.\n def good(r):\n return r.GetKind() == Result.TEST \\\n and r.GetId() in test_ids\n\n # Simulate the events that would have occurred during an\n # actual test run.\n for r in results:\n if not filter or good(r):\n for s in streams:\n s.WriteResult(r)\n if (r.GetOutcome()\n != outcomes.get(r.GetId(), Result.PASS)):\n any_unexpected_outcomes = 1\n for s in streams:\n s.Summarize()\n\n if any_unexpected_outcomes:\n return 1\n \n return 0",
"def testExecute(self):\n l = TestLayer(\"test2\")\n self.assertFalse(l.executeSet)\n l.execute(1)\n self.assertTrue(l.executeSet)",
"def unpack_test_suite(self, ret_val, test_suite, level):\n if level not in (\"case\", \"method\"):\n raise ValueError(\"level must be either case or method\")\n\n for test in test_suite:\n if self.is_not_suite(test):\n # Case should be suite if it has multiple methods\n t_name = '.'.join(test.id().split('.')[:-1]) if level == \"case\" else test.id()\n if isinstance(test, doctest.DocTestCase):\n t_name = f'doctest.{t_name}'\n if level == \"case\":\n if t_name not in ret_val:\n suite = unittest.TestSuite()\n ret_val[t_name] = suite\n ret_val[t_name].addTest(test)\n else:\n ret_val[t_name] = unittest.TestSuite([test])\n else:\n ret_val = self.unpack_test_suite(ret_val, test, level)\n return ret_val",
"def run(self, **kwargs):\n logger.info(\"Setup \" + self.__class__.__name__)\n self.setup(**kwargs)\n self._do_outer_iteration_stage(**kwargs)",
"def run(self):\n for test in self.mTests:\n self.sendRequest(\"Running\")\n\n # execute the test\n test.runTest()\n\n # flush the output buffer (so that the test information will really\n # be written in the output file)\n sys.stdout.flush()\n\n self.mTestsExecuted = self.mTestsExecuted + 1\n\n return True",
"def runTests(logname, getSuite, args):\n sel = \"unit\"\n vrb = 1\n if len(args) > 1:\n sel = args[1]\n if sel == \"xml\":\n # Run with XML test output for use in Jenkins environment\n if not junitxml_present:\n print(\"junitxml module not available for XML test output\")\n raise ValueError(\"junitxml module not available for XML test output\")\n with open('xmlresults.xml', 'w') as report:\n result = junitxml.JUnitXmlResult(report)\n result.startTestRun()\n try:\n getSuite(select=\"unit\").run(result)\n finally:\n result.stopTestRun()\n else:\n if sel[0:3] in [\"uni\",\"com\",\"all\",\"int\",\"pen\"]:\n logging.basicConfig(level=logging.WARNING)\n if sel[0:3] in [\"com\",\"all\"]: vrb = 2\n else:\n # Run single test with elevated logging to file via new handler\n logging.basicConfig(level=logging.DEBUG)\n # Enable debug logging to a file\n fileloghandler = logging.FileHandler(logname,\"w\")\n fileloghandler.setLevel(logging.DEBUG)\n # Use this formatter for shorter log records\n ###filelogformatter = logging.Formatter('%(levelname)s %(message)s', \"%H:%M:%S\")\n # Use this formatter to display timing information:\n filelogformatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s', \"%H:%M:%S\")\n fileloghandler.setFormatter(filelogformatter)\n logging.getLogger('').addHandler(fileloghandler)\n vrb = 2\n runner = unittest.TextTestRunner(verbosity=vrb)\n tests = getSuite(select=sel)\n if tests: runner.run(tests)\n return",
"def runtest():\n pwd = os.path.abspath(os.path.dirname(__file__))\n response = json.loads(request.body.read())\n testCases = (str(response['testCases'])).split(',')\n testCases.pop()\n _runner = (str(response['Runner']))\n _buildName = (str(response['buildName']))\n _userId = (str(response['userId']))\n _testPlanId = (str(response['testPlanId']))\n totalTestCases = len(testCases)\n if _runner == 'HTMLTestRunner':\n if totalTestCases == 0:\n return \"Select testcases to run..\"\n else:\n shutil.rmtree(pwd+'/Output/')\n os.mkdir(pwd+'/Output/')\n listOfTestSuiteNames = getTestSuiteNames(testCases)\n for testSuite in listOfTestSuiteNames:\n suite = unittest.TestSuite()\n for testCase in testCases:\n testSuiteName = ((str(testCase).split(' '))[0]).split('.')[-1]\n if testSuite == testSuiteName:\n _testSuiteName = ((str(testCase)).split(' ')[0])[1:]\n classObj = my_import(_testSuiteName)\n _testCaseName = ((((str(testCase)).split(' ')[1])[:-1]).split('='))[1]\n suite.addTest(classObj(_testCaseName))\n _testModuleName = testSuiteName#((str(testSuite).split(\".\")[-1])[0:-2]) \n _output = open(pwd+\"/Output/\"+_testModuleName+\".html\",\"w\")\n HTMLRunner = HTMLTestRunner.HTMLTestRunner(stream=_output,title=_testModuleName,description=\"Test case's for the module \"+_testModuleName)\n HTMLRunner.run(suite)\n subprocess.Popen(['python',pwd+\"/ExtLib/Statistics.py\",\"Test Automation\",pwd+\"/Output/\"])\n IndexMaker = HTMLIndexCreator.HTMLIndexCreator(pwd+\"/Output/\")\n IndexMaker.makeHTMLIndexFile() \n return \"Test completed.....\"\n else:\n return \"The specified runner does not exist.\"",
"def execute(self) -> ExecutionResult:\n document_location = self.__extract_document_location(self.__model.open_statement)\n try:\n webdriver_instance = self._start_webdriver_instance(url=document_location)\n\n execution_result = self.__execute_internal(webdriver_instance=webdriver_instance)\n\n wash_result = ExecutionResult(\n parent=None,\n start_url=document_location,\n current_url=webdriver_instance.current_url,\n execution_result=execution_result)\n\n if self.__debug:\n wash_result.add_attributes(**{'script': self.__script})\n\n return wash_result\n except Exception:\n raise\n finally:\n webdriver_instance.quit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a key for the given directive class that should be used to sort it among other directives on the same `SUB` event. The default implementation simply returns the index of the directive in the `directives` list. | def get_directive_index(self, dir_cls):
if dir_cls in self._dir_order:
return self._dir_order.index(dir_cls)
return len(self._dir_order) | [
"def key_from_class_name(class_name):\n assert class_name in Transaction._class_names\n return Transaction._class_names[class_name]",
"def class_to_idx(self):\n\n return {cat: i for i, cat in enumerate(self.CLASSES)}",
"def match_class(self, key):\n self._keystrokes.append(key)\n candidate = ''.join(self._keystrokes)\n matches = [c for c in self._classes if c.startswith(candidate.rstrip())]\n\n # unambiguous match\n if len(matches) == 1:\n self._keystrokes.clear()\n return matches[0]\n # potentially ambiguous match\n elif len(matches) > 1:\n # unsolvable without a space (e.g. 'ba' -> ['bar', 'baz'])\n if not candidate.endswith(' '):\n return ''\n # solvable if ending in space and a full match (e.g. 'bar ' -> ['bar', 'bar1'], choose 'bar')\n elif len(candidate) in [len(m.rstrip()) for m in matches]:\n self._keystrokes.clear()\n return min(matches, key=len)\n # not solvable yet\n else:\n return ''\n # no matches\n else:\n self._keystrokes.clear()\n\n return ''",
"def _unit_key(self, unit):\r\n cls = unit.__class__\r\n ident = tuple([getattr(unit, name) for name in self._keyattrs[cls]])\r\n return \"%s:%s:%s\" % (self.name, cls.__name__, self.hash(ident))",
"def get_index(usage_key, children):\n children = [str(child) for child in children]\n return children.index(usage_key)",
"def java_class_params_to_key(package: str, class_name: str):\n return f'{package}.{class_name}'",
"def _index_key_for(self, att, value=None):\r\n if value is None:\r\n value = getattr(self, att)\r\n if callable(value):\r\n value = value()\r\n if value is None:\r\n return None\r\n if att not in self.lists:\r\n return self._get_index_key_for_non_list_attr(att, value)\r\n else:\r\n return self._tuple_for_index_key_attr_list(att, value)",
"def lookup_key(self, index: list) -> \"Token\":\n token = self.lookup(index[:-1])\n return token._get_key_token(index[-1])",
"def _get_class_name(self, class_idx):\n class_category = self.config['class_idx_to_category'][class_idx] # Get the Class ID based on the index.\n return dict(self.config['category_names'])[class_category] if class_idx is not None else None",
"def get_class_data_type_key(class_name: str) -> str:\n data_type_key = ''\n for letter in class_name:\n if 'A' <= letter <= 'Z':\n data_type_key += letter\n\n return data_type_key.lower()",
"def keyindex(self, category):\n return self._keyindex[category]",
"def getClassDefByClass(self, class_):\n return self._mapped_classes.get(class_, None)",
"def sort_key(self) -> \"Attribute\":\n return self._values.get(\"sort_key\")",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def _get_idx_to_class(class_to_idx):\n lst = [None] * len(class_to_idx)\n for key, value in class_to_idx.items():\n lst[value] = key\n return lst",
"def cmp_to_key(mycmp):\n\n class ComparatorClass:\n \"\"\"A class that implements comparison methods.\"\"\"\n\n def __init__(self, obj, *args):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n return ComparatorClass",
"def get_named_comparator(self, key_path: str):\n case_ignored = self._config.matches(DeltaConfig.KeyCaseIgnored)\n for k in self.named_comparators:\n if key_matches(k, key_path, case_ignored):\n return self.named_comparators[k]\n return None",
"def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name",
"def getKey(self, element):\r\n return element._key",
"def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n\n peer = self.peers[0]\n try:\n sort_status = self.status_key[peer.status]\n except KeyError:\n sort_status = len(self.status_key) + 1\n sort_name = peer.alias\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Internal stream filter that performs inclusion of external template files. | def _include(self, stream, ctxt, **vars):
from genshi.template.loader import TemplateNotFound
for event in stream:
if event[0] is INCLUDE:
href, cls, fallback = event[1]
if not isinstance(href, basestring):
parts = []
for subkind, subdata, subpos in self._flatten(href, ctxt,
**vars):
if subkind is TEXT:
parts.append(subdata)
href = ''.join([x for x in parts if x is not None])
try:
tmpl = self.loader.load(href, relative_to=event[2][0],
cls=cls or self.__class__)
for event in tmpl.generate(ctxt, **vars):
yield event
except TemplateNotFound:
if fallback is None:
raise
for filter_ in self.filters:
fallback = filter_(iter(fallback), ctxt, **vars)
for event in fallback:
yield event
else:
yield event | [
"def _include(self, stream, ctxt, **vars):\r\n from genshi.template.loader import TemplateNotFound\r\n\r\n for event in stream:\r\n if event[0] is INCLUDE:\r\n href, cls, fallback = event[1]\r\n if not isinstance(href, str):\r\n parts = []\r\n for subkind, subdata, subpos in self._flatten(href, ctxt,\r\n **vars):\r\n if subkind is TEXT:\r\n parts.append(subdata)\r\n href = ''.join([x for x in parts if x is not None])\r\n try:\r\n tmpl = self.loader.load(href, relative_to=event[2][0],\r\n cls=cls or self.__class__)\r\n for event in tmpl.generate(ctxt, **vars):\r\n yield event\r\n except TemplateNotFound:\r\n if fallback is None:\r\n raise\r\n for filter_ in self.filters:\r\n fallback = filter_(iter(fallback), ctxt, **vars)\r\n for event in fallback:\r\n yield event\r\n else:\r\n yield event",
"def includebase(parser, tocken):\r\n bits = tocken.split_contents()\r\n mitemplate = bits[1]\r\n mitemplate2 = _prefijocomillas(mitemplate, templateCTX.directorio_base + \"/\")\r\n tocken.contents = tocken.contents.replace(mitemplate, mitemplate2)\r\n return template.loader_tags.do_include(parser, tocken)",
"def tryinclude(parser, tocken):\r\n return WrapperIncludeNodeNoError(template.loader_tags.do_include(parser, tocken))",
"def do_include_raw(parser, token):\n bits = token.split_contents()\n if len(bits) != 2:\n raise TemplateSyntaxError, \"%r tag takes one argument: the name of the template to be included\" % bits[0]\n\n template_name = bits[1]\n if template_name[0] in ('\"', \"'\") and template_name[-1] == template_name[0]:\n template_name = template_name[1:-1]\n\n source, path = _loader.load_template_source(template_name)\n\n return template.TextNode(source)",
"def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()",
"def test_include_in_for_tag_tag(self):\n return self._test_template_tag(\"include/includer.2.html\")",
"def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)",
"def _load_template(self, template_file):\n pass",
"def __preprocess(self, infile, outfile):\n with open(outfile, \"w\") as _outfile:\n _outfile.write(textwrap.dedent(\"\"\"\\\n /*\n * This file is dynamically generated and ignored by Git.\n * DO NOT MAKE CHANGES HERE. Instead, go edit its template:\n * %s\n */\n \"\"\" % infile))\n _outfile.write(Template(filename=str(infile)).render(env=self.__context()))",
"def templateFilter(func):\n jinja2_env.filters[func.__name__] = func",
"def source_file_filter(input_api):\n files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) + [\n r'.+/bootstrap/.*', # third party\n r'.+/jquery/.*', # third party\n r'.+/pb\\.discovery\\.go$',\n r'.+/pb\\.discovery_test\\.go$',\n r'.+\\.pb\\.go$',\n r'.+\\.pb\\.validate\\.go$',\n r'.+\\.pb_test\\.go$',\n r'.+_dec\\.go$',\n r'.+_mux\\.go$',\n r'.+_string\\.go$',\n r'.+gae\\.py$', # symlinks from outside\n r'common/api/internal/gensupport/.*', # third party\n r'common/goroutine/goroutine_id.go',\n r'common/terminal/.*', # third party\n r'server/static/bower_components/.*', # third party\n r'server/static/upload/bower_components/.*', # third party\n ]\n files_to_check = list(input_api.DEFAULT_FILES_TO_CHECK) + [\n r'.+\\.go$',\n ]\n return lambda x: input_api.FilterSourceFile(\n x, files_to_check=files_to_check, files_to_skip=files_to_skip)",
"def __init__(self):\n self.template_files = {\n 'CCDA': CCDA_TPL_FILENAME,\n 'FHIR-XML': FHIR_TPL_FILENAME,\n 'FHIR-JSON': FHIR_TPL_FILENAME\n }\n self.environment = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATES_DIR))\n\n # load filters defined in custom_filters\n for a in dir(custom_filters):\n if isinstance(custom_filters.__dict__.get(a), types.FunctionType):\n self.environment.filters[a] = custom_filters.__dict__.get(a)\n\n self.templates = {}\n for key in self.template_files:\n self.templates[key] = self.environment.get_template(self.template_files[key])",
"def register_pre_resources_template(self, template):\n pass",
"def _interpolate_templates():\r\n if not os.path.exists(env.rcfile):\r\n raise Exception(\"%(rcfile)s does not exist. See rcfile.sample and run fab --config=rcfile.name <commands>!\" % env)\r\n\r\n interpolated_files = []\r\n # Get a list of all template files in /etc/ that we need to interpolate\r\n template_paths = []\r\n template_paths.extend(env.template_paths)\r\n template_paths.append(env.local_etc_path)\r\n\r\n for template_path in template_paths: \r\n for root, dirs, files in os.walk(template_path):\r\n for name in files:\r\n infilename = os.path.join(root, name)\r\n if re.search('.tmpl$', infilename):\r\n debug(\"Processing template file %s\" % infilename)\r\n \r\n outfilename = os.path.splitext(infilename)[0]\r\n _interpolate_file(infilename, outfilename)\r\n # infile = open(infilename, 'r')\r\n # outfile = open(outfilename, 'w')\r\n # try:\r\n # outfile.write(infile.read() % env)\r\n # except TypeError, e:\r\n # if re.search(\"not enough arguments for format string\", e[0]):\r\n # # We can safely ignore this since it means that there's nothing to interpolate\r\n # print e[0]\r\n # print \"Continuing by using the template file (%s) as the target (ie no interpolation)\" % infilename\r\n # # Remember that we have to go back to the top due to read() being at eof\r\n # infile.seek(0)\r\n # outfile.write(infile.read())\r\n # else:\r\n # raise\r\n # \r\n # outfile.close()\r\n # infile.close()\r\n interpolated_files.append(outfilename)\r\n \r\n return interpolated_files",
"def _handleCheetahInclude(self, srcArg, trans=None,\n includeFrom='file', raw=False):\n _includeID = srcArg\n if _includeID not in self._CHEETAH__cheetahIncludes:\n if not raw:\n if includeFrom == 'file':\n source = None\n if isinstance(srcArg, string_type):\n if hasattr(self, 'serverSidePath'):\n file = path = self.serverSidePath(srcArg)\n else:\n file = path = os.path.normpath(srcArg)\n else:\n file = srcArg # # a file-like object\n else:\n source = srcArg\n file = None\n # @@TR: might want to provide some syntax for specifying the\n # Template class to be used for compilation so compilerSettings\n # can be changed.\n compiler = \\\n self._getTemplateAPIClassForIncludeDirectiveCompilation(\n source, file)\n nestedTemplateClass = compiler.compile(source=source,\n file=file)\n nestedTemplate = nestedTemplateClass(\n _preBuiltSearchList=self.searchList(),\n _globalSetVars=self._CHEETAH__globalSetVars)\n # Set the inner template filters to the initial filter of the\n # outer template:\n # this is the only really safe way to use\n # filter='WebSafe'.\n nestedTemplate._CHEETAH__initialFilter = \\\n self._CHEETAH__initialFilter\n nestedTemplate._CHEETAH__currentFilter = \\\n self._CHEETAH__initialFilter\n self._CHEETAH__cheetahIncludes[_includeID] = nestedTemplate\n else:\n if includeFrom == 'file':\n path = self.serverSidePath(srcArg)\n self._CHEETAH__cheetahIncludes[_includeID] = \\\n self.getFileContents(path)\n else:\n self._CHEETAH__cheetahIncludes[_includeID] = srcArg\n ##\n if not raw:\n self._CHEETAH__cheetahIncludes[_includeID].respond(trans)\n else:\n trans.response().write(self._CHEETAH__cheetahIncludes[_includeID])",
"def add_external(self):\n if self.external_tpl_in_pairs is not None:\n if not isinstance(self.external_tpl_in_pairs,list):\n external_tpl_in_pairs = [self.external_tpl_in_pairs]\n for tpl_file,in_file in self.external_tpl_in_pairs:\n if not os.path.exists(tpl_file):\n self.logger.lraise(\"couldn't find external tpl file:{0}\".\\\n format(tpl_file))\n self.logger.statement(\"external tpl:{0}\".format(tpl_file))\n shutil.copy2(tpl_file,os.path.join(self.m.model_ws,\n os.path.split(tpl_file)[-1]))\n if os.path.exists(in_file):\n shutil.copy2(in_file,os.path.join(self.m.model_ws,\n os.path.split(in_file)[-1]))\n\n if self.external_ins_out_pairs is not None:\n if not isinstance(self.external_ins_out_pairs,list):\n external_ins_out_pairs = [self.external_ins_out_pairs]\n for ins_file,out_file in self.external_ins_out_pairs:\n if not os.path.exists(ins_file):\n self.logger.lraise(\"couldn't find external ins file:{0}\".\\\n format(ins_file))\n self.logger.statement(\"external ins:{0}\".format(ins_file))\n shutil.copy2(ins_file,os.path.join(self.m.model_ws,\n os.path.split(ins_file)[-1]))\n if os.path.exists(out_file):\n shutil.copy2(out_file,os.path.join(self.m.model_ws,\n os.path.split(out_file)[-1]))\n self.logger.warn(\"obs listed in {0} will have values listed in {1}\"\n .format(ins_file,out_file))\n else:\n self.logger.warn(\"obs listed in {0} will have generic values\")",
"def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n search_path = self.search_path\r\n\r\n # Make the filename relative to the template file its being loaded\r\n # from, but only if that file is specified as a relative path, or no\r\n # search path has been set up\r\n if relative_to and (not search_path or not os.path.isabs(relative_to)):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n\r\n filename = os.path.normpath(filename)\r\n cachekey = filename\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[cachekey]\r\n if not self.auto_reload:\r\n return tmpl\r\n uptodate = self._uptodate[cachekey]\r\n if uptodate is not None and uptodate():\r\n return tmpl\r\n except (KeyError, OSError):\r\n pass\r\n\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = list(search_path) + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for loadfunc in search_path:\r\n if isinstance(loadfunc, str):\r\n loadfunc = directory(loadfunc)\r\n try:\r\n filepath, filename, fileobj, uptodate = loadfunc(filename)\r\n except IOError:\r\n continue\r\n else:\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested includes work properly without a\r\n # search path\r\n filename = filepath\r\n tmpl = self._instantiate(cls, fileobj, filepath,\r\n filename, encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[cachekey] = tmpl\r\n self._uptodate[cachekey] = uptodate\r\n finally:\r\n if hasattr(fileobj, 'close'):\r\n fileobj.close()\r\n return tmpl\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the given expression, raising a useful error message when a syntax error is encountered. | def _parse_expr(cls, expr, template, lineno=-1, offset=-1):
try:
return expr and Expression(expr, template.filepath, lineno,
lookup=template.lookup) or None
except SyntaxError, err:
err.msg += ' in expression "%s" of "%s" directive' % (expr,
cls.tagname)
raise TemplateSyntaxError(err, template.filepath, lineno,
offset + (err.offset or 0)) | [
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def test_syntax(self):\n lisp = self.lisp\n for expr in [\n \"(\",\n \"(()\",\n \")\",\n \"())\",\n \".)\"\n ]:\n self.assertRaises(ParseError, lisp.readLisp, expr)",
"def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret",
"def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = 
self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr",
"def validate_expression(self, expression):\n\t\t#return self.evaluate(expression, 0, 2)\n\t\tvars = set(self.get_column_names(True, True)) | set(self.variables.keys())\n\t\tfuncs = set(expression_namespace.keys())\n\t\treturn vaex.expresso.validate_expression(expression, vars, funcs)",
"def test_invalid(self):\n\n expression = \"- 1 + 3\" # Invalid syntax\n\n self.assertNotEqual(eval(expression), PrefixOperation(expression).evaluate_expression())",
"def test_expression_invalid_ordering(self) -> None:\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"+6\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"+5+\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"6/\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"1 + 2 - + 3\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\")(\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"(((2))\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"(+5\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"6/)\"))",
"def parse(self, sourceStr):\n self.completionMessage = \"No errors\"\n self.parseSuccessful = True\n self.scanner = Scanner(sourceStr)\n self.tree = self.expression()\n self.accept(self.scanner.get(), Token.EOE,\n \"symbol after end of expression\")",
"def syntax_error(self, msg):\n\n # Despite what the Python documentation claims, the ``line`` attribute of the\n # TokenInfo contains the physical, not logical line, i.e. what we need here,\n exc = SyntaxError(msg, (self.filename, self.tokens[self.pos].start[0],\n self.tokens[self.pos].start[1], self.tokens[self.pos].line))\n\n raise exc",
"def test_syntaxerror():\n inp = '@article{name}'\n with pytest.raises(pyparsing.ParseException):\n parse_entry(inp)",
"def parser(string): \n#1 we tokenize the expression, thanks to the lexer and the Token constructor\n# the names are mapped thanks to the token_map dictionary\n tokens = [Token(token_map.get(x, 'ATOM'), x) for x in lex(string)]\n try:\n (e, i) = parse_iff(tokens)\n if not i:\n return e\n else:\n raise Exception('Unparsed input')\n except:\n raise",
"def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])",
"def parse_expression(expression: str) -> List[str]:\n stream = InputStream(expression)\n lexer = RLexer(stream)\n tokens = CommonTokenStream(lexer)\n\n tokens.fill()\n\n filter_ = RFilter(tokens)\n filter_.stream()\n tokens.reset()\n\n parser = RParser(tokens)\n tree = parser.prog()\n\n progListener = ProgListener(tokens)\n walker = ParseTreeWalker()\n walker.walk(progListener, tree)\n\n return progListener.exps",
"def parse(self, expression_str) -> Expression:\n tree = self._parser.parse(expression_str)\n return self._transformer.transform(tree)",
"def test_unbalanced_parens(self):\n with self.assertRaises(SyntacticError):\n tokens = TokenStream(StringIO(\"(a (b c (d e (f (g))\"))\n lexer = Lexer()\n lexer.parse(tokens)",
"def parse_eval(self, expr, lineno=1):\n\n if isinstance(expr, unicode):\n expr = renpy.python.escape_unicode(expr)\n\n try:\n rv = ast.parse(expr, 'eval').body[0].value\n except SyntaxError as e:\n raise renpy.parser.ParseError(\n filename,\n lineno + e[1][1] - 1,\n \"Syntax error while parsing python expression.\",\n e[1][3],\n e[1][2])\n\n increment_lineno(rv, lineno-1)\n\n return rv",
"def syntaxError(self, filename, msg, lineno, offset, text):\r\n line = text.splitlines()[-1]\r\n if offset is not None:\r\n offset = offset - (len(text) - len(line))\r\n self._stderr.write('%s:%d:%d: %s\\n' %\r\n (filename, lineno, offset + 1, msg))\r\n else:\r\n self._stderr.write('%s:%d: %s\\n' % (filename, lineno, msg))\r\n self._stderr.write(line)\r\n self._stderr.write('\\n')\r\n if offset is not None:\r\n self._stderr.write(re.sub(r'\\S', ' ', line[:offset]) +\r\n \"^\\n\")",
"def testParseWithBraces(self):\n parser = expression_parser.EventFilterExpressionParser()\n\n expression = parser.Parse('(a is 3)')\n self.assertIsNotNone(expression)\n\n # Need to close braces.\n with self.assertRaises(errors.ParseError):\n parser.Parse('(a is 3')\n\n # Need to open braces to close them.\n with self.assertRaises(errors.ParseError):\n parser.Parse('a is 3)')",
"def test_except_infer_pars(self):\n src = Source([])\n self.assertRaises(RuntimeError, src.set_expression, 'a+a')\n self.assertRaises(RuntimeError, src.set_expression, '2*a')\n self.assertRaises(ValueError, src.set_expression, '2*a', ['a'])\n self.assertRaises(ValueError, src.set_expression, '2*a', grads=['2'])\n self.assertRaises(ValueError, src.set_expression, 'a*b', ['a', 'b'], ['b'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes the AST representation of an assignment, and returns a function that applies the assignment of a given value to a dictionary. | def _assignment(ast):
def _names(node):
if isinstance(node, _ast.Tuple):
return tuple([_names(child) for child in node.elts])
elif isinstance(node, _ast.Name):
return node.id
def _assign(data, value, names=_names(ast)):
if type(names) is tuple:
for idx in range(len(names)):
_assign(data, value[idx], names[idx])
else:
data[names] = value
return _assign | [
"def assignment(self, symbol_table):\n symbol_table[self.key] = self.value.evaluate(self.value, symbol_table)",
"def eval_assignment(assignment, caller_parameters, caller_arguments, motif_node_dict, local_dict):\n\tif type(assignment.rvalue).__name__ == 'FuncCall':\n\t\tmotif_node, tree_node = eval_function_call(assignment.rvalue, caller_parameters, caller_arguments, motif_node_dict, local_dict)\n\t\t# it is possible that a function being evaluated returns a non-None MotifNode that need not to be assigned to the LHS variable.\n\t\t# But if the LHS variable is in @local_dict, then the RHS function must return a non-None MotifNode.\n\t\t# consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n\t\tif (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in local_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in local_dict):\n\t\t\tif not motif_node:\n\t\t\t\tprint('\\33[101m' + '[error][eval_assignment/provenance]: ' + assignment.lvalue.name + ' is in the local dictionary. MotifNode should not be None.\\033[0m')\n\t\t\t\texit(1)\n\t\t\telse:\n\t\t\t\tlocal_dict[assignment.lvalue.name].append(motif_node)\n\t\treturn tree_node\n\t# In a case where a provenance node was declared but then assigned or reassigned. For example:\n\t# struct provenance *tprov;\n\t# ...\n\t# tprov = t->provenance;\n\t# tprov must then be in the motif_node_dict.\n\telif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in local_dict:\n\t\t# we can only infer its type from the name of the variable\n\t\tmotif_node = create_motif_node(assignment.lvalue.name)\n\t\tlocal_dict[assignment.lvalue.name].append(motif_node)\n\t\treturn None\n\telif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in local_dict:\n\t\t# similar case as the previous one, except that we have: *tprov = ...\n\t\t# we can only infer its type from the name of the variable\n\t\tmotif_node = create_motif_node(assignment.lvalue.expr.name)\n\t\tlocal_dict[assignment.lvalue.expr.name].append(motif_node)\n\t\treturn None\n\telse:\n\t\t#######################################################\n\t\t# We will consider other conditions if we ever see them\n\t\t# POSSIBLE CODE HERE.\n\t\t#######################################################\n\t\treturn None",
"def ast_evaluate_dict_values(edict):\n returndict = {}\n for key, value in edict.items():\n if isinstance(value, dict):\n value = ast_evaluate_dict_values(value)\n if isinstance(value, str): # Only evaluate str values all other must be correct\n try:\n value = eval(value)\n except Exception as err:\n log.debug(\n \"Could not interpret '{}' in key '{}' as a valid object. Stays as is! Error: {}\".format(\n value, key, err\n )\n )\n\n returndict[key] = value\n return returndict",
"def map_values(function, dictionary):\n return dict((k, function(dictionary[k])) for k in dictionary)",
"def eval(self,opseq,valueDict):\n for (dstName,funName,inputNames) in opseq:\n inputValues = map(lambda a:valueDict[a], inputNames)\n fun = EVAL_FUNS[funName] \n result = fun(*inputValues)\n valueDict[dstName] = result\n return valueDict",
"def map(func):\n # text is an alias for basestring on Python 2, which cannot be\n # instantiated and therefore can't be used to transform the value,\n # so we force to unicode instead.\n if is_py2 and text == func:\n func = unicode\n\n def expand_kv(kv):\n return func(*kv)\n\n def map_values(value):\n cls = type(value)\n if isinstance(value, dict):\n return cls(_map(expand_kv, value.items()))\n else:\n return cls(_map(func, value))\n\n return transform(map_values)",
"def visit_Assignment(self, node):\n # TODO: Arithmetic Assignment\n if isinstance(node.target, asr.Variable):\n target = node.target\n value = node.value\n if isinstance(value, asr.Variable):\n new_node = Assignment(\n Variable(\n target.name\n ),\n Variable(\n value.name\n )\n )\n elif (type(value) == asr.BinOp):\n exp_ast = call_visitor(value)\n for expr in exp_ast:\n new_node = Assignment(\n Variable(target.name),\n expr\n )\n else:\n raise NotImplementedError(\"Numeric assignments not supported\")\n else:\n raise NotImplementedError(\"Arrays not supported\")\n self._py_ast.append(new_node)",
"def map_nested_value(func: Callable, value: Any) -> Any:\n value_type = type(value)\n\n if value_type == list:\n return [map_nested_value(func, item) for item in value]\n\n elif value_type == tuple:\n return tuple([map_nested_value(func, item) for item in value])\n\n elif isinstance(value, tuple) and hasattr(value, \"_fields\"):\n # Namedtuple.\n return type(value)(*[map_nested_value(func, item) for item in value])\n\n elif value_type == set:\n return {map_nested_value(func, item) for item in value}\n\n elif value_type == dict:\n return {\n map_nested_value(func, key): map_nested_value(func, val) for key, val in value.items()\n }\n\n else:\n return func(value)",
"def convert_functions_in_dict_to_values(dict_to_convert):\n return {key: value() if hasattr(value, '__call__') else value for key, value in dict_to_convert.items()}",
"def satisfies(assignment, constraint):\n return constraint(**{var:val for var,val in assignment.items()\n if var in scope(constraint)})",
"def satisfying_assignment(formula):\n #print('new_recursion:')\n #print(formula)\n if len(formula)==0: #Base case: empty formula returns empty assignments\n return {}\n\n assignments = {}\n\n ind = 0 #Which literal are we looking at?\n boolVal = True #What value does the variable in our current literal have?\n\n while ind < len(formula[0]): #Look at all variables in first clause until valid assignment is found\n new_formula = simplify_formula(formula,{formula[0][ind][0]:boolVal}) #Try setting first variable to True\n if new_formula[0] != None:\n assignments[formula[0][ind][0]] = boolVal\n assignments.update(new_formula[1])\n #print(assignments)\n try:\n assignments.update(dict(satisfying_assignment(new_formula[0])))\n break\n except TypeError:\n ind += 1\n continue\n else: #If invalid assignment,\n if boolVal: #Try assigning variable to False\n boolVal = False\n else:\n boolVal = True\n ind += 1\n\n if new_formula[0]==None:\n return None\n\n return assignments",
"def visit_Assignment(self, node):\n var = node.lvalue.name\n op = node.op\n\n if op == '=':\n self.memory[var] = self.visit(node.rvalue)\n elif op == '+=':\n self.memory[var] += self.visit(node.rvalue)\n elif op == '-=':\n self.memory[var] -= self.visit(node.rvalue)\n elif op == '/=':\n self.memory[var] /= self.visit(node.rvalue)\n elif op == '*=':\n self.memory[var] *= self.visit(node.rvalue)\n\n return self.memory[var]",
"def assignment_to_plan(assignment: dict[tuple[str, int], list[int]]) -> PlanDict:\n return {\n 'version': 1,\n 'partitions':\n [{'topic': t_p[0],\n 'partition': t_p[1],\n 'replicas': replica\n } for t_p, replica in assignment.items()]\n }",
"def _apply_func_to_expressions(sympy_expr, function, args=None):\n if args is None:\n\n def func(expr):\n return function(expr)\n\n else:\n\n def func(expr):\n return function(expr, *args)\n\n if isinstance(sympy_expr, dict):\n new_expr = dict((k, func(expr)) for k, expr in iteritems(sympy_expr))\n elif hasattr(sympy_expr, \"__iter__\"):\n new_expr = list(func(expr) for expr in sympy_expr)\n else:\n new_expr = func(sympy_expr)\n\n return new_expr",
"def dict_convert(_dict, keyfn=None, valuefn=None):\n if keyfn is None and valuefn is not None:\n for k in _dict:\n _dict[k] = valuefn(_dict[k])\n return _dict\n\n elif keyfn is not None:\n out_dict = {}\n for k in _dict:\n out_dict[keyfn(k)] = valuefn(_dict[k]) if valuefn else _dict[k]\n return out_dict\n else:\n return _dict",
"def process_assign(self, node, state, *_):\n io_source = False\n is_function_call = False\n maybe_d_type_object_assign = False\n d_type_object_name = None\n # Get the GrFN element of the RHS side of the assignment which are\n # the variables involved in the assignment operations.\n sources = self.gen_grfn(node.value, state, \"assign\")\n\n node_name = node.targets[0].__repr__().split()[0][2:]\n if node_name == \"ast.Attribute\":\n node_value = node.targets[0].value\n attrib_ast = node_value.__repr__().split()[0][2:]\n if (\n attrib_ast == \"ast.Name\"\n and node_value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.id\n object_type = self.derived_type_objects[d_type_object_name]\n elif (\n attrib_ast == \"ast.Attribute\"\n and node_value.value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.value.id\n object_type = self.derived_type_objects[d_type_object_name]\n\n array_assignment = False\n is_d_type_obj_declaration = False\n # Detect assigns which are string initializations of the\n # following form: String(10). String initialization of the form\n # String(10, \"abcdef\") are valid assignments where the index of the\n # variables will be incremented but for the former case the index\n # will not be incremented and neither will its variable spec be\n # generated\n is_string_assign = False\n is_string_annotation = False\n if len(sources) > 0 and \"call\" in sources[0]:\n type_name = sources[0][\"call\"][\"function\"]\n if type_name == \"String\":\n is_string_assign = True\n # Check if it just an object initialization or initialization\n # with value assignment\n if len(sources[0][\"call\"][\"inputs\"]) == 1:\n # This is just an object initialization e.g. String(10)\n is_string_annotation = True\n elif type_name == \"Array\":\n array_assignment = True\n array_dimensions = []\n inputs = sources[0][\"call\"][\"inputs\"]\n\n # If the array type is string, the structure of inputs will\n # be a bit different than when it is int of float\n if \"call\" in inputs[0][0]:\n if inputs[0][0][\"call\"][\"function\"] == \"String\":\n array_type = \"string\"\n else:\n array_type = inputs[0][0][\"var\"][\"variable\"]\n self._get_array_dimension(sources, array_dimensions, inputs)\n elif type_name in self.derived_types:\n is_d_type_obj_declaration = True\n if isinstance(node.targets[0], ast.Name):\n variable_name = node.targets[0].id\n if variable_name not in self.module_variable_types:\n for program in self.mode_mapper[\"public_objects\"]:\n if (\n variable_name\n in self.mode_mapper[\"public_objects\"][program]\n ):\n self.module_variable_types[variable_name] = [\n program,\n type_name,\n ]\n else:\n pass\n else:\n pass\n\n # This reduce function is useful when a single assignment operation\n # has multiple targets (E.g: a = b = 5). Currently, the translated\n # python code does not appear in this way and only a single target\n # will be present.\n targets = reduce(\n (lambda x, y: x.append(y)),\n [\n self.gen_grfn(target, state, \"assign\")\n for target in node.targets\n ],\n )\n grfn = {\"functions\": [], \"variables\": [], \"containers\": []}\n # Again as above, only a single target appears in current version.\n # The `for` loop seems unnecessary but will be required when multiple\n # targets start appearing.\n target_names = []\n object_attr_num = 1\n for target in targets:\n # Bypass any assigns that have multiple targets.\n # E.g. 
(i[0], x[0], j[0], y[0],) = ...\n if \"list\" in target:\n return []\n target_names.append(target[\"var\"][\"variable\"])\n # Fill some data structures if this is a string\n # assignment/initialization\n if is_string_assign:\n state.variable_types[target_names[0]] = \"string\"\n state.string_assign_name = target_names[0]\n self.strings[target_names[0]] = {\n \"length\": sources[0][\"call\"][\"inputs\"][0][0][\"value\"]\n }\n if is_string_annotation:\n # If this is just a string initialization,\n # last_definition should not contain this string's index.\n # This happens only during assignments.\n del state.last_definitions[target_names[0]]\n self.strings[target_names[0]][\"annotation\"] = True\n self.strings[target_names[0]][\"annotation_assign\"] = False\n return []\n else:\n self.strings[target_names[0]][\"annotation\"] = False\n self.strings[target_names[0]][\"annotation_assign\"] = True\n\n # Pre-processing and removing certain Assigns which only pertain\n # to the Python code and do not relate to the FORTRAN code in any\n # way.\n io_match = self.check_io_variables(target_names[0])\n if io_match:\n self.exclude_list.append(target_names[0])\n return []\n\n # If the target is a list of variables, the grfn notation for the\n # target will be a list of variable names i.e. \"[a, b, c]\"\n # TODO: This does not seem right. Discuss with Clay and Paul\n # about what a proper notation for this would be\n if target.get(\"list\"):\n targets = \",\".join(\n [x[\"var\"][\"variable\"] for x in target[\"list\"]]\n )\n target = {\"var\": {\"variable\": targets, \"index\": 1}}\n\n if array_assignment:\n var_name = target[\"var\"][\"variable\"]\n state.array_assign_name = var_name\n # Just like the same reason as the variables\n # declared with annotation within function (not\n # function arguments) need to have index of zero.\n # Thus, these 3 lines of code fixes the index to\n # correct value from -1 to 0.\n if target[\"var\"][\"index\"] == -1:\n target[\"var\"][\"index\"] = 0\n state.last_definitions[target_names[0]] = 0\n is_mutable = False\n array_info = {\n \"index\": target[\"var\"][\"index\"],\n \"dimensions\": array_dimensions,\n \"elem_type\": array_type,\n \"mutable\": is_mutable,\n }\n self.arrays[var_name] = array_info\n state.array_types[var_name] = array_type\n if array_type == \"string\":\n length = inputs[0][0][\"call\"][\"inputs\"][0][0][\"value\"]\n self.strings[var_name] = {\n \"length\": length,\n \"annotation\": False,\n \"annotated_assign\": True,\n }\n\n if (\n maybe_d_type_object_assign\n and object_type\n and object_type in self.derived_types_attributes\n and target_names[0]\n in self.derived_types_attributes[object_type]\n ):\n self.current_d_object_name = d_type_object_name\n is_d_type_object_assignment = True\n\n # If targets holds more than 1 variable information and\n # it's greater than the object attribute number, then\n # the derived type object is referencing more than\n # 1 attribute (i.e. 
x.k.v).\n if len(targets) > 1 and len(targets) > object_attr_num:\n object_attr_num += 1\n # Therefore, we do not want to go any further before\n # collecting all the information of the attribute\n # information, so we need to simply return back to the\n # beginning of loop and restart the process\n continue\n else:\n is_d_type_object_assignment = False\n\n variable_spec = self.generate_variable_definition(\n target_names,\n d_type_object_name,\n is_d_type_object_assignment,\n state,\n )\n\n # Do not add the variable spec if this is a string annotation\n # since this can collide with the variable spec of the first\n # string assignment.\n if not is_string_annotation:\n grfn[\"variables\"].append(variable_spec)\n\n # Since a Python class (derived type) object declaration has syntax\n # is __object_name__ = __class_name__, it's considered as an\n # assignment that will create __assign__ function GrFN,\n # which should not. Thus, simply return the [grfn] here to avoid\n # generating __assign__ function.\n if is_d_type_obj_declaration:\n return [grfn]\n\n # TODO Hack to not print lambda function for IO assigns. Need a\n # proper method to handle IO moving on\n for src in sources:\n if \"call\" in src:\n if self.check_io_variables(src[\"call\"][\"function\"]):\n io_source = True\n function = src[\"call\"][\"function\"]\n # Check if the source is a function call by comparing its\n # value with the list of functions in our program (\n # obtained from the mode mapper)\n for program_functions in self.mode_mapper[\"subprograms\"]:\n if (\n function\n in self.mode_mapper[\"subprograms\"][\n program_functions\n ]\n ):\n is_function_call = True\n\n if is_function_call:\n container_name = self.generate_container_id_name(\n self.fortran_file, [\"@global\"], function\n )\n function_name = {\"name\": container_name, \"type\": \"container\"}\n else:\n function_name = self.generate_function_name(\n \"__assign__\", variable_spec[\"name\"], None\n )\n # If current assignment process is for a derived type object (i.e\n # x.k), then\n if is_d_type_object_assignment:\n # (1) we need to add derived type object as function input.\n src = [\n {\n \"var\": {\n \"variable\": d_type_object_name,\n \"index\": state.last_definitions[\n d_type_object_name\n ],\n }\n }\n ]\n sources.extend(src)\n\n # (2) Generate the object name + attributes variable name\n new_var_name = d_type_object_name\n for target_name in target_names:\n new_var_name += f\"_{target_name}\"\n self.current_d_object_attributes.append(target_name)\n\n # (3) we need to modify thee target to be \"objectName_attribute\"\n # For example, variable: x_k and index: __index_of_x_y__.\n target[\"var\"] = {\n \"variable\": new_var_name,\n \"index\": state.last_definitions[new_var_name],\n }\n\n fn = self.make_fn_dict(function_name, target, sources, state)\n if len(fn) == 0:\n return []\n\n source_list = self.make_source_list_dict(sources)\n\n if not io_source and not is_function_call:\n lambda_string = self.generate_lambda_function(\n node,\n function_name[\"name\"],\n True,\n array_assignment,\n is_string_assign,\n is_d_type_object_assignment,\n source_list,\n state,\n False,\n )\n state.lambda_strings.append(lambda_string)\n\n grfn[\"functions\"].append(fn)\n # We need to cleanup the object attribute tracking list.\n self.current_d_object_attributes = []\n return [grfn]",
"def mapdict(itemfunc, dictionary):\r\n return dict(map(itemfunc, dictionary.items()))",
"def rhs_as_python_func(self, namespace={}):\n rhs = self.rhs\n\n rhs = rhs.replace('!', ' not ')\n rhs = rhs.replace('&', ' and ')\n rhs = rhs.replace('|', ' or ')\n\n name_map = {\n 'true': 'True',\n 'false': 'False'\n }\n\n for frm, to in name_map.iteritems():\n rhs = MathUtil.str_expr_replacement(frm, to, rhs)\n\n lmda_str = \"lambda %s: %s\" % (','.join(self.rhs_names), rhs)\n return eval(lmda_str, str_to_npfunc_map, namespace)",
"def ast_eval(node):\n if isinstance(node, ast.Num):\n return node.n\n elif isinstance(node, ast.Str):\n return node.s\n elif isinstance(node, ast.Name) and node.id in NAMED_CONSTS:\n return NAMED_CONSTS[node.id]\n elif isinstance(node, ast.Tuple):\n return tuple(ast_eval(n) for n in node.elts)\n elif isinstance(node, ast.List):\n return [ast_eval(n) for n in node.elts]\n elif isinstance(node, ast.Dict):\n return zipdict(ast_eval(node.keys), ast_eval(node.values))\n else:\n raise ValueError(\"Don't know how to eval %s\" % node.__class__.__name__)",
"def visitAssignment_statement(\n self, ctx: MPParser.Assignment_statementContext):\n expression = self.visit(ctx.expression())\n assignment_lhs_list = self.visit(ctx.assignment_lhs_list())\n\n rhs_list = assignment_lhs_list[1:] + [expression]\n\n # def compose(arg):\n # def h(x):\n # return Assign(x, arg)\n # return h\n # hoo = list(map(lambda x: compose(x), rhs_list))\n return [Assign(lhs, rhs)\n for lhs, rhs in zip(assignment_lhs_list, rhs_list)][::-1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluate the expression against the given data dictionary. | def evaluate(self, data):
__traceback_hide__ = 'before_and_this'
_globals = self._globals(data)
return eval(self.code, _globals, {'__data__': data}) | [
"def evaluate(expr, locals):",
"def do_eval(expr, context):\n return eval(expr, context.vals)",
"def evaluate(self):\n self.arithmeticInorder()\n return eval(self._expression)",
"def evaluate(self, expr_object, eval_context_object):\n\n from exprs.evaluation import evaluate_expression_on_stack\n\n num_children = len(expr_object.children)\n self._evaluate_children(expr_object, eval_context_object)\n parameter_map = [exprs.Value(eval_context_object.peek(i), self.domain_types[i])\n for i in reversed(range(len(self.domain_types)))]\n eval_context_object.pop(num_children)\n\n orig_valuation_map = eval_context_object.valuation_map\n eval_context_object.set_valuation_map(parameter_map)\n interpretation = eval_context_object.interpretation_map[self.unknown_function_id]\n evaluate_expression_on_stack(interpretation, eval_context_object)\n eval_context_object.set_valuation_map(orig_valuation_map)",
"def eval(self,opseq,valueDict):\n for (dstName,funName,inputNames) in opseq:\n inputValues = map(lambda a:valueDict[a], inputNames)\n fun = EVAL_FUNS[funName] \n result = fun(*inputValues)\n valueDict[dstName] = result\n return valueDict",
"def safeEval(self, expr: str, names: Dict[str, Any]) -> Any:\n\t\treturn self.execute(self.compile(expr), names)",
"def _evalExpression(self):\n value = self.expressionVar.get().strip()\n if value:\n for point in self.data:\n if point.eval(value):\n point.setState(Point.DISCARDED)",
"def eval( self ):\n return self.doEval( self.code, self.ctx )",
"def evaluate(self, values: Dict[Union[str, sympy.Symbol], float], precision: int =3) -> sympy.Expr:\n return self.expr.subs(values).evalf(precision)",
"def evaluate(self, variables,functions):\r\n pass",
"def eval(self, expr):\n self.log_screen()\n self.logger.info(\"eval «%s»\", expr)\n return self.nvim.eval(expr)",
"def eval( self, code, ctx ):\n return self.doEval( code, ctx )",
"def evaluate(self):\n evaluation = {\n \"price\": self._evalulate_price(),\n \"overall\": self._evalulate_overall(),\n \"chemistry\": self._evalulate_chemistry()\n }\n return evaluation",
"def evaluate(self,\n eval_step: Callable[[MiniBatch, Dict[str, Any]], Dict[str, Any]],\n eval_data: Iterator[MiniBatch]) -> Dict[str, float]:\n with torch.no_grad():\n for metric in self._eval_metrics.values():\n metric.reset() # Don't need this reset\n \n self.model.eval()\n output_dict = {}\n\n \n for minibatch in eval_data:\n output_dict = eval_step(minibatch, output_dict)\n for metric in self._eval_metrics.values():\n metric.accumulate(**output_dict)\n \n return {name: metric.compute_and_reset()\n for name, metric in self._eval_metrics.items()}",
"async def eval(self, script, numkeys, *keys_and_args):\n return await self.execute_command('EVAL', script, numkeys, *keys_and_args)",
"def evaluate(self, data: ExchangeObject, extra=None):\n if extra is None:\n extra = {}\n if not isinstance(data, ExchangeObject):\n raise ValueError(f\"expected data to be ExchangeObject but received {type(data)}\")\n\n if self.evaluator is None:\n raise ValueError(\"self.evaluator should not be None.\")\n if self.pre_filters is not None:\n for _filter in self.pre_filters:\n data = _filter(data, extra)\n\n self.phase = FlPhase.EVALUATE\n self.logger.info(f\"Load {self.client_name} weights...\")\n global_weights = convert_global_weights(\n global_weights=data.weights, local_var_dict=get_state_dict(self.evaluator.network)\n )\n\n copy_model_state(src=global_weights, dst=self.evaluator.network)\n self.logger.info(f\"Start {self.client_name} evaluating...\")\n if isinstance(self.trainer, monai.engines.Trainer):\n self.evaluator.run(self.trainer.state.epoch + 1)\n else:\n self.evaluator.run()\n return_metrics = ExchangeObject(metrics=self.evaluator.state.metrics)\n\n if self.post_evaluate_filters is not None:\n for _filter in self.post_evaluate_filters:\n return_metrics = _filter(return_metrics, extra)\n return return_metrics",
"def _run_evaluation(self) -> None:",
"def eval(self, z):",
"def evalFunc(x):\n\t\tpass",
"def test_expressions_with_fame(self):\n c = Context()\n c[\"foo\"] = dict(a=1, b=2, bar=\"apples\")\n c[\"top\"] = 10\n c[\"r\"] = list(range(10))\n tests = [(\"a+b\", 3), (\".top\", 10), (\"a+.top\", 11), (\".r.4+.top\", 14)]\n with c.frame(\"foo\"):\n for expression, result in tests:\n self.assertEqual(c.eval(expression), result)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise an ``UndefinedError`` immediately. | def undefined(cls, key, owner=UNDEFINED):
__traceback_hide__ = True
raise UndefinedError(key, owner=owner) | [
"def failure(self):\n raise RuntimeError, \"This function always raises an error.\"",
"def test_undefined_rule(self):\n tree = rule_grammar.parse('boy = howdy\\n')\n assert_raises(UndefinedLabel, RuleVisitor().visit, tree)",
"def assert_undefined(actual: Any, msg: str = '') -> None:\r\n from apysc.expression import expression_file_util\r\n from apysc.string import string_util\r\n _trace_info(\r\n interface_label='assert_undefined', expected='undefined',\r\n actual=actual)\r\n _, actual_str = _get_expected_and_actual_strs(\r\n expected='undefined', actual=actual)\r\n\r\n msg = string_util.escape_str(string=msg)\r\n expression: str = (\r\n f'console.assert(_.isUndefined({actual_str}), \"{msg}\");'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)",
"def test_notLookupMissingByValue(self):\n self.assertRaises(ValueError, self.FXF.lookupByValue, 0x10)",
"def markAsUndefined(self,addr):\n return HopperLowLevel.markAsUndefined(self.__internal_segment_addr__,addr)",
"def writeUndefined(self, data):\n self.writeType(ASTypes.UNDEFINED)",
"def raise_first(self):\n exceptions = self.exceptions\n if exceptions:\n raise exceptions[0]",
"def display_is_unspecified_error(self):\r\n self._display_error(\"This address seems to be unspecified. Please try again.\")",
"def assert_defined(actual: Any, msg: str = '') -> None:\r\n from apysc.expression import expression_file_util\r\n from apysc.string import string_util\r\n _trace_info(\r\n interface_label='assert_defined', expected='other than undefined',\r\n actual=actual)\r\n _, actual_str = _get_expected_and_actual_strs(\r\n expected='other than undefined', actual=actual)\r\n\r\n msg = string_util.escape_str(string=msg)\r\n expression: str = (\r\n f'console.assert(!_.isUndefined({actual_str}), \"{msg}\");'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)",
"def test_error_calls_given_func_if_func_not_None(self):\n error('foo', func=Fake(callable=True, expect_call=True))",
"def verify_fail(self):\n raise MissingDependencyError(self, self.installed_version)",
"def _raisePostponed(self):\n if self.postponedErrors:\n self.postponedErrors[0].raiseException()",
"def test_terms_undefined(self):\n with pytest.raises(qml.operation.TermsUndefinedError):\n MyOp.compute_terms(wires=[1])\n with pytest.raises(qml.operation.TermsUndefinedError):\n op.terms()",
"def test_incorrect_undef_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Undef_Stmt(line)\n assert \"Cpp_Undef_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def fallback(self) -> None:\n pass",
"def test_getattr_missing(self):\n # Get a reference out here to make sure we don't get an exception\n # from an unexpected place\n data_values = self.record.data_values\n with self.assertRaises(AttributeError) as err:\n value = data_values.no_such_key\n self.fail('Should have failed, but got {}'.format(value))\n self.assertIn('no_such_key', str(err.exception))",
"def notYetImplemented():\n raise Exception(\"Not yet implemented: %s\" % _functionId(1))",
"def checkerror(self):\n error = self.qERR()\n if error:\n raise pipython.GCSError(error)",
"def test_error_is_thrown_set_measurement_variable(self):\n with pytest.raises(KeyError):\n self.microscope.setMeasurementVariableValue(\"test-variable\", 0)",
"def name_error():\n no_function()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Factory for a load function that delegates to other loaders depending on the prefix of the requested template path. The prefix is stripped from the filename when passing on the load request to the delegate. >>> load = prefixed( | def prefixed(**delegates):
def _dispatch_by_prefix(filename):
for prefix, delegate in delegates.items():
if filename.startswith(prefix):
if isinstance(delegate, basestring):
delegate = directory(delegate)
filepath, _, fileobj, uptodate = delegate(
filename[len(prefix):].lstrip('/\\')
)
return filepath, filename, fileobj, uptodate
raise TemplateNotFound(filename, list(delegates.keys()))
return _dispatch_by_prefix | [
"def prefixed(**delegates):\r\n def _dispatch_by_prefix(filename):\r\n for prefix, delegate in list(delegates.items()):\r\n if filename.startswith(prefix):\r\n if isinstance(delegate, str):\r\n delegate = directory(delegate)\r\n filepath, _, fileobj, uptodate = delegate(\r\n filename[len(prefix):].lstrip('/\\\\')\r\n )\r\n return filepath, filename, fileobj, uptodate\r\n raise TemplateNotFound(filename, list(delegates.keys()))\r\n return _dispatch_by_prefix",
"def register(prefix: bytes, loader: Callable):\n _loaders[prefix] = loader",
"def create_loader(search_path_string=...):\n ...",
"def autodelegate(prefix=''):\n def internal(self, arg):\n if '/' in arg:\n first, rest = arg.split('/', 1)\n func = prefix + first\n args = ['/' + rest]\n else:\n func = prefix + arg\n args = []\n \n if hasattr(self, func):\n try:\n return getattr(self, func)(*args)\n except TypeError:\n return web.notfound()\n else:\n return web.notfound()\n return internal",
"def default_prefixer(sender, **kwargs):\n request = http.HttpRequest()\n request.META['SCRIPT_NAME'] = ''\n prefixer = urlresolvers.Prefixer(request)\n urlresolvers.set_url_prefix(prefixer)",
"def templateLoader(loadname):\n with open(loadname, 'rb') as loadfile:\n settings = load(loadfile)\n \n return settings",
"def loader(self,func):\n return LoaderWrapper(self,func)",
"def add_prefix(path, prefix=\"E-\"): \n fname = os.path.basename(path)\n dname = os.path.dirname(path)\n return os.path.join(dname, prefix + fname)",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n search_path = self.search_path\r\n\r\n # Make the filename relative to the template file its being loaded\r\n # from, but only if that file is specified as a relative path, or no\r\n # search path has been set up\r\n if relative_to and (not search_path or not os.path.isabs(relative_to)):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n\r\n filename = os.path.normpath(filename)\r\n cachekey = filename\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[cachekey]\r\n if not self.auto_reload:\r\n return tmpl\r\n uptodate = self._uptodate[cachekey]\r\n if uptodate is not None and uptodate():\r\n return tmpl\r\n except (KeyError, OSError):\r\n pass\r\n\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = list(search_path) + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for loadfunc in search_path:\r\n if isinstance(loadfunc, str):\r\n loadfunc = directory(loadfunc)\r\n try:\r\n filepath, filename, fileobj, uptodate = loadfunc(filename)\r\n except IOError:\r\n continue\r\n else:\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested includes work properly without a\r\n # search path\r\n filename = filepath\r\n tmpl = self._instantiate(cls, fileobj, filepath,\r\n filename, encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[cachekey] = tmpl\r\n self._uptodate[cachekey] = uptodate\r\n finally:\r\n if hasattr(fileobj, 'close'):\r\n fileobj.close()\r\n return tmpl\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()",
"def _load_file(file_path, base_path, loader_function, ignore_partitions):\n data = loader_function(file_path)\n\n if ignore_partitions:\n return data\n else:\n partitions = _take_partitions(file_path, base_path)\n return {**partitions, **data}",
"def _get_prefix_parts(full_prefix):\n prefix_parts = full_prefix.split(\"/\")\n file_name, _, file_ext = prefix_parts[-1].partition(\".\")\n return FilePrefix(\n dirs=\"/\".join(prefix_parts[:-1]),\n filename=file_name,\n file_extension=file_ext,\n use_default_filename=(DEFAULT_FILENAME_TOKEN in full_prefix),\n )",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()",
"def patch_load():\n import piglet.runtime\n\n saved = piglet.runtime.load\n piglet.runtime.load = lambda template, *args, **kwargs: template\n yield\n piglet.runtime.load = saved",
"def register_custom_loader(self, format_name, loader_func):\n if not callable(loader_func):\n raise ValueError(\"loader_func must be callable\")\n self._loader_map[format_name] = loader_func",
"def get_loader(spacing, patch_shape):\r\n loader = yaml.SafeLoader\r\n spacing_constructor = SpacingConstructor(spacing)\r\n patch_shape_constructor = PatchShapeConstructor(patch_shape)\r\n\r\n loader.add_constructor(\"!spacing\", spacing_constructor)\r\n loader.add_constructor(\"!patch_shape\", patch_shape_constructor)\r\n return loader",
"def __init__(\n self,\n source_str: str,\n fname: str,\n templated_str: Optional[str] = None,\n sliced_file: Optional[List[TemplatedFileSlice]] = None,\n raw_sliced: Optional[List[RawFileSlice]] = None,\n ):\n self.source_str = source_str\n # An empty string is still allowed as the templated string.\n self.templated_str = source_str if templated_str is None else templated_str\n # If no fname, we assume this is from a string or stdin.\n self.fname = fname\n # Assume that no sliced_file, means the file is not templated\n self.sliced_file: List[TemplatedFileSlice]\n if sliced_file is None:\n if self.templated_str != self.source_str: # pragma: no cover\n raise ValueError(\"Cannot instantiate a templated file unsliced!\")\n # If we get here and we don't have sliced files,\n # then it's raw, so create them.\n self.sliced_file = [\n TemplatedFileSlice(\n \"literal\", slice(0, len(source_str)), slice(0, len(source_str))\n )\n ]\n assert (\n raw_sliced is None\n ), \"Templated file was not sliced, but not has raw slices.\"\n self.raw_sliced: List[RawFileSlice] = [\n RawFileSlice(source_str, \"literal\", 0)\n ]\n else:\n self.sliced_file = sliced_file\n assert raw_sliced is not None, \"Templated file was sliced, but not raw.\"\n self.raw_sliced = raw_sliced\n\n # Precalculate newlines, character positions.\n self._source_newlines = list(iter_indices_of_newlines(self.source_str))\n self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))\n\n # Consistency check raw string and slices.\n pos = 0\n rfs: RawFileSlice\n for rfs in self.raw_sliced:\n assert rfs.source_idx == pos, (\n \"TemplatedFile. Consistency fail on running source length\"\n f\": {pos} != {rfs.source_idx}\"\n )\n pos += len(rfs.raw)\n assert pos == len(self.source_str), (\n \"TemplatedFile. Consistency fail on total source length\"\n f\": {pos} != {len(self.source_str)}\"\n )\n\n # Consistency check templated string and slices.\n previous_slice = None\n tfs: Optional[TemplatedFileSlice] = None\n for tfs in self.sliced_file:\n if previous_slice:\n if tfs.templated_slice.start != previous_slice.templated_slice.stop:\n raise SQLFluffSkipFile( # pragma: no cover\n \"Templated slices found to be non-contiguous. \"\n f\"{tfs.templated_slice} (starting\"\n f\" {self.templated_str[tfs.templated_slice]!r})\"\n f\" does not follow {previous_slice.templated_slice} \"\n \"(starting \"\n f\"{self.templated_str[previous_slice.templated_slice]!r}\"\n \")\"\n )\n else:\n if tfs.templated_slice.start != 0:\n raise SQLFluffSkipFile( # pragma: no cover\n \"First Templated slice not started at index 0 \"\n f\"(found slice {tfs.templated_slice})\"\n )\n previous_slice = tfs\n if self.sliced_file and templated_str is not None:\n if tfs.templated_slice.stop != len(templated_str):\n raise SQLFluffSkipFile( # pragma: no cover\n \"Length of templated file mismatch with final slice: \"\n f\"{len(templated_str)} != {tfs.templated_slice.stop}.\"\n )",
"def load_template(self, templatename, template_string=None):\r\n if template_string is not None:\r\n return self.template_class(template_string)\r\n\r\n if self.use_package_naming:\r\n divider = templatename.rfind('.')\r\n if divider >= 0:\r\n from pkg_resources import resource_filename\r\n package = templatename[:divider]\r\n basename = templatename[divider + 1:] + self.extension\r\n templatename = resource_filename(package, basename)\r\n\r\n return self.loader.load(templatename)",
"def _load_template(self, template_file):\n pass",
"def load_prefix_includes(config, settings):\n lines = [\n l for l in settings.get('pyramid.route_includes', '').split('\\n') if l\n ]\n for line in lines:\n mod, prefix = [i.strip() for i in line.split(',')]\n get_log().info(\n \"Loading module %r with route prefix %r\" % (mod, prefix)\n )\n # '/' is the same as no prefix at all, but useful to denote the root\n # module in the config file\n if prefix == '/':\n prefix = None\n config.include(mod, route_prefix=prefix)",
"def dynamicLoad():\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register a custom `DirectiveFactory` for a given namespace. | def add_directives(self, namespace, factory):
assert not self._prepared, 'Too late for adding directives, ' \
'template already prepared'
self._stream = self._extract_directives(self._stream, namespace,
factory) | [
"def register(self, service, factory=..., instance=..., scope=..., **kwargs):\n ...",
"def add_factory(self, node_name, factory):\n self.factories[node_name] = factory",
"def convert_namespace_to_factory(class_input):\r\n return decorate_class_methods(class_input, to_factory)",
"def register_factory(\n self, factory, iface_or_type=Interface, *, context=None, name=''\n ):\n iface = _iface_for_type(iface_or_type)\n context_iface = _iface_for_context(context)\n wants_context = context is not None\n\n info = ServiceFactoryInfo(factory, iface, context_iface, wants_context)\n factories = self._cache.get()\n _register_factory(info, factories, iface, context_iface, name)",
"def register(name, fn):\n return el.Dotted.register(name, fn)",
"def __getitem__(self, namespace):\n return ElementFactory(namespace)",
"def addNamespace(*args, **kwargs):\n \n pass",
"def addNamespace(self, namespace):\n \n pass",
"def factoryAttribute(func):\n def factory(*args, **kwargs):\n return Factory(func).bind(*args, **kwargs)\n func.factory = factory\n return func",
"def register(dataset_name):\n\n def decorator(decorator_dataset_class, decorator_dataset_name):\n _DATASETS[decorator_dataset_name] = decorator_dataset_class\n return decorator_dataset_class\n\n return lambda dataset_class: decorator(dataset_class, dataset_name)",
"def set_factory(self, name, factory):\n self.factories[name] = factory",
"def register_provider(self, func):\n if \"provides\" not in getattr(func, \"__di__\", {}):\n raise DiayException(\"function %r is not a provider\" % func)\n\n self.factories[func.__di__[\"provides\"]] = func",
"def createElementNS(namespaceURI, qualifiedName, options=None):\n from domonic.html import tag, tag_init\n el = type(qualifiedName, (tag, Element), {'name': qualifiedName, '__init__': tag_init})\n el.namespaceURI = namespaceURI\n return el()",
"def register_namespace(a_prefix, a_uri):\n # pylint: disable=protected-access\n XML2._namespace_map[a_uri] = a_prefix",
"def registerIntruderPayloadGeneratorFactory(self, factory):\n # type: (IIntruderPayloadGeneratorFactory) -> ()",
"def add_factory(name, evaluation_factory):\n EvaluationFactory.factories[name] = evaluation_factory",
"def register_domain(self, domain=0, tokenizer=None, trie=None):\n self.domains[domain] = IntentDeterminationEngine(\n tokenizer=tokenizer, trie=trie)",
"def get_factory(package):\r\n return functools.partial(get, package)",
"def register(registry:list):\n def decorate(func):\n registry.append(func)\n return func\n return decorate",
"def register_from_doc(name: str):\n\n def f(to_doc: FnFromDoc): # pylint: disable=redefined-outer-name\n reg = Registry._inst # pylint: disable=protected-access\n reg.table[name].from_doc = to_doc\n\n return f"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find a template specified in Python 'dot' notation, or load one from a string. | def load_template(self, templatename, template_string=None):
if template_string is not None:
return self.template_class(template_string)
if self.use_package_naming:
divider = templatename.rfind('.')
if divider >= 0:
from pkg_resources import resource_filename
package = templatename[:divider]
basename = templatename[divider + 1:] + self.extension
templatename = resource_filename(package, basename)
return self.loader.load(templatename) | [
"def _find_template(self, filename, start=0):\n\n filename = filename.lstrip(\"/\").replace(\"/\", os.sep)\n cachename = \":@@{0}@@:{1}\".format(start, filename)\n\n if not self._path:\n raise RestrictedError(\n \"Attempt to load template from empty search path: {0}\".format(filename)\n )\n\n if not cachename in self._find_cache:\n for (index, path) in enumerate(self._path[start:], start):\n new_filename = os.path.realpath(os.path.join(path, filename))\n if os.path.isfile(new_filename):\n self._find_cache[cachename] = (index, new_filename)\n break\n else:\n raise RestrictedError(\n \"Template not found along search path: {0}\".format(filename)\n )\n\n return self._find_cache[cachename]",
"def lookupTemplate(self, request):\n if self.template:\n return microdom.parseString(self.template, caseInsensitive=0, preserveCase=0)\n if not self.templateDirectory:\n mod = sys.modules[self.__module__]\n if hasattr(mod, '__file__'):\n self.templateDirectory = os.path.split(mod.__file__)[0]\n # First see if templateDirectory + templateFile is a file\n templatePath = os.path.join(self.templateDirectory, self.templateFile)\n if not os.path.exists(templatePath):\n raise RuntimeError, \"The template %r was not found.\" % templatePath\n # Check to see if there is an already parsed copy of it\n mtime = os.path.getmtime(templatePath)\n cachedTemplate = templateCache.get(templatePath, None)\n compiledTemplate = None\n\n if cachedTemplate is not None:\n if cachedTemplate[0] == mtime:\n compiledTemplate = templateCache[templatePath][1].cloneNode(deep=1)\n \n if compiledTemplate is None:\n compiledTemplate = microdom.parse(templatePath, caseInsensitive=0, preserveCase=0)\n templateCache[templatePath] = (mtime, compiledTemplate.cloneNode(deep=1))\n return compiledTemplate",
"def load_template(mol) :\n filename = os.path.join(PROT_INFO_PATH,\"template_%s.txt\"%mol)\n if os.path.isfile(filename) :\n return ProteinTemplate(filename)\n else :\n raise Exception(\"Invalid mol (%s) or file is missing (%s)\"%(mol,filename))",
"def load_template(name: str) -> Template:\n if name not in _templates:\n with open(join(dirname(__file__), 'templates', name + '.j2'), 'r') as f:\n return Template(f.read())\n return _templates[name]",
"def find_template(path, curr_path, config_paths):\n # type: (str, Optional[Union[str, Path]], List[Union[str, Path]])\n # -> Optional[Path]\n path = Path(path)\n if path.is_absolute():\n return path\n\n if not curr_path: # pragma: no cover\n curr_path = Path('.')\n else:\n curr_path = Path(curr_path).parent\n\n config_paths = config_paths + [curr_path]\n for cpath in config_paths:\n thepath = Path(cpath) / path\n if thepath.is_file():\n return thepath\n return None",
"def uiTemplate(string, exists=bool):\n pass",
"def resolve_dotted_name(name: str) -> typing.Any:\n if not isinstance(name, str):\n return name # already an object\n names = name.split(\".\")\n used = names.pop(0)\n found = __import__(used)\n for n in names:\n used += \".\" + n\n try:\n found = getattr(found, n)\n except AttributeError:\n __import__(used)\n found = getattr(found, n)\n\n return found",
"def load_dotted_path(dotted_path, raise_=True, reload=False):\n obj, module = None, None\n\n parsed = _validate_dotted_path(dotted_path, raise_=raise_)\n\n if parsed:\n mod, name = parsed\n\n try:\n module = importlib.import_module(mod)\n except ImportError as e:\n if raise_:\n # we want to raise ethe same error type but chaining exceptions\n # produces a long verbose output, so we just modify the\n # original message to add more context, it's ok to hide the\n # original traceback since it will just point to lines\n # in the importlib module, which isn't useful for the user\n e.msg = ('An error happened when trying to '\n 'import dotted path \"{}\": {}'.format(\n dotted_path, str(e)))\n raise\n\n if module:\n if reload:\n module = importlib.reload(module)\n\n try:\n obj = getattr(module, name)\n except AttributeError as e:\n if raise_:\n # same as in the comment above\n e.args = (\n 'Could not get \"{}\" from module '\n '\"{}\" (loaded from: {}), make sure it is a valid '\n 'callable defined in such module'.format(\n name, mod, module.__file__), )\n raise\n return obj\n else:\n if raise_:\n raise ValueError(\n 'Invalid dotted path value \"{}\", must be a dot separated '\n 'string, with at least '\n '[module_name].[function_name]'.format(dotted_path))",
"def read_template(self, template, space=None):\n pass",
"def templateLoader(loadname):\n with open(loadname, 'rb') as loadfile:\n settings = load(loadfile)\n \n return settings",
"def load_template(name=None):\n if name is None:\n name = \"default\"\n\n logger.info(\"Loading template with name %s\", name)\n try:\n template_file = open(\"%s/%s.yaml\" % (template_path, name))\n except IOError:\n raise TemplateNotFoundError\n\n template = yaml.safe_load(template_file)\n template_file.close()\n if \"extends\" in template:\n logger.debug(\"Merging %s with %s\", name, template[\"extends\"])\n template = _merge(load_template(template[\"extends\"]), template)\n\n return template",
"def get(self, template):\n tenant = tenant_handler.tenant()\n pos = template.rfind('.')\n if pos != -1:\n format = template[pos + 1:]\n template = template[:pos]\n else:\n format = 'pdf'\n return get_document(tenant, template, format)",
"def _load_template(self, template_file):\n pass",
"def get_template(template_filename):\n return env.get_template(template_filename)",
"def _get_template(settings):\r\n puts(\"\\nPick a template\\n\")\r\n template = None\r\n while not template:\r\n _list_templates(settings)\r\n index = raw_input(\"\\nWhich template would you like to use? [1] \")\r\n if not index:\r\n index = \"1\"\r\n try:\r\n index = int(index) - 1\r\n return settings.config[\"project_templates\"][index]\r\n except:\r\n puts(\"\\\"{0}\\\" isn't a valid option!\".format(colored.red(\"{0}\".format(index))))\r\n pass",
"def find_matching_template(question, templates):\n for t in templates.values():\n for r in t[\"regexes\"]:\n if r.fullmatch(question[\"question\"]) is not None:\n return t\n assert False, \"No template found\"",
"def LoadSchemeTemplate(root, name):\n path = os.path.join(root, 'ios', 'build', 'tools', name + '.template')\n with open(path) as file:\n return Template(file.read())",
"def replace_template_path(path):\n segments = path.split(\".\")\n module = \".\".join(segments[0:-1])\n name = segments[-1]\n if module == \"ipypublish.html.ipypublish\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"ipy-{0}.html-tplx.json\".format(name),\n }\n elif module == \"ipypublish.html.standard\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"std-{0}.html-tplx.json\".format(name),\n }\n elif module == \"ipypublish.latex.standard\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"std-{0}.latex-tpl.json\".format(name),\n }\n elif module == \"ipypublish.latex.ipypublish\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"ipy-{0}.latex-tpl.json\".format(name),\n }\n else:\n print(\"Warning: unknown template path: {}\".format(path))\n return {\"module\": module, \"file\": \"{0}.json\".format(name)}",
"def app_model_templates_loader(template_name, template_dirs=None):\r\n for path in get_template_sources(template_name, template_dirs):\r\n logging.debug(\"Looking for tempalte: %s\" % path)\r\n try:\r\n return (open(path).read().decode(settings.FILE_CHARSET), path)\r\n except IOError:\r\n pass\r\n raise TemplateDoesNotExist, template_name"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render the template to a string using the provided info. | def render(self, info, format=None, fragment=False, template=None):
kwargs = self._get_render_options(format=format, fragment=fragment)
return self.transform(info, template).render(**kwargs) | [
"def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)",
"def render_template(self, *args, **kwargs):\n return self.renderer.render(*args, **kwargs)",
"def render(self, template, **kw):\n t = jinja_env.get_template(template) \n self.response.out.write(t.render(kw))",
"def _render_template(*args, **kwargs):\n rendered_template = render_template(*args, **kwargs, environment=current_app.config['ENVIRONMENT'], base_url=app.config['SERVER_BASE_URL'], alert_message=current_app.config['ALERT_MESSAGE'], disable_full_ads_link=current_app.config['DISABLE_FULL_ADS_LINK'])\n return rendered_template",
"def renderTemplate(self,template_path,context):\n html = render_to_string(template_path,context)\n return html",
"def render_to_string(template_name, context):\n return Engine(app_dirs=True).render_to_string(template_name, context=context)",
"def render_template(self, template_name, output_name, context):\n raise NotImplementedError()",
"def render_template(context=None, template=\"default.jinja2\", cls=True):\n if not context:\n context = {}\n screen_cleaner(cls)\n template = env.get_template(template)\n print(template.render(**context))",
"def render_to(self, path, template, **data):\n html = self.render(template, **data)\n return open(path, \"w\").write(html.encode(charset))",
"def render_template(name, context=None, type='html'):\n return template.render(get_template_path('%s.%s'% (name, type)), context)",
"def render_template(text, context=None):\n template = engines[\"django\"].from_string(text)\n if not context:\n context = {}\n return template.render(context)",
"def render(self, *args: Any, **globals_: Any) -> str:\n try:\n return self._mako_def_template.render(*args, **globals_)\n except RenderError as e:\n # _render() can be called by a chain of templates which call each other;\n # passing the original render error to the top so that it could be handled there.\n raise\n except Exception as e:\n # TODO: we could collect mako.exceptions.text_error_template().render() here,\n # because ideally it should point to the line where the error occurred,\n # but for some reason it doesn't. So we don't bother for now.\n raise RenderError(e, args, globals_, self.source)",
"def render_to_string(self, request, context):\n if not self.template_name or not self.has_content:\n return ''\n\n user = request.user\n last_visited = context.get('last_visited')\n\n new_context = context.flatten()\n\n try:\n new_context.update({\n 'entry': self,\n 'entry_is_new': (\n user.is_authenticated and\n last_visited is not None and\n self.is_entry_new(last_visited=last_visited,\n user=user)),\n 'show_entry_statuses_area': (\n self.entry_pos !=\n BaseReviewRequestPageEntry.ENTRY_POS_INITIAL),\n })\n new_context.update(self.get_extra_context(request, context))\n except Exception as e:\n logger.exception('Error generating template context for %s '\n '(ID=%s): %s',\n self.__class__.__name__, self.entry_id, e,\n extra={'request': request})\n return ''\n\n try:\n return render_to_string(template_name=self.template_name,\n context=new_context,\n request=request)\n except Exception as e:\n logger.exception('Error rendering template for %s (ID=%s): %s',\n self.__class__.__name__, self.entry_id, e,\n extra={'request': request})\n return ''",
"def render_template(path, args):\n return read_template(path).render(**args)",
"def render_string(self, template_name, **kwargs):\n if 'tornado' == settings['TEMPLATE_ENGINE']:\n return super(BaseHandler, self).render_string(template_name, **kwargs)\n elif 'jinja2' == settings['TEMPLATE_ENGINE']:\n return jinja2_render(template_name, **kwargs)\n else:\n raise errors.SettingsError(\n '%s is not a supported TEMPLATE_ENGINE, should be `tornado` or `jinja2`'\n % settings['TEMPLATE_ENGINE'])",
"def render(\n request, template_name, context=None, content_type=None, status=None, using=None\n):\n content = loader.render_to_string(template_name, context, request, using=using)\n return HttpResponse(content, content_type, status)",
"def render_template(process, template_string, context):\n from resolwe.flow.managers import manager\n\n # Get the appropriate expression engine. If none is defined, do not evaluate\n # any expressions.\n expression_engine = process.requirements.get('expression-engine', None)\n if not expression_engine:\n return template_string\n\n return manager.get_expression_engine(expression_engine).evaluate_block(template_string, context)",
"def render(self):\n ctx = self.context.copy()\n ctx.content = renderTemplate(self.transform(), ctx)\n layout = self.layouts.get(ctx.layout)\n if layout:\n return renderTemplate(layout.content, ctx)\n else:\n return ctx.content",
"def render(always, template, dest, **kwargs):\n\n dest = plat.path(dest)\n\n if (not always) and os.path.exists(dest):\n return\n\n template = environment.get_template(template)\n text = template.render(**kwargs)\n\n f = file(dest, \"wb\")\n f.write(text.encode(\"utf-8\"))\n f.close()",
"def simple_render(template, context):\n\n def parse_token(token, in_tag):\n if not in_tag:\n return token\n var = token[2:-2].strip()\n return context.get(var, '')\n\n result = []\n in_tag = False\n\n for token in tag_re.split(template):\n if token:\n result.append(parse_token(token, in_tag))\n in_tag = not in_tag\n\n return ''.join(result)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify more complex nesting using otherwise. | def test_complex_nesting_otherwise(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="1">
<div py:when="1" py:choose="2">
<span py:when="1">FAIL</span>
<span py:otherwise="">OK</span>
</div>
</div>
</doc>""")
self.assertEqual("""<doc>
<div>
<div>
<span>OK</span>
</div>
</div>
</doc>""", tmpl.generate().render(encoding=None)) | [
"def is_nested(self, ):\n\t\tpass",
"def IsNestedFamORAssem(self) -> bool:",
"def IsNestedFamANDAssem(self) -> bool:",
"def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertFalse(res['finally'])",
"def test33(self):\n self.check('aDict.nestedDict.one')",
"def is_nested(input):\n return is_sequence(input) or isinstance(input, dict)",
"def test31(self):\n self.check('aDict.nestedDict')",
"def test_nested(dataset):\n failure = \"flat_legacy\" in dataset or \"directory_default\" in dataset or \"fs_default\" in dataset\n verify(Array(store=NestedDirectoryStore(dataset)), failure)",
"def test_unbalanced_overlapping_bracket():\n assert multi_bracket_validation('([)]') is False",
"def is_nested(self, raise_exception=False):\n\n ###### Check if initial_data is a MultiValueDIct ############\n ###### Convert it to a dict object ##########################\n\n if hasattr(self._initial_data, 'getlist'):\n raw_data = {}\n\n for key, value in dict(self._initial_data).items():\n if len(value) > 1:\n raw_data[key] = value\n else:\n raw_data[key] = value[0]\n\n self._initial_data = raw_data\n\n #############################################################\n\n is_mapping = isinstance(self._initial_data, Mapping)\n conditions = [is_mapping]\n\n #############################################################\n\n if not is_mapping and raise_exception:\n raise ValueError('`data` is not a map type')\n\n #############################################################\n\n matched_keys = []\n\n for key in self._initial_data.keys():\n if self.str_is_nested(key):\n matched_keys.append(True)\n break\n else:\n matched_keys.append(False)\n\n conditions += [any(matched_keys)]\n\n #############################################################\n\n if not any(matched_keys) and raise_exception:\n raise ValueError('`data` is not a nested type')\n\n #############################################################\n\n if all(conditions):\n self._validated_data = self._initial_data\n self.__run__()\n\n return all(conditions)",
"def test_optional_group_mixed_children_all_missing(self):\n\n @environ.config(prefix=\"PARENT\")\n class WithOptionalChild:\n @environ.config(prefix=\"CHILD\")\n class Child:\n grandchild_a = environ.var()\n grandchild_b = environ.var(\"FOO\")\n\n child = environ.group(Child, optional=True)\n\n cfg = environ.to_config(WithOptionalChild, {})\n assert cfg.child is None",
"def IsNestedFamily(self) -> bool:",
"def testTryExceptElseFinallyTrailing(self):\n token = self.parser.parse(\n filename='evo/TryExceptElseFinallyTrailing.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])",
"def test_unbalanced_bracket_2():\n assert multi_bracket_validation('(](') is False",
"def test55(self):\n self.check('aDict.nestedDict.aClass')",
"def test_requirements_single_nested_item(self):\n # TODO pending revision\n pass",
"def test_recursion_depth(self):\n def recurse(arr):\n with recursion_depth('test_recursion_depth') as recursion_level:\n if recursion_level > 10:\n raise Exception('error')\n if arr and arr.pop():\n recurse(arr)\n\n recurse(range(0,10))\n self.assertRaises(Exception, recurse, range(0,11))\n recurse(range(0,10))\n self.assertRaises(Exception, recurse, range(0,11))",
"def is_nested(collection: Iterable) -> bool:\n return all(map(not_(is_atom), collection))",
"def testTryExceptElseFinally(self):\n token = self.parser.parse(filename='evo/TryExceptElseFinally.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a when directive with a strip directive actually strips off the outer element. | def test_when_with_strip(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<span py:otherwise="">foo</span>
</div>
</doc>""")
self.assertEqual("""<doc>
<span>foo</span>
</doc>""", tmpl.generate().render(encoding=None)) | [
"def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_function_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:def=\"echo(what)\" py:strip=\"\">\r\n <b>${what}</b>\r\n </div>\r\n ${echo('foo')}\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <b>foo</b>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_namespace_on_removed_elem(self):\r\n tmpl = MarkupTemplate(\"\"\"<?xml version=\"1.0\"?>\r\n <Test xmlns:py=\"http://genshi.edgewall.org/\">\r\n <Size py:if=\"0\" xmlns:t=\"test\">Size</Size>\r\n <Item/>\r\n </Test>\"\"\")\r\n self.assertEqual(\"\"\"<?xml version=\"1.0\"?>\\n<Test>\r\n \r\n <Item/>\r\n </Test>\"\"\", str(tmpl.generate()))",
"def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def testNonWidgetChild(self):\n try:\n class T(wd.RepeatingWidget):\n child = \"\"\n self.assert_(False)\n except pm.ParameterError:\n self.assert_(True)",
"def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def test_clean_uses_noop_sanitizer(self):\n mixin = SanitizerMixin()\n self.assertEqual(noop, mixin.get_sanitizer())",
"def is_strip_closed(self, skey):\n\n\t\treturn not self.is_edge_on_boundary(*self.strip[skey][0])",
"def test_single_strip_with_exif_checksum(self):\n self._evaluate_checksums(\"t_one_strip_with_exif\")",
"def test_remove_disabled_parts_include(self):\n text = 'text <nowiki>tag</nowiki> text'\n self.assertEqual(\n textlib.removeDisabledParts(text, include=['nowiki']), text)",
"def test_list_of_non_modulatory_phrases_is_empty_for_pieces_with_heavy_polymodal_frame():\n assert piece3.non_modulatory_phrases == []\n assert piece4.non_modulatory_phrases == []",
"def dissert_select(strip):\n global _dissert\n _dissert = strip",
"def instr_stripped_gen(self):\n yield from [x.strip() for x in self.instructions.splitlines() if len(x.strip()) > 0]",
"def testEmpty(self):\n r = twilio.Response()\n r.append(twilio.Sms(\"\"))\n r = self.strip(r)\n self.assertEquals(r, '<Response><Sms/></Response>')",
"def build_stripped(self, obj):\n stripped_path = build_common.get_stripped_path(obj)\n assert stripped_path, 'build_stripped takes path under out/target/<target>'\n self.build(stripped_path, 'strip', inputs=obj)",
"def test_include_preventing_itself(self):\n t = parse(\"\"\"\n foo: 1\n include \"example\" if foo else \"\"\n \"\"\",\n example=\"\"\"\n foo: 0\n \"\"\")\n self.assertRaises(errors.ParadoxError, t.resolve)",
"def edge_strip(self, edge):\n\n\t\tfor strip, edges in self.strip.items():\n\t\t\tif edge in edges or tuple(reversed(edge)) in edges:\n\t\t\t\treturn strip"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a `when` directive outside of a `choose` directive is reported as an error. | def test_when_outside_choose(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:when="xy" />
</doc>""")
self.assertRaises(TemplateRuntimeError, str, tmpl.generate()) | [
"def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_value_if_not_asked__value_if_not_asked_is_not_option__raises_exception():\n with pytest.raises(\n ValueError,\n match=\"The value_if_not_asked is not one of the options.\",\n ):\n Choice(\n SOME_NAME,\n SOME_STRING,\n SOME_OPTIONS,\n SOME_DEFAULT,\n should_ask=lambda answers: True,\n value_if_not_asked=\"a\",\n )",
"def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"",
"def test_models_edx_ui_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.name == \"problem_check\"",
"def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )",
"def test_models_edx_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.page == \"x_module\"",
"def test_models_edx_ui_problem_show_with_valid_statement(statement):\n assert statement.event_type == \"problem_show\"\n assert statement.name == \"problem_show\"",
"def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})",
"def test_invalid_dropdown_xml(self):\n problem_xml = textwrap.dedent(\"\"\"\n <problem>\n <optionresponse>\n <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown\n problems. Edit this component to replace this template with your own assessment.</p>\n <label>Add the question text, or prompt, here. This text is required.</label>\n <description>You can add an optional tip or note related to the prompt like this. </description>\n <optioninput>\n <option correct=\"False\">an incorrect answer</option>\n <option correct=\"True\">the correct answer</option>\n <option correct=\"True\">an incorrect answer</option>\n </optioninput>\n </optionresponse>\n </problem>\n \"\"\")\n with pytest.raises(Exception):\n CapaFactory.create(xml=problem_xml)",
"def test_check_options_exception(self, hp, opts):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_is_in_options(hp, opts, msg=\"XXX\")",
"def test_models_edx_problem_rescore_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_rescore_fail\"\n assert statement.page == \"x_module\"",
"def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def check_choice_validity(tokens_choice_inside):\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. \" +\n \"Did you forget to escape the choice separator?\")",
"def test_models_edx_ui_problem_reset_with_valid_statement(statement):\n assert statement.event_type == \"problem_reset\"\n assert statement.name == \"problem_reset\"",
"def expect(condition, error_msg, exc_type=SystemExit, error_prefix=\"ERROR:\"):\n###############################################################################\n if not condition:\n msg = error_prefix + \" \" + error_msg\n raise exc_type(msg)",
"def test_models_edx_ui_problem_graded_with_valid_statement(statement):\n assert statement.event_type == \"problem_graded\"\n assert statement.name == \"problem_graded\"",
"def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that an `otherwise` directive outside of a `choose` directive is reported as an error. | def test_otherwise_outside_choose(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:otherwise="" />
</doc>""")
self.assertRaises(TemplateRuntimeError, str, tmpl.generate()) | [
"def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def testSyntaxErrorDoubleElse(self):\n template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def test_value_if_not_asked__value_if_not_asked_is_not_option__raises_exception():\n with pytest.raises(\n ValueError,\n match=\"The value_if_not_asked is not one of the options.\",\n ):\n Choice(\n SOME_NAME,\n SOME_STRING,\n SOME_OPTIONS,\n SOME_DEFAULT,\n should_ask=lambda answers: True,\n value_if_not_asked=\"a\",\n )",
"def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )",
"def test_incorrect_else_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Else_Stmt(line)\n assert \"Cpp_Else_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"",
"def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertFalse(res['finally'])",
"def test_invalid_dropdown_xml(self):\n problem_xml = textwrap.dedent(\"\"\"\n <problem>\n <optionresponse>\n <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown\n problems. Edit this component to replace this template with your own assessment.</p>\n <label>Add the question text, or prompt, here. This text is required.</label>\n <description>You can add an optional tip or note related to the prompt like this. </description>\n <optioninput>\n <option correct=\"False\">an incorrect answer</option>\n <option correct=\"True\">the correct answer</option>\n <option correct=\"True\">an incorrect answer</option>\n </optioninput>\n </optionresponse>\n </problem>\n \"\"\")\n with pytest.raises(Exception):\n CapaFactory.create(xml=problem_xml)",
"def verify_template_is_not_available(self,template):\n field=npsp_lex_locators[\"adv_mappings\"][\"field_mapping\"].format(\"Template\")\n self.selenium.click_element(field)\n element=self.selenium.get_webelement(field)\n status=element.get_attribute(\"aria-activedescendant\")\n if status is not None:\n self.selenium.page_should_not_contain(template)\n else:\n self.selenium.wait_until_page_contains(\"Default Gift Entry Template\")\n self.selenium.page_should_not_contain(template) \n self.selenium.click_button(\"Cancel\")",
"def check_choice_validity(tokens_choice_inside):\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. \" +\n \"Did you forget to escape the choice separator?\")",
"def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def test_preference_invalid_fail(lfric_sst):\n bbox = panel(\"africa\")\n emsg = \"Expected a preference of 'cell' or 'center' or 'point'\"\n with pytest.raises(ValueError, match=emsg):\n _ = bbox.enclosed(lfric_sst, preference=\"invalid\")",
"def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})",
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def test_incorrect_endif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Endif_Stmt(line)\n assert \"Cpp_Endif_Stmt: '{0}'\".format(line) in str(excinfo.value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a `when` directive that doesn't have a `test` attribute is reported as an error. | def test_when_without_test(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<py:when>foo</py:when>
</div>
</doc>""")
self.assertRaises(TemplateRuntimeError, str, tmpl.generate()) | [
"def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"",
"def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )",
"def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_models_edx_ui_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.name == \"problem_check\"",
"def test_models_edx_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.page == \"x_module\"",
"def test_invalid_assignment():\n with pytest.raises(TypeError):\n MeasurementRun(\"name\", spec=Condition(\"value of pi\", value=NominalReal(3.14159, '')))\n with pytest.raises(TypeError):\n MeasurementRun(\"name\", material=FileLink(\"filename\", \"url\"))",
"def test_invalid(sourcextractor):\n run = sourcextractor('--this-is-not-a-valid-flag')\n assert run.exit_code > 0\n assert 'unrecognised' in run.stderr",
"def test_models_edx_problem_rescore_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_rescore_fail\"\n assert statement.page == \"x_module\"",
"def test_incorrect_if_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_If_Stmt(line)\n assert \"Cpp_If_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def test_signal_describe_fail():\n signal = Signal(name=\"the_none_signal\", value=None)\n with pytest.raises(ValueError) as excinfo:\n signal.describe()\n assert \"failed to describe 'the_none_signal' with value 'None'\" in str(\n excinfo.value\n )",
"def test_extract_device_name_invalid():\n with pytest.raises(AttributeError):\n assert grml2usb.extract_device_name(\"/dev\")\n with pytest.raises(AttributeError):\n assert grml2usb.extract_device_name(\"foobar\")",
"def raises_assertion():\n return pytest.raises(AssertionError)",
"def test_dont_raise(self):\n with self.assert_doesnt_raise():\n pass",
"def test_models_edx_reset_problem_fail_with_valid_statement(statement):\n assert statement.event_type == \"reset_problem_fail\"\n assert statement.page == \"x_module\"",
"def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})",
"def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def test_models_edx_ui_problem_show_with_valid_statement(statement):\n assert statement.event_type == \"problem_show\"\n assert statement.name == \"problem_show\"",
"def test_invalid_spec(self):\n invalid_spec = {\n \"target\": \"abc\",\n \"modes\": 2,\n \"compiler\": [\"Xcov\"],\n }\n with pytest.raises(\n ValueError, match=r\"missing the following keys: \\['gate_parameters', 'layout'\\]\"\n ):\n Device(spec=invalid_spec)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that an `otherwise` directive can be used without a `test` attribute. | def test_otherwise_without_test(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<py:otherwise>foo</py:otherwise>
</div>
</doc>""")
self.assertEqual("""<doc>
foo
</doc>""", tmpl.generate().render(encoding=None)) | [
"def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def testSyntaxErrorDoubleElse(self):\n template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def testTagPresenceElse(self):\n template = '{{ ifpresent [tag] }} yes {{ else }} no {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' yes')\n self.assertEqual(self.parse(template), ' no')",
"def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def testLoopAbsentIndex(self):\n template = '{{ for item in [tag:absent] }} x {{ endfor }}'\n self.assertFalse(self.parse(template, tag='absent'))",
"def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )",
"def test_incorrect_else_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Else_Stmt(line)\n assert \"Cpp_Else_Stmt: '{0}'\".format(line) in str(excinfo.value)",
"def test_standard_failure(self):\n class Resource(object):\n @guard.guard(make_checker(False))\n def denied(self, request):\n pass\n request = http.Request.blank('/')\n try:\n Resource().denied(request)\n except http.UnauthorizedError, e:\n response = e.make_response()\n assert response.headers['Content-Type'] == 'text/plain'\n assert response.body == \"\"\"401 Unauthorized\\n\\nchecker #1 failed\\n\"\"\"\n else:\n self.fail()",
"def test_stop_not_defined(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = False\n self._hyp.login()\n self.assertRaisesRegex(RuntimeError, \"is not defined\",\n self._hyp.stop, guest_name, parameters_stop)",
"def skip_validate(request):\n if request.node.get_closest_marker('validate'):\n if not request.config.getoption(\"validate\"):\n pytest.skip('Validation tests not requested.')",
"def test_eat_unhealthy(self):\n self.assertEqual(\n eat(\"pizza\", is_healthy=False),\n \"I'm eating pizza, and I don't care.\",\n )",
"def test_html_with_no_visitors_planned(self):\n message = 'Você não possui entradas autorizadas.'\n self.assertContains(self.resp, message)",
"def test_eat_unhealthy(self):\n self.assertEqual(\n eat(\"pizza\", is_healthy=False),\n \"I'm eating pizza, because YOLO\"\n )",
"def test_saml_disabled(self):\n self.enable_saml(enabled=False)\n response = self.client.get(self.METADATA_URL)\n assert response.status_code == 404",
"def assert_not_unitary(self, variable):\n self.assertNotEqual(\n variable.by_treatment('As is'), variable.by_treatment('To be'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a named template function with a strip directive actually strips off the outer element. | def test_function_with_strip(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:def="echo(what)" py:strip="">
<b>${what}</b>
</div>
${echo('foo')}
</doc>""")
self.assertEqual("""<doc>
<b>foo</b>
</doc>""", tmpl.generate().render(encoding=None)) | [
"def test_when_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <span py:otherwise=\"\">foo</span>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <span>foo</span>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_namespace_on_removed_elem(self):\r\n tmpl = MarkupTemplate(\"\"\"<?xml version=\"1.0\"?>\r\n <Test xmlns:py=\"http://genshi.edgewall.org/\">\r\n <Size py:if=\"0\" xmlns:t=\"test\">Size</Size>\r\n <Item/>\r\n </Test>\"\"\")\r\n self.assertEqual(\"\"\"<?xml version=\"1.0\"?>\\n<Test>\r\n \r\n <Item/>\r\n </Test>\"\"\", str(tmpl.generate()))",
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def testTemplateConditional(self):\n template = '{{ if [a] == \"foo\" }} foo [b] {{ else }} bar [b] {{ endif }}'\n self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))",
"def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))",
"def isShadowed(self, name, element):\n\t\tvalue=self.resolve(name)[1]\n\t\treturn (value and (value != element))",
"def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')",
"def testNonexistantFuntion(self):\n template = 'This tag function is missing [num|zoink].'\n self.assertEqual(self.parse(template), template)\n # Error is only thrown if we actually pass an argument for the tag:\n self.assertRaises(templateparser.TemplateNameError,\n self.parse, template, num=1)",
"def testInlineExisting(self):\n self.parser['template'] = self.tmpl('This is a subtemplate by [name].')\n template = '{{ inline template }}'\n expected = 'This is a subtemplate by Elmer.'\n self.assertEqual(self.parse(template, name='Elmer'), expected)",
"def is_valid_in_template(var, attr):\n # Remove private variables or methods\n if attr.startswith('_'):\n return False\n # Remove any attributes that raise an acception when read\n try:\n value = getattr(var, attr)\n except:\n return False\n if isroutine(value):\n # Remove any routines that are flagged with 'alters_data'\n if getattr(value, 'alters_data', False):\n return False\n else:\n # Remove any routines that require arguments\n try:\n argspec = getargspec(value)\n num_args = len(argspec.args) if argspec.args else 0\n num_defaults = len(argspec.defaults) if argspec.defaults else 0\n if num_args - num_defaults > 1:\n return False\n except TypeError:\n # C extension callables are routines, but getargspec fails with\n # a TypeError when these are passed.\n pass\n return True",
"def test_clean_uses_noop_sanitizer(self):\n mixin = SanitizerMixin()\n self.assertEqual(noop, mixin.get_sanitizer())",
"def test_strip_grid_from_name_basic(self):\n result = _strip_grid_from_name(\"atm_grid\")\n self.assertEqual(result, \"atm\")",
"def testSimpleClosureWithoutArguments(self):\n template = '[tag|limit()]'\n result = self.parse(template, tag=self.tag)\n self.assertEqual(result, self.tag[:80])",
"def test_decompose_definition(definition, static_part, expected):\n assert (\n templates_utils.decompose_definition(definition, static_part)\n == expected\n )",
"def test_tokenize_strip(self):\r\n input = \"((' <this> \\\"\\\" 'text' has (lots) of (special chars} >>]\"\r\n output = [ (\"<this>\",4),(\"text\",15),(\"has\",21),(\"lots\",26),(\"of\",32),\r\n (\"special\",36),(\"chars}\",44),(\">>\",51)]\r\n self.assertEqual(output,[i for i in basic_tokenize(input)])\r\n for (itmO,itmV) in zip(output,basic_tokenize(input)):\r\n self.assertEqual(itmO,itmV)",
"def test_barname_stripper(self):\n assert bu.stripper(\"base-nto+armle-v7+signed.bar\") == \"base\"",
"def testComplexClosureWithoutArguments(self):\n template = '[tag|strlimit()]'\n result = self.parse(template, tag=self.tag)\n self.assertEqual(len(result), 83)\n self.assertEqual(result[:80], self.tag[:80])\n self.assertEqual(result[-3:], '...')",
"def test_unpatch(self):\n unpatch()\n self.assert_is_not_wrapped(flask.render_template)\n self.assert_is_not_wrapped(flask.render_template_string)\n self.assert_is_not_wrapped(flask.templating._render)",
"def _mwpfh_passes(self, func):\n failing = has_module('wikitextparser')\n patterns = [\n '{{subst:a|b=c}}',\n '{{safesubst:a|b=c}}',\n '{{msgnw:a|b=c}}',\n '{{subst::a|b=c}}'\n ]\n context = self.assertRaises(AssertionError) \\\n if failing else nullcontext()\n\n for template in patterns:\n with self.subTest(template=template, failing=failing):\n name = template.strip('{}').split('|')[0]\n with context:\n self.assertEqual(func(template),\n [(name, OrderedDict((('b', 'c'), )))])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the directive works as expected in a text template. | def test_in_text_template(self):
tmpl = TextTemplate("""
#def echo(greeting, name='world')
${greeting}, ${name}!
#end
${echo('Hi', name='you')}
""")
self.assertEqual("""
Hi, you!
""", tmpl.generate().render(encoding=None)) | [
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))",
"def testInlineExisting(self):\n self.parser['template'] = self.tmpl('This is a subtemplate by [name].')\n template = '{{ inline template }}'\n expected = 'This is a subtemplate by Elmer.'\n self.assertEqual(self.parse(template, name='Elmer'), expected)",
"def test_unexpandedLiteral(self):\n self.assertEqual(\n u\"hello world\",\n self.expandToText(ConceptTemplate(u\"hello world\"), {}))",
"def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')",
"def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_templates(self):\n\t\tpass",
"def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())",
"def testCompareTag(self):\n template = '{{ if [variable] == 5 }} foo {{ endif }}'\n self.assertFalse(self.parse(template, variable=0))\n self.assertFalse(self.parse(template, variable=12))\n self.assertTrue(self.parse(template, variable=5))",
"def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/centered_text.html', 'The templates were not the same')",
"def testTemplateConditional(self):\n template = '{{ if [a] == \"foo\" }} foo [b] {{ else }} bar [b] {{ endif }}'\n self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))",
"def test_form_template_i18n():",
"def testAddTemplate(self):\n parser = templateparser.Parser()\n self.assertEqual(len(parser), 0)\n parser.AddTemplate(self.name)\n self.assertEqual(len(parser), 1)\n self.assertEqual(parser[self.name], self.template)",
"def testTagPresenceElse(self):\n template = '{{ ifpresent [tag] }} yes {{ else }} no {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' yes')\n self.assertEqual(self.parse(template), ' no')",
"def test_is_placeholder(placeholder, expected):\n assert templates_utils.is_placeholder(placeholder=placeholder) == expected",
"def _test_template(self, template_path):\n full_path = os.path.join(self.template_dir, template_path)\n doc = lxml.html.parse(full_path)\n expecting_vuln = _get_expecting_vuln(doc)\n templ = loader.get_template(template_path)\n context = parse_template.get_default_context()\n templ.render(context)\n methods = [\n parse_template.get_non_js_escaped_results_for_template,\n parse_template.get_non_quoted_attr_vars_for_template\n ]\n for method in methods:\n for result in method(templ):\n self.csw.handle_callback(result)\n self.assertEqual(len(self.csw.results), len(expecting_vuln))\n for result, expected in zip(self.csw.results, expecting_vuln):\n line_no = result.get_line_number()\n part = result.get_vulnerability_text()\n filename = result.get_filename()\n var = str(result._var_node.filter_expression.var)\n self.assertEqual(line_no, expected['line_number'])\n self.assertEqual(var, expected['name'])\n self.assertEqual(filename, full_path)\n self.assertTrue(var in part)",
"def test_render_template_doc(mock_rts, mock_markup):\n test_string = 'some string'\n test_doc = Mock(spec=yattag.Doc)\n test_doc.getvalue.return_value = test_string\n template.render_template(test_doc)\n test_doc.getvalue.assert_called_once_with()\n mock_rts.assert_called_once_with(test_string)\n mock_markup.assert_called_once_with(mock_rts(test_string))",
"def testSafeString(self):\n template = 'Template without any tags'\n parsed_template = self.tmpl(template).Parse()\n self.assertTrue(isinstance(parsed_template, templateparser.SafeString))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a named template function using "star arguments" works as expected. | def test_function_with_star_args(self):
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:def="f(*args, **kwargs)">
${repr(args)}
${repr(sorted(kwargs.items()))}
</div>
${f(1, 2, a=3, b=4)}
</doc>""")
self.assertEqual("""<doc>
<div>
[1, 2]
[('a', 3), ('b', 4)]
</div>
</doc>""", tmpl.generate().render(encoding=None)) | [
"def test_variable_arguments(self):\n def foo(*args):\n return tuple(args)\n provider = FunctionProvider(foo)\n wrapped_function = provider()\n self.assertSequenceEqual(wrapped_function(1, 2), (1, 2))\n self.assertSequenceEqual(wrapped_function(1), (1,))",
"def foo4(_, *, _): # [duplicate-argument-name, duplicate-argument-name]",
"def test_passed_simplePositional(self):\n\n def func(a, b):\n pass\n\n self.assertEqual(self.checkPassed(func, 1, 2), dict(a=1, b=2))",
"def testNonexistantFuntion(self):\n template = 'This tag function is missing [num|zoink].'\n self.assertEqual(self.parse(template), template)\n # Error is only thrown if we actually pass an argument for the tag:\n self.assertRaises(templateparser.TemplateNameError,\n self.parse, template, num=1)",
"def test_Tucker_args():\n testing_function_with_args('tucker')",
"def test_is_placeholder(placeholder, expected):\n assert templates_utils.is_placeholder(placeholder=placeholder) == expected",
"def test_Complex_args():\n testing_function_with_args('complex')",
"def add_call_starargs(node, name):\n value = name\n if isinstance(name, str):\n value = ast.Name(id=name, ctx=ast.Load())\n\n starred = ast.Starred(\n value=value,\n ctx=ast.Load()\n )\n node.args.append(starred)",
"def test_python_callable_arguments_are_templatized(self):\n recorded_calls = []\n\n # Create a named tuple and ensure it is still preserved\n # after the rendering is done\n Named = namedtuple('Named', ['var1', 'var2'])\n named_tuple = Named('{{ ds }}', 'unchanged')\n\n task = PythonSensor(\n task_id='python_sensor',\n timeout=0.01,\n poke_interval=0.3,\n # a Mock instance cannot be used as a callable function or test fails with a\n # TypeError: Object of type Mock is not JSON serializable\n python_callable=build_recording_function(recorded_calls),\n op_args=[4, date(2019, 1, 1), \"dag {{dag.dag_id}} ran on {{ds}}.\", named_tuple],\n dag=self.dag,\n )\n\n self.dag.create_dagrun(\n run_type=DagRunType.MANUAL,\n execution_date=DEFAULT_DATE,\n start_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n with self.assertRaises(AirflowSensorTimeout):\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n ds_templated = DEFAULT_DATE.date().isoformat()\n # 2 calls: first: at start, second: before timeout\n self.assertEqual(2, len(recorded_calls))\n self._assert_calls_equal(\n recorded_calls[0],\n Call(\n 4,\n date(2019, 1, 1),\n f\"dag {self.dag.dag_id} ran on {ds_templated}.\",\n Named(ds_templated, 'unchanged'),\n ),\n )",
"def test_special_names():",
"def foo1(_, _): # [duplicate-argument-name, duplicate-argument-name]",
"def test_HoLE_args():\n testing_function_with_args('hole')",
"def test_extract_parameter_name(variable, name):\n assert tp.get_name(tp.VARIABLE(variable)) == name",
"def test_DistMult_args():\n testing_function_with_args('distmult')",
"def test_match_with_params(self):\n string = 'The {{quick|brown}} fox'\n builder = MultiTemplateMatchBuilder(self.site)\n self.assertIsNotNone(re.search(builder.pattern('quick'), string))\n self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),\n self._template_not_case_sensitive)",
"def check_partial(sig, args, kwargs):\n num_pos_only, func, keyword_exclude, sigspec = sig\n if len(args) < num_pos_only:\n pad = (None,) * (num_pos_only - len(args))\n args = args + pad\n if keyword_exclude:\n kwargs = dict(kwargs)\n for item in keyword_exclude:\n kwargs.pop(item, None)\n return is_partial_args(func, args, kwargs, sigspec)",
"def test_TransD_args():\n testing_function_with_args('transd')",
"def test_python_callable_keyword_arguments_are_templatized(self):\n recorded_calls = []\n\n task = PythonSensor(\n task_id='python_sensor',\n timeout=0.01,\n poke_interval=0.3,\n # a Mock instance cannot be used as a callable function or test fails with a\n # TypeError: Object of type Mock is not JSON serializable\n python_callable=build_recording_function(recorded_calls),\n op_kwargs={\n 'an_int': 4,\n 'a_date': date(2019, 1, 1),\n 'a_templated_string': \"dag {{dag.dag_id}} ran on {{ds}}.\",\n },\n dag=self.dag,\n )\n\n self.dag.create_dagrun(\n run_type=DagRunType.MANUAL,\n execution_date=DEFAULT_DATE,\n start_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n with self.assertRaises(AirflowSensorTimeout):\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n # 2 calls: first: at start, second: before timeout\n self.assertEqual(2, len(recorded_calls))\n self._assert_calls_equal(\n recorded_calls[0],\n Call(\n an_int=4,\n a_date=date(2019, 1, 1),\n a_templated_string=\"dag {} ran on {}.\".format(\n self.dag.dag_id, DEFAULT_DATE.date().isoformat()\n ),\n ),\n )",
"def has_arg(func, argname):\n return argname in getargspec(func)[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify an empty 'for' value is an error | def test_for_with_empty_value(self):
try:
MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="">
empty
</py:for>
</doc>""", filename='test.html').generate()
self.fail('ExpectedTemplateSyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] > (2,4):
self.assertEqual(2, e.lineno) | [
"def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError as e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)",
"def testInvalidValueWhileIterating(self):\n self.assertRaises(ValueError,\n list,\n self.manager.snimpyInvalidDescr.iteritems())",
"def ensure_empty(gen):\n try:\n next(gen)\n return False\n except StopIteration:\n return True",
"def test_submit_message_missing_data(self):\n default_args = [self.valid_msg, 1, 'day']\n for i in range(0, 3):\n args = default_args\n args[i] = None\n r = self._submit_message(*args)\n self.assertIn(err_messages['incomplete_form'], r.data)",
"def test_fail_tails_empty(self):\n self.assertFilterErrors(\n {\n 'tails': [],\n },\n\n {\n 'tails': [f.Required.CODE_EMPTY],\n },\n )",
"def testEmpty(self):\n assert Iter.empty(Iter.filter(self.alwayserror,\n self.empty())), \\\n \"Filtering an empty iterator should result in empty iterator\"",
"def testLoopAbsentIndex(self):\n template = '{{ for item in [tag:absent] }} x {{ endfor }}'\n self.assertFalse(self.parse(template, tag='absent'))",
"def has_error(self):\n return self._has_error",
"def test_param_invalid_output_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.data, self.dataempty, 100.0)",
"def iter_is_empty(i):\n try:\n i.next()\n except StopIteration:\n return True\n return False",
"def _check_empty(key, value, empty):\n if not empty and not value:\n raise Exception(\"{} is empty, expecting a value\".format(key))\n elif empty and value:\n raise Exception(\n \"{} is suppose to be empty. value: {} exists\".format(key, value)\n )",
"def test_iter_error():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tspec = SkySurvey(tuner_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False",
"def _all_data_fields_are_empty(value):\n if not value:\n return True\n for v in value.values():\n if v:\n return False\n\n return True",
"def testEmpty(self):\n assert Iter.equal(self.empty(), iter([]))",
"def has_error(self) -> bool:\n return len(self.errors) > 0",
"def test_index_error_with_data():\n test_list = OffByOneList([])\n for k in (0, 4):\n with pytest.raises(IndexError):\n test_list[k]",
"def test_bug_form_no_data(self):\n\n form = BugForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEquals(len(form.errors), 3)",
"def test_incorrect_value_passed(self) -> None:\n for incorrect_value in self.INCORRECT_VALUES:\n with pytest.raises(\n AEAValidationError,\n match=\"The following errors occurred during validation:\",\n ):\n self._make_configuration(incorrect_value)",
"def test_param_invalid_input_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.dataempty, self.dataout, 100.0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that outputting context data in attributes escapes quotes. | def test_attr_escape_quotes(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<elem class="$myvar"/>
</div>""")
self.assertEqual("""<div>
          <elem class="&#34;foo&#34;"/>
</div>""", str(tmpl.generate(myvar='"foo"'))) | [
"def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quotes)\n node1.quotes = \"'\"\n node2.quotes = None\n self.assertEqual(\"'\", node1.quotes)\n self.assertIs(None, node2.quotes)\n self.assertRaises(ValueError, setattr, node1, \"quotes\", \"foobar\")\n self.assertRaises(ValueError, setattr, node3, \"quotes\", None)\n self.assertRaises(ValueError, Attribute, wraptext(\"id\"),\n wraptext(\"foo bar baz\"), None)",
"def attributeEscapingDoneOutside(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n return data",
"def quoteattr(data, entities={}):\r\n data = escape(data, entities)\r\n if '\"' in data:\r\n if \"'\" in data:\r\n data = '\"%s\"' % data.replace('\"', \""\")\r\n else:\r\n data = \"'%s'\" % data\r\n else:\r\n data = '\"%s\"' % data\r\n return data",
"def test_text_noescape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n $myvar\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n \"foo\"\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))",
"def test_escape(fb, fb_secure):\n\n assert fb.escape('This has \"quotes\"') == 'This has \\\\\"quotes\\\\\"'\n assert fb.escape('This has a backslash \\\\') == 'This has a backslash \\\\\\\\'\n assert fb.escape('This has \\\\\"both\\\\\"') == 'This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"'",
"def test_section__context_values(self):\n template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'\n context = {'test': {'foo': '<'}}\n\n self._assert_render(u'unescaped: < escaped: <', template, context)",
"def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'"'))\n return _write",
"def test_strip_quotes(self):\n self.assertEqual(strip_quotes(\"'text'\"), \"text\")\n self.assertEqual(strip_quotes('\"text\"'), \"text\")",
"def test_tag_with_double_quote(self):\n code, out, err = self.t(\"start 1h ago 'this is a \\\"test\\\"'\")\n self.assertIn(\"Note: '\\\"this is a \\\\\\\"test\\\\\\\"\\\"' is a new tag\", out)\n self.t(\"stop\")\n self.t(\"delete @1\")",
"def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)",
"def test__escape(self):\n engine = Renderer(escape=lambda s: \"**\" + s)\n\n self._assert_render(u'**bar', '{{foo}}', {'foo': 'bar'}, engine=engine)",
"def test_normalize_quotes_1(self):\n text = 'This is a test that shoudln\\'t change anything.'\n clean_text = normalize_quotes(text, default_quote='\"', quotes=None)\n self.assertEquals(clean_text, text)",
"def test_html_escape(user_input, expected_result):\n assert expected_result == escape_user_input(user_input)",
"def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n escape=lambda s: \"**\" + s)\n\n template = 'literal: {{{foo}}} escaped: {{foo}}'\n context = {'foo': 'bar'}\n\n self._assert_render(u'literal: BAR escaped: **bar', template, context, engine=engine)",
"def test_simple_pi_with_double_quotes(self):\n pi_data = u\"\"\" \\t att=\"value\"\\n \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"att\": u\"value\"})",
"def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]",
"def test_simple_pi_with_simple_quotes(self):\n pi_data = u\"\"\" \\t att='value'\\n \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"att\": u\"value\"})",
"def testAddHTMLTags_WebsiteInQuotes(self):\n self.doTestAddHTMLTags(\n 'test \"http://www.example.com\".',\n ('test "<a href=\"http://www.example.com\">'\n 'http://www.example.com</a>".'))",
"def quoted_attribute_value(self, value):\n quote_with = '\"'\n if '\"' in value:\n if \"'\" in value:\n replace_with = '"'\n value = value.replace('\"', replace_with)\n else:\n quote_with = \"'\"\n return quote_with + value + quote_with"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that a namespace declaration on an element that is removed from the generated stream does not get pushed up to the next nonstripped element (see ticket 107). | def test_namespace_on_removed_elem(self):
tmpl = MarkupTemplate("""<?xml version="1.0"?>
<Test xmlns:py="http://genshi.edgewall.org/">
<Size py:if="0" xmlns:t="test">Size</Size>
<Item/>
</Test>""")
self.assertEqual("""<?xml version="1.0"?>\n<Test>
<Item/>
</Test>""", str(tmpl.generate())) | [
"def check_namespace(self):\n if not self.tree:\n self.xml_validate()\n \n root = self.tree.getroot()\n self.namespace = root.get(\"targetNamespace\")\n if self.namespace is None:\n self.namespace = \"\"",
"def remove_namespace(doc, namespace):\n ns = u'{%s}' % namespace\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]",
"def CheckEnd(self, filename, clean_lines, linenum, error):\n line = clean_lines.raw_lines[linenum]\n\n # Check how many lines is enclosed in this namespace. Don't issue\n # warning for missing namespace comments if there aren't enough\n # lines. However, do apply checks if there is already an end of\n # namespace comment and it's incorrect.\n #\n # TODO(unknown): We always want to check end of namespace comments\n # if a namespace is large, but sometimes we also want to apply the\n # check if a short namespace contained nontrivial things (something\n # other than forward declarations). There is currently no logic on\n # deciding what these nontrivial things are, so this check is\n # triggered by namespace size only, which works most of the time.\n if (linenum - self.starting_linenum < 10\n and not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\b', line)):\n return\n\n # Look for matching comment at end of namespace.\n #\n # Note that we accept C style \"/* */\" comments for terminating\n # namespaces, so that code that terminate namespaces inside\n # preprocessor macros can be cpplint clean.\n #\n # We also accept stuff like \"// end of namespace <name>.\" with the\n # period at the end.\n #\n # Besides these, we don't accept anything else, otherwise we might\n # get false negatives when existing comment is a substring of the\n # expected namespace.\n if self.name:\n # Named namespace\n if not Match((r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\s+' +\n regex.escape(self.name) + r'[\\*/\\.\\\\\\s]*$'),\n line):\n error(filename, linenum, 'readability/namespace', 5,\n 'Namespace should be terminated with \"// namespace %s\"' %\n self.name)\n else:\n # Anonymous namespace\n if not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace[\\*/\\.\\\\\\s]*$', line):\n # If \"// namespace anonymous\" or \"// anonymous namespace (more text)\",\n # mention \"// anonymous namespace\" as an acceptable form\n if Match(r'^\\s*}.*\\b(namespace anonymous|anonymous namespace)\\b', line):\n error(filename, linenum, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"'\n ' or \"// anonymous namespace\"')\n else:\n error(filename, linenum, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"')",
"def namespaceExit(self):\n self.doc.namespaceExit(self.p.ident)\n self.doc.namespaceExit(\"stromx\")\n self.doc.blank()",
"def namespaceMismatch(self, linkingPage, linkedPage, counter) -> bool:\n if linkedPage in self.found_in:\n # We have seen this page before, don't ask again.\n return False\n\n if self.origin and self.origin.namespace() != linkedPage.namespace():\n # Allow for a mapping between different namespaces\n crossFrom = self.origin.site.family.crossnamespace.get(\n self.origin.namespace(), {})\n crossTo = crossFrom.get(self.origin.site.lang,\n crossFrom.get('_default', {}))\n nsmatch = crossTo.get(linkedPage.site.lang,\n crossTo.get('_default', []))\n if linkedPage.namespace() in nsmatch:\n return False\n\n if self.conf.autonomous:\n pywikibot.info(\n 'NOTE: Ignoring link from page {} in namespace'\n ' {} to page {} in namespace {}.'\n .format(linkingPage, linkingPage.namespace(), linkedPage,\n linkedPage.namespace()))\n # Fill up found_in, so that we will not write this notice\n self.found_in[linkedPage] = [linkingPage]\n return True\n\n preferredPage = self.getFoundInCorrectNamespace(linkedPage.site)\n if preferredPage:\n pywikibot.info(\n 'NOTE: Ignoring link from page {} in namespace {} to '\n 'page {} in namespace {} because page {} in the '\n 'correct namespace has already been found.'\n .format(linkingPage, linkingPage.namespace(),\n linkedPage, linkedPage.namespace(),\n preferredPage))\n return True\n\n choice = pywikibot.input_choice(\n 'WARNING: {} is in namespace \"{}\", but {} is in '\n 'namespace \"{}\". Follow it anyway?'\n .format(self.origin, self.origin.namespace(),\n linkedPage, linkedPage.namespace()),\n [('Yes', 'y'), ('No', 'n'),\n ('Add an alternative', 'a'), ('give up', 'g')],\n automatic_quit=False)\n\n if choice != 'y':\n # Fill up found_in, so that we will not ask again\n self.found_in[linkedPage] = [linkingPage]\n if choice == 'g':\n self.makeForcedStop(counter)\n elif choice == 'a':\n newHint = pywikibot.input(\n 'Give the alternative for language {}, not '\n 'using a language code:'\n .format(linkedPage.site.lang))\n if newHint:\n alternativePage = pywikibot.Page(\n linkedPage.site, newHint)\n # add the page that was entered by the user\n self.addIfNew(alternativePage, counter, None)\n else:\n pywikibot.info(\n f'NOTE: ignoring {linkedPage} and its interwiki links')\n return True\n\n # same namespaces, no problem\n # or no origin page yet, also no problem\n return False",
"def __strip_ns__(self, tree):\n\t\tfor node in tree.iter():\n\t\t\ttry:\n\t\t\t\thas_namespace = node.tag.startswith('{')\n\t\t\texcept AttributeError:\n\t\t\t\tcontinue\n\t\t\tif has_namespace:\n\t\t\t\tnode.tag = node.tag.split('}', 1)[1]",
"def test_validate_namespace_fail_without_prefix(self):\n namespace = 'telemetry.switches.1.interfaces.232.bytes_in'\n\n with self.assertRaises(NamespaceError):\n influx._validate_namespace(namespace)",
"def testXMLWithUknownData(self):\n self.XMLSchemaService.loadSchema('http://queue.amazonaws.com/doc/2008-01-01/QueueService.xsd', self)\n self.runLoop.run()\n assert(self.schema)\n parser = self.schema.newParser()\n parser.feed(message_response_with_uknown_elements)\n result = parser.finish()\n self.assertEqual('8f2770293f9b94ad705d5fd742f5f885', result.ReceiveMessageResult.Message[0].MD5OfBody)",
"def remove_namespace(xml):\n xmlepured = re.sub(pattern=' xmlns=\"[^\"]+\"', repl=\"\", string=xml, flags=0)\n xmlepured = xmlepured.encode(\"utf-8\")\n return xmlepured",
"def has_html_ns(el: bs4.Tag) -> bool:\n\n ns = getattr(el, 'namespace') if el else None\n return bool(ns and ns == NS_XHTML)",
"def test_ns_tag():\r\n namespaces = ['http://purl.org/dc/elements/1.1/',\r\n 'urn:schemas-upnp-org:metadata-1-0/upnp/',\r\n 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/']\r\n for ns_in, namespace in zip(['dc', 'upnp', ''], namespaces):\r\n res = data_structures.ns_tag(ns_in, 'testtag')\r\n correct = '{{{}}}{}'.format(namespace, 'testtag')\r\n assert res == correct",
"def is_namespace(self, ):\n\t\tpass",
"def soap_element_ns(self):\n if not self.acs_session.soap_namespaces:\n # unable to get soap namespaces for this acs session, return unchanged\n return self.soap_element\n\n # loop over namespaces and find the right one\n for namespace, uri in self.acs_session.soap_namespaces.items():\n if uri in self.soap_element:\n # found the matching namespace\n return self.soap_element.replace(\"{%s}\" % uri, \"%s:\" % namespace)\n\n # this is either an unknown uri or a non-namespaced soap element\n return self.soap_element",
"def in_namespace(namespace):\n if not namespace:\n yield\n return\n\n org_netns_fd = os.open(PROCESS_NETNS, os.O_RDONLY)\n pynetns.setns(namespace)\n try:\n yield\n finally:\n try:\n # NOTE(cby): this code is not executed only if we fail to\n # move in target namespace\n pynetns.setns(org_netns_fd)\n except Exception as e:\n msg = _('Failed to move back in original netns: %s') % e\n LOG.critical(msg)\n raise BackInNamespaceExit(msg)",
"def testExpectationRemoval(self):\n contents = validate_tag_consistency.TAG_HEADER + \"\"\"\n\n# This is a test comment\ncrbug.com/1234 [ win ] foo/test [ Failure ]\ncrbug.com/2345 [ win ] foo/test [ RetryOnFailure ]\n\n# Another comment\n[ linux ] bar/test [ RetryOnFailure ]\n[ win ] bar/test [ RetryOnFailure ]\n\"\"\"\n\n stale_expectations = [\n data_types.Expectation('foo/test', ['win'], ['Failure']),\n data_types.Expectation('bar/test', ['linux'], ['RetryOnFailure'])\n ]\n\n expected_contents = validate_tag_consistency.TAG_HEADER + \"\"\"\n\n# This is a test comment\ncrbug.com/2345 [ win ] foo/test [ RetryOnFailure ]\n\n# Another comment\n[ win ] bar/test [ RetryOnFailure ]\n\"\"\"\n\n with open(self.filename, 'w') as f:\n f.write(contents)\n\n removed_urls = expectations.RemoveExpectationsFromFile(\n stale_expectations, self.filename)\n self.assertEqual(removed_urls, set(['crbug.com/1234']))\n with open(self.filename) as f:\n self.assertEqual(f.read(), expected_contents)",
"def isAbsentNamespace (self):\n return self.__uri is None",
"def test_validate_namespace_success(self):\n namespace = 'kytos.kronos.telemetry.switches.1.interfaces.232.bytes_in'\n\n result = influx._validate_namespace(namespace)\n\n self.assertEqual(result, True)",
"def test_unregister_sequence_decl3(collector, sequence_decl):\n collector.contributions['ecpy_pulses.BaseSequence'] = SequenceInfos()\n sequence_decl.sequence = 'ecpy_pulses.BaseSequence'\n sequence_decl.metadata = {'test': True}\n sequence_decl.register(collector, {})\n sequence_decl.unregister(collector)\n assert not collector.contributions['ecpy_pulses.BaseSequence'].metadata",
"def finish_parsing(self):\n if self.current_element:\n self.diagnostics.append(Diagnostic(\n Severity.ERROR,\n self.current_element.code_range,\n f\"Unclosed element <{self.current_element.tagname}>\\n\"\n f\"Did you mean <{self.current_element.tagname} \"\n f\"...attributes... /> ?\"\n ))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the text with all entities and tags removed. >>> plaintext('<em>1 &lt; 2</em>') u'1 < 2' The `keeplinebreaks` parameter can be set to ``False`` to replace any line breaks by simple spaces. | def plaintext(text, keeplinebreaks=True):
text = stripentities(striptags(text))
if not keeplinebreaks:
text = text.replace('\n', ' ')
return text | [
"def stripentities(text, keepxmlentities=False):\r\n def _replace_entity(match):\r\n if match.group(1): # numeric entity\r\n ref = match.group(1)\r\n if ref.startswith('x'):\r\n ref = int(ref[1:], 16)\r\n else:\r\n ref = int(ref, 10)\r\n return chr(ref)\r\n else: # character entity\r\n ref = match.group(2)\r\n if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):\r\n return '&%s;' % ref\r\n try:\r\n return chr(entities.name2codepoint[ref])\r\n except KeyError:\r\n if keepxmlentities:\r\n return '&%s;' % ref\r\n else:\r\n return ref\r\n return _STRIPENTITIES_RE.sub(_replace_entity, text)",
"def remove_tags(text):\n pattern = re.compile('<.*?>')\n return pattern.sub(r'', text)",
"def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text",
"def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text",
"def removeHTMLParts(text: str, keeptags: Optional[List[str]] = None) -> str:\n # try to merge with 'removeDisabledParts()' above into one generic function\n # thanks to:\n # https://www.hellboundhackers.org/articles/read-article.php?article_id=841\n parser = _GetDataHTML()\n if keeptags is None:\n keeptags = ['tt', 'nowiki', 'small', 'sup']\n with parser:\n parser.keeptags = keeptags\n parser.feed(text)\n return parser.textdata",
"def remove_html(e):\n\n p = re.compile(r'<.*?>')\n return p.sub('', str(e))",
"def PlainText_to_HTML(Text):\n \n Text = string.replace(Text, \"<\", \"<\") \n Text = string.replace(Text, \"\\n\\n\", \"<P>\")\n Text = string.replace(Text, \"\\n\", \"<BR>\")\n return Text",
"def clean(article):\n soup = BeautifulSoup(article, \"html.parser\")\n text = soup.get_text()\n text = text.strip()\n return text",
"def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)",
"def strip_tags(value):\r\n return txcommon.rst.strip_tags(value)",
"def tidy_article_allow_punct( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()",
"def remove_non_alpha_chars_and_html(text) -> str:\n text_output_trimmed = ProjectCommon.trimmer(text)\n\n text_output_no_html = ProjectCommon.remove_html(text_output_trimmed)\n\n text_output_no_html_no_non_alpha_chars = \\\n ProjectCommon.remove_non_alpha_chars(text_output_no_html)\n\n return text_output_no_html_no_non_alpha_chars",
"def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain",
"def plaintext(self, **kwargs):\n return self.doctree().astext()",
"def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)",
"def plaintext(obj, skip=None):\n\tif isinstance(obj, str):\n\t\th = html.parser.HTMLParser()\n\t\tobj = h.unescape(obj).replace('\\xa0', ' ').strip()\n\telif isinstance(obj, list):\n\t\tfor i, v in enumerate(obj):\n\t\t\tobj[i] = plaintext(v)\n\telif isinstance(obj, dict):\n\t\tfor k, v in obj.items():\n\t\t\tif isinstance(skip, (tuple, list)) and k in skip: continue\n\t\t\tobj[k] = plaintext(v)\n\treturn obj",
"def html_to_text(html_text):\n soup = BeautifulSoup(html_text, \"html.parser\")\n clean_html = ' '.join(soup.find_all(text=True))\n return clean_html",
"def remove_newlines(text):\n # First normalize the newlines using Django's nifty utility\n normalized_text = normalize_newlines(text)\n # Then simply remove the newlines like so.\n return normalized_text.replace('\\n', '<\\br>').replace('\\r','')",
"def _strip_xml(txts):\n txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess\n txts = txts.split('\\n')\n\n for i in range(len(txts)):\n for pattern in patterns:\n txts[i] = pattern[0].sub(pattern[1], txts[i])\n\n txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']\n return '\\n'.join(txts)",
"def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
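
A minimal usage sketch of a plaintext-style helper, using a stand-in tag regex and html.unescape in place of the striptags/stripentities pair referenced above; everything beyond the row's own docstring is an assumption for illustration.

import re
from html import unescape  # stand-in for stripentities in this sketch

_TAGS = re.compile(r'<[^>]*>')

def plaintext(text, keeplinebreaks=True):
    # strip tags first, then decode entities, mirroring stripentities(striptags(text))
    text = unescape(_TAGS.sub('', text))
    if not keeplinebreaks:
        text = text.replace('\n', ' ')
    return text

print(plaintext('<em>1 &lt; 2</em>'))                 # 1 < 2
print(plaintext('a<br/>\nb', keeplinebreaks=False))   # a b
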
Return a copy of the given text with any character or numeric entities replaced by the equivalent UTF-8 characters. >>> stripentities('1 &lt; 2') u'1 < 2' >>> stripentities('more &hellip;') u'more \u2026' >>> stripentities('&#8230;') u'\u2026' >>> stripentities('&#x2026;') u'\u2026' If the `keepxmlentities` parameter is provided and is a truth value, the core XML entities (&amp;, &apos;, &gt;, &lt; and &quot;) are left intact. >>> stripentities('1 &lt; 2 &hellip;', keepxmlentities=True) u'1 &lt; 2 \u2026' | def stripentities(text, keepxmlentities=False):
def _replace_entity(match):
if match.group(1): # numeric entity
ref = match.group(1)
if ref.startswith('x'):
ref = int(ref[1:], 16)
else:
ref = int(ref, 10)
return unichr(ref)
else: # character entity
ref = match.group(2)
if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
return '&%s;' % ref
try:
return unichr(entities.name2codepoint[ref])
except KeyError:
if keepxmlentities:
return '&%s;' % ref
else:
return ref
return _STRIPENTITIES_RE.sub(_replace_entity, text) | [
"def stripentities(text, keepxmlentities=False):\r\n def _replace_entity(match):\r\n if match.group(1): # numeric entity\r\n ref = match.group(1)\r\n if ref.startswith('x'):\r\n ref = int(ref[1:], 16)\r\n else:\r\n ref = int(ref, 10)\r\n return chr(ref)\r\n else: # character entity\r\n ref = match.group(2)\r\n if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):\r\n return '&%s;' % ref\r\n try:\r\n return chr(entities.name2codepoint[ref])\r\n except KeyError:\r\n if keepxmlentities:\r\n return '&%s;' % ref\r\n else:\r\n return ref\r\n return _STRIPENTITIES_RE.sub(_replace_entity, text)",
"def decode_html_entities(text):\n h = HTMLParser()\n return h.unescape(text)",
"def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)",
"def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)",
"def unescape(text):\r\n\r\n if not text:\r\n return text\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == \"&#\":\r\n # character reference\r\n try:\r\n if text[:3] == \"&#x\":\r\n return unichr(int(text[3:-1], 16))\r\n else:\r\n return unichr(int(text[2:-1]))\r\n except ValueError:\r\n pass\r\n else:\r\n # named entity\r\n try:\r\n text = unichr(htmlentitydefs.name2codepoint[text[1: -1]])\r\n except KeyError:\r\n pass\r\n return text # leave as is\r\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def remove_entities(self, title):\n\t\tentities, ent_types = self.get_entities(title)\n\t\tif entities == []:\n\t\t\treturn title\n\t\telse:\n\t\t\tsubstitutions = {}\n\t\t\tfor X in entities:\n\t\t\t\tsubstitutions[X] = ''\n\t\t\toutput = self.replace(title, substitutions)\n\t\t\treturn output",
"def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text",
"def strip_tweet(text, remove_url=True):\n if remove_url:\n text = url_pattern.sub('', text)\n else:\n text = expand_url(text)\n text = mention_pattern.sub('', text)\n text = html_parser.unescape(text)\n text = text.strip()\n return text",
"def get_texts_from_entities(entities):\n texts = []\n for e in entities:\n texts.append(e.text)\n return texts",
"def html_unquote(s, encoding=None):\r\n if isinstance(s, str):\r\n if s == '':\r\n # workaround re.sub('', '', u'') returning '' < 2.5.2\r\n # instead of u'' >= 2.5.2\r\n return u''\r\n s = s.decode(encoding or default_encoding)\r\n return _unquote_re.sub(_entity_subber, s)",
"def strip(text):\n\n return text.strip()",
"def remove_defined_articles(self, text: str) -> str:\n cleaned_text = re.sub(self.quote_pattern, \"\", text)\n return cleaned_text.strip()",
"def extract_entities(self, text):\n results = self.fetch(self.base_url, text)\n return [_ for _ in self.process_results(results)]",
"def clean_text(self, text: str):\n text = text.rstrip()\n if '\"\"' in text:\n if text[0] == text[-1] == '\"':\n text = text[1:-1]\n text = text.replace('\\\\\"\"', '\"')\n text = text.replace('\"\"', '\"')\n\n text = text.replace('\\\\\"\"', '\"')\n\n text = html.unescape(text)\n\n text = ' '.join(text.split())\n return text",
"def clean(article):\n soup = BeautifulSoup(article, \"html.parser\")\n text = soup.get_text()\n text = text.strip()\n return text",
"def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&\")\r\n data = data.replace(\"<\", \"<\")\r\n data = data.replace(\">\", \">\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data",
"def tidy_article_allow_punct( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()",
"def __html2unicode(self, s):\n # First the digits:\n ents = set(html_entity_digit_re.findall(s))\n if len(ents) > 0:\n for ent in ents:\n entnum = ent[2:-1]\n try:\n entnum = int(entnum)\n s = s.replace(ent, unichr(entnum))\n except:\n pass\n # Now the alpha versions:\n ents = set(html_entity_alpha_re.findall(s))\n ents = filter((lambda x : x != amp), ents)\n for ent in ents:\n entname = ent[1:-1]\n try: \n s = s.replace(ent, unichr(htmlentitydefs.name2codepoint[entname]))\n except:\n pass \n s = s.replace(amp, \" and \")\n return s",
"def clean_text(text):\n text = re.sub(r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\", \"URL\", text) # Replace urls with special token\n text = text.replace(\"\\'s\", \"\")\n text = text.replace(\"\\'\", \"\")\n text = text.replace(\"n\\'t\", \" n\\'t\")\n text = text.replace(\"@\", \"\")\n text = text.replace(\"#\", \"\")\n text = text.replace(\"_\", \" \")\n text = text.replace(\"-\", \" \")\n text = text.replace(\"&\", \"\")\n text = text.replace(\">\", \"\")\n text = text.replace(\"\\\"\", \"\")\n text = text.replace(\".\", \"\")\n text = text.replace(\",\", \"\")\n text = text.replace(\"(\", \"\")\n text = text.replace(\")\", \"\")\n text = ' '.join(text.split())\n return text.strip()",
"def clean_html_encodings(text: str) -> str:\n return str(BeautifulSoup(text, 'html.parser'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
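
A self-contained Python 3 sketch of the same entity-replacement logic (chr in place of unichr); the regular expression is an assumption standing in for _STRIPENTITIES_RE, not necessarily the library's actual pattern.

import re
from html.entities import name2codepoint

_ENTITIES_RE = re.compile(r'&(?:#(\d+|[xX][0-9a-fA-F]+);?|(\w+);)')

def stripentities(text, keepxmlentities=False):
    def _replace_entity(match):
        if match.group(1):  # numeric entity, decimal or hex
            ref = match.group(1)
            base = 16 if ref[0] in 'xX' else 10
            return chr(int(ref.lstrip('xX'), base))
        ref = match.group(2)  # named entity
        if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
            return '&%s;' % ref
        try:
            return chr(name2codepoint[ref])
        except KeyError:
            return '&%s;' % ref if keepxmlentities else ref

    return _ENTITIES_RE.sub(_replace_entity, text)

print(stripentities('1 &lt; 2 &hellip;'))                        # 1 < 2 …
print(stripentities('1 &lt; 2 &hellip;', keepxmlentities=True))  # 1 &lt; 2 …
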
Return a copy of the text with any XML/HTML tags removed. >>> striptags('<span>Foo</span> bar') 'Foo bar' >>> striptags('<span class="bar">Foo</span>') 'Foo' >>> striptags('Foo<br />') 'Foo' | def striptags(text):
return _STRIPTAGS_RE.sub('', text) | [
"def remove_tags(text):\n pattern = re.compile('<.*?>')\n return pattern.sub(r'', text)",
"def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)",
"def strip_tags(html):\n\n s = HTMLStripper()\n s.feed(html)\n stripped = s.get_data()\n # Remove extra spaces\n return ' '.join(filter(None, stripped.split(' ')))",
"def removeHTMLParts(text: str, keeptags: Optional[List[str]] = None) -> str:\n # try to merge with 'removeDisabledParts()' above into one generic function\n # thanks to:\n # https://www.hellboundhackers.org/articles/read-article.php?article_id=841\n parser = _GetDataHTML()\n if keeptags is None:\n keeptags = ['tt', 'nowiki', 'small', 'sup']\n with parser:\n parser.keeptags = keeptags\n parser.feed(text)\n return parser.textdata",
"def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)",
"def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text",
"def _strip_xml(txts):\n txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess\n txts = txts.split('\\n')\n\n for i in range(len(txts)):\n for pattern in patterns:\n txts[i] = pattern[0].sub(pattern[1], txts[i])\n\n txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']\n return '\\n'.join(txts)",
"def strip_tags(value):\r\n return txcommon.rst.strip_tags(value)",
"def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain",
"def remove_tags(_content: str, *tags) -> str:\n content = _content\n for tag in tags:\n content = re.compile(tag).sub(\"\", content)\n return content",
"def strip_tags( body ):\n var_mask_re = re.compile( u\"[%$]?\\<+([^<>]+)\\>+\" )\n return re.sub( var_mask_re, '', body )",
"def remove_attributes_from_tags(text):\n if text:\n try:\n cleaner = clean.Cleaner(\n safe_attrs_only=True,\n remove_unknown_tags=False,\n )\n text = cleaner.clean_html(text)\n except lxml.etree.ParserError:\n return text\n return text",
"def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text",
"def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)",
"def tag_untokenized_text(self, text):\n return self.tag_untokenized_sentences(self._sent_tokenize(text))",
"def get_stripped_tags(self):\n stripped = []\n for (tags, attrs) in self.getHtmlExclusions():\n if not attrs:\n stripped.extend(tags)\n return stripped",
"def strip_ascii_from_tags(self, text):\n player_ob = self.player_ob or self\n if 'no_ascii' in player_ob.tags.all():\n text = RE_ASCII.sub(\"\", text)\n text = RE_ALT_ASCII.sub(r\"\\1\", text)\n else:\n text = RE_ASCII.sub(r\"\\1\", text)\n text = RE_ALT_ASCII.sub(\"\", text)\n return text",
"def sanitise_tags(tags: str):\n\n # hack out all kinds of whitespace, then split on ,\n # if you run into more illegal characters (simplenote does not want to sync them)\n # add them to the regular expression above.\n illegals_removed = tags_illegal_chars.sub('', tags)\n if len(illegals_removed) == 0:\n # special case for empty string ''\n # split turns that into [''], which is not valid\n return []\n\n else:\n return illegals_removed.split(',')",
"def remove_empty_html_tags(document):\n return re.sub(r'(<\\w+\\s*/?>)', ' ', document)",
"def clean_html_encodings(text: str) -> str:\n return str(BeautifulSoup(text, 'html.parser'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
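
The heavy lifting in a striptags-style helper is the regex itself; the pattern below is an assumption for illustration (comments and ordinary tags removed in one pass), not necessarily the module's real _STRIPTAGS_RE.

import re

_STRIPTAGS_RE = re.compile(r'(<!--.*?-->|<[^>]*>)', re.DOTALL)

def striptags(text):
    return _STRIPTAGS_RE.sub('', text)

print(striptags('<span>Foo</span> bar'))           # Foo bar
print(striptags('<span class="bar">Foo</span>'))   # Foo
print(striptags('Foo<br />'))                      # Foo
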
Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`. | def apply_filters(stream, filters, lexer=None):
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream | [
"def apply(filters: List[Callable], q: Queryable) -> Queryable:\n for f in filters:\n q = f(q)\n return q",
"def set_filters(filter_list):",
"def filters(self, filters):\n\n for f in filters:\n self.filter(f[\"attribute_name\"], f[\"value\"], f[\"operator\"])",
"def apply_filter(value):\n enabled_filters = get_filters()\n for filt in enabled_filters:\n value = filt(value)\n return value",
"def register_filter(self, *filters):\n for f in filters:\n f.controller = self\n self._filters.append(f)",
"def _run_filters(self, word):\n if len(self._filters) > 0:\n for f in self._filters:\n f.run(word)\n # print( 'running filter \\n filtername: %s \\n word: %s' % (f.__name__, word) )\n # if f.run(word) is False:\n # print( 'filter %s failed: %s' % (f.__name__, word) )\n # return False\n return True",
"def applyAllFilters(categories):\n result = filterDisambiguation(categories)\n result = followRedirects(result)\n result = filterBlacklist(result)\n result = filterCountries(result)\n return result",
"def apply_filters(self):\n\t\t# TODO: nahradit map-em\n\t\tfor f,is_row in self.filters:\n\t\t\tif is_row:\n\t\t\t\tself.table = list(map(f,self.table))\n\t\t\telse:\n\t\t\t\tself.table = f(self.table)",
"def __call__(self, buf):\n return all(filter_(buf) for filter_ in self.filters)",
"def runFilters(filters, events):\n for f in filters:\n if len(events) == 0:\n return []\n for event in events:\n event.hide = False\n events = sortEvents(events)\n events = f.process(events)\n \n events = sortEvents(events)\n return events",
"def filter_cascade(filters):\n def newFilter(image):\n for f in filters:\n image = f(image)\n return image\n return newFilter",
"def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)",
"def setFilter(self, filters=[logging.INFO, logging.ERROR]):\n self._filters = filters",
"def apply_filters(self, filters, starred):\n\n feed_count = 0\n item_count = 0\n processed_feeds = set()\n\n try:\n print u\"Retrieving subscribed feeds...\"\n subs_list = self._subscription_list()\n if starred:\n print u\"Retrieving starred items...\"\n else:\n print u\"Retrieving unread items...\"\n self._retrieve_entries(starred)\n except KeyboardInterrupt:\n exit(\"cancelled\")\n\n print u\"Applying filters...\"\n\n universal_patterns = filters.get(u\"*\", [])\n\n for tag in subs_list:\n tag_has_matching_feeds = False\n for feed in subs_list[tag]:\n # get the applicable filters\n patterns = universal_patterns[:]\n try:\n patterns.extend(filters[feed[u\"title\"]])\n except KeyError:\n pass\n\n if not feed[u\"feed_id\"] in processed_feeds:\n processed_feeds.add(feed[u\"feed_id\"])\n\n if not patterns:\n # skip to next feed\n continue\n\n # since there are applicable patterns, the current tag has at least one matching feed\n if not tag_has_matching_feeds:\n tag_has_matching_feeds = True\n print u\"\\n{}\\n{}\".format(tag, u\"=\" * len(tag))\n\n feed_count += 1\n items_found = self._apply_filter(feed, patterns)\n if items_found is not None:\n print u\"found {}.\".format(items_found)\n item_count += items_found\n\n if self.to_be_filtered:\n self._filter(starred)\n\n return feed_count, item_count",
"def __iter__(self) -> Generator[ReadAlignments, None, None]:\n for readAlignments in self.iter():\n for filterFunc in self._filters:\n filteredReadAlignments = filterFunc(readAlignments)\n if filteredReadAlignments is False:\n break\n else:\n readAlignments = filteredReadAlignments\n else:\n yield readAlignments",
"def filter(self, **kwargs):\n\n for filter_name, filter_value in kwargs.iteritems():\n self._filters[filter_name] = filter_value\n return self",
"def iter_filter(fn):\r\n\r\n sentinel = object()\r\n\r\n @pipefilter\r\n @wraps(fn)\r\n def wrapped(*args, target=None, **kwargs):\r\n def generator():\r\n # This is the generator that the wrapped function will consume from\r\n while True:\r\n item = greenlet.getcurrent().parent.switch(sentinel)\r\n if isinstance(item, GeneratorExit):\r\n return\r\n else:\r\n yield item\r\n\r\n def run_target():\r\n # greenlet executing wrapped function\r\n fn(generator(), *args, **kwargs)\r\n\r\n def run_target_generator():\r\n for item in fn(generator(), *args, **kwargs):\r\n greenlet.getcurrent().parent.switch(item)\r\n\r\n if inspect.isgeneratorfunction(fn):\r\n # Wrapping a filter (consumes an iterator, is a generator)\r\n g_consume = greenlet(run_target_generator)\r\n g_consume.switch()\r\n\r\n try:\r\n while True:\r\n try:\r\n item = (yield)\r\n except Exception as e:\r\n g_consume.throw(e)\r\n else:\r\n value = g_consume.switch(item)\r\n\r\n # Feed any values the generator yields down the pipeline\r\n while value is not sentinel:\r\n if target is not None:\r\n target.send(value)\r\n value = g_consume.switch()\r\n except GeneratorExit as e:\r\n g_consume.switch(e)\r\n else:\r\n # Wrapping a sink (consumes an iterator)\r\n g_consume = greenlet(run_target)\r\n g_consume.switch()\r\n\r\n try:\r\n while True:\r\n try:\r\n item = (yield)\r\n except Exception as e:\r\n g_consume.throw(e)\r\n else:\r\n g_consume.switch(item)\r\n except GeneratorExit as e:\r\n g_consume.switch(e)\r\n\r\n return wrapped",
"def filter(self, *args):\n # type: (Union[FilterList, cgtwq.Filter]) -> Tuple[FileBoxMeta, ...]\n\n if compat.api_level() == compat.API_LEVEL_5_2:\n return self._filter_v5_2(*args)\n return self._filter_v6_1(*args)",
"def apply_filter(f, ms):\n for m in ms:\n res = f(m)\n if res is not None:\n yield res",
"def flatland_filter(stream, context):\n return Stream(FlatlandFilter()(stream, context))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
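
A runnable sketch of the lazy filter-chaining pattern above: each filter wraps the previous generator and nothing executes until the final stream is iterated. The Uppercase and DropShort filters are invented for the example; only the composition mirrors apply_filters.

def apply_filters(stream, filters, lexer=None):
    def _apply(filter_, stream):
        for token in filter_.filter(lexer, stream):
            yield token
    for filter_ in filters:
        stream = _apply(filter_, stream)
    return stream

class Uppercase:
    def filter(self, lexer, stream):
        for ttype, value in stream:
            yield ttype, value.upper()

class DropShort:
    def filter(self, lexer, stream):
        for ttype, value in stream:
            if len(value) > 2:
                yield ttype, value

tokens = [('Name', 'foo'), ('Op', '='), ('Name', 'barbaz')]
print(list(apply_filters(iter(tokens), [Uppercase(), DropShort()])))
# [('Name', 'FOO'), ('Name', 'BARBAZ')]
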
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. | def format(self, tokensource, outfile):
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile) | [
"def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)",
"def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> < </symbol>')\n elif value == '>':\n self.output(f'<symbol> > </symbol>')\n elif value == '&':\n self.output(f'<symbol> & </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')",
"def writeToFile(name,mode,tok_list):\n\t\n\t# open and create output file\n\tlogfile = open(name,mode)\n\t\n\t# write every token to ouput file each token to a new line\n\tfor tok in tok_list:\n\t\tlogfile.write(str(tok) + '\\n')\n\t\n\t# close file\n\tlogfile.close()",
"def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n try:\n if not outfile:\n realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.formatter import Formatter\n if isinstance(formatter, type) and issubclass(formatter, Formatter):\n raise TypeError('format() argument must be a formatter instance, '\n 'not a class')\n raise",
"def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()",
"def tokenize(args):\n if args.profile and not Path(args.profile).exists(): # pragma: no cover\n raise ParserError('--profile must be a path for an existing file')\n _write(args, Tokenizer(profile=args.profile)(_read(args), column=args.mapping))",
"def _format_code(source, preferred_quote):\n if not source:\n return source\n\n modified_tokens = []\n\n sio = io.StringIO(source)\n for (token_type,\n token_string,\n start,\n end,\n line) in tokenize.generate_tokens(sio.readline):\n\n if token_type == tokenize.STRING:\n token_string = unify_quotes(token_string,\n preferred_quote=preferred_quote)\n\n modified_tokens.append(\n (token_type, token_string, start, end, line))\n\n return untokenize.untokenize(modified_tokens)",
"def save_tokens_to_file(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as fp:\n #for token in self.token2id.keys():\n for idd in range(self.size()): \n fp.write(self.id2token[idd] + '\\n')",
"def _write_input(\n self, X: List[str], y: Optional[List[List[str]]], input_path: Path\n ):\n with open(input_path, \"w\") as f:\n if y is not None:\n for text, labels in zip(X, y):\n label_str = \" \".join(\n f\"__label__{FastText._escape_label(label)}\" for label in labels\n )\n f.write(f\"{label_str} {_fasttext_preprocess(text)}\\n\")\n elif y is None:\n for text in X:\n f.write(f\"{_fasttext_preprocess(text)}\\n\")",
"def to_yacc(self, gp):\n d, _ = os.path.split(gp)\n tp = tempfile.mktemp('.y', dir=d)\n with open(tp, 'w') as tf:\n with open(gp) as gf:\n for l in gf:\n if l.startswith('%token'):\n _l = l.replace(',', '').replace(';', '')\n tf.write(_l)\n else:\n if l.startswith('%nodefault'):\n _l = l.replace('%nodefault', '%%')\n tf.write(_l)\n else:\n tf.write(l)\n\n return tp",
"def write_input(codewords, input_file):\n with open(input_file, 'w') as f:\n for codeword in codewords:\n for x in codeword:\n f.write(str(x) + ' ')",
"def parse_str(self, source, flags):\n file = tempfile.NamedTemporaryFile(mode = 'w+', suffix = '.cpp')\n file.write(source)\n file.seek(0)\n self.tu = self.index.parse(\n file.name,\n args=flags,\n options=cin.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD\n )\n file.close()\n for child in self.tu.cursor.get_children():\n if child.kind == cin.CursorKind.VAR_DECL or child.kind == cin.CursorKind.FUNCTION_DECL:\n self._py_nodes.append(self.transform(child))\n return self._py_nodes",
"def tokens_to_string(tokens : List[Token]) -> str:\n output = \"\"\n for token in tokens: \n if any([isinstance(token, token_class) for token_class in \\\n [DiagToken, OperatorToken, ParenToken, MatIdentifierToken, VecIdentifierToken]]):\n output += token.value\n elif isinstance(token, InvToken) or isinstance(token, TransToken):\n sym = token.value\n if isinstance(sym, KernelToken):\n output += sym.value + \"(\" + sym.arg1 + \",\" + sym.arg2 + \")\"\n elif isinstance(sym, GroupToken):\n output += tokens_to_string(sym.tokens())\n else:\n output += sym.value \n \n if isinstance(token, InvToken):\n output += \".I\"\n else:\n output += \".T\"\n elif isinstance(token, GroupToken):\n output += tokens_to_string(token.tokens())\n else:\n output += token.value +\"(\" + token.arg1 + \",\"+ token.arg2 + \")\"\n return output",
"def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep",
"def _write(self, template: list):\n with open(self.template_location, \"w\") as file:\n for line in template:\n file.write(line)",
"def process_tokens(self, tokens):\n self._tokens = list(tokens)\n self._pos = 0\n self._ast = self._assert(self._chunk(), 'input to be a program')\n self._ast.store_token_groups(self._tokens)",
"def tokeniser(tokenInfo):\n\n source_pass = tokenInfo[\"source_pass\"]\n tokensToSort = []\n tokensToClassify = []\n\n wordStartIndex = []\n wordEndIndex = []\n\n #Going through the words identified in the input and saving their start and end index in an array to use later\n if \"words\" not in tokenInfo[\"tokens\"].keys():\n tokensToSort.append({\n \"content\": source_pass,\n \"start_index\": 0,\n \"end_index\": len(source_pass) - 1\n })\n\n else:\n for word in tokenInfo[\"tokens\"][\"words\"]:\n wordStartIndex.append(word[\"start_index\"])\n wordEndIndex.append(word[\"end_index\"])\n\n tempTokenStartIndex = 0\n tempToken = ''\n for i in range(len(wordStartIndex)):\n tempToken = source_pass[tempTokenStartIndex:wordStartIndex[i]]\n if tempToken != '':\n tokensToSort.append({\n \"content\": tempToken,\n \"start_index\": tempTokenStartIndex,\n \"end_index\": wordStartIndex[i] - 1\n })\n tempTokenStartIndex = wordEndIndex[i] + 1\n\n if len(source_pass) > (wordEndIndex[len(wordEndIndex) - 1] + 1):\n tempToken = source_pass[wordEndIndex[len(wordEndIndex)-1]+1:len(source_pass)]\n tokensToSort.append({\n \"content\": tempToken,\n \"start_index\": wordEndIndex[len(wordEndIndex) - 1],\n \"end_index\": len(source_pass) - 1\n })\n\n #pp.pprint(tokensToSort)\n\n for tokenBeingSorted in tokensToSort:\n if tokenBeingSorted[\"content\"].isalpha() or tokenBeingSorted[\"content\"].isdigit() or isOnlySpecialCharacters(tokenBeingSorted[\"content\"]) or len(tokenBeingSorted[\"content\"])==1:\n tokensToClassify.append({\n \"content\": tokenBeingSorted[\"content\"],\n \"start_index\": tokenBeingSorted[\"start_index\"],\n \"end_index\": tokenBeingSorted[\"end_index\"]\n })\n else:\n tempTokenBeingSorted = tokenBeingSorted[\"content\"][0]\n tempStartIndex = tokenBeingSorted[\"start_index\"]\n\n for chars in range(len(tokenBeingSorted[\"content\"]) -1):\n if classifyCharacter(tokenBeingSorted[\"content\"][chars]) == classifyCharacter(tokenBeingSorted[\"content\"][chars+1]):\n tempTokenBeingSorted+=(tokenBeingSorted[\"content\"][chars+1])\n else:\n tokensToClassify.append({\n \"content\": tempTokenBeingSorted,\n \"start_index\": tempStartIndex,\n \"end_index\": tempStartIndex + len(tempTokenBeingSorted) - 1\n })\n tempStartIndex += len(tempTokenBeingSorted)\n tempTokenBeingSorted = tokenBeingSorted[\"content\"][chars+1]\n\n tokensToClassify.append({\n \"content\": tempTokenBeingSorted,\n \"start_index\": tempStartIndex,\n \"end_index\": tempStartIndex + len(tempTokenBeingSorted) - 1\n })\n\n for token in tokensToClassify:\n\n unclassifiedToken = token[\"content\"]\n classification = classifier(unclassifiedToken)\n if classification not in tokenInfo[\"tokens\"]:\n tokenInfo[\"tokens\"][classification] = []\n\n tokenInfo[\"tokens\"][classification].append({\n \"content\": unclassifiedToken,\n \"start_index\": token[\"start_index\"],\n \"end_index\": token[\"end_index\"]\n })\n\n return tokenInfo",
"def save_tracks_to_file(track_source, output_file):\n for track in tracks_source:\n track_summary = make_track_summary(track)\n output_file.write(serialize_track(track_summary) + '\\n')",
"def write_sources_list(self,filename=None):\n\t\tif not filename:\n\t\t\tfilename = self.filename\n\t\tf = open(filename,'w')\n\t\tf.write('\\n'.join(self.format_for_output()))\n\t\tf.close()",
"def createTaggedNgramsFile(ngrams_file, tagged_ngrams_file):\n\to = open(tagged_ngrams_file, 'w')\n\t\n\tprint('Opening input n-gram counts file...')\n\tc = 0\n\tf = open(ngrams_file)\n\tfor line in f:\n\t\tc += 1\n\t\tif c % 1000000 == 0:\n\t\t\tprint(str(c) + ' n-grams processed.')\n\t\tdata = line.strip().split('\\t')\n\t\ttokens = [t.split('|||') for t in data[0].split(' ')]\n\t\tif len(tokens)==2:\n\t\t\to.write(tokens[0][0] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==3:\n\t\t\to.write(tokens[0][0] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==4:\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==5:\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + ' ' + tokens[4][min(1, len(tokens[4])-1)] + '\\t' + data[1] + '\\n')\n\tf.close()\n\tprint('N-grams file read!')\n\t\n\tprint('Saving model...')\n\to.close()\n\tprint('Finished!')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the CSS class of this token type, prefixed with the classprefix option. | def _get_css_class(self, ttype):
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return '' | [
"def _get_class_string(self):\n\n classes = self.attrs.get(\"class\", None)\n\n # No classes were set in the attributes\n if not classes:\n return \" \".join(self.classes)\n\n classes = classes.value\n\n # Make room for the classes set in the tag\n if self.classes:\n classes += \" \"\n\n classes += \" \".join(self.classes)\n\n return classes",
"def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)",
"def clau_class(obj):\n return mark_safe(_css_class(obj))",
"def get_class_name(self):\n return self.name[:-6]",
"def css_class(self):\n header = self.data.header\n normalizer = getUtility(IIDNormalizer)\n return \"portlet-multilanguage-%s\" % normalizer.normalize(header)",
"def add_class(self, value: str) -> HTMLNode:\n return self.add_attr(\"class\", value)",
"def _css_class(obj):\n if not obj:\n cls = ''\n\n elif isinstance(obj, str):\n # Alias or e-mail\n if is_valid_email(obj):\n cls = 'claudia-email'\n else:\n cls = 'claudia-error'\n\n elif isinstance(obj, Mapping):\n cls = 'claudia-%s' % obj.type\n if not obj.active:\n cls = 'claudia-inactive %s' % cls\n\n elif isinstance(obj, SharedDrive):\n cls = 'claudia-%s' % Mapping.get_type(obj)\n\n else:\n cls = 'claudia-%s' % Mapping.get_type(obj)\n if not obj.is_active():\n cls = 'claudia-inactive %s' % cls\n\n return cls",
"def class_name(self):\n return self.element_info.class_name",
"def get_class_mnemonic(rrclass):\n if type(rrclass) is type and issubclass(rrclass, Class):\n return rrclass.mnemonic\n elif isinstance(rrclass, int):\n for cls in CLASSES:\n if rrclass == CLASSES[cls].value:\n return CLASSES[cls].mnemonic\n return \"CLASS{}\".format(int(rrclass))\n elif isinstance(rrclass, str):\n if rrclass.upper() in CLASSES:\n return CLASSES[rrclass.upper()].mnemonic\n else:\n match = re.search(r'^CLASS(\\d+)$', rrclass.upper())\n if match:\n return rrclass\n raise ValueError(\n \"rrclass must be a known class mnemonic (e.g. IN, CH), an integer, \"\n \"or a CLASS### text representation of an unknown class (see RFC3597) \"\n \"({!r} is a {})\".format(rrclass, type(rrclass))\n )",
"def class_abbrev(type):\n ...",
"def format_class(self):\n return self.format_class_loader.get_class(name=self.format_class_name)",
"def get_class_name(rrclass):\n if type(rrclass) is type and issubclass(rrclass, Class):\n return rrclass.long_name\n elif isinstance(rrclass, int):\n for cls in CLASSES:\n if rrclass == CLASSES[cls].value:\n return CLASSES[cls].long_name\n elif isinstance(rrclass, str):\n if rrclass.upper() in CLASSES:\n return CLASSES[rrclass.upper()].long_name\n else:\n match = re.search(r'^CLASS(\\d+)$', rrclass.upper())\n if match:\n return rrclass\n raise ValueError(\n \"rrclass must be a known class mnemonic (e.g. IN, CH), an integer, \"\n \"or a CLASS### text representation of an unknown class (see RFC3597) \"\n \"({!r} is a {})\".format(rrclass, type(rrclass))\n )",
"def english_class_name(self):\n return self._en_class_name",
"def type_class(self):\n return type_get_class_name(type_get_class(self))",
"def char_class(self):\n return CLASS_NAMES.get(self.char_class_id)",
"def type_prefix(self):\n if not self._type_prefix:\n self._type_prefix = ''.join(\n (self.command_prefix[0:1].upper(), self.command_prefix[1:]))\n return self._type_prefix",
"def genClassCode(self):\r\n \r\n # Generate _fields.\r\n fieldsstr = self.genFieldsStr()\r\n \r\n # Parse annotations.\r\n self.parseAnnStr()\r\n \r\n tstr = self.genTypesStr()\r\n \r\n attrstr = fieldsstr + tstr\r\n \r\n return self.classtemplate.format(self.classname, self.basename, attrstr)",
"def format_class_name(self):\n if \"json\" in self.format_class_loader.class_names:\n default = \"json\"\n else:\n default = self.format_class_loader.class_names[0]\n return getattr(self, \"_format_class_name\", default)",
"def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class",
"def class_name(self):\n return self.source_file.rsplit('.', maxsplit=1)[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return CSS style definitions for the classes produced by the current highlighting style. ``arg`` can be a string or list of selectors to insert before the token type classes. | def get_style_defs(self, arg=None):
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, basestring):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.iteritems()
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines) | [
"def get_style_defs(self, arg=None):\r\n if arg is None:\r\n arg = ('cssclass' in self.options and '.'+self.cssclass or '')\r\n if isinstance(arg, str):\r\n args = [arg]\r\n else:\r\n args = list(arg)\r\n\r\n def prefix(cls):\r\n if cls:\r\n cls = '.' + cls\r\n tmp = []\r\n for arg in args:\r\n tmp.append((arg and arg + ' ' or '') + cls)\r\n return ', '.join(tmp)\r\n\r\n styles = [(level, ttype, cls, style)\r\n for cls, (style, ttype, level) in self.class2style.items()\r\n if cls and style]\r\n styles.sort()\r\n lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])\r\n for (level, ttype, cls, style) in styles]\r\n if arg and not self.nobackground and \\\r\n self.style.background_color is not None:\r\n text_style = ''\r\n if Text in self.ttype2class:\r\n text_style = ' ' + self.class2style[self.ttype2class[Text]][0]\r\n lines.insert(0, '%s { background: %s;%s }' %\r\n (prefix(''), self.style.background_color, text_style))\r\n if self.style.highlight_color is not None:\r\n lines.insert(0, '%s.hll { background-color: %s }' %\r\n (prefix(''), self.style.highlight_color))\r\n return '\\n'.join(lines)",
"def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class",
"def _colorize_re(tree, noparen=0):\n result = []\n out = result.append\n \n if len(tree) > 1 and not noparen:\n out('<span class=\"%s\">(</span>' % PAREN_TAG)\n for elt in tree:\n op = elt[0]\n args = elt[1]\n\n if op == sre_constants.LITERAL:\n c = unichr(args)\n if c == '\\t': out(r'<span class=\"%s\">\\t</span>' % ESCAPE_TAG)\n elif c == '\\n': out(r'<span class=\"%s\">\\n</span>' % ESCAPE_TAG)\n elif c == '\\r': out(r'<span class=\"%s\">\\r</span>' % ESCAPE_TAG)\n elif c == '\\f': out(r'<span class=\"%s\">\\f</span>' % ESCAPE_TAG)\n elif c == '\\v': out(r'<span class=\"%s\">\\v</span>' % ESCAPE_TAG)\n elif ord(c)<32 or ord(c)>=127:\n if c < 256: template = r'<span class=\"%s\">\\x%02x</span>'\n else: template = r'<span class=\"%s\">\\u%04x</span>'\n out(template % (ESCAPE_TAG,ord(c)))\n elif c in '.^$\\\\*+?{}[]|()':\n out(r'<span class=\"%s\">\\%c</span>' % (ESCAPE_TAG, c))\n else: out(plaintext_to_html(unichr(args)))\n continue\n \n elif op == sre_constants.ANY:\n out('<span class=\"%s\">.</span>' % ANY_TAG)\n \n elif op == sre_constants.BRANCH:\n if args[0] is not None:\n raise ValueError('Branch expected None arg but got %s'\n % args[0])\n VBAR = '<span class=\"%s\">|</span>' % BRANCH_TAG\n out(VBAR.join([_colorize_re(item,1) for item in args[1]]))\n \n elif op == sre_constants.IN:\n if (len(args) == 1 and args[0][0] == sre_constants.CATEGORY):\n out(_colorize_re(args))\n else:\n out('<span class=\"%s\">[</span>' % CHOICE_TAG)\n out(_colorize_re(args, 1))\n out('<span class=\"%s\">]</span>' % CHOICE_TAG)\n \n elif op == sre_constants.CATEGORY:\n out('<span class=\"%s\">' % CATEGORY_TAG)\n if args == sre_constants.CATEGORY_DIGIT: out(r'\\d')\n elif args == sre_constants.CATEGORY_NOT_DIGIT: out(r'\\D')\n elif args == sre_constants.CATEGORY_SPACE: out(r'\\s')\n elif args == sre_constants.CATEGORY_NOT_SPACE: out(r'\\S')\n elif args == sre_constants.CATEGORY_WORD: out(r'\\w')\n elif args == sre_constants.CATEGORY_NOT_WORD: out(r'\\W')\n else: raise ValueError('Unknown category %s' % args)\n out('</span>')\n \n elif op == sre_constants.AT:\n out('<span class=\"%s\">' % AT_TAG)\n if args == sre_constants.AT_BEGINNING_STRING: out(r'\\A')\n elif args == sre_constants.AT_BEGINNING: out(r'^')\n elif args == sre_constants.AT_END: out(r'$')\n elif args == sre_constants.AT_BOUNDARY: out(r'\\b')\n elif args == sre_constants.AT_NON_BOUNDARY: out(r'\\B')\n elif args == sre_constants.AT_END_STRING: out(r'\\Z')\n else: raise ValueError('Unknown position %s' % args)\n out('</span>')\n \n elif op == sre_constants.MAX_REPEAT:\n min = args[0]\n max = args[1]\n if max == sre_constants.MAXREPEAT:\n if min == 0:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">*</span>' % STAR_TAG)\n elif min == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">+</span>' % PLUS_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,}</span>' % (RNG_TAG, min))\n elif min == 0:\n if max == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">?</span>' % QMRK_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{,%d}</span>' % (RNG_TAG, max))\n elif min == max:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d}</span>' % (RNG_TAG, max))\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,%d}</span>' % (RNG_TAG, min, max))\n\n elif op == sre_constants.MIN_REPEAT:\n min = args[0]\n max = args[1]\n if max == sre_constants.MAXREPEAT:\n if min == 0:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">*?</span>' % STAR_TAG)\n elif min == 1:\n 
out(_colorize_re(args[2]))\n out('<span class=\"%s\">+?</span>' % PLUS_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,}?</span>' % (RNG_TAG, min))\n elif min == 0:\n if max == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">??</span>' % QMRK_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{,%d}?</span>' % (RNG_TAG, max))\n elif min == max:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d}?</span>' % (RNG_TAG, max))\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,%d}?</span>'%(RNG_TAG, min, max))\n\n elif op == sre_constants.SUBPATTERN:\n if args[0] is None:\n out('<span class=\"%s\">(?:</span>' % PAREN_TAG)\n elif isinstance(args[0], (int, long)):\n # This is cheating:\n out('<span class=\"%s\">(</span>' % PAREN_TAG)\n else:\n out('<span class=\"%s\">(?P<</span>' % PAREN_TAG)\n out('<span class=\"%s\">%s</span>' %\n (REF_TAG, plaintext_to_html(args[0])))\n out('<span class=\"%s\">></span>' % PAREN_TAG)\n out(_colorize_re(args[1], 1))\n out('<span class=\"%s\">)</span>' % PAREN_TAG)\n\n elif op == sre_constants.GROUPREF:\n out('<span class=\"%s\">\\\\%d</span>' % (REF_TAG, args))\n\n elif op == sre_constants.RANGE:\n start = _colorize_re( ((sre_constants.LITERAL, args[0]),) )\n end = _colorize_re( ((sre_constants.LITERAL, args[1]),) )\n out('%s<span class=\"%s\">-</span>%s' % (start, CHOICE_TAG, end))\n \n elif op == sre_constants.NEGATE:\n out('<span class=\"%s\">^</span>' % CHOICE_TAG)\n\n elif op == sre_constants.ASSERT:\n if args[0]: out('<span class=\"%s\">(?=</span>' % ASSERT_TAG)\n else: out('<span class=\"%s\">(?<=</span>' % ASSERT_TAG)\n out(''.join(_colorize_re(args[1], 1)))\n out('<span class=\"%s\">)</span>' % ASSERT_TAG)\n \n elif op == sre_constants.ASSERT_NOT:\n if args[0]: out('<span class=\"%s\">(?!</span>' % ASSERT_TAG)\n else: out('<span class=\"%s\">(?<!</span>' % ASSERT_TAG)\n out(''.join(_colorize_re(args[1], 1)))\n out('<span class=\"%s\">)</span>' % ASSERT_TAG)\n\n elif op == sre_constants.NOT_LITERAL:\n lit = _colorize_re( ((sre_constants.LITERAL, args),) )\n out('<span class=\"%s\">[^</span>%s<span class=\"%s\">]</span>' %\n (CHOICE_TAG, lit, CHOICE_TAG))\n else:\n log.error(\"Error colorizing regexp: unknown elt %r\" % elt)\n if len(tree) > 1 and not noparen: \n out('<span class=\"%s\">)</span>' % PAREN_TAG)\n return u''.join(result)",
"def css(self, query: str) -> list[\"Node\"]:\n ...",
"def clau_class(obj):\n return mark_safe(_css_class(obj))",
"def parse_styleguide(element):\n assert element.tag == 'styleguide'\n styles = []\n for child in element:\n styles.append(parse_class(child))\n return styles",
"def __call__(cls, *args, **kwds):\r\n if '_tokens' not in cls.__dict__:\r\n cls._all_tokens = {}\r\n cls._tmpname = 0\r\n if hasattr(cls, 'token_variants') and cls.token_variants:\r\n # don't process yet\r\n pass\r\n else:\r\n cls._tokens = cls.process_tokendef('', cls.get_tokendefs())\r\n\r\n return type.__call__(cls, *args, **kwds)",
"def lazy_style(*args):\n if len(args) == 0:\n raise TypeError(\n \"When applying a style method to a color, the color instance \"\n \"cannot be mutated with the style method - the method can \"\n \"only be used to apply the color and style to a specified \"\n \"argument, which must be provided to the style method.\"\n )\n sty = style(code)\n return sty(self.__call__(args[0]))",
"def __init__(self, sourceFileName, argType=\"file\", **kwargs):\n ## reset global state ##\n global doxygenCommentCache\n doxygenCommentCache = \"\"\n\n if (argType == \"file\"):\n self.sourceFileName = os.path.expandvars(sourceFileName)\n self.mainClass = os.path.split(self.sourceFileName)[1][:-3]\n sourceFileStr = \"\"\n elif argType == \"string\":\n self.sourceFileName = \"\"\n self.mainClass = \"???\"\n sourceFileStr = sourceFileName\n else:\n raise Exception(\"Arg type must be either file or string\")\n self.curClass = \"\"\n \n self.functions = []\n\n if (len(self.sourceFileName)):\n fd = open(self.sourceFileName)\n sourceFileStr = \"\".join(fd.readlines())\n fd.close() \n \n # Make sure supportedAccessSpecifier are sane\n \n self.braceDepth = 0\n lex.lex()\n lex.input(sourceFileStr)\n curLine = 0\n curChar = 0\n function_name=\"\"\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n self.openBraceStack = []\n self.closeBraceStack = []\n self.classstack = []\n self.openBraceStackClass = []\n self.closeBraceStackClass = []\n self.paramStack = []\n self.namespace = \"\"\n while True:\n tok = lex.token()\n if not tok: break\n curLine = tok.lineno\n curChar = tok.lexpos\n if tok.type == 'NAME':\n if tok.value in keywords:\n continue\n if len(self.openParenStack)>len(self.closeParenStack):\n continue\n self.nameStack.append(tok)\n\n elif tok.type == 'SEMI_COLON':\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n self.namespace = \"\"\n\n elif tok.type == 'OPEN_BRACE':\n if len(self.nameStack)>=2 and self.nameStack[-2].value==\"class\":\n #class named的情况下\n classname = self.nameStack[-1].value\n if len(self.classstack)>0: #如果有class,将class的大括号入栈\n self.openBraceStackClass.append(tok)\n self.classstack.append(classname)\n self.openBraceStackClass = [] #只有一个class\n self.closeBraceStackClass = []\n self.openBraceStackClass.append(tok)\n continue\n\n if len(self.nameStack)>=2 and len(self.openParenStack)==1\\\n and len(self.closeParenStack)==1: #函数的情况\n #只有函数名的情况\n function_name = self.nameStack[-1].value\n self.openBraceStack = []\n self.closeBraceStack = []\n self.openBraceStack.append(tok)\n if function_name == \"const\":\n function_name = self.nameStack[-2].value\n if self.namespace != \"\":\n function_name = self.namespace+\"::\"+function_name\n elif len(self.classstack)>0:\n function_name = self.classstack[-1]+\"::\"+function_name\n fo = FunctionObj()\n fo.name = function_name\n fo.startline = tok.lineno\n self.functions.append(fo)\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n continue\n\n self.openBraceStack.append(tok)\n\n self.nameStack = []\n self.namespace = \"\"\n\n elif tok.type == 'CLOSE_BRACE':\n self.closeBraceStack.append(tok)\n self.closeBraceStackClass.append(tok)\n if len(self.closeBraceStack) == len(self.openBraceStack):\n if function_name:\n self.functions[-1].endline = tok.lineno\n function_name = \"\"\n if len(self.closeBraceStackClass) == len(self.openBraceStackClass):\n self.classname = \"\"\n self.namespace = \"\"\n\n elif tok.type == 'OPEN_PAREN':\n self.openParenStack.append(tok) \n elif tok.type == 'CLOSE_PAREN':\n pos = 0\n if len(self.openParenStack)>0:\n pos = self.openParenStack[-1].lexpos\n temp = []\n temp.extend(self.nameStack) \n for idx in range(len(temp)):\n tt = temp[idx]\n if tt.lexpos>pos:\n self.nameStack.remove(tt)\n self.closeParenStack.append(tok) \n elif tok.type == 'COLONCOLON':\n if len(self.openParenStack)>len(self.closeParenStack):\n continue\n if 
len(self.nameStack)>0:\n self.namespace = self.nameStack[-1].value\n else:\n pass",
"def make_style(self, opts=(), **kwargs):\n if len(kwargs) == 0 and len(opts) == 0:\n return lambda text: text\n return lambda text: self.colorize(text, opts, **kwargs)",
"def find_style(term):\r\n\r\n styles = all_styles()\r\n found_styles = []\r\n\r\n if term =='':\r\n return found_styles\r\n else:\r\n for style in styles:\r\n if term in style:\r\n found_styles.append(style)\r\n else:\r\n continue\r\n\r\n return found_styles",
"def get_color_class(self, pt, classes):\n\n view_id = self.view.buffer_id()\n if not self.color_classes[view_id] or self.view.settings().get('color_helper.refresh', True):\n util.debug(\"Clear color class stash\")\n self.view.settings().set('color_helper.refresh', False)\n self.color_classes[view_id] = util.get_settings_colors()\n\n # Check if the first point within the color matches our scope rules\n # and load up the appropriate color class\n color_class = None\n filters = []\n for item in classes:\n try:\n value = self.view.score_selector(pt, item[\"scopes\"])\n if not value:\n continue\n else:\n class_options = self.color_classes[view_id].get(item[\"class\"])\n if class_options is None:\n continue\n module = class_options.get(\"class\", \"ColorHelper.lib.coloraide.Color\")\n if isinstance(module, str):\n if module == \"ColorHelper.lib.coloraide.Color\":\n color_class = self.base\n else:\n # Initialize the color module and cache it for this view\n color_class = util.import_color(module)\n class_options[\"class\"] = color_class\n else:\n color_class = module\n filters = class_options.get(\"filters\", [])\n break\n except Exception:\n pass\n return color_class, filters",
"def using_classes(order):\n\n # create a list of class instances for each valid `color`\n colors = [Color(**getColor(color)) for color in order if getColor(color) is not None]\n # print the color information for each class instance\n for color in colors:\n print(color)",
"def css_tree(self) -> Tree:\n from rich.columns import Columns\n from rich.console import Group\n from rich.panel import Panel\n\n from .widget import Widget\n\n def render_info(node: DOMNode) -> Columns:\n \"\"\"Render a node for the tree.\"\"\"\n if isinstance(node, Widget):\n info = Columns(\n [\n Pretty(node),\n highlighter(f\"region={node.region!r}\"),\n highlighter(\n f\"virtual_size={node.virtual_size!r}\",\n ),\n ]\n )\n else:\n info = Columns([Pretty(node)])\n return info\n\n highlighter = ReprHighlighter()\n tree = Tree(render_info(self))\n\n def add_children(tree: Tree, node: DOMNode) -> None:\n \"\"\"Add children to the tree.\"\"\"\n for child in node.children:\n info: RenderableType = render_info(child)\n css = child.styles.css\n if css:\n info = Group(\n info,\n Panel.fit(\n Text(child.styles.css),\n border_style=\"dim\",\n title=\"css\",\n title_align=\"left\",\n ),\n )\n branch = tree.add(info)\n if tree.children:\n add_children(branch, child)\n\n add_children(tree, self)\n return tree",
"def get_tokendefs(cls):\r\n tokens = {}\r\n inheritable = {}\r\n for c in itertools.chain((cls,), cls.__mro__):\r\n toks = c.__dict__.get('tokens', {})\r\n\r\n for state, items in toks.items():\r\n curitems = tokens.get(state)\r\n if curitems is None:\r\n tokens[state] = items\r\n try:\r\n inherit_ndx = items.index(inherit)\r\n except ValueError:\r\n continue\r\n inheritable[state] = inherit_ndx\r\n continue\r\n\r\n inherit_ndx = inheritable.pop(state, None)\r\n if inherit_ndx is None:\r\n continue\r\n\r\n # Replace the \"inherit\" value with the items\r\n curitems[inherit_ndx:inherit_ndx+1] = items\r\n try:\r\n new_inh_ndx = items.index(inherit)\r\n except ValueError:\r\n pass\r\n else:\r\n inheritable[state] = inherit_ndx + new_inh_ndx\r\n\r\n return tokens",
"def annotate_conc(tokens):\n from colorama import Fore, Back, Style, init\n init(autoreset=True)\n cols = get_matching_indices(tokens)\n color = tokens[-1]\n if color in ['dim', 'normal', 'bright']:\n sty = 'Style'\n elif 'back' in tokens or 'background' in tokens:\n sty = 'Back'\n else:\n sty = 'Fore'\n #if tokens[-2].lower() in ['back', 'dim', 'fore', 'normal', 'bright', 'background', 'foreground']:\n # sty = tokens[-2].title().replace('ground', '')\n\n for line in cols:\n if not int(line) in list(objs.concordance.index):\n continue\n # if there is already info for this line number, add present info\n if objs._conc_colours[len(objs._old_concs)-1].get(line):\n objs._conc_colours[len(objs._old_concs)-1][line][sty] = color\n else:\n objs._conc_colours[len(objs._old_concs)-1][line] = {}\n objs._conc_colours[len(objs._old_concs)-1][line][sty] = color\n\n single_command_print(['concordance'] + tokens)",
"def _get_annotation_class_attr(self, index, el):\n\n attr = {}\n cls = ['annotatable-span', 'highlight']\n highlight_key = 'highlight'\n color = el.get(highlight_key)\n\n if color is not None:\n if color in self.highlight_colors:\n cls.append('highlight-' + color)\n attr['_delete'] = highlight_key\n attr['value'] = ' '.join(cls)\n\n return {'class': attr}",
"def _generate_header(self, resources):\n header = []\n\n # Add pygments CSS\n\n from pygments.formatters import HtmlFormatter\n formatter = HtmlFormatter(style=self.style)\n pygments_css = formatter.get_style_defs(self.highlight_class)\n header.append(pygments_css)\n\n return header",
"def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")",
"def add_css_classes(self, *css_classes):\n for cls in css_classes:\n self._css_classes.add(cls)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Just format the tokens, without any wrapping tags. Yield individual lines. | def _format_lines(self, tokensource):
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = ''
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_class(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line += (lspan and '</span>') + cspan + part + \
(cspan and '</span>') + lsep
else: # both are the same
line += part + (lspan and '</span>') + lsep
yield 1, line
line = ''
elif part:
yield 1, cspan + part + (cspan and '</span>') + lsep
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line += (lspan and '</span>') + cspan + parts[-1]
lspan = cspan
else:
line += parts[-1]
elif parts[-1]:
line = cspan + parts[-1]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
yield 1, line + (lspan and '</span>') + lsep | [
"def tokens(self):\n for t in self._ast.tokens:\n yield t",
"def token_layout_generator(self):\n tfs_space, tfs_newline = ' ', os.linesep\n yield (Token.Prompt, self.config['IPYSH_TERMINAL_PROMPT'])\n\n layout = self.config['IPYSH_TOKEN_LAYOUT']\n if layout not in self.LAYOUT_OPTIONS:\n layout = self.defaults['IPYSH_TOKEN_LAYOUT']\n if layout == self.OFF:\n raise StopIteration\n elif layout == self.ONELINE:\n yield (Token, tfs_space)\n else:\n yield (Token, tfs_newline)\n\n token_sep = tfs_newline if layout == self.STACKED else tfs_space\n format_label = '{:{width}} : '.format\n width = max([len(label) for label in self.LABELS.itervalues()])\n\n # Current Dicrectory\n yield (Token.Prompt, ']')\n yield (Token, self._prepare_current_directory_token())\n yield (Token.Prompt, '[')\n\n # Git Branch\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, self._prepare_current_branch_token())\n #yield (Token.Prompt, '(Branch: ' )\n yield (Token.Prompt, format_label('Branch', width=width))\n yield (Token.Prompt, '(' )\n\n # Python Virtual Environment\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, self._prepare_current_venv_token())\n #yield (Token.Prompt, '(PyEnv: ' )\n yield (Token.Prompt, format_label('PyEnv', width=width))\n yield (Token.Prompt, '(' )\n\n # Application Environment\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, os.environ.get('APP_ENVIRONMENT', 'System'))\n yield (Token.Prompt, format_label('AppEnv', width=width))\n yield (Token.Prompt, '(' )",
"def read_tokens(self, include_tag=False):\n for line in self.readlines():\n start = line.find('>') + 1 if not include_tag else 0\n line = line[start:].strip(string.whitespace)\n tokens = line.split()\n for token in tokens:\n yield token",
"def _group_tokens(tokens):\n prev_vid = None\n buffer = []\n for x in tokens:\n vid = {\"chapterId\": x.pop(\"chapterId\"), \"verseNum\": x.pop(\"verseNum\")}\n if prev_vid and prev_vid != vid:\n yield {**prev_vid, **{\"tokens\": buffer}}\n buffer = []\n prev_vid = vid\n buffer.append(x)\n yield {**vid, **{\"tokens\": buffer}}",
"def _render(self, tokens, options, env):\n pending_tags = []\n pending_content = [[]]\n for t, token in enumerate(tokens):\n if token.type == \"fence\": # Special case\n pending_content[-1].append(self.fence(tokens, t, options, env))\n elif token.tag != \"\":\n if not token.nesting: # Directly append to content\n c = [token.content] if token.content else []\n tag = getattr(dominate.tags, token.tag)\n tag = tag(*c) if token.attrs is None else tag(*c, **token.attrs)\n pending_content[-1].append(tag)\n elif len(pending_tags) > 0 and pending_tags[-1] == token.tag: # Closing tag\n t = pending_tags.pop()\n c = pending_content.pop()\n tag = getattr(dominate.tags, t)\n tag = tag(c) if token.attrs is None else tag(c, **token.attrs)\n pending_content[-1].append(tag)\n else: # Opening tag\n if token.tag == \"p\" and len(pending_tags) > 0 and pending_tags[-1] == \"li\":\n continue\n\n pending_tags.append(token.tag)\n pending_content.append([])\n elif token.children is not None:\n assert len(token.children) > 0\n pending_content[-1].extend(self._render(token.children, options, env))\n else:\n if not token.hidden:\n pending_content[-1].append(escapeHtml(token.content))\n\n assert len(pending_tags) == 0, pending_tags\n assert len(pending_content) == 1, pending_content\n\n return pending_content[-1]",
"def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError(\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column//tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n if line[pos] == '#':\r\n comment_token = line[pos:].rstrip('\\r\\n')\r\n nl_pos = pos + len(comment_token)\r\n yield (COMMENT, comment_token,\r\n (lnum, pos), (lnum, pos + len(comment_token)), line)\r\n yield (NL, line[nl_pos:],\r\n (lnum, nl_pos), (lnum, len(line)), line)\r\n else:\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError(\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n newline = NEWLINE\r\n if parenlev > 0:\r\n newline = NL\r\n yield (newline, token, spos, epos, line)\r\n elif initial == '#':\r\n assert not token.endswith(\"\\n\")\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n # This yield is new; needed for better idempotency:\r\n yield (NL, token, spos, (lnum, pos), line)\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')",
"def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column/tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n yield (parenlev > 0 and NL or NEWLINE,\r\n token, spos, epos, line)\r\n elif initial == '#':\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')",
"def itercodelines(self):\r\n codeline = CodeLine(0)\r\n for token in self.itertokens():\r\n codeline.append(token)\r\n if codeline.complete:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline\r\n codeline = CodeLine(codeline.end_row + 1)\r\n if codeline.string:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline",
"def _print_tokens(self, tokens) -> None:\n print(' '.join([self.get_index_token(tok.item()) for tok in tokens]))\n return",
"def _annotate_first_pass(self, tokens):\n for aug_tok in tokens:\n self._first_pass_annotation(aug_tok)\n yield aug_tok",
"def _annotate_tokens(self, tokens):\n # Make a preliminary pass through the document, marking likely\n # sentence breaks, abbreviations, and ellipsis tokens.\n tokens = self._annotate_first_pass(tokens)\n\n # Make a second pass through the document, using token context\n # information to change our preliminary decisions about where\n # sentence breaks, abbreviations, and ellipsis occurs.\n tokens = self._annotate_second_pass(tokens)\n\n return tokens",
"def merge(self, tokens):\r\n tokens = iter(tokens)\r\n (lasttype, lastval) = tokens.next()\r\n for ttype, value in tokens:\r\n if ttype is lasttype:\r\n lastval += value\r\n else:\r\n yield(lasttype, lastval)\r\n (lasttype, lastval) = (ttype, value)\r\n if lastval.endswith('\\n'):\r\n lastval = lastval[:-1]\r\n if lastval:\r\n yield(lasttype, lastval)",
"def _format(\n self, node: Node, level: int = 0, prefix: Text = \"\", suffix: Text = \"\"\n ) -> Iterator[_Line]:\n\n # noinspection PyTypeChecker\n yield from {\n FlatNode: self._format_flat,\n ListNode: self._format_list,\n MappingNode: self._format_mapping,\n }[node.__class__](node, level, prefix, suffix)",
"def tokenize(self):",
"def transform(self, actual_tokens):\n POGGER.debug(\"\\n\\n---\\n\")\n transform_state, output_html, actual_tokens_size = (\n TransformState(actual_tokens),\n \"\",\n len(actual_tokens),\n )\n for next_token in transform_state.actual_tokens:\n (\n transform_state.add_trailing_text,\n transform_state.add_leading_text,\n transform_state.next_token,\n ) = (None, None, None)\n if (transform_state.actual_token_index + 1) < actual_tokens_size:\n transform_state.next_token = actual_tokens[\n transform_state.actual_token_index + 1\n ]\n if next_token.token_name in self.start_token_handlers:\n start_handler_fn = self.start_token_handlers[next_token.token_name]\n output_html = start_handler_fn(output_html, next_token, transform_state)\n\n elif next_token.is_end_token:\n if next_token.type_name in self.end_token_handlers:\n end_handler_fn = self.end_token_handlers[next_token.type_name]\n output_html = end_handler_fn(\n output_html, next_token, transform_state\n )\n else:\n assert (\n False\n ), f\"Markdown token end type {next_token.type_name} not supported.\"\n else:\n assert False, f\"Markdown token type {type(next_token)} not supported.\"\n\n POGGER.debug(\"======\")\n POGGER.debug(\n \"add_trailing_text-->$<--\",\n transform_state.add_trailing_text,\n )\n POGGER.debug(\"add_leading_text -->$<--\", transform_state.add_leading_text)\n POGGER.debug(\"output_html -->$<--\", output_html)\n\n if transform_state.add_trailing_text:\n output_html = self.__apply_trailing_text(output_html, transform_state)\n\n if transform_state.add_leading_text:\n output_html = self.__apply_leading_text(output_html, transform_state)\n\n POGGER.debug(\"------\")\n POGGER.debug(\"next_token -->$<--\", next_token)\n POGGER.debug(\"output_html -->$<--\", output_html)\n POGGER.debug(\"transform_stack-->$<--\", transform_state.transform_stack)\n\n transform_state.last_token = next_token\n transform_state.actual_token_index += 1\n if output_html and output_html[-1] == ParserHelper.newline_character:\n output_html = output_html[:-1]\n POGGER.debug(\"output_html -->$<--\", output_html)\n return output_html",
"def structure_representation(self):\n lines = []\n for token in self.tokens:\n head = token.head.id if token.head is not None else 0\n lemma = token.lemma if token.lemma is not None else '_'\n line = '{token.id}\\t{token.text}\\t{lemma}\\t{token.pos}\\t_\\t_\\t' \\\n '{head}\\t{token.dependency_relation}' \\\n '' \\\n ''\n line = line.format(token=token, lemma=lemma, head=head)\n lines.append(line)\n\n return '\\n'.join(lines)",
"def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()",
"def right_truncations (tokens):\n while tokens:\n yield tokens\n tokens = tokens [1 :]",
"def make_nl_token(self):\n t1 = token_module.NEWLINE\n t2 = '\\n'\n t3 = (0, 0) # Not used.\n t4 = (0, 0) # Not used.\n t5 = '\\n'\n return t1, t2, t3, t4, t5"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Highlight the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`. | def _highlight_lines(self, tokensource):
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value | [
"def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)",
"def _highlit_line(content, offsets, markup, markdown, encoding):\n def chunks():\n try:\n # Start on the line the highlights are on:\n chars_before = content.rindex('\\n', 0, offsets[0][0]) + 1\n except ValueError:\n chars_before = None\n for start, end in offsets:\n yield cgi.escape(content[chars_before:start].decode(encoding,\n 'replace'))\n yield markup\n yield cgi.escape(content[start:end].decode(encoding, 'replace'))\n yield markdown\n chars_before = end\n # Make sure to get the rest of the line after the last highlight:\n try:\n next_newline = content.index('\\n', chars_before)\n except ValueError: # eof\n next_newline = None\n yield cgi.escape(content[chars_before:next_newline].decode(encoding,\n 'replace'))\n return ''.join(chunks()).lstrip()",
"def convert_line_to_html(self, empty):\n\n line = []\n do_highlight = self.curr_row in self.hl_lines\n\n while self.end <= self.size:\n # Get text of like scope\n scope_name = self.view.scope_name(self.pt)\n while self.view.scope_name(self.end) == scope_name and self.end < self.size:\n self.end += 1\n\n color_match = self.view.style_for_scope(scope_name)\n color = color_match.get('foreground', self.fground)\n bgcolor = color_match.get('background')\n style = []\n if color_match.get('bold', False):\n style.append('bold')\n if color_match.get('italic', False):\n style.append('italic')\n if color_match.get('underline', False):\n style.append('underline')\n if color_match.get('glow', False):\n style.append('glow')\n\n if do_highlight:\n sfg = color_match.get('selection_forground', self.defaults.get('selection_forground'))\n if sfg:\n color = sfg\n bgcolor = color_match.get('selection', '#0000FF')\n\n region = sublime.Region(self.pt, self.end)\n # Normal text formatting\n tidied_text = self.html_encode(self.view.substr(region))\n self.format_text(line, tidied_text, color, bgcolor, style, empty)\n\n # Continue walking through line\n self.pt = self.end\n self.end = self.pt + 1\n\n # ~~~\n # # Get the color for the space at the end of a line\n # if self.end < self.view.size():\n # end_key = self.view.scope_name(self.pt)\n # color_match = self.view.style_for_scope(end_key)\n # self.ebground = color_match.get('background')\n # ~~~\n\n # Join line segments\n return ''.join(line)",
"def highlight_line(self, line, fcolor, bcolor):\n pass",
"def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.document().markContentsDirty(block.position(),\r\n block.position() + block.length())\r\n self.rehighlightBlock(block)",
"def apply(self, *args) -> \"void\":\n return _coin.SoLineHighlightRenderAction_apply(self, *args)",
"def highlight_regions(self, replaced_lines):\n # type: (List[HunkReference]) -> None\n add_regions = [] # type: List[sublime.Region]\n add_bold_regions = []\n remove_regions = [] # type: List[sublime.Region]\n remove_bold_regions = []\n\n for section_start, section_end, hunk, line_types, raw_lines in replaced_lines:\n for line_type, lines_ in groupby(\n range(section_start, section_end),\n key=lambda line: line_types[line - section_start]\n ):\n lines = list(lines_)\n start, end = lines[0], lines[-1]\n start_line = self.view.full_line(self.view.text_point(start, 0))\n end_line = (\n self.view.full_line(self.view.text_point(end, 0))\n if start != end\n else start_line\n )\n region = sublime.Region(start_line.begin(), end_line.end())\n container = add_regions if line_type == \"+\" else remove_regions\n container.append(region)\n\n # For symmetric modifications show highlighting for the in-line changes\n if sum(1 if t == \"+\" else -1 for t in line_types) == 0:\n # Determine start of hunk/section.\n section_start_idx = self.view.text_point(section_start, 0)\n\n # Removed lines come first in a hunk.\n remove_start = section_start_idx\n first_added_line = line_types.index(\"+\")\n add_start = section_start_idx + len(\"\".join(raw_lines[:first_added_line]))\n\n removed_part = \"\".join(raw_lines[:first_added_line])\n added_part = \"\".join(raw_lines[first_added_line:])\n changes = util.diff_string.get_changes(removed_part, added_part)\n\n for change in changes:\n if change.type in (util.diff_string.DELETE, util.diff_string.REPLACE):\n # Display bold color in removed hunk area.\n region_start = remove_start + change.old_start\n region_end = remove_start + change.old_end\n remove_bold_regions.append(sublime.Region(region_start, region_end))\n\n if change.type in (util.diff_string.INSERT, util.diff_string.REPLACE):\n # Display bold color in added hunk area.\n region_start = add_start + change.new_start\n region_end = add_start + change.new_end\n add_bold_regions.append(sublime.Region(region_start, region_end))\n\n self.view.add_regions(\n \"git-savvy-added-lines\",\n add_regions,\n scope=\"diff.inserted.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-removed-lines\",\n remove_regions,\n scope=\"diff.deleted.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-added-bold\",\n add_bold_regions,\n scope=\"diff.inserted.char.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-removed-bold\",\n remove_bold_regions,\n scope=\"diff.deleted.char.git-savvy.inline-diff\"\n )",
"def rehighlight_lines(self, lines, errors=True):\r\n if errors:\r\n errors_lines = self._get_errors_lines()\r\n refresh_lines = set(lines + errors_lines)\r\n else:\r\n refresh_lines = set(lines + self.selected_word_lines)\r\n self.selected_word_lines = lines\r\n self._rehighlight_lines(refresh_lines)",
"def _highlight_line_difflib(self, line, next_):\n\n if line['action'] == 'del':\n old, new = line, next_\n else:\n old, new = next_, line\n\n oldwords = self._token_re.split(old['line'])\n newwords = self._token_re.split(new['line'])\n sequence = difflib.SequenceMatcher(None, oldwords, newwords)\n\n oldfragments, newfragments = [], []\n for tag, i1, i2, j1, j2 in sequence.get_opcodes():\n oldfrag = ''.join(oldwords[i1:i2])\n newfrag = ''.join(newwords[j1:j2])\n if tag != 'equal':\n if oldfrag:\n oldfrag = '-'\n if newfrag:\n newfrag = '+'\n oldfragments.append(oldfrag)\n newfragments.append(newfrag)\n\n old['line'] = \"\".join(oldfragments)\n new['line'] = \"\".join(newfragments)",
"def print_highlighted(line, hl_color=Back.WHITE):\n try:\n # Highlight positives\n colorer = re.compile(r'([^\\s]+) POSITIVES: ([1-9]) ')\n line = colorer.sub(Fore.YELLOW + r'\\1 ' + 'POSITIVES: ' + Fore.YELLOW + r'\\2 ' + Style.RESET_ALL, line)\n colorer = re.compile(r'([^\\s]+) POSITIVES: ([0-9]+) ')\n line = colorer.sub(Fore.RED + r'\\1 ' + 'POSITIVES: ' + Fore.RED + r'\\2 ' + Style.RESET_ALL, line)\n # Keyword highlight\n colorer = re.compile(r'([A-Z_]{2,}:)\\s', re.VERBOSE)\n line = colorer.sub(Fore.BLACK + hl_color + r'\\1' + Style.RESET_ALL + ' ', line)\n print line\n except Exception, e:\n pass",
"def rehighlight_lines(self, lines, errors=True):\r\n if errors:\r\n errors_lines = self._get_errors_lines()\r\n refresh_lines = set(lines + errors_lines)\r\n else:\r\n refresh_lines = set(lines)\r\n self._rehighlight_lines(refresh_lines)",
"def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep",
"def detectLines(self, lines=...) -> lines:\n ...",
"def refresh_lines_highlight(self, linenos):\n Vim.command('setlocal modifiable')\n\n sz = min(len(self.nodes), len(Vim.current.buffer))\n for i in linenos:\n if i < sz:\n self.vim_set_line(i, self.nodes[i].highlight_content)\n Vim.command('setlocal nomodifiable')",
"def print_highlight(source_file, syntax, theme, print_only_matches=False):\n \n def get_colour_code(name):\n \"\"\" Returns:\n str: Colour code associated with syntax name.\n \"\"\"\n \n return \"\\033[{}m\".format(theme[name])\n \n # base colour escape code - no colour\n start_code = end_code = \"\\033[0m\"\n \n # open file containing source code\n with open(source_file, \"r\") as source:\n \n line = source.readline()\n while line:\n regex_match_in_line = False;\n \n # Special case: comments do not contain source code\n comment_line = \"\"\n \n # for each given syntax entry\n for k in syntax.keys():\n \n # get syntax attributes and get regex result\n syntax_name = syntax[k]\n start_code = get_colour_code(syntax_name)\n regex = re.compile(r'{}'.format(k)) # test for regex match in current line\n result = regex.search(line)\n \n i = 0\n # while there is a regex match in line (in case multiple of same syntax present)\n while result:\n regex_match_in_line = True\n \n # GET AND PREPARE MATCHED SLICE OF LINE\n \n # indexes of result\n span = result.span()\n # if there is no such result but still bypassed while, break\n if not result.string or span[0] == span[1] == 0:\n break\n # account for index shift if multiple results in line\n indices = (span[0] + i, span[1] + i)\n \n \n # ADD HIGHLIGHTS\n \n # does not colour encasing whitespaces\n if indices[1] != len(line) and line[indices[1] - 1] == ' ':\n line = line[:indices[1] - 1] + end_code + line[indices[1] - 1:]\n else:\n line = line[:indices[1]] + end_code + line[indices[1]:]\n if indices[0] != 0 and line[indices[0]] == ' ':\n line = line[:indices[0] + 1] + start_code + line[indices[0] + 1:]\n else:\n line = line[:indices[0]] + start_code + line[indices[0]:]\n \n \n \n # SPECIAL SYNTAX CASES\n \n # Special case 1: comments do not contain source code\n if syntax_name == \"comment\":\n comment_line = line[indices[0]:]\n line = line[:indices[0]]\n \n # Special case 2: colour each 'word' in string literals\n if syntax_name == \"string\":\n # get string literal\n string_match = line[indices[0] + len(start_code):indices[1] + len(start_code)]\n initial_string_length = len(string_match)\n # get all whitespaces in string using regex\n whitespaces_in_string = [(match.start(), match.end()) for match in re.finditer(\"\\s\", string_match)]\n \n if whitespaces_in_string:\n j = 0\n # for each whitespace, encase 'word' in highlight colour for string\n for w in whitespaces_in_string:\n string_match = string_match[:w[0] + j] + end_code + string_match[w[0] + j:w[1] + j] + start_code + string_match[w[1] + (w[1]-w[0]) + j - 1:]\n j += len(start_code) + len(end_code)\n # ensure highlight for first 'word' in string literal\n if not string_match[1 + len(end_code)].isspace():\n string_match = string_match[0] + start_code + string_match[1 + len(end_code):]\n i += len(string_match) - initial_string_length\n \n # insert modified string to line\n line = line[:indices[0] + len(start_code)] + string_match + line[indices[0] + len(start_code) + initial_string_length:]\n \n # PREPARE NEXT ITERATION\n # get new i shifted - to search rest of line for same regex\n i = indices[1] + len(end_code) + len(start_code)\n # get result from rest of line\n result = regex.search(line[i:])\n \n # Special case 1: comments do not contain source code\n # remove all other colour codes from comment\n if comment_line:\n # find colour escape code using regex\n colour_regex = re.compile(r'\\033\\[(.*?)m')\n i = 3\n result = colour_regex.search(comment_line[3:])\n # for each colour escape code within 
comment,\n # remove escape code and update comment line to print\n while result:\n comment_line = comment_line[:(result.span()[0]) + i] + comment_line[(result.span()[1] + i):]\n i = result.span()[0]\n result = colour_regex.search(comment_line[i:])\n comment_line = comment_line.rstrip() + \"\\033[0m\"\n \n # print coloured line and get next line\n if not print_only_matches or regex_match_in_line:\n print(line.strip('\\n') + comment_line.strip('\\n'))\n line = source.readline()",
"def _highlight_composition(self):\n\n self._line.setUpdatesEnabled(False)\n ################# UPDATES DISABLED #################\n\n # clear any existing text colors\n self._color_clear()\n\n # the parse failed, so there will be invalid text to highlight\n if self._parser_error:\n self._color_invalid()\n\n # paint any valid tokens\n self._color_tokens()\n\n ################# UPDATES ENABLED #################\n self._line.setUpdatesEnabled(True)\n\n # done\n return",
"def __call__(self, source, language=None, metadata=None):\n from pygments.formatters import HtmlFormatter\n\n if not language:\n language = self.pygments_lexer\n\n return _pygments_highlight(\n source if len(source) > 0 else \" \",\n # needed to help post processors:\n HtmlFormatter(\n cssclass=escape(f\" highlight hl-{language}\"), **self.extra_formatter_options\n ),\n language,\n metadata,\n )",
"def highlight_line(self, line, fcolor, bcolor):\n self._run_commands([\n \"highlight CoverletLine{0} ctermfg={1} ctermbg={2}\".format(str(line), fcolor, bcolor),\n 'let s:coverlet_match_{0} = matchaddpos(\"CoverletLine{1}\", [[{2}, 1, 1]])'.format(str(line), str(line), str(line))\n ])",
"def highlight_errored_lines(code_edit, error_line_numbers):\n extraSelections = []\n\n cursor = code_edit.textCursor()\n doc = code_edit.document()\n for lineno in error_line_numbers:\n\n selection = QtWidgets.QTextEdit.ExtraSelection()\n lineColor = QtGui.QColor.fromRgbF(0.8,\n 0.1,\n 0,\n 0.2)\n\n selection.format.setBackground(lineColor)\n selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection,\n True)\n\n block = doc.findBlockByLineNumber(lineno-1)\n cursor.setPosition(block.position())\n selection.cursor = cursor\n selection.cursor.clearSelection()\n extraSelections.append(selection)\n code_edit.setExtraSelections(extraSelections)",
"def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the character size. | def get_char_size(self):
return self.fonts['NORMAL'].getsize('M') | [
"def getTextLen(self):\r\n return self.TextLen",
"def getTextSize(self):\n return self.textSize",
"def string_length(self):\n return type_get_string_length(self)",
"def getLength(self) -> \"int\":\n return _coin.SbName_getLength(self)",
"def characters_count(self) -> int:\n return self.characters.count()",
"def get_nbr_of_characters():\n\tchars = Character.objects()\n\treturn len(chars)",
"def getTextLength(self):\n return len(self.pokemon.name)",
"def width(self):\n\n\t\treturn self.fontsize / 2 * len(self.text)",
"def _get_length(self):\n return self.Data.Length",
"def message_size(self):\n string = self.message.encode('utf8')\n return len(string)",
"def getDimensions(self):\n #return .75*self._size*(2+len(self._text)), 2.*self._size\n return _graphicsManager.addCommandToQueue((\"get text size\", self._text, self._size), True)",
"def read_size(self):\n if self.data_length is None:\n return 8 - len(self.buffer)\n if self.data_length:\n return self.data_length - len(self.buffer) # control length is really part of the data.\n if self.control_length is None:\n return 4 - len(self.buffer)\n return self.control_length - len(self.buffer)",
"def getcontentlength(self):\n return DAVElement.getcontentlength( str(self.st_size) )",
"def byte_length(text: str) -> int:\n return len(text.encode(\"utf8\"))",
"def _string_width(self, s):\r\n s = str(s)\r\n w = 0\r\n for i in s:\r\n w += self.character_widths[i]\r\n return w * self.font_size / 1000.0",
"def getFontSize(self) -> \"float\":\n return _coin.SoCallbackAction_getFontSize(self)",
"def char_size_px(self) -> \"tuple[int, int]\":\n px, py = self.term_size_px\n rows, cols = self.output.get_size()\n # If we can't get the pixel size, just guess wildly\n return px // cols or 10, py // rows or 22",
"def max_characters(self) -> int:\n return self._max_characters",
"def getBufferSize(self) -> \"size_t\":\n return _coin.SoOutput_getBufferSize(self)",
"def get_file_size(self):\n return self.file_size"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the font based on bold and italic flags. | def get_font(self, bold, oblique):
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL'] | [
"def Font(self, attr=None):\n if attr is None:\n self._font = 0\n else:\n mask = 1 << attr\n self._font ^= mask\n font = self._font & ((1 << renderer.BOLD) |\n (1 << renderer.CODE) |\n (1 << renderer.ITALIC))\n if font & (1 << renderer.CODE):\n embellishment = 'C'\n elif font == ((1 << renderer.BOLD) | (1 << renderer.ITALIC)):\n embellishment = 'Z'\n elif font == (1 << renderer.BOLD):\n embellishment = 'B'\n elif font == (1 << renderer.ITALIC):\n embellishment = 'I'\n else:\n embellishment = 'N'\n return self._csi + embellishment",
"def font(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleFont(style, substyle)\n else:\n font = self.__lex.font(style)\n \n return font",
"def getFont(self):\r\n return self.font",
"def createFont(family=None, size=16, bold=False, italic=False):\n return {\"font_name\": family, \"font_size\": size, \"bold\": bold,\n \"italic\": italic}",
"def _format_font(self, value):\n try:\n return self.root.fonts[value]\n except KeyError:\n message = \"'%s' font is not defined\" % value\n raise KeyError(message)",
"def findfont(self, fontnames): ###\n\n def matchfont(fontname):\n bold = italic = False\n for i in range(0, 1):\n if fontname.lower().endswith(\" italic\"):\n italic = True\n fontname = fontname[: -len(\" italic\")]\n if fontname.lower().endswith(\" bold\"):\n bold = True\n fontname = fontname[: -len(\" bold\")]\n try:\n f = pygame.font.match_font(fontname, bold=int(bold), italic=int(italic))\n except MemoryError:\n f = (\n pygame.font.get_default_font()\n ) # works around mysterious issue on Japanese systems reported by Chad Boulay 20121115\n return f\n\n if not isinstance(fontnames, (list, tuple)):\n fontnames = [fontnames]\n fontnames = [f for f in fontnames if f != None]\n f = ([_f for _f in map(matchfont, fontnames) if _f] + [None])[0]\n if (\n f == None and sys.platform == \"darwin\"\n ): # pygame on OSX doesn't seem even to try to find fonts...\n f = (\n list(\n filter(\n os.path.isfile,\n [\n os.path.realpath(\"/Library/Fonts/%s.ttf\" % x)\n for x in fontnames\n ],\n )\n )\n + [None]\n )[0]\n return f",
"def font(obj):\n return match(obj, font_matchers)",
"def get_font(font_location:str):\n \n small_font = pygame.font.Font(font_location,20)\n \n medium_font = pygame.font.Font(font_location,28)\n \n large_font = pygame.font.Font(font_location,40)\n \n return (small_font,medium_font,large_font)",
"def font(self, font_name):\n return self._font[font_name]",
"def get_font_string(self):\n return self.font().toString()",
"def __get_font(self, box):\n if box.boxstr not in self.__fonts:\n style_sheet = self.doc.get_style_sheet()\n style_name = style_sheet.get_draw_style(box.boxstr)\n style_name = style_name.get_paragraph_style()\n self.__fonts[box.boxstr] = \\\n style_sheet.get_paragraph_style(style_name).get_font()\n \n return self.__fonts[box.boxstr]",
"def getFontName(self) -> \"SbString\":\n return _coin.SoFontStyle_getFontName(self)",
"def getFontName(self) -> \"SbString\":\n return _coin.SoVRMLFontStyle_getFontName(self)",
"def get_font_asset(self) -> BitmapFontAsset:\n return self.mc.bitmap_fonts[self.options['font_name_r']]",
"def font(self):\n if self._font is not None:\n return self._font\n\n self._font = self.glyphs_module.GSFont()\n for index, ufo in enumerate(self.ufos):\n master = self.glyphs_module.GSFontMaster()\n self.to_glyphs_font_attributes(ufo, master,\n is_initial=(index == 0))\n self._font.masters.insert(len(self._font.masters), master)\n # TODO: all the other stuff!\n return self._font",
"def get_font_info(self):\n return {\n 'antialias': self._font_antialias,\n 'background_color': self._font_background_color,\n 'color': self._font_color,\n 'name': self._font_name,\n 'selected_color': self._font_selected_color,\n 'size': self._font_size\n }",
"def getFont(self):\n from pagebot.fonttoolbox.objects.font import getFont\n from pagebot.contexts.platform import getFontPaths\n fontPath = getFontPaths().get(self.font, self.font)\n return getFont(fontPath)",
"def _get_font(self):\n return self._control.document().defaultFont()",
"def getFonts():# list\n\treturn pygame.font.get_fonts()",
"def findFont(styleNames, italic=False):\n # Any TypeNetwork TYPETR Productus or Proforma installed in the system?\n fontNames = findInstalledFonts(('Proforma', 'Productus'))\n if not forceTN or not fontNames: # Not installed, find something else that is expected to exist in OSX:\n for pattern in ('Bodoni', 'AmericanTypewriter', 'Avenir', 'Georgia'):\n fontNames = findInstalledFonts(pattern)\n if fontNames:\n break\n for styleName in styleNames:\n for fontName in fontNames:\n if styleName in fontName:\n return fontName\n return None # Nothing found."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the height of a line. | def _get_line_height(self):
return self.fonth + self.line_pad | [
"def get_height(self):\n\t\treturn self.y[1] - self.y[0]",
"def lineHeight(scr, lineNode):\n if lineNode is None:\n return 0\n manyLines = (len(lineNode.value)+1)//scr.getmaxyx()[1]+1\n # above solution doesn't account for tabs\n return manyLines",
"def _get_height(self) -> \"int\" :\n return _core.TextCommandPalette__get_height(self)",
"def manh_length(self, line: Line) -> float:\n coords = self.coords_on_line(line)\n return abs(coords[0][0] - coords[1][0]) + abs(coords[0][1] - coords[1][1])",
"def height(self):\n return capi.get_band_ysize(self.ptr)",
"def get_height(self):\n return self.textsurf.get_height()",
"def getHeight( self ):\n return self.height",
"def lineByteLength(self):\n lineLength, remainder = divmod(8 + (self.pixelBitLength * self.width), 8)\n \n if remainder != 0:\n # Line breaks always happen on exact byte boundaries\n lineLength += 1\n \n return lineLength",
"def line_width(self):\n return self._data.get(b'strokeStyleLineWidth')",
"def get_text_height(self, context):\n return self.get_font_extent(context)[2]",
"def _get_height(self) -> \"int\" :\n return _core.Palette__get_height(self)",
"def lineHeight(self, p_float=None, p_float_1=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n return 0.0",
"def getHeight(self, x):\n if np.any(self.h == None):\n self.calculateHeight()\n return self.h_approx(x)",
"def textLineSize(self, text):\n\t\treturn self.window.getTextSize(text, self.font[1], self.font[0])",
"def border_height(self):\r\n return self.padding_height() + self.border_top_width + \\\r\n self.border_bottom_width",
"def height(self):\n return max([point[1] for point in self.points])",
"def get_height(self, x):\n return (x * self._xscale) + self._min_height",
"def Height(self):\n return _handle.OperatorHandle_Height(self)",
"def get_height(self):\n try:\n return self.image.height\n except Exception:\n return 0",
"def _compute_statistics_line_height(page_class: PAGE.Page, verbose: bool=False) -> Tuple[float, float, float]:\n y_lines_coords = [[c.y for c in tl.coords] for tr in page_class.text_regions for tl in tr.text_lines if tl.coords]\n line_heights = np.array([np.max(y_line_coord) - np.min(y_line_coord) for y_line_coord in y_lines_coords])\n\n # Remove outliers\n if len(line_heights) > 3:\n outliers = _is_outlier(np.array(line_heights))\n line_heights_filtered = line_heights[~outliers]\n else:\n line_heights_filtered = line_heights\n if verbose:\n print('Considering {}/{} lines to compute line height statistics'.format(len(line_heights_filtered),\n len(line_heights)))\n\n # Compute mean, std, median\n mean = np.mean(line_heights_filtered)\n median = np.median(line_heights_filtered)\n standard_deviation = np.std(line_heights_filtered)\n\n return mean, standard_deviation, median"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the Y coordinate of a line number. | def _get_line_y(self, lineno):
return lineno * self._get_line_height() + self.image_pad | [
"def ycoord(pt):\n return pt.y",
"def get_ycoord(self, y):\n return (y - self.ylimits[0]) / self.dy",
"def get_pos_y(self):\n return self._position[1]",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def get_y(self):\n\n return math.floor(self.position.y)",
"def _get_y(self) -> \"double\" :\n return _core.Point2D__get_y(self)",
"def get_line_number(self):\n return self.line_number",
"def getY(self):\n return self.pos.y",
"def yposition(self):\n return self._yposition",
"def line_number(self, line):\n ret_val = self._line_number(line)\n return ret_val",
"def GetY(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetY(self)",
"def find_subline_at_pos(self, y):\n subline_no = int(y / self.fontheight)\n if subline_no < len(self.sublines):\n return subline_no, self.sublines[subline_no][1]\n return subline_no, \"\"",
"def get_y(self):\r\n return self.get_3d_position()[\"position\"].y",
"def _get_line_end_pos(self):\n return self._get_line_end_cursor().position()",
"def _y_for_x(self, x):\n if self.slope() == float('inf'):\n return None\n return self.slope() * x + self.y_intercept()",
"def y(self, v):\n return self._ring_coordinates_gens['y'+str(v)]",
"def OriginY(self) -> float:",
"def __get_x_y(self, number):\n return number % self.map.width, number / self.map.width",
"def current_y(self):\n return self._current_position[1]",
"def y(self):\n return self.center[1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the width of a character. | def _get_char_width(self):
return self.fontw | [
"def get_width( o ):\n \"\"\"获取该字符在屏幕上的显示的长度\"\"\"\n global widths\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n if o <= chr(num):\n return wid\n return 1",
"def fontwidth(word):\n return sum([lookup.ASCIIPIXELS[letter] + 1\n if letter in lookup.ASCIIPIXELS\n else 10\n for letter in word]) - 1",
"def _string_width(self, s):\r\n s = str(s)\r\n w = 0\r\n for i in s:\r\n w += self.character_widths[i]\r\n return w * self.font_size / 1000.0",
"def _get_width(self) -> \"int\" :\n return _core.TextCommandPalette__get_width(self)",
"def width(self):\n\n\t\treturn self.fontsize / 2 * len(self.text)",
"def getWidth(self) -> \"float\":\n return _coin.SoGlyph_getWidth(self)",
"def get_width(self):\n return self.textsurf.get_width()",
"def _get_width(self) -> \"int\" :\n return _core.Palette__get_width(self)",
"def get_width(self):\n\t\treturn self.x[1] - self.x[0]",
"def characters_count(self) -> int:\n return self.characters.count()",
"def count_number_of_characters(text):\r\n return len(text)",
"def get_nbr_of_characters():\n\tchars = Character.objects()\n\treturn len(chars)",
"def ansi_len(string):\n return len(string) - wcswidth(re.compile(r'\\x1b[^m]*m').sub('', string))",
"def width(self) -> int:\n return self.winfo_width()",
"def Width(self):\n return _handle.OperatorHandle_Width(self)",
"def get_terminal_width(self):\n width = 60 # Use this as a minimum\n try:\n size = os.get_terminal_size()\n except OSError:\n size = None\n if size and size[0] > width:\n width = size[0]\n if os.name == 'nt':\n width -= 1 # Windows needs 1 empty space for newline\n return width",
"def textWidth(data):\n label = pyglet.text.Label(data,\n x=0, y=0,\n anchor_x=textAlignConst[attrib.textAlign[0]],\n anchor_y=textAlignConst[attrib.textAlign[1]],\n **attrib.font)\n return label.content_width",
"def get_width(self):\n dividechars = 1\n table_size = self.hits.get_width() + self.columns[1][0] + self.columns[2][0] + dividechars * 3\n return table_size",
"def width(self) -> float:\n return self._width",
"def columnWidth(string):\n if app.config.strict_debug:\n assert isinstance(string, unicode)\n width = 0\n for i in string:\n width += charWidth(i, width)\n return width"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
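The geometry helpers in the rows above (_get_line_height, _get_line_y, _get_char_width) reduce to simple arithmetic on a monospaced font. A minimal sketch of that arithmetic, assuming illustrative font metrics and padding; the names mirror the attributes used above, but the concrete numbers are not taken from the entries:

    def text_position(charno, lineno, char_width=8, font_height=14, line_pad=2, image_pad=10):
        # x advances one character cell per column, y one line height per line;
        # both offsets start after the image padding
        line_height = font_height + line_pad
        x = image_pad + charno * char_width
        y = image_pad + lineno * line_height
        return x, y

    print(text_position(4, 2))  # column 4 of the third line -> (42, 42)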
Get the correct font for the style. | def _get_style_font(self, style):
return self.fonts.get_font(style['bold'], style['italic']) | [
"def getFont(self):\r\n return self.font",
"def _get_font(self):\n return self._control.document().defaultFont()",
"def font(self, font_name):\n return self._font[font_name]",
"def getFont(self):\n from pagebot.fonttoolbox.objects.font import getFont\n from pagebot.contexts.platform import getFontPaths\n fontPath = getFontPaths().get(self.font, self.font)\n return getFont(fontPath)",
"def get_font_string(self):\n return self.font().toString()",
"def _format_font(self, value):\n try:\n return self.root.fonts[value]\n except KeyError:\n message = \"'%s' font is not defined\" % value\n raise KeyError(message)",
"def font(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleFont(style, substyle)\n else:\n font = self.__lex.font(style)\n \n return font",
"def getFontName(self) -> \"SbString\":\n return _coin.SoFontStyle_getFontName(self)",
"def getFontName(self) -> \"SbString\":\n return _coin.SoVRMLFontStyle_getFontName(self)",
"def __get_font(self, box):\n if box.boxstr not in self.__fonts:\n style_sheet = self.doc.get_style_sheet()\n style_name = style_sheet.get_draw_style(box.boxstr)\n style_name = style_name.get_paragraph_style()\n self.__fonts[box.boxstr] = \\\n style_sheet.get_paragraph_style(style_name).get_font()\n \n return self.__fonts[box.boxstr]",
"def default_font(self):\n \n return self._dflt_rastfont",
"def usedFont(self, defaultFont):\r\n if self.__data.paintAttributes & self.PaintUsingTextFont:\r\n return self.__data.font\r\n return defaultFont",
"def defaultFont(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleDefaultFont(style, substyle)\n else:\n font = self.__lex.defaultFont(style)\n \n return font",
"def default_font(self):\n return {\"family\": \"serif\", \"color\": \"darkred\", \"weight\": \"normal\", \"size\": 16}",
"def style_parse(ttFont):\n if 'fvar' in ttFont:\n dflt_instance_coords = {a.axisTag: a.defaultValue for a in ttFont['fvar'].axes}\n for instance in ttFont['fvar'].instances:\n if instance.coordinates == dflt_instance_coords:\n name = ttFont['name'].getName(instance.subfamilyNameID, 3, 1, 1033).toUnicode()\n return _style_parse(name)\n import os\n filename = os.path.basename(ttFont.reader.file.name)\n if len(filename.split(\"-\")) != 2:\n # Google Fonts policy on font file naming scheme\n # requires that only a single dash is used\n # to separate family name from style.\n return None\n else:\n style = filename.split(\"-\")[1].split(\".\")[0]\n return _style_parse(style)",
"def _supply_font():\n font = \"\"\n if platform == \"linux\" or platform == \"linux2\":\n font = \"/usr/share/fonts/gnu-free/FreeSans.ttf\"\n elif platform == \"darwin\":\n font = \"/Library/Fonts/arial.ttf\"\n elif platform == \"win32\":\n font = \"c:\\\\windows\\\\font\\\\arial.ttf\"\n\n if os.path.isfile(font):\n return font\n\n return None",
"def glutFont(font):\n return GLUTFONTS.get(font,GLUTFONTS['9x15'])",
"def get_font(font_location:str):\n \n small_font = pygame.font.Font(font_location,20)\n \n medium_font = pygame.font.Font(font_location,28)\n \n large_font = pygame.font.Font(font_location,40)\n \n return (small_font,medium_font,large_font)",
"def load_font(font: str, size: int) -> 'pygame.font.Font':\n return pygame_menu.font.get_font(font, size)",
"def nametofont(name):\n try:\n return font.nametofont(name)\n except tk.TclError:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
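Taken together, get_font and _get_style_font map a style's bold/italic flags onto one of four preloaded font variants. A small sketch of that mapping with PIL; the DejaVu Sans Mono file names are an assumption for illustration, not part of the entries above:

    from PIL import ImageFont

    def load_fonts(size=14):
        # one variant per bold/italic combination, as in the fonts dict used above
        return {
            'NORMAL': ImageFont.truetype('DejaVuSansMono.ttf', size),
            'BOLD': ImageFont.truetype('DejaVuSansMono-Bold.ttf', size),
            'ITALIC': ImageFont.truetype('DejaVuSansMono-Oblique.ttf', size),
            'BOLDITALIC': ImageFont.truetype('DejaVuSansMono-BoldOblique.ttf', size),
        }

    def font_for_style(fonts, style):
        # pick the variant from the style's bold/italic flags, falling back to NORMAL
        if style.get('bold') and style.get('italic'):
            return fonts['BOLDITALIC']
        if style.get('bold'):
            return fonts['BOLD']
        if style.get('italic'):
            return fonts['ITALIC']
        return fonts['NORMAL']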
Create drawables for the token content. | def _create_drawables(self, tokensource):
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
#print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno | [
"def DrawTokensBlue():\r\n for i in range(4):\r\n Tokens(TokenBlue,BlueChips[i][0],BlueChips[i][1])",
"def DrawTokensRed():\r\n for i in range(4):\r\n Tokens(TokenRed,RedChips[i][0],RedChips[i][1])",
"def make_drawable(self):\n drawable_env = []\n for i in range(self.Y):\n drawable_line = \"\"\n for j in range(self.X):\n who = '.'\n for a, b in self.atlas.items():\n if [j, i] == b:\n someone_here = True\n who = self.symbols[a]\n drawable_line += who\n drawable_env.append(drawable_line)\n return drawable_env",
"def generate_image(self) -> None:",
"def display_tokens(tokens, image):\n new_image = image.convert('RGBA')\n dr = ImageDraw.Draw(new_image)\n fnt = ImageFont.load_default()\n font_size = 28\n try:\n # Pretty font, pretty sure I'm the only one to have it though - Andy\n fnt = ImageFont.truetype(\"DejaVuSans.ttf\", font_size, encoding=\"unic\")\n except Exception:\n try:\n # More common font\n fnt = ImageFont.truetype(\"arial.ttf\", font_size, encoding=\"unic\")\n except Exception:\n print(\"fallback to default font\")\n pass\n\n token_colors = [\n (255, 0, 0),\n (0, 255, 0),\n (0, 255, 255),\n (255, 125, 0),\n (0, 255, 255),\n ]\n\n tokens = list(tokens)\n\n for token, color in zip(tokens, itertools.cycle(token_colors)):\n corners = token.pixel_corners\n centre = token.pixel_centre\n avg_point = np.mean(corners, axis=0)\n dr.line(corners + [corners[0]], fill=color, width=4)\n ellipse_pos = [\n (centre[0] - 5, centre[1] - 5),\n (centre[0] + 5, centre[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n for point in corners:\n ellipse_pos = [\n (point[0] - 5, point[1] - 5),\n (point[0] + 5, point[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n _draw_centered(\n (int(avg_point[0]), int(avg_point[1])),\n str(token.id),\n fnt,\n color,\n dr,\n )\n del dr\n return new_image",
"def create_permanent_widgets(self):\n\n\t\timg_soccer_pitch = Image(source='./data/images/background-field.png')\n\t\timg_bottom_bar = Image(source='./data/images/interface-lowpanel-plain.png', pos=(0, -260))\n\t\timg_mid_bar = Image(source='./data/images/interface-midpanel-logo.png',pos=(0, -147))\n\t\tblack_bar = Image(source='./data/images/interface-message-bar.png',pos=(0, -77))\n\n\t\tself.add_widget(img_soccer_pitch)\n\t\tself.add_widget(img_bottom_bar)\n\t\tself.add_widget(img_mid_bar)\n\t\tself.add_widget(black_bar)\n\n\t\twith self.canvas: Rectangle(pos=(0, 120), size=(360, 3))",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,0)\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.dropletSprite)",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,200)\r\n\r\n cloud = pyglet.image.load('images/cloud.png')\r\n self.cloudSprite = pyglet.sprite.Sprite(cloud)\r\n self.cloudSprite.y = 100\r\n\r\n lightening = pyglet.image.load('images/lightening.png')\r\n self.lSprite = pyglet.sprite.Sprite(lightening)\r\n self.lSprite.y = 200\r\n\r\n car = pyglet.image.load('images/car.png')\r\n self.carSprite = pyglet.sprite.Sprite(car, -500, 0)\r\n\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.cloudSprite)\r\n self.drawableObjects.append(self.lSprite)\r\n self.drawableObjects.append(self.dropletSprite)\r\n self.drawableObjects.append(self.carSprite)",
"def draw(self, file_format):\n import pydot\n\n graph = pydot.Dot(graph_type='graph', dpi=\"52\")\n for index, atom in enumerate(self.atoms):\n atom_type = '{0!s} {1!s} '.format(index+1, atom.label if atom.label != '' else '')\n atom_type += ','.join([at.label for at in atom.atomtype])\n atom_type = '\"' + atom_type + '\"'\n graph.add_node(pydot.Node(name=str(index + 1), label=atom_type, fontname=\"Helvetica\", fontsize=\"16\"))\n for atom1 in self.atoms:\n for atom2, bond in atom1.bonds.items():\n index1 = self.atoms.index(atom1)\n index2 = self.atoms.index(atom2)\n if index1 < index2:\n bond_type = ','.join([order for order in bond.get_order_str()])\n bond_type = '\"' + bond_type + '\"'\n graph.add_edge(pydot.Edge(src=str(index1 + 1), dst=str(index2 + 1),\n label=bond_type, fontname=\"Helvetica\", fontsize=\"16\"))\n\n img = graph.create(prog='neato', format=file_format)\n return img",
"def create_graphic(self):\n x, y = self.coords\n self.graphic_id = self.world.create_arc(x - Entity.RADIUS, y - Entity.RADIUS,\n x + Entity.RADIUS, y + Entity.RADIUS,\n # A little mouth\n start=self.heading + self.mouth_angle / 2,\n extent= 360 - self.mouth_angle,\n fill=self.color, outline=self.outline)",
"def create_svg_icon(symbolizers):\n svg_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/svg_1_0.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n icon_paths = []\n for symbolizer in symbolizers:\n if 'PolygonSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n styles.extend(process_fill_styling(symbolizer))\n fill_found = False\n for style in styles:\n if 'fill=' in style:\n fill_found = True\n if not fill_found:\n print('no fill found, adding it as empty style')\n styles.append('fill=\"none\"')\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': polygon_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n\n elif 'LineSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n # TODO: Add support for geometry Handling\n line_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/line.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': line_points,\n 'styles': ' '.join(styles)\n }\n content = line_template.render(**template_params)\n icon_paths.append(content)\n elif 'PointSymbolizer' in symbolizer.original_tagname_:\n # TODO: Check how to handle a Point\n if symbolizer.Graphic:\n if symbolizer.Graphic.Mark:\n styles = []\n for mark in symbolizer.Graphic.Mark:\n styles.extend(process_fill_styling(mark))\n if mark.WellKnownName == 'square':\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': square_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n elif symbolizer.Geometry:\n # TODO: implement geometry symbolizer\n print('point symbolizer does not support geometry for now')\n # else:\n # styles = [\n # 'stroke=\"black\"',\n # 'stroke-width=\"1\"',\n # 'fill=\"red\"'\n # ]\n # polygon_template = Template(\n # filename=AssetResolver('pyconizer').resolve(\n # 'lib/api/svg/templates/circle.xml').abspath(),\n # input_encoding='utf-8',\n # output_encoding='utf-8'\n # )\n # template_params = {\n # 'x': '2',\n # 'y': '2',\n # 'radius': '1',\n # 'styles': ' '.join(styles)\n # }\n # content = polygon_template.render(**template_params)\n # class_svg_paths.append(content)\n\n # only add a svg path if it would have content\n if len(icon_paths) > 0:\n svg_content = svg_template.render(**{\n 'geometry_tag': '\\n'.join(icon_paths)\n })\n return svg_content",
"def build_door_tags(bg_fname, student_list):\n\n # confirm TMP_DIR exists and is empty\n if os.path.exists(TMP_DIR):\n shutil.rmtree(TMP_DIR)\n os.mkdir(TMP_DIR)\n\n # prepare base image, adding the opaque caption region at bottom\n original = Image.open(bg_fname)\n base_img = original.copy()\n base_img = ImageOps.fit(base_img, SIZE)\n canvas = aggdraw.Draw(base_img)\n brush = aggdraw.Brush('white', opacity=CAPTION_OPACITY)\n canvas.rectangle((0, SIZE[1] - CAPTION_BGHEIGHT, SIZE[0], SIZE[1]), brush)\n canvas.flush()\n\n # read in student list\n residents = [Student(*line)\n for line in reader(open(student_list, 'rU'))\n if not line[0].startswith('#')]\n #residents = [Student.wholeName(*line)\n # for line in reader(open(student_list, 'rU'))\n # if not line[0].startswith('#')]\n residents.sort(key=attrgetter('roomnumber'))\n\n # set fonts for drawing on base image\n font = ImageFont.truetype(FONT, FONTSIZE)\n smallfont = ImageFont.truetype(SMALLFONT, SMALLFONTSIZE)\n\n # for each resident, draw name and room no, and save in TMP_DIR\n for resident in residents:\n tag = base_img.copy()\n canvas = ImageDraw.Draw(tag)\n x, y = font.getsize(resident.first)\n fontsize = (SIZE[0] / 2 - x / 2,\n SIZE[1] - CAPTION_HEIGHT / 2 - y / 2.75)\n canvas.text(fontsize, resident.first, font=font, fill=0)\n canvas.text((12, 12), resident.roomnumber, font=smallfont, fill=0)\n fname = '-'.join([resident.roomnumber, resident.netid])\n fname += '.jpg'\n tag.save(os.path.join(TMP_DIR, fname))\n\n # arrange the images on a pdf document using tables\n doc = SimpleDocTemplate(PDF_FNAME, pagesize=landscape(LETTER))\n table_styles = [('BOTTOMPADDING', (0, 0), (-1, -1), 6),\n ('TOPPADDING', (0, 0), (-1, -1), 6)]\n elements = []\n table_data = []\n images = os.listdir(TMP_DIR)\n for image in images:\n table_data.append(RLImage(os.path.join(TMP_DIR, image),\n width=SIZE[0] * DPI / PPI,\n height=SIZE[1] * DPI / PPI))\n\n # cluster table data into groups of 2 for table cols\n if len(table_data) % 2 != 0:\n table_data.append(table_data[-1])\n table_data = zip(*[iter(table_data)] * 2)\n\n # build and save the pdf doc\n table = Table(table_data, style=table_styles)\n elements.append(table)\n doc.build(elements)",
"def logo():",
"def create_rects(self):\n for row in range(0, self.rows):\n self.sprite_tuples.append([])\n\n for col in range(0, self.cols):\n self.sprite_tuples[row].append((col * self.sprite_width, row * self.sprite_height, self.sprite_width, self.sprite_height))",
"def create_textures(self):\n self.log.info(__name__ + ': ' + 'def ' + self.create_textures.__name__ + '(): ' + self.create_textures.__doc__)\n\n button = pygame.Surface((self.button_x, self.button_y), pygame.SRCALPHA, 32)\n button.fill((0, 0, 0, 0), None, pygame.BLEND_RGBA_MULT)\n pygame.draw.rect(button, Colors.DEEPSKYBLUE, (0, 0, self.button_x, self.button_y))\n pygame.draw.rect(button, Colors.BLACK, (0, 0, self.button_x, self.button_y), 2)\n self.textures['button'] = button",
"def prepare_icons(self):\n icons = []\n cols = np.linspace(0, self.size[1]-1, len(self.modes)+1).astype(np.int64)\n cols = [(cols[i], cols[i+1]) for i in range(len(cols)-1)]\n \n icon_pos = {}\n mode_pos = {}\n for i, image_name in enumerate(os.listdir(self.idir)):\n img = cv2.imread(self.idir+image_name)\n img = cv2.resize(img, (cols[i][1]-cols[i][0], self.vui_part))\n icon_pos[cols[i]] = img\n mode_pos[self.modes[i]] = cols[i]\n self.cols = cols \n self.icon_position = icon_pos\n self.current_icons = icon_pos\n self.mode_pos = mode_pos",
"def draw_game_state(self):\n info_0_x, info_0_y = self.coord['info_0_x'], self.coord['info_0_y']\n data = [\n [f'Game ID: {self.game_id}', 10],\n [f'Your Token: {self.access_token}', 10],\n ['-' * 55, 14],\n [f'cards to take: {self.cards_to_take}', 14],\n [f'turns to wait: {self.turns_to_wait}', 14],\n [f'requests: color: {self.requested_color}, value: {self.requested_value}', 14]\n ]\n for index, info in enumerate(data):\n info_y = info_0_y - index * 20\n label = pyglet.text.Label(text=info[0], x=info_0_x, y=info_y,\n color=self.colors['lbl_menu'], font_size=info[1])\n self.draw_objects.append(label)\n\n name = choice(['red_joker.png', 'black_joker.png'])\n if self.requested_value is not None:\n name = f'hearts_{self.requested_value}.png'\n elif self.requested_color is not None:\n name = f'{self.requested_color}_A.png'\n\n card_image = common.resize_center_card_image(self.card_images[name], self.screen.height, 4)\n info_y = info_0_y - 20 * len(data) - card_image.height / 1.9\n card = pyglet.sprite.Sprite(img=card_image, x=info_0_x + card_image.width * 1.3, y=info_y)\n self.draw_objects.append(card)",
"def make_image_list(self):\n return [\n tools.get_image(48, 0, 16, 16, self.sprite_sheet),\n tools.get_image(0, 0, 22, 16, setup.GFX['sword2'])\n ]",
"def make_assets(self):\n # Handle anchor\n anchor_group = LabeledColorImage(\n self.anchor,\n color=WHITE,\n label=\"Anchor\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Handle positive\n positive_group = LabeledColorImage(\n self.positive,\n color=GREEN,\n label=\"Positive\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Handle negative\n negative_group = LabeledColorImage(\n self.negative,\n color=RED,\n label=\"Negative\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Distribute the groups uniformly vertically\n assets = Group(anchor_group, positive_group, negative_group)\n assets.arrange(DOWN, buff=1.5)\n\n return assets",
"def drawObjects(self):\r\n\t\tpass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
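The _create_drawables entry above is mostly (charno, lineno) bookkeeping: each token value is split into physical lines, every non-empty chunk is queued for drawing at the current position, and the counters advance. A stripped-down sketch of that loop, with draw_text as a hypothetical callback standing in for the formatter's _draw_text:

    def layout_tokens(tokens, draw_text):
        lineno = charno = 0
        for ttype, value in tokens:
            for chunk in value.expandtabs(4).splitlines(True):
                text = chunk.rstrip('\n')
                if text:
                    draw_text(charno, lineno, text, ttype)
                    charno += len(text)
                if chunk.endswith('\n'):
                    charno = 0   # start of a new physical line
                    lineno += 1
        return charno, lineno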
Paint the line number background on the image. | def _paint_line_number_bg(self, im):
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw | [
"def draw_horizontal_lines(img):\n row, col = img.shape\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n interval = row / 10\n for i in range(1, 10):\n (x0, y0) = map(int, [0, i * interval])\n (x1, y1) = map(int, [col, i * interval])\n img = cv2.line(img, (x0, y0), (x1, y1), (0, 255, 0), 1)\n\n return img",
"def _draw_background(self):\r\n for i in range(self._size):\r\n for j in range(self._size):\r\n self._grid.draw_entity((i, j), BACK_GROUND)",
"def line(self, start, end, color, width=None, title='image', destroy=True):\n line = cv2.line(self.img, start, end, color, width)\n if destroy == False:\n cv2.imshow(title, self.img)\n if destroy == True:\n cv2.imshow(title, self.img)\n cv2.waitKey(0)\n cv2.destroyAllWindows",
"def generate_lines(height, width, dpi, line_width, path, orientation='vertical', N_lines=None):\n\n ppmm = dpi / 25.4\n w = int(np.round((width * ppmm)))\n h = int(np.round((height * ppmm)))\n\n if N_lines is not None:\n if orientation == 'vertical':\n line_width = width // (2*N_lines)\n else:\n line_width = height // (2*N_lines)\n\n D = int(np.round(line_width * ppmm))\n\n im = np.full((h, w), 255, dtype=np.uint8)\n if orientation == 'vertical':\n black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, w//D, 2)] )\n if black_id[-1] + D < w:\n black_id = np.hstack([black_id, np.arange(w//D*D, w)])\n im[:, black_id] = 0\n else:\n black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, h//D, 2)] )\n if black_id[-1] + D < h:\n black_id = np.hstack([black_id, np.arange(h//D*D, h)])\n im[black_id] = 0\n\n image_comment = f'{orientation} lines\\nline width: {line_width}\\n DPI: {dpi}'\n save_image(path, im, dpi, comment=image_comment)\n print(f'Image saved to {path}.')\n return im",
"def __draw_grid(self):\n for i in range(10):\n color = 'blue' if i % 3 == 0 else \"gray\"\n\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)",
"def draw_lines (self, mm_img, lines, **kwargs) :\n\n r = 255\n g = 0\n b = 132\n\n if kwargs.has_key('color') :\n r = kwargs['color'][0]\n g = kwargs['color'][1]\n b = kwargs['color'][2]\n\n opacity = kwargs.get('opacity', 1)\n line_width = kwargs.get('line_width', 2)\n\n cairo_surface = self._setup_surface(mm_img, **kwargs)\n\n for coords in lines :\n points = []\n\n for c in coords :\n points.append(self._coord_to_point(c))\n\n ctx = self._draw_polyline_points(cairo_surface, points, False)\n ctx.set_source_rgba(r, g, b, opacity)\n ctx.set_line_width(line_width)\n ctx.stroke()\n\n\treturn self._return_surface(cairo_surface, **kwargs)",
"def lines(self):\r\n w, h = self.width, self.height # create local shortcut for image size\r\n board = ImageDraw.Draw(self.board) # create interactive image (for drawing)\r\n for loop in range(8): # draw 8 pairs of random lines on image\r\n xa, ya, xb, yb = rr(w), rr(h), rr(w), rr(h) # select random coordinates\r\n board.line((xa, 0, xb, h), width=2, fill='#000') # line from top to bottom\r\n board.line((0, ya, w, yb), width=2, fill='#000') # line from left to right\r\n self.label['image'] = self.image = ImageTk.PhotoImage(self.board) # update\r",
"def draw_line(self):\n gl.glColor4f(*self.color)\n gl.glLineWidth(self.thickness)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex2f(self.coordx[0], self.coordy[0])\n gl.glVertex2f(self.coordx[1], self.coordy[1])\n gl.glEnd()",
"def Line(self, prePos):\n \n if self.Draw:\n pygame.draw.line(self.Parent, self.Colour, prePos, self.GetPos(), self.Width)",
"def highlight_line(self, start, end):\n a, b = start, end\n startpos = ((a[0]+0.5)*CELL_SIZE, (a[1]+0.5)*CELL_SIZE)\n endpos = ((b[0]+0.5)*CELL_SIZE, (b[1]+0.5)*CELL_SIZE)\n pygame.draw.line(self.board, WINNER_LINE_COLOR, startpos, endpos, 4)\n self.screen.blit(self.board, (0, 0))\n pygame.display.flip()",
"def line_draw(image):\n img = image.copy()\n \n #read in background for paper appearance\n paper = cv2.imread(\"ink-paper.jpg\", cv2.IMREAD_COLOR)\n\n paper = cv2.resize(paper, (img.shape[1], img.shape[0]))\n\n img = cv2.medianBlur(img, 5)\n edges = cv2.Canny(img, 100 , 125)\n\n c_img, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n \n #iterate through each contour found in the image\n for c in contours:\n #draw contours on image. Can vary intensity of lines\n #c_img = cv2.drawContours(c_img, c, -1, (125,125,0), 4)\n c_img = cv2.drawContours(c_img, c, -1, (255,255,255), 2) \n \n #Invert the line drawing\n c_img = 255 - c_img\n c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)\n\n c_img_blur = cv2.blur(c_img, (5,5))\n \n #convert to BGR to enable adding\n edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n \n edges = np.uint8(edges) \n c_img_blur = np.uint8(c_img_blur)\n \n #add blurred and contoured to paper to create an overlay/blend\n output = cv2.addWeighted(c_img_blur, .35, paper, .65, 0)\n output = np.uint8(output)\n \n return output",
"def draw_lines(img, lines, color=[1, 0, 0], thickness=2):\n for line in lines:\n p1 = line[0]\n p2 = line[1]\n cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color, thickness)",
"def paint_borders(self, color: ColorsType, width: int) -> None:",
"def draw_lines(img, lines, color=(0, 255, 0), thickness=6):\n img_blank = np.zeros_like(img)\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(img_blank, (x1, y1), (x2, y2), color, thickness)\n return img_blank",
"def draw_crosshairs(self):\n center_x = self.frame.shape[1] // 2\n center_y = self.frame.shape[0] // 2\n # horizontal line\n self.frame[center_y, center_x - 10:center_x + 11] = [0, 50, 255]\n # vertical line\n self.frame[center_y - 10:center_y + 11, center_x] = [0, 50, 255]",
"def line(self, y = 0):\n\n if y > self.size[1]:\n return None\n\n # Every 60 lines, change starting\n color = int(y / 60) % 7\n\n r = ((color >> 0) & 1)\n g = ((color >> 1) & 1)\n b = ((color >> 2) & 1)\n\n rgb = b\"\"\n for i in range(0, self.size[0]):\n # Fill with gradient\n pos = int(256 * i / self.size[0])\n if r:\n r_grad = pos\n else:\n r_grad = 255 - pos\n if g:\n g_grad = pos\n else:\n g_grad = 255 - pos\n if b:\n b_grad = pos\n else:\n b_grad = 255 - pos\n rgb += struct.pack(\"BBB\", r_grad, g_grad, b_grad)\n pass\n \n return rgb",
"def draw_line(self, index, a, b, c, d):\n for mask_index in range(min(self.mask_count + 1, self.trail_size)):\n cv2.line(self.masks[mask_index], (a,b),(c,d), \\\n self.color[index].tolist(), 2, lineType=cv2.CV_AA)",
"def create_final_line(self):\n p = Image(\n c.screen_width - 100,\n 0,\n 100,\n c.screen_height,\n 'images/finish_line.png')\n self.finish_line = p\n self.objects.append(p)",
"def render_gap(img_width, line: Line, gap: Gap) -> Image:\n img = Image.new(\"L\", (img_width, img_width), color=0)\n draw = ImageDraw.Draw(img)\n\n gap_start, gap_end = gap.coords_on_line(line)\n width = line.width + 2\n draw.line([*gap_start, *gap_end], width=width, fill=255)\n\n return img",
"def continuous_line(klass, lane, surface, stripes_count, longitudinal, side):\n starts = [longitudinal + 0 * klass._stripe_spacing]\n ends = [longitudinal + stripes_count * klass._stripe_spacing + klass._stripe_length]\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items. | def format(self, tokensource, outfile):
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper()) | [
"def display_tokens(tokens, image):\n new_image = image.convert('RGBA')\n dr = ImageDraw.Draw(new_image)\n fnt = ImageFont.load_default()\n font_size = 28\n try:\n # Pretty font, pretty sure I'm the only one to have it though - Andy\n fnt = ImageFont.truetype(\"DejaVuSans.ttf\", font_size, encoding=\"unic\")\n except Exception:\n try:\n # More common font\n fnt = ImageFont.truetype(\"arial.ttf\", font_size, encoding=\"unic\")\n except Exception:\n print(\"fallback to default font\")\n pass\n\n token_colors = [\n (255, 0, 0),\n (0, 255, 0),\n (0, 255, 255),\n (255, 125, 0),\n (0, 255, 255),\n ]\n\n tokens = list(tokens)\n\n for token, color in zip(tokens, itertools.cycle(token_colors)):\n corners = token.pixel_corners\n centre = token.pixel_centre\n avg_point = np.mean(corners, axis=0)\n dr.line(corners + [corners[0]], fill=color, width=4)\n ellipse_pos = [\n (centre[0] - 5, centre[1] - 5),\n (centre[0] + 5, centre[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n for point in corners:\n ellipse_pos = [\n (point[0] - 5, point[1] - 5),\n (point[0] + 5, point[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n _draw_centered(\n (int(avg_point[0]), int(avg_point[1])),\n str(token.id),\n fnt,\n color,\n dr,\n )\n del dr\n return new_image",
"def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep",
"def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)",
"def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n try:\n if not outfile:\n realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.formatter import Formatter\n if isinstance(formatter, type) and issubclass(formatter, Formatter):\n raise TypeError('format() argument must be a formatter instance, '\n 'not a class')\n raise",
"def DrawTokensRed():\r\n for i in range(4):\r\n Tokens(TokenRed,RedChips[i][0],RedChips[i][1])",
"def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> < </symbol>')\n elif value == '>':\n self.output(f'<symbol> > </symbol>')\n elif value == '&':\n self.output(f'<symbol> & </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')",
"def DrawTokensBlue():\r\n for i in range(4):\r\n Tokens(TokenBlue,BlueChips[i][0],BlueChips[i][1])",
"def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()",
"def formatter( fname, fout = None, space_count = 2, \n *kargs, special = 0, EXCEPTION = True, DEBUG = False ):\n \n import sys\n if special == None:\n special = 0\n\n # Prevent user from accessing 16\n if special & 16:\n special ^= 16\n \n shift = 0\n shift_delay = 0 #For 4\n cond_shift = 0 #For 16\n cond_delay = 0 #For 16\n mline_shift = 0 #Future Use\n brace_start = '{'\n brace_end = '}'\n stack = [] #For 1\n space_char = ' ' #For 2\n\n #Files \n source_code = open(fname)\n fout = (fname + \"_edit.txt\") if (fout == None) else fout \n dest_code = open(fout, \"w\" )\n ###err_code = open(fname + \"_err.txt\", \"w\" )\n\n print(\"%s starting with %s. \\nOutput is %s.\" % \n (sys._getframe(0).f_code.co_name , fname, fout) )\n\n #SPECIAL\n if special & 2 :\n space_char = '\\t'\n\n for (count,line) in enumerate(source_code) :\n\n ###err_code.write( '%03d | %s' % (len(line.strip()), line))\n\n #Empty Line are Empty\n empty_line = 1 if line.strip() else 0\n \n line = ( ( empty_line * ( shift + cond_shift + mline_shift )* \n space_count * space_char ) \n + line.strip() )\n \n #Insert Extra Formatting here\n if special > 0:\n if special & 4 : \n if r'/*' in line:\n shift_delay +=1\n if r'*/' in line:\n shift_delay -=1\n if special & 8 :\n if (line.lstrip()).startswith('//'):\n if (line[0] == ' ' or line[0] == '\\t' ): #CHECK ME\n line = line[1:]\n if special & 16:\n if ( 'if' in line or 'else' in line \n or 'for' in line or 'while' in line ) and brace_start not in line:\n cond_shift = 1\n else:\n cond_shift = 0\n if special & 1 :\n if brace_start in line and brace_end not in line :\n temp = line.strip()[:-1] \n temp = \"\".join(temp.split('{').split('}'))\n stack.append(temp)\n elif brace_start not in line and brace_end in line :\n line = \"%s%s%s\" % (line, \" // \", stack.pop())\n \n #Write to File\n dest_code.write( \"%s%s\" % (line, '\\n') )\n\n ##Calculate Shift for next line\n if brace_start in line :\n shift += 1\n if brace_end in line :\n shift -= 1\n if shift_delay != 0 :\n shift += shift_delay\n shift_delay = 0\n \n #Check if negative shift\n if EXCEPTION and shift < 0 :\n print( \"\\n File \\\"%s\\\", line %i, in %s\" % \n ( fname, count, sys._getframe().f_code.co_name ) )\n raise UnbalancedBraces( 0 , \"Unbalanced Closing Braces in the file\" )\n \n #Check if there is extra shift at end.\n if EXCEPTION and shift != 0:\n print( \"\\n File \\\"%s\\\" , in %s\" % \n ( fname, sys._getframe().f_code.co_name ) )\n raise UnbalancedBraces( 0 , \"Unbalanced Opening Braces in the file!\" )\n\n print( \"%s compeleted!\" % sys._getframe(0).f_code.co_name )",
"def visualize_activations(\n tokens,\n activations,\n darken=2,\n colors=[\"#d35f5f\", \"#00aad4\"],\n text_direction=\"ltr\",\n char_limit=60,\n font_size=20,\n filter_fn=lambda x: x,\n):\n ################################ Validation ################################\n valid_text_directions = [\"ltr\", \"rtl\"]\n text_direction = text_direction.lower()\n assert (\n text_direction in valid_text_directions\n ), f\"text_direction must be one of {valid_text_directions}\"\n\n assert len(tokens) == len(\n activations\n ), f\"Number of tokens and activations must match\"\n\n ################################ Filtering ################################\n if filter_fn == \"top_tokens\":\n\n def keep_top_tokens(acts):\n max_val = max([abs(a) for a in acts])\n new_acts = [a if abs(a) > 0.8 * max_val else 0 for a in acts]\n return new_acts\n\n filter_fn = keep_top_tokens\n activations_filtered = filter_fn(activations)\n assert len(activations) == len(activations_filtered)\n activations = activations_filtered\n\n ############################## Drawing Setup ###############################\n text = \" \".join(tokens)\n\n # Estimate individual character sizes\n char_width = font_size * 0.601 # Magic number for Courier font\n char_height = font_size * 1.25 # 1.25 is line height of rendered font\n\n # Compute number of lines\n lines = _break_lines(text, limit=char_limit)\n\n # Compute image size based on character sizes and number of lines\n image_height = len(lines) * char_height * 1.2\n image_width = (char_limit + 1) * char_width\n\n # Create drawing canvas\n dwg = svgwrite.Drawing(\"tmp.svg\", size=(image_width, image_height), profile=\"full\")\n dwg.viewbox(0, 0, image_width, image_height)\n group = dwg.g()\n\n ####################### Activation Rendering limits ########################\n scores = activations\n max_score = max(scores)\n min_score = abs(min(scores))\n limit = max(max_score, min_score)\n\n for _ in range(darken):\n word_idx = 0\n line_horizontal_offsets = []\n for line_idx, line in enumerate(lines):\n char_idx = 0\n words = line.split(\" \")\n if text_direction == \"rtl\":\n words = reversed(words)\n for word in words:\n score = scores[word_idx]\n if score > 0:\n color = colors[1]\n opacity = score / limit\n else:\n color = colors[0]\n opacity = abs(score) / limit\n\n # Add rectangle for every character in current word\n for _ in word:\n rect_position = (char_idx * char_width, 7 + line_idx * char_height)\n rect_size = (f\"{char_width:0.3f}px\", f\"{char_height:0.3f}px\")\n group.add(\n dwg.rect(\n insert=rect_position,\n size=rect_size,\n style=_get_rect_style(color, opacity),\n )\n )\n char_idx += 1\n\n # Add rectangle for empty space after word\n final_rect_pos = (char_idx * char_width, 7 + line_idx * char_height)\n final_rect_size = (f\"{char_width:0.3f}px\", f\"{char_height:0.3f}px\")\n group.add(\n dwg.rect(\n insert=final_rect_pos,\n size=final_rect_size,\n style=_get_rect_style(color, opacity),\n )\n )\n\n char_idx += 1\n word_idx += 1\n if text_direction == \"ltr\":\n line_horizontal_offsets.append(0)\n else:\n line_horizontal_offsets.append(char_idx * char_width)\n\n # Draw the actual text over the drawn rectangles\n for line_idx, line in enumerate(lines):\n text_insert = (\n line_horizontal_offsets[line_idx],\n font_size * 1.25 * (line_idx + 1),\n )\n text = dwg.text(\n line, insert=text_insert, fill=\"black\", style=_get_text_style(font_size)\n )\n group.add(text)\n\n dwg.add(group)\n\n return dwg",
"def _drawStrGen(x=x, y=y, string=string, width=width, height=height):\n for char in string:\n if y == height:\n raise TDLError('End of console reached.')\n #batch.append(((x, y), _formatChar(char))) # ((x, y), ch)\n yield((x, y), _formatChar(char))\n x += 1 # advance cursor\n if x == width: # line break\n x = 0\n y += 1",
"def tabbed_generator(self, source_path, source_vocab, target_vocab, eos=None):\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n for line_idx, line in enumerate(source_file):\n if line:\n source, target = split_graphemes_phonemes(line)\n if not (source and target):\n tf.logging.warning(\"Invalid data format in line {} in {}:\\n\"\n \"{}\\nGraphemes and phonemes should be separated by white space.\"\n .format(line_idx, source_path, line))\n continue\n source_ints = source_vocab.encode(source) + eos_list\n target_ints = target_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}",
"def save_tokens_to_file(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as fp:\n #for token in self.token2id.keys():\n for idd in range(self.size()): \n fp.write(self.id2token[idd] + '\\n')",
"def _write_input(\n self, X: List[str], y: Optional[List[List[str]]], input_path: Path\n ):\n with open(input_path, \"w\") as f:\n if y is not None:\n for text, labels in zip(X, y):\n label_str = \" \".join(\n f\"__label__{FastText._escape_label(label)}\" for label in labels\n )\n f.write(f\"{label_str} {_fasttext_preprocess(text)}\\n\")\n elif y is None:\n for text in X:\n f.write(f\"{_fasttext_preprocess(text)}\\n\")",
"def build_edges(self, tokens, M, token2idx):\n window_size = self.WINDOW_SIZE\n\n for i in range(len(tokens) - window_size+1):\n span = tokens[i:i+window_size]\n center, contexts = span[0], span[1:]\n v = token2idx[center]\n for context in contexts:\n w = token2idx[context]\n insert_edge(M, v, w)",
"def _get_format_from_document(self, token: Any, document: Any) -> Any:\n # Modified by EKR.\n # These lines cause unbounded recursion.\n # code, html = next(self._formatter._format_lines([(token, u'dummy')]))\n # self._document.setHtml(html)\n return QtGui.QTextCursor(self._document).charFormat()",
"def _print_tokens(self, tokens) -> None:\n print(' '.join([self.get_index_token(tok.item()) for tok in tokens]))\n return",
"def token_generator(source_path, target_path, token_vocab, eos=None):\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = token_vocab.encode(source) + eos_list\n target_ints = token_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()",
"def generateOutput(self):\n if not hasattr(self, 'xcms'):\n self.getCenterOfMass()\n\n fh = open(self.settings['output'], 'w')\n rg = open(self.settings['output'].split('.')[0]+'.reg', 'w')\n fh.write('#X coordinate in pixels [starts from 1]\\n')\n fh.write('#Y coordinate in pixels [starts from 1]\\n')\n rg.write('#File written on {0:>s}\\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))\n for x, y in zip(self.xcms, self.ycms):\n fh.write('%10.3f %10.3f\\n' % (x + 1, y + 1))\n rg.write('circle({0:.3f},{1:.3f},5)\\n'.format(x + 1, y + 1))\n fh.close()\n rg.close()",
"def plot_token_scores(\n token_probs, sentence, id2label_tok,\n plot_name=None, show=False):\n sentence_length = len(sentence.tokens)\n token_probs = token_probs[:][:sentence_length].T\n (nrows, ncols) = token_probs.shape\n color_data = []\n\n for i, [r, g, b] in enumerate(head_colours[:nrows]):\n row = []\n for j in range(ncols):\n row.append([r, g, b, token_probs[i][j]])\n color_data.append(row)\n\n plt.figure(figsize=(16, 12), dpi=100)\n row_labels = [\"O\"] + [str(id2label_tok[i + 1]) for i in range(nrows-1)]\n col_labels = [token.value for token in sentence.tokens]\n plt.imshow(color_data, vmin=0, vmax=sentence_length)\n plt.xticks(range(ncols), col_labels, rotation=45)\n plt.yticks(range(nrows), row_labels)\n plt.tight_layout()\n if plot_name is not None:\n plt.savefig(\"%s_%d.png\" % (plot_name, int(time.time())),\n format=\"png\", dpi=100, bbox_inches='tight', pad_inches=0)\n if show:\n plt.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs, strip it if wanted, and apply registered filters. | def get_tokens(self, text, unfiltered=False):
if not isinstance(text, unicode):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = unicode(text[len(bom):], encoding,
errors='replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = unicode(text, enc.get('encoding') or 'utf-8',
errors='replace')
text = decoded
else:
text = text.decode(self.encoding)
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream | [
"def get_tokens(self, text, unfiltered=False):\r\n if not isinstance(text, str):\r\n if self.encoding == 'guess':\r\n try:\r\n text = text.decode('utf-8')\r\n if text.startswith('\\ufeff'):\r\n text = text[len('\\ufeff'):]\r\n except UnicodeDecodeError:\r\n text = text.decode('latin1')\r\n elif self.encoding == 'chardet':\r\n try:\r\n import chardet\r\n except ImportError:\r\n raise ImportError('To enable chardet encoding guessing, '\r\n 'please install the chardet library '\r\n 'from http://chardet.feedparser.org/')\r\n # check for BOM first\r\n decoded = None\r\n for bom, encoding in _encoding_map:\r\n if text.startswith(bom):\r\n decoded = str(text[len(bom):], encoding,\r\n errors='replace')\r\n break\r\n # no BOM found, so use chardet\r\n if decoded is None:\r\n enc = chardet.detect(text[:1024]) # Guess using first 1KB\r\n decoded = str(text, enc.get('encoding') or 'utf-8',\r\n errors='replace')\r\n text = decoded\r\n else:\r\n text = text.decode(self.encoding)\r\n else:\r\n if text.startswith('\\ufeff'):\r\n text = text[len('\\ufeff'):]\r\n\r\n # text now *is* a unicode string\r\n text = text.replace('\\r\\n', '\\n')\r\n text = text.replace('\\r', '\\n')\r\n if self.stripall:\r\n text = text.strip()\r\n elif self.stripnl:\r\n text = text.strip('\\n')\r\n if self.tabsize > 0:\r\n text = text.expandtabs(self.tabsize)\r\n if self.ensurenl and not text.endswith('\\n'):\r\n text += '\\n'\r\n\r\n def streamer():\r\n for i, t, v in self.get_tokens_unprocessed(text):\r\n yield t, v\r\n stream = streamer()\r\n if not unfiltered:\r\n stream = apply_filters(stream, self.filters, self)\r\n return stream",
"def analyze(text):\n\n for token in tokenize(text):\n normalized = normalize(token)\n if filter_text(normalized):\n yield normalized",
"def __filter_text(self, text):\r\n analyzer_num_tag = self.analyzer_type.num\r\n analyzer_noun_tag = self.analyzer_type.noun\r\n analyzer_loc_tag = self.analyzer_type.loc\r\n surname = clean_text.get_surname(self.url)\r\n sentence = []\r\n out_text = []\r\n surname_re = re.compile(r'' + surname)\r\n for sent in text:\r\n for token in sent:\r\n if (analyzer_num_tag in token and (self.pattern.match(token[0]) is not None)) or (\r\n analyzer_loc_tag in token and analyzer_noun_tag in token and surname_re.match(\r\n str(token[0])) is None):\r\n sentence.append(token)\r\n if [tup for tup in sentence if analyzer_num_tag in tup]:\r\n if [tup for tup in sentence if analyzer_loc_tag in tup]:\r\n out_text.append(sentence)\r\n sentence = []\r\n return out_text",
"def tokenize_with_preprocess(text):\n return map(__stemmer.stem, filter(lambda w: w not in stop,\n nltk.word_tokenize(re.sub(_punc_pattern, '', text.lower()))))",
"def flatten_text(text: Text) -> Iterator[Union[int, str]]:\n for sent in text:\n for token in sent:\n yield token",
"def filter(unfiltered_data: List) -> List:\n filtered_data = [_markdown_to_text(unfiltered_str)\n for unfiltered_str in unfiltered_data]\n return filtered_data",
"def engTokenize(text):\n return [token.text for token in eng.tokenizer(text)]",
"def preprocess(text):\n text = normalize_unicode(text)\n text = remove_newline(text)\n text = text.lower()\n text = decontracted(text)\n text = replace_negative(text)\n text = removePunctuations(text)\n text = remove_number(text)\n text = remove_space(text)\n text = removeArticlesAndPronouns(text)\n text = removeNLTKStopWords(text)\n #text = performStemming(text)\n return text",
"def tag_untokenized_text(self, text):\n return self.tag_untokenized_sentences(self._sent_tokenize(text))",
"def iter_from(self, fieldname, text):\n\n term_info = self.term_info\n for term in self.terms_from(fieldname, text):\n yield (term, term_info(*term))",
"def prepare_for_tokenization(self, text, **kwargs):\n return text",
"def tokenize(self, text: str) -> list:\n indices = self.atomize(text)\n return list(map(lambda x: self.decoder[x], indices))",
"def tokenize(self, text):\n split_tokens = [] # list of `SubToken`s.\n for token, orig_token, is_good_token in self.basic_tokenizer.tokenize(text):\n if not is_good_token:\n split_tokens.append(SubToken(token, orig_token, is_good=False))\n continue\n\n # Preserve special tokens such as '[Q]' and '[SEP]'.\n if bert_tokenization.preserve_token(token, self.vocab):\n split_tokens.append(SubToken(token, orig_token, is_good=True))\n continue\n\n # For everything else, send the text-like tokens that have survived\n # whitespace and puncutation splitting through a wordpiece tokenizer.\n for sub_token in self.wordpiece_tokenizer.tokenize(\n [SubToken(token, orig_token, is_good_token)]):\n # `sub_token` has type `SubToken`.\n split_tokens.append(sub_token)\n\n return split_tokens",
"def process_text(text, stem=True):\r\n #text = text.translate(None,string.punctuation)\r\n tokens = word_tokenize(text)\r\n \r\n if stem:\r\n stemmer = PorterStemmer()\r\n tokens = [stemmer.stem(t) for t in tokens]\r\n \r\n return tokens",
"def FilterInput(self, text):\n return text",
"def extract_phrases(self, text: str):\n\n sentence = Sentence(text)\n self.chunk_tagger.predict(sentence)\n\n token_list: List[str] = []\n token_tags: List[str] = []\n\n for token in sentence:\n token_list.append(token.text)\n\n for label_type in token.annotation_layers.keys():\n # if token.get_labels(label_type)[0].value == \"O\":\n # token_tags.append('O')\n # if token.get_labels(label_type)[0].value == \"_\":\n # token_tags.append('_')\n token_tags.append(token.get_labels(label_type)[0].value) # Append token tags for each token\n\n phrases: List[str] = self._get_flair_phrases(token_list, token_tags)\n\n return phrases",
"def preprocessText(self, text):\n self.rawText = text\n self.stoppedText = self.removeStopWordsFromText(text)\n # self.vectorizedText = self.textToVector(self.stoppedText)\n # self.topic = self.detectTopic(self.vectorizedText)\n # self.POSedText = self.POSTagText(self.stoppedText)",
"def process_all_text(text_string, quick=False, use_placenames=False):\r\n # print(\"Preliminary tagging...\")\r\n token_list = core.tgc(text_string)\r\n # print(\"Name Entity chunking...\")\r\n token_list = core.ne_group_extended(token_list)\r\n # for x in token_list:\r\n # print(type(x), x)\r\n if use_placenames:\r\n # print(\"Tagging Place Names...\")\r\n token_list = pn.tag_all_placenames(token_list, quick)\r\n # print(\"Tagging Geo Features...\")\r\n token_list = gn.tag_geonouns(token_list)\r\n # print(\"Tagging Spatial Grammar...\")\r\n token_list = sg.tag_all_spatial_grammar(token_list)\r\n # print(\"Done\")\r\n # print(token_list)\r\n return token_list",
"def preprocess(text: str) -> List[str]:\n return __PARAGRAPH_SEP.split(\n Tokenizer.join_hyphenated_words_across_linebreaks(text)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
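The row above pairs its docstring-style query with what appears to be the Pygments `Lexer.get_tokens` implementation. As a point of reference only, here is a minimal usage sketch; it assumes the `pygments` package is installed (the `PythonLexer` import is an assumption taken from that library, not from this dataset row):

    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    # get_tokens() normalises newlines, expands tabs / strips the text as configured,
    # then yields (tokentype, value) pairs; pass unfiltered=True to bypass any filters.
    for tokentype, value in lexer.get_tokens("x = 1\n"):
        print(tokentype, repr(value))
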
Callback that yields multiple actions for each group in the match. | def bygroups(*args):
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback | [
"def _performManyAct(self, action, args, messages, D):\n readCount = 0\n i = -1\n\n for message in messages:\n if message.read:\n readCount += 1\n yield action(message, **args)\n i += 1\n D.callback((readCount, i+1-readCount))",
"def group_action():\n return _controller.group_action()",
"def apply(self, callback, route):\n @functools.wraps( callback)\n def inner_group_by(**kw):\n all_the_results = callback( **kw)\n grouper = bottle.request.GET.get('group')\n if not grouper:\n return all_the_results\n groups = dict( ( model.full_name.split( grouper, 1)[0], model)\n for model in all_the_results)\n grouper = grouper if grouper not in self.ignored_groupers else None\n return [ FakeModel(grouper,host,name) for name,host in groups.items() ]\n return inner_group_by",
"def applyactions(self):\n \n for a in self.registration.actions:\n actions.apply(a, self.registration, self.log)",
"def droplet_actions_group():\n\tpass",
"def _process_matches(self, match, match_index):\n match = self._process_match_processor(match, self.pre_match_processor)\n if not match:\n return\n\n if not self._process_match(match, match_index):\n return\n\n for child in match.children:\n if not self._process_match(child, match_index, child=True):\n return\n\n match = self._process_match_processor(match, self.post_match_processor)\n if not match:\n return\n\n if (self._should_include_parent or self.private_parent) and match.name not in self.ignore_names:\n yield match\n if self._should_include_children or self.private_children:\n children = [x for x in match.children if x.name not in self.ignore_names]\n for child in children:\n yield child",
"def _step_apply(self, action_tuple):\n for action in action_tuple:\n assert action in range(self.num_actions)\n\n table = self.factory.tables[self.current_agent]\n action_result = do_action(table, self.factory, Action(action))\n self.factory.add_move(self.current_agent, Action(action), action_result)",
"def apply_action_block(stmt):\n return [ast.ActionBlock(stmt)]",
"def process_match(match, team, augment_data=True):\n experiences = []\n\n # This section controls data agumentation of the match. Certain submissions in the draft are\n # submitted consecutively by the same team during the same phase (ie team1 pick0 -> team1 pick1).\n # Although these submissions were produced in a particular order, from a draft perspective\n # there is no difference between submissions of the form\n # team1 pick0 -> team1 pick1 vs team1 pick1 -> team0 pickA\n # provided that the two picks are from the same phase (both bans or both picks).\n # Therefore it is possible to augment the order in which these submissions are processed.\n\n # Note that we can also augment the banning phase if desired. Although these submissions technically\n # fall outside of the conditions listed above, in practice bans made in the same phase are\n # interchangable in order.\n\n # Build queue of actions from match reference (augmenting if desired)\n augments_list = [\n (\"blue\",\"bans\",slice(0,3)), # Blue bans 0,1,2 are augmentable\n (\"blue\",\"bans\",slice(3,5)), # Blue bans 3,4 are augmentable\n (\"red\",\"bans\",slice(0,3)),\n (\"red\",\"bans\",slice(3,5)),\n (\"blue\",\"picks\",slice(1,3)), # Blue picks 1,2 are augmentable\n (\"blue\",\"picks\",slice(3,5)), # Blue picks 3,4 are augmentable\n (\"red\",\"picks\",slice(0,2)) # Red picks 0,1 are augmentable\n ]\n if(augment_data):\n augmented_match = deepcopy(match) # Deepcopy match to avoid side effects\n for aug in augments_list:\n (k1,k2,aug_range) = aug\n count = len(augmented_match[k1][k2][aug_range])\n augmented_match[k1][k2][aug_range] = random.sample(augmented_match[k1][k2][aug_range],count)\n\n action_queue = build_action_queue(augmented_match)\n else:\n action_queue = build_action_queue(match)\n\n # Set up draft state\n draft = DraftState(team)\n\n finish_memory = False\n while action_queue:\n # Get next pick from deque\n submission = action_queue.popleft()\n (submitting_team, pick, position) = submission\n\n # There are two conditions under which we want to finalize a memory:\n # 1. Non-designated team has finished submitting picks for this phase (ie next submission belongs to the designated team)\n # 2. Draft is complete (no further picks in the draft)\n if submitting_team == team:\n if finish_memory:\n # This is case 1 to store memory\n r = get_reward(draft, match, a, a)\n s_next = deepcopy(draft)\n memory = (s, a, r, s_next)\n experiences.append(memory)\n finish_memory = False\n # Memory starts when upcoming pick belongs to designated team\n s = deepcopy(draft)\n # Store action = (champIndex, pos)\n a = (pick, position)\n finish_memory = True\n else:\n # Mask positions for pick submissions belonging to the non-designated team\n if position != -1:\n position = 0\n\n draft.update(pick, position)\n\n # Once the queue is empty, store last memory. This is case 2 above.\n # There is always an outstanding memory at the completion of the draft.\n # RED_TEAM always gets last pick. 
Therefore:\n # if team = BLUE_TEAM -> There is an outstanding memory from last RED_TEAM submission\n # if team = RED_TEAM -> Memory is open from just before our last submission\n if(draft.evaluate() == DraftState.DRAFT_COMPLETE):\n assert finish_memory == True\n r = get_reward(draft, match, a, a)\n s_next = deepcopy(draft)\n memory = (s, a, r, s_next)\n experiences.append(memory)\n else:\n print(\"Week {} match_id {} {} vs {}\".format(match[\"week\"], match[\"id\"], match[\"blue_team\"],match[\"red_team\"]))\n draft.display()\n print(\"Error code {}\".format(draft.evaluate()))\n print(\"Number of experiences {}\".format(len(experiences)))\n for experience in experiences:\n _,a,_,_ = experience\n print(a)\n print(\"\")#raise\n\n return experiences",
"def choose_action(self):\n for ag in self.agents:\n ag.choose_action()\n self.next_action[ag.name] = ag.action",
"def action_for_all(self, name, **kwargs):\n\n for k in self._manager.keys():\n self._manager[k].action(name, **kwargs)",
"def play_actions(self, target):\n for method_name, args, kwargs in self.actions:\n method = getattr(target, method_name)\n method(*args, **kwargs)",
"def trigger_action_on_multi_resource(data):\n for item in data:\n trigger_action_on_a_resource(item['resource_url'],item['action'],item['provider'][0])\n return \"\",return_code['OK']",
"def expand_actions(self, actions):\n results = list()\n\n for action in actions:\n if action in self.aliased_actions:\n results.append(action)\n for item in self.expand_actions(self.aliased_actions[action]):\n results.append(item)\n else:\n results.append(action)\n\n return results",
"def get_actions(self) -> List[GameAction]:\n pass",
"def test_group_switch_on_all_groups(\n self,\n keymap: Keymap,\n mod_key: str,\n mod: ModifierMask,\n key: str,\n keysyms: tuple[str],\n ):\n for group, keysym in enumerate(keysyms, start=1):\n print(group, keysym)\n keymap.tap_and_check(key, keysym, group=group)\n self.switch_group(keymap, mod_key, mod, group % len(keysyms) + 1)\n # Check the group wraps\n keymap.tap_and_check(key, keysyms[0], group=1)",
"def actions(self, action_dict) -> list:\n # use self.game_state\n return []",
"def _join_match_group(matches):\n data = {}\n for match in matches:\n data.update(_parse_single_match(match))\n return data",
"def at_multimatch_cmd(caller, matches):\r\n string = \"There were multiple matches:\"\r\n for num, match in enumerate(matches):\r\n # each match is a tuple (candidate, cmd)\r\n cmdname, arg, cmd, dum, dum = match\r\n\r\n is_channel = hasattr(cmd, \"is_channel\") and cmd.is_channel\r\n if is_channel:\r\n is_channel = _(\" (channel)\")\r\n else:\r\n is_channel = \"\"\r\n if cmd.is_exit and cmd.destination:\r\n is_exit = (\" (exit to %s)\") % cmd.destination\r\n else:\r\n is_exit = \"\"\r\n\r\n id1 = \"\"\r\n id2 = \"\"\r\n if (not (is_channel or is_exit) and\r\n (hasattr(cmd, 'obj') and cmd.obj != caller) and\r\n hasattr(cmd.obj, \"key\")):\r\n # the command is defined on some other object\r\n id1 = \"%s-%s\" % (num + 1, cmdname)\r\n id2 = \" (%s)\" % (cmd.obj.key)\r\n else:\r\n id1 = \"%s-%s\" % (num + 1, cmdname)\r\n id2 = \"\"\r\n string += \"\\n %s%s%s%s\" % (id1, id2, is_channel, is_exit)\r\n return string",
"def ueach(accept, iterable, *args, **kwargs):\n kwargs[\"_unpack\"] = True\n each(accept, iterable, *args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
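The query/document pair above describes the `bygroups` helper used inside regex-lexer token tables. A minimal sketch of how such a callback is typically wired into a rule, assuming the Pygments `RegexLexer` API that this document mirrors (the `TinyLexer` class and its rules are illustrative only):

    from pygments.lexer import RegexLexer, bygroups
    from pygments.token import Keyword, Name, Whitespace

    class TinyLexer(RegexLexer):
        tokens = {
            'root': [
                # one regex with three groups; bygroups() assigns one action per group
                (r'(def)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword, Whitespace, Name.Function)),
                (r'\s+', Whitespace),
                (r'\w+', Name),
            ],
        }

    print(list(TinyLexer().get_tokens("def foo")))
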
Instantiate cls after preprocessing its token definitions. | def __call__(cls, *args, **kwds):
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds) | [
"def __init__(self):\n self.__parser = SpaCyParser()\n self.__word_substitutor = WordSubstitutor()",
"def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")",
"def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }",
"def __init__(self, toktype, line, lexeme):\n self.__toktype, self.__line, self.__lexeme = toktype, line, lexeme",
"def __init__(self, text):\n\n self.text = text\n\n self.tokenize()",
"def __init__(self, parser_instance):\n self.parser_instance = parser_instance\n # Dict of sorted lists of constructs by [head_or_tail][trigger_token_label]\n self.construct_lookup_dict = {}\n self.construct_lookup_dict[HEAD] = {}\n self.construct_lookup_dict[TAIL] = {}",
"def __init__(self, tokens: List[Token]):\n super(GroupToken, self).__init__(tokens)",
"def add_new_token(self, *vargs, token_subclass=None, **kwargs):\n if token_subclass == None:\n token_subclass = self.default_token_class\n\n class new_token_class(token_subclass):\n pass\n\n assert(issubclass(new_token_class, Token))\n\n new_token_class.init(*vargs, **kwargs)\n\n # XXX: what is new_token_class.name\n if new_token_class.name in self.__table:\n raise KeyError(\"Class named '{}' was already in the token table.\".format(new_token_class.name))\n\n new_token_class.__name__ = token_subclass.__name__ + \"-\" + new_token_class.name\n\n self.add_token(new_token_class)",
"def __init__(self, corpus, name, tokens):\n\n self.corpus = corpus\n\n self.name = name\n self.tokens = asarray(tokens)",
"def __init__(self, token, state, extra):\n self.state = state\n self.token = token\n self.extra = extra\n pass",
"def __init__(self, depth, tokens=None):\n self.depth = depth\n self._tokens = tokens or []\n self.disable = False\n\n if self._tokens:\n # Set up a doubly linked list.\n for index, tok in enumerate(self._tokens[1:]):\n # Note, 'index' is the index to the previous token.\n tok.previous_token = self._tokens[index]\n self._tokens[index].next_token = tok",
"def Parse(self, lex):\n\n # The next two variables store a stack of commands the user wants\n # to manually add to the list of stackable instance_commands.\n # (Allowing the users to directly manipulate the transformation stack\n # is an experimental feature as of 2015- Most users don't need this.)\n user_push_left_commands = []\n user_push_right_commands = []\n\n #sys.stdout.write(' -- Parse() invoked --\\n')\n\n # Keep track of the location in the users' input files where this\n # class object is first defined. (Keep in mind that the user might\n # augment their original class definition, adding new content to an\n # existing class. In that case self.srcloc_begin will have already\n # been assigned. We don't want to overwrite it in that case.)\n if self.srcloc_begin is None: # <-- not defined yet?\n self.srcloc_begin = lex.GetSrcLoc()\n\n while True:\n\n cmd_token = lex.get_token()\n\n #print('Parse(): token = \\\"'+cmd_token+'\\\", '+lex.error_leader())\n\n if cmd_token == lex.eof:\n #print('Parse(): EOF encountered\\n')\n break\n\n if (cmd_token in ('write',\n 'write_once',\n 'create_var',\n 'create_static_var',\n 'replace')):\n\n open_paren = lex.get_token()\n\n #print('Parse(): open_paren=\\\"'+open_paren+'\\\"')\n if open_paren == '{':\n # ..then the user neglected to specify the \"dest\" file-name\n # argument. In that case, supply the default, ''.\n # (which is shorthand for the standard out in this case)\n open_curly = open_paren[0]\n open_paren = ''\n close_paren = ''\n tmpl_filename = ''\n srcloc = lex.GetSrcLoc()\n else:\n tmpl_filename = lex.get_token()\n if tmpl_filename == ')':\n tmpl_filename = ''\n close_paren = ')'\n else:\n close_paren = lex.get_token()\n open_curly = lex.get_token()\n srcloc = lex.GetSrcLoc()\n\n if ((cmd_token == 'create_var') or\n (cmd_token == 'create_static_var')):\n tmpl_filename = None\n # This means: define the template without attaching\n # a file name to it. (IE., don't write the contents\n # of what's enclosed in the curly brackets { } to a file.\n # Why?\n # \"create_var\" commands are implemented as \"write() {...}\"\n # commands (containing one or more variables) which\n # never get written to a file or the terminal. 
Parsing\n # the contents of the curly brackets defines the variables \n # inside in the same way as parsing the text inside an\n # ordinary \"write() {...}\" command.\n\n if (cmd_token == 'replace'):\n tmpl_filename = \"ttree_replacements.txt\"\n\n if ((open_curly != '{') or\n ((open_paren == '') and (close_paren != '')) or\n ((open_paren == '(') and (close_paren != ')'))):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in ' + lex.error_leader() + '\\n\\n'\n 'Syntax error at the beginning of the \\\"' + cmd_token + '\\\" command.')\n if tmpl_filename != None:\n tmpl_filename = RemoveOuterQuotes(\n tmpl_filename, lex.quotes)\n # ( The previous line is similar to:\n # tmpl_filename = tmpl_filename.strip(lex.quotes) )\n\n tmpl_contents = lex.ReadTemplate()\n StaticObj.CleanupReadTemplate(tmpl_contents, lex)\n\n #sys.stdout.write(' Parse() after ReadTemplate, tokens:\\n\\n')\n # print(tmpl_contents)\n # sys.stdout.write('\\n----------------\\n')\n\n if (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n\n # Check for a particular bug:\n # Ordinary instance variables (preceded by a '$')\n # should never appear in a write_once() statement.\n for entry in tmpl_contents:\n if (isinstance(entry, VarRef) and\n (entry.prefix[0] == '$')):\n err_msg = ('Error(' + g_module_name + '.StaticObj.Parse()):\\n' +\n ' Error near ' + ErrorLeader(entry.srcloc.infile,\n entry.srcloc.lineno) + '\\n' +\n ' Illegal variable: \\\"' + entry.prefix + entry.descr_str + entry.suffix + '\\\"\\n' +\n ' All variables in a \\\"' + cmd_token + '\\\" statement must be statically\\n' +\n ' defined, and hence they must begin with a \\'@\\' prefix character.\\n' +\n ' (not a \\'$\\' character).\\n')\n if (cmd_token == 'write_once'):\n err_msg += ' Suggestion: Use the \\\"write()\\\" command instead.\\n'\n raise InputError(err_msg)\n\n if cmd_token == 'write':\n commands = self.instance_commands\n elif (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n commands = self.commands\n elif (cmd_token == 'create_var'):\n commands = self.instance_commands\n else:\n assert(False)\n\n command = WriteFileCommand(tmpl_filename,\n tmpl_contents,\n srcloc)\n commands.append(command)\n\n # end of \"if (cmd_token == 'write') or (cmd_token ==\n # 'write_once'):\"\n\n elif cmd_token == 'delete':\n\n instobj_descr_str = lex.get_token()\n instobj_srcloc = lex.GetSrcLoc()\n delete_command = DeleteCommand(instobj_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n self.instance_commands.append(mod_command)\n\n elif cmd_token == 'using':\n\n namespacecom_str = lex.get_token()\n if namespacecom_str != 'namespace':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The \\\"' + cmd_token + '\\\" command must be followed by the \\\"namespace\\\" keyword.')\n namespace_str = lex.get_token()\n\n stnode = StrToNode(namespace_str,\n self,\n lex.GetSrcLoc())\n\n self.namespaces.append(stnode)\n\n elif cmd_token == 'category':\n cat_name = lex.get_token()\n\n cat_count_start = 1\n cat_count_incr = 1\n backup_wordterminators = lex.wordterminators\n lex.wordterminators += ','\n #sys.stderr.write('DEBUG: wordterminators=\"'+str(lex.wordterminators)+'\"\\n')\n\n open_paren = lex.get_token()\n if (open_paren == '('):\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer, float, or 
string\n try:\n cat_count_start = int(token)\n except ValueError:\n try:\n cat_count_start = float(token)\n except ValueError:\n cat_count_start = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer,float,or string\n try:\n cat_count_incr = int(token)\n except ValueError:\n try:\n cat_count_incr = float(token)\n except ValueError:\n cat_count_incr = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token != ')':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '...\\\" has too many arguments,\\n'\n ' or lacks a close-paren \\')\\'.\\n')\n\n else:\n lex.push_token(open_paren)\n\n if (isinstance(cat_count_start, basestring) or\n isinstance(cat_count_incr, basestring)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '(' +\n str(cat_count_start) + ',' +\n str(cat_count_incr) + ')\\\"\\n'\n ' Only numeric counters are currently supported.\\n')\n\n # check for really stupid and unlikely errors:\n if type(cat_count_start) is not type(cat_count_incr):\n if ((isinstance(cat_count_start, int) or\n isinstance(cat_count_start, float))\n and\n (isinstance(cat_count_incr, int) or\n isinstance(cat_count_incr, float))):\n cat_count_start = float(cat_count_start)\n cat_count_incr = float(cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Problem with \\\"' + cmd_token + '\\\" command.\\n')\n\n prefix = cat_name[0]\n cat_name = cat_name[1:]\n # Add this category to the list.\n if prefix == '@':\n self.categories[cat_name] = Category(cat_name)\n self.categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n elif prefix == '$':\n self.instance_categories[cat_name] = Category(cat_name)\n self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' category name = \\\"' + cat_name + '\\\" lacks a \\'$\\' or \\'&\\' prefix.\\n'\n ' This one-character prefix indicates whether the variables in this\\n'\n ' new category will be static or dynamics variables\\n')\n\n\n lex.wordterminators = backup_wordterminators\n\n\n elif (cmd_token == '}') or (cmd_token == ''):\n # a '}' character means we have reached the end of our scope.\n # Stop parsing and let the caller deal with the remaining text.\n # (And a '' means we reached the end of the file... I think.)\n break\n\n # elif (cmd_token == 'include'):\n # \"include filename\" loads a file (adds it to the file stack)\n # The \"TtreeShlex\" class (from which \"lex\" inherits) handles\n # \"include\" statements (ie. \"source\" statements) automatically.\n\n elif ((cmd_token == 'push') or\n (cmd_token == 'push_left') or\n (cmd_token == 'push_right')):\n\n push_cmd_src_loc = lex.GetSrcLoc()\n push_cmd_text = lex.GetParenExpr()\n if ((len(push_cmd_text) < 2) or\n (push_cmd_text[0] != '(') or\n (push_cmd_text[-1] != ')')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Bad \\\"push\\\" command. 
Expected an expression in parenthesis.\\n')\n push_cmd_text = push_cmd_text[1:-1]\n\n if (cmd_token == 'push_right'):\n push_command = PushRightCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_right_commands.append(push_command)\n else:\n push_command = PushLeftCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_left_commands.append(push_command)\n self.instance_commands.append(push_command)\n\n elif ((cmd_token == 'pop') or\n (cmd_token == 'pop_left') or\n (cmd_token == 'pop_right')):\n\n pop_cmd_text = lex.GetParenExpr()\n pop_cmd_src_loc = lex.GetSrcLoc()\n if (cmd_token == 'pop_right'):\n if len(user_push_right_commands) > 0:\n push_command = user_push_right_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many \\\"pop_right\\\" commands.\\n')\n pop_command = PopRightCommand(push_command,\n pop_cmd_src_loc)\n else:\n if len(user_push_left_commands) > 0:\n push_command = user_push_left_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many pop, (or pop_left) commands.\\n')\n pop_command = PopLeftCommand(push_command,\n pop_cmd_src_loc)\n self.instance_commands.append(pop_command)\n\n else:\n\n # Otherwise, 'cmd_token' is not a command at all.\n # Instead it's the name of an object which needs to be\n # defined or instantiated.\n # First, let's figure out which.\n\n # (small detail: The \"class\" keyword is optional\n # and can be skipped.)\n if cmd_token == 'class':\n object_name = lex.get_token()\n else:\n object_name = cmd_token\n\n next_symbol = lex.get_token()\n #print('Parse(): next_token=\\\"'+next_symbol+'\\\"')\n\n class_parents = []\n\n if next_symbol == 'inherits':\n\n # Then read in the list of classes which are parents of\n # of this class. (Multiple inheritance is allowed.)\n # (We don't yet check to insure that these are valid class\n # names. We'll do this later in LookupStaticRefs().)\n\n syntax_err_inherits = False\n\n while True:\n next_symbol = lex.get_token()\n if ((next_symbol == '{') or\n (next_symbol == lex.eof)):\n break\n elif (next_symbol == '='):\n syntax_err_inherits = True\n break\n else:\n class_parents.append(StrToNode(next_symbol,\n self,\n lex.GetSrcLoc()))\n if len(class_parents) == 0:\n syntax_err_inherits = True\n\n if syntax_err_inherits:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"inherits\\\" should be followed by one or more class names.\\n')\n\n if next_symbol == '{':\n child_name = object_name\n\n # Check to see if this class has already been defined.\n # (IE. check if it present in the list of children.)\n # If the name (child_name) matches another class (child),\n # then the contents of the new class will be appended to\n # the old. This way, class definitions can be augmented\n # later. 
(This is the way \"namespaces\" work in C++.)\n child = self.children.get(child_name)\n # If found, we refer to it as \"child\".\n # If not, then we create a new StaticObj named \"child\".\n if child is None:\n child = StaticObj(child_name, self)\n self.children[child_name] = child\n assert(child.name == child_name)\n\n # Either way we invoke child.Parse(), to\n # add contents (class commands) to child.\n child.Parse(lex)\n child.class_parents += class_parents\n\n elif next_symbol == '=':\n next_symbol = lex.get_token()\n if next_symbol == 'new':\n base_name = object_name\n base_srcloc = lex.GetSrcLoc()\n array_slice_str = ''\n if base_name.find('/') != -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n'\n ' (You can not instantiate some other object\\'s members.)\\n'\n ' Invalid instance name: \\\"' + base_name + '\\\"\\n')\n\n elif base_name in self.instname_refs:\n ref_srcloc = self.instname_refs[base_name]\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Duplicate class/array \\\"' + base_name + '\\\"\\n'\n ' This occurs near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n'\n ' and also near:\\n'\n ' ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n')\n else:\n self.instname_refs[base_name] = base_srcloc\n\n # Check for syntax allowing the user to instantiate\n # PART of an array. For example, check for this syntax:\n # \"monomers[20-29] = new ...\". This only fills in a\n # portion of the array from: monomers[20]...monomers[29]\n #\n # We also have to deal with multidimensional syntax\n # like this: \"cells[3][2-3][1][4-7] = new...\"\n # Split the \"cells[3][2-3][2][4-7][2]\" string into\n # \"cells[3][\", \"][1][\", and \"]\".\n # Later, we will instantiate InstanceObjs with names:\n # \"cells[3][2][1][4]\"\n # \"cells[3][2][1][5]\"\n # \"cells[3][2][1][6]\"\n # \"cells[3][2][1][7]\"\n # \"cells[3][3][1][4]\"\n # \"cells[3][3][1][5]\"\n # \"cells[3][3][1][6]\"\n # \"cells[3][3][1][7]\"\n\n p1 = base_name.find('[')\n if p1 == -1:\n p1 = len(base_name)\n else:\n p1 += 1\n array_name_tkns = [base_name[0:p1]]\n array_name_offsets = []\n\n p2 = -1\n p4 = p1\n while p4 < len(base_name):\n p3 = base_name.find(']', p1)\n\n if p3 == -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\']\\' character following:\\n'\n ' \\\"' +\n base_name[0:p1] +\n '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n # Search for a '-', ':', or '*' character between []\n # For example \"monomers[20-29] = \"\n # If present, the user wants us to fill a range\n # inside an array. 
This could be a multi-dimensional\n # array, (eg \"cells[3][2-6][4-11] = \"), so we must\n # figure out which entries in the array the user\n # wants us to fill (in this case, \"[2-6][4-11]\")\n p2 = base_name.find('-', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find(':', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find('*', p1)\n if p2 == -1:\n p2 = len(base_name)\n\n p4 = p3 + 1\n if p4 < len(base_name):\n if base_name[p4] == '[':\n p4 += 1 # skip over it\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\'[\\' character forllowing a \\']\\' character in\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n if p2 > p3:\n # Then no '-', ':', or '*' character was found\n # between '[' and the subsequent ']' character\n # In that case, ignore this token\n\n token = base_name[p1:p4]\n # append all this text to the previous token\n if len(array_name_tkns) == 0:\n array_name_tkns.append(token)\n else:\n array_name_tkns[-1] = array_name_tkns[-1] + token\n array_slice_str = 'slice '\n else:\n\n assert((p1 < p2) and (p2 < p3))\n index_offset_str = base_name[p1:p2]\n if len(index_offset_str) == 0:\n index_offset = 0\n\n elif (not str.isdigit(index_offset_str)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a nonnegative integer preceding the \\'' +\n base_name[\n p2] + '\\' character in:\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n else:\n index_offset = int(index_offset_str)\n token = base_name[p3:p4]\n array_name_tkns.append(token)\n array_name_offsets.append(index_offset)\n\n p1 = p4\n\n # If the statobj_str token contains a ']' character\n # then this means the user wants us to make multiple\n # copies of this template. The number of copies\n # to instantiate is enclosed in the [] characters\n # (Example wat = new Water[3000] creates\n # 3000 instantiations of the Water template\n # named wat[1], wat[2], wat[3], ... wat[3000]).\n\n # Note: Here '[' and ']' have a special meaning.\n # So lex.get_token() should not treat them as\n # ordinary word characters. To prevent this:\n orig_wordterminators = lex.wordterminators\n lex.wordterminators += '[],'\n\n class_name_str = lex.get_token()\n if ((class_name_str == lex.eof) or\n (class_name_str == '}')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n 'Class ends prematurely. 
(Incomplete \\\"new\\\" statement.)')\n\n assert(len(class_name_str) > 0)\n\n if (class_name_str[0] == '['):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' new ' + class_name_str + '\\n'\n 'Bracketed number should be preceeded by a class name.')\n class_names = []\n weights = []\n num_by_type = []\n if class_name_str == 'random':\n class_names, weights, num_by_type = self._ParseRandom(\n lex)\n tmp_token = lex.get_token()\n if len(tmp_token) > 0:\n if tmp_token[0] == '.':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + tmp_token + '\\\" should not follow random()\\n'\n '\\n'\n ' Coordinate transformations and other commands (such as \\\"' +\n tmp_token + '\\\")\\n'\n ' should appear after each class name inside the random() statement,\\n'\n ' not after it. For example, do not use:\\n'\n ' \\\"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\\\"\\n'\n ' Use this instead:\\n'\n ' \\\"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\\\"\\n')\n lex.push_token(tmp_token)\n else:\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n\n array_size = []\n array_suffixes = []\n array_srclocs = []\n\n # A general \"new\" statement could look like this:\n # \"m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)\n # [3].trans(0,0,4.5)\"\n # So far we have processed \"m = new Mol.scale(3)\".\n # Now, we need to deal with:\n # \"[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)\"\n while True:\n new_token = lex.get_token()\n # if ((new_token == '') or (new_token == lex.eof)):\n # break\n if new_token == '[':\n number_str = lex.get_token()\n close_bracket = lex.get_token()\n if ((not str.isdigit(number_str)) or\n (close_bracket != ']')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near ' + lex.error_leader() + '\\n'\n ' A \\'[\\' character should be followed by a number and a \\']\\' character.')\n array_size.append(int(number_str))\n suffix = lex.get_token()\n\n if ((suffix == '') or (suffix == lex.eof)):\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n break\n if suffix[0] == '.':\n lex.push_token(suffix[1:])\n suffix_func = lex.GetParenExpr()\n suffix = '.' 
+ suffix_func\n array_suffixes.append(suffix)\n array_srclocs.append(lex.GetSrcLoc())\n else:\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n lex.push_token(suffix)\n if suffix != '[':\n break\n else:\n lex.push_token(new_token)\n break\n srcloc_final = lex.GetSrcLoc()\n\n lex.wordterminators = orig_wordterminators\n\n assert(len(array_size) == len(array_suffixes))\n\n if len(array_size) > 0:\n if len(array_name_offsets) == 0:\n assert(len(array_name_tkns) == 1)\n array_name_offsets = [0] * len(array_size)\n array_name_tkns[0] = array_name_tkns[0] + '['\n for d in range(0, len(array_size) - 1):\n array_name_tkns.append('][')\n array_name_tkns.append(']')\n\n if len(array_name_offsets) != len(array_size):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near/before ' + lex.error_leader() + '\\n'\n ' Array ' + array_slice_str +\n 'dimensionality on the left side of the \\'=\\' character (' + str(\n len(array_name_offsets)) + ')\\n'\n ' does not match the array dimensionality on the right side (' + str(len(array_size)) + ').\\n')\n\n # If the user wants us to instantiate a\n # multidimensional array of class instances\n # then we must loop through this multidimensional\n # array and create a new instance for each entry.\n # For example fill a 3 dimensional volume\n # with 1000 water molecules\n # Example 1:\n # solvent = new Water [10][10][10]\n # (The coordinates must be read separately.)\n # In this example array_size = [10,10,10]\n # array_suffixes = ['','','']\n # Example 2:\n # solvent = new Water.transcm(0,0,0)\n # [10].trans(0,0,4)\n # [10].trans(0,4,0).rot(45,0,0,1)\n # [10].trans(4,0,0)\n # (This command generates a 10x10x10 lattice\n # simple cubic lattice of regularly spaced\n # water molecules pointing the same direction.)\n # In this example array_size = [10,10,10]\n # and\n # class_suffix = 'transcm(0,0,0)'\n # and\n # array_suffixes = ['trans(0,0,4)',\n # 'trans(0,4,0).rot(45,0,0,1)',\n # 'trans(4,0,0)']\n # Note that tree ignores the \"trans()\"\n # commands, it stores them so that inherited\n # classes can attempt to process them.\n\n D = len(array_size)\n if D > 0:\n\n i_elem = 0 # (used to look up selection_list[])\n if len(num_by_type) > 0:\n selection_list = []\n for i in range(0, len(num_by_type)):\n selection_list += [i] * num_by_type[i]\n random.shuffle(selection_list)\n\n num_elements = 1\n for d in range(0, D):\n num_elements *= array_size[d]\n err_msg_str = str(array_size[0])\n for d in range(1, D):\n err_msg_str += '*' + str(array_size[d])\n if num_elements != len(selection_list):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near or before ' + lex.error_leader() + '\\n'\n ' The sum of the numbers in the \\\"new random([],[])\\\" command (' + str(\n len(selection_list)) + ')\\n'\n ' does not equal the number of elements in the array (' + err_msg_str + ')\\n')\n\n digits = [0 for d in range(0, D)]\n table_filled = False\n pushed_commands = []\n while (not table_filled):\n instance_name = array_name_tkns[0]\n for d in range(0, D):\n i = digits[d]\n instance_name += str(i +\n array_name_offsets[d]) +\\\n array_name_tkns[d + 1]\n\n # Does the user want us to select\n # a class at random?\n if len(class_names) > 0:\n\n if len(num_by_type) > 0:\n class_name_str = class_names[\n selection_list[i_elem]]\n else:\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(\n class_name_str, 
lex)\n\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(instance_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n # Now go to the next entry in the table.\n # The indices of this table are similar to\n # a D-digit integer. We increment this d-digit\n # number now.\n d_carry = D - 1\n while True:\n digits[d_carry] += 1\n if digits[d_carry] >= array_size[d_carry]:\n digits[d_carry] = 0\n if array_suffixes[d_carry] != '':\n for i in range(0, array_size[d_carry] - 1):\n partner = pushed_commands.pop()\n command = PopRightCommand(partner,\n srcloc_final)\n self.instance_commands.append(\n command)\n d_carry -= 1\n else:\n if array_suffixes[d_carry] != '':\n command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),\n array_srclocs[d_carry])\n pushed_commands.append(command)\n self.instance_commands.append(\n command)\n break\n if d_carry < 0:\n table_filled = True\n break\n\n # (used to look up selection_list[])\n i_elem += 1\n pass\n\n else:\n if len(class_names) > 0:\n assert(len(num_by_type) == 0)\n # if len(num_by_type) > 0:\n # class_name_str = class_names[selection_list[i_elem]]\n # else:\n # class_name_str = RandomSelect(class_names,\n # weights)\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(base_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n else:\n\n # Now check for commands using this syntax:\n #\n # \"MolNew = MolOld.rot(45,1,0,0).scale(100.0)\"\n # /|\\ /|\\ `-----------.------------'\n # | | |\n # child_name parent_name optional suffix\n\n child_name = object_name\n parent_name_str = next_symbol\n\n child = StaticObj(child_name, self)\n\n parent_name, suffix, suffix_srcloc = \\\n self._ProcessClassName(parent_name_str, lex)\n\n child.class_parents.append(StrToNode(parent_name,\n self,\n lex.GetSrcLoc()))\n\n if suffix != '':\n # Assume the command is a StackableCommand. 
(This\n # way it will enclose the commands of the parents.)\n # Stackable commands come in (Push...Pop) pairs.\n push_command = PushLeftCommand(suffix,\n suffix_srcloc)\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc)\n push_mod_command = ModCommand(push_command, './')\n pop_mod_command = ModCommand(pop_command, './')\n child.instance_commands_push.append(\n push_mod_command)\n child.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n #sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\\n')\n\n #sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\\n')\n\n # Check to see if this class has already been defined.\n if self.children.get(child_name) is not None:\n if self.children[i].IsDeleted():\n del self.children[child_name]\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The name \\\"' + child_name + '\\\" is already in use.')\n\n self.children[child_name] = child\n\n else:\n\n # Otherwise hopefully this is a post-instance command\n # (a command applied to a class which has been instantiated)\n # In that case, the object_name would be followed by\n # a dot and a function-call containing a '(' paren (which\n # would have ended up stored in the next_symbol variable).\n\n open_paren_encountered = False\n if (next_symbol == '('):\n open_paren_encountered = True\n # put '(' back in the stream\n lex.push_token(next_symbol)\n\n i_dot = object_name.rfind('.')\n i_slash = object_name.rfind('/')\n dot_encountered = ((i_dot != -1) and\n ((i_slash == -1) or (i_slash < i_dot)))\n\n if (open_paren_encountered and dot_encountered and\n (object_name[:1] != '[')):\n\n obj_descr_str, suffix, suffix_srcloc = \\\n self._ExtractSuffix(object_name, lex)\n\n path_tokens = obj_descr_str.split('/')\n\n i_last_ptkn, staticobj = FollowPath(path_tokens,\n self,\n lex.GetSrcLoc())\n instobj_descr_str = './' + \\\n '/'.join(path_tokens[i_last_ptkn:])\n\n # I still support the \"object_name.delete()\" syntax for\n # backwards compatibility. (However newer input files\n # use this equivalent syntax: \"delete object_name\")\n if suffix == 'delete()':\n delete_command = DeleteCommand(suffix_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n staticobj.instance_commands.append(mod_command)\n else:\n push_command = PushLeftCommand(suffix,\n suffix_srcloc,\n '.')\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc,\n '.')\n push_mod_command = ModCommand(push_command,\n instobj_descr_str)\n pop_mod_command = ModCommand(pop_command,\n instobj_descr_str)\n if instobj_descr_str != './':\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands\\n')\n staticobj.instance_commands.append(\n push_mod_command)\n staticobj.instance_commands.append(\n pop_mod_command)\n else:\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands_push\\n')\n # Question: Should I make these PushRight commands and\n # append them in the opposite order?\n # If so I also have to worry about the case\n # above.\n staticobj.instance_commands_push.append(\n push_mod_command)\n staticobj.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n else:\n # Otherwise, the cmd_token is not any of these:\n # \"write\", \"write_once\", \"replace\",\n # \"create_var\", \"create_static_var\",\n # \"delete\", or \"category\".\n # ... 
and it is ALSO not any of these:\n # the name of a class (StaticObj), or\n # the name of an instance (InstanceObj)\n # followed by either a '.' or \"= new\"\n #\n # In that case, it is a syntax error:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Syntax error at or before ' + lex.error_leader() + '\\n'\n ' \\\"' + object_name + ' ' + next_symbol + '\\\".')\n\n # Keep track of the location in the user's input files\n # where the definition of this object ends.\n self.srcloc_end = lex.GetSrcLoc()\n\n # Finally, if there are any remaining user_push_left_commands or\n # user_push_right_commands, deal with them (by popping them).\n for push_command in user_push_left_commands:\n push_command = user_push_left_commands.pop()\n pop_command = PopLeftCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)\n\n for push_command in user_push_right_commands:\n push_command = user_push_right_commands.pop()\n pop_command = PopRightCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)",
"def __init__(self, lexeme, token_type, line_num):\n self.type = token_type\n self.lexeme = lexeme\n self.lineNum = line_num\n self.lexicalError = None",
"def __init__(self):\n self.commandList = []\n self.tagStack = []\n self.symbolLocationTable = {}\n self.macroMap = {}\n self.endTagSymbol = 1\n\n self.commandHandler = {\n TAL_DEFINE: self.compileCmdDefine,\n TAL_CONDITION: self.compileCmdCondition,\n TAL_REPEAT: self.compileCmdRepeat,\n TAL_CONTENT: self.compileCmdContent,\n TAL_REPLACE: self.compileCmdReplace,\n TAL_ATTRIBUTES: self.compileCmdAttributes,\n TAL_OMITTAG: self.compileCmdOmitTag,\n # Metal commands\n METAL_USE_MACRO: self.compileMetalUseMacro,\n METAL_DEFINE_SLOT: self.compileMetalDefineSlot,\n METAL_FILL_SLOT: self.compileMetalFillSlot,\n METAL_DEFINE_MACRO: self.compileMetalDefineMacro\n }\n\n # Default namespaces\n self.setTALPrefix('tal')\n self.tal_namespace_prefix_stack = []\n self.metal_namespace_prefix_stack = []\n self.tal_namespace_prefix_stack.append('tal')\n self.setMETALPrefix('metal')\n self.metal_namespace_prefix_stack.append('metal')\n\n self.log = logging.getLogger(\"simpleTAL.TemplateCompiler\")",
"def instantiate_classes(self) -> None:\n self.config_init = self.parser.instantiate_classes(self.config)\n self.datamodule = self.config_init.get(\"data\")\n self.model = self.config_init[\"model\"]\n self.instantiate_trainer()",
"def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok",
"def __init__(self, tokeniser, lStopwords):\n\n self.tokeniser = tokeniser\n self.lStopwords = lStopwords",
"def __init__(self, tokenization_src={}, tokenization_tgt={}):\n self.tokenization_src = tokenization_src\n self.tokenization_tgt = tokenization_tgt\n self.tokenization_src_lut = {self.tokenization_src[key]: key for key in self.tokenization_src}\n self.tokenization_tgt_lut = {self.tokenization_tgt[key]: key for key in self.tokenization_tgt}\n self.rules = {} # dict used to look up rules faster (get_rules function)\n self.all_rules = [] # list of all rules",
"def test_constructor(self):\n\n t = TokenKind(5, \"foo\")\n\n self.assertEqual(t.value, 5)\n self.assertEqual(t.name, \"foo\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
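The document above is a lexer metaclass whose `__call__` compiles the class-level token definitions the first time the class is instantiated and caches the result on the class. A small sketch of that observable effect, assuming the Pygments `RegexLexerMeta` behaviour this code matches (the `OneRuleLexer` class is illustrative only):

    from pygments.lexer import RegexLexer
    from pygments.token import Text

    class OneRuleLexer(RegexLexer):
        tokens = {'root': [(r'.+', Text)]}

    assert '_tokens' not in OneRuleLexer.__dict__   # token defs not processed yet
    OneRuleLexer()   # metaclass __call__ runs process_tokendef() once
    assert '_tokens' in OneRuleLexer.__dict__       # compiled rules cached on the class
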
Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. | def do_insertions(insertions, tokens):
insertions = iter(insertions)
try:
index, itokens = insertions.next()
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the postition of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
| [
"def do_insertions(insertions, tokens):\r\n insertions = iter(insertions)\r\n try:\r\n index, itokens = next(insertions)\r\n except StopIteration:\r\n # no insertions\r\n for item in tokens:\r\n yield item\r\n return\r\n\r\n realpos = None\r\n insleft = True\r\n\r\n # iterate over the token stream where we want to insert\r\n # the tokens from the insertion list.\r\n for i, t, v in tokens:\r\n # first iteration. store the postition of first item\r\n if realpos is None:\r\n realpos = i\r\n oldi = 0\r\n while insleft and i + len(v) >= index:\r\n tmpval = v[oldi:index - i]\r\n yield realpos, t, tmpval\r\n realpos += len(tmpval)\r\n for it_index, it_token, it_value in itokens:\r\n yield realpos, it_token, it_value\r\n realpos += len(it_value)\r\n oldi = index - i\r\n try:\r\n index, itokens = next(insertions)\r\n except StopIteration:\r\n insleft = False\r\n break # not strictly necessary\r\n yield realpos, t, v[oldi:]\r\n realpos += len(v) - oldi\r\n\r\n # leftover tokens\r\n while insleft:\r\n # no normal tokens, set realpos to zero\r\n realpos = realpos or 0\r\n for p, t, v in itokens:\r\n yield realpos, t, v\r\n realpos += len(v)\r\n try:\r\n index, itokens = next(insertions)\r\n except StopIteration:\r\n insleft = False\r\n break # not strictly necessary\r",
"def applyToTokens(self, tokens: List[str]) -> None:\n\n if self.__tokenIndex > len(tokens):\n raise IndexError(\"InsertOperation: cannot insert at index {} (out of bounds)\".format(self.__tokenIndex))\n\n tokens.insert(self.__tokenIndex, self.__newToken)",
"def Insert(iterable, index, items):\n items = items if is_iterable(items) else itt.repeat(items)\n for elem, item in zip(iterable, items):\n elem = list(elem)\n head, tail = elem[:index], elem[index:]\n yield tuple(head + as_list(item) + tail)",
"def insert(self, target, insertions):\r\n for string in insertions:\r\n index = random.randint(0, len(target)-1)\r\n target = target[0:index] + string + target[index:]\r\n return target",
"def insert(self, index, elements):\n i = index\n for element in elements:\n self.list.insert(i, element)\n i += 1",
"def insert_sampling(self, token_list: List[str or Tuple[str, str]],\n sample_idx: List[int]) -> List[str or Tuple[str, str]]:\n result = []\n for i, token in enumerate(token_list):\n if i in sample_idx:\n result.append(token)\n result.append(token)\n return result",
"def insert_at_index(self, index: int, items: list) -> None:\n for i in range(len(items)):\n self.entries.insert(index + i, items[i])\n self.list_size += len(items)",
"def process_tokenized_input(self, tokens):\n # Tokenize input\n bert_tokens, orig_to_bert_tok_map = self._wordpiece_tokenization(tokens)\n\n # Pad the sequences\n max_sent = len(max(bert_tokens, key=len))\n bert_tokens = [sent + [PAD] * (max_sent - len(sent)) for sent in bert_tokens]\n\n # Convert token to vocabulary indices\n indexed_tokens = [self.tokenizer.convert_tokens_to_ids(sent) for sent in bert_tokens]\n indexed_tokens = torch.tensor(indexed_tokens).to(self.device)\n\n # Generate attention masks for pad values\n attention_masks = [[float(idx > 0) for idx in sent] for sent in indexed_tokens]\n attention_masks = torch.tensor(attention_masks).to(self.device)\n\n return indexed_tokens, orig_to_bert_tok_map, attention_masks",
"def extract_insertions(\n fusions, # type: Iterable[Fusion]\n gtf_path, # type: pathlib.Path\n features_path, # type: pathlib.Path\n chromosomes=None, # type: List[str]\n assembled_gtf_path=None, # type: pathlib.Path\n ffpm_fastq_path=None # type: pathlib.Path\n): # type: (...) -> Iterable[Insertion]\n\n # Annotate for genes.\n gtf_reference = TranscriptReference.from_gtf(\n gtf_path, chromosomes=chromosomes)\n\n annotated = annotate_fusions_for_genes(fusions, gtf_reference)\n\n # Annotate for assembly (if given).\n if assembled_gtf_path is not None:\n assem_reference = TranscriptReference.from_gtf(\n assembled_gtf_path, chromosomes=chromosomes)\n\n annotated = annotate_fusions_for_assembly(annotated, gtf_reference,\n assem_reference)\n\n # Annotate for transposon.\n annotated = annotate_fusions_for_transposon(annotated, features_path)\n\n # Drop any fusions without a transposon feature.\n annotated = (fusion for fusion in annotated\n if 'feature_name' in fusion.metadata)\n\n # Calculate FFPM scores.\n if ffpm_fastq_path is not None:\n annotated = annotate_ffpm(annotated, fastq_path=ffpm_fastq_path)\n\n # Convert to insertions.\n insertions = Insertion.from_transposon_fusions(\n annotated, id_fmt_str='INS_{}')\n\n for insertion in insertions:\n yield insertion",
"def get_insertions(self, aln, minlength, maxlength, extension):\n return self.__get_insdel(aln, _modeller.mod_alignment_next_insert,\n minlength, maxlength, extension)",
"def insert(self, index, command, chained_by=None):\n command = self._normalize_command(command)\n chained_by = self._normalize_chained_by(chained_by)\n\n self.commands.insert(index, command)\n self._strings.insert(index, str(command))\n self._operators.insert(index, chained_by)",
"def call(self, insertions):\n\n raise NotImplementedError()",
"def insert(self, index, s):\n raise NotImplementedError",
"def applyToTokens(self, tokens: List[str]) -> None:\n\n if self.__tokenIndex >= len(tokens):\n raise IndexError(\"ReplaceOperation: cannot replace at index {} (out of bounds)\".format(self.__tokenIndex))\n\n tokens[self.__tokenIndex] = self.__newToken",
"def insert(self, index=None, *nodes):\n # If no index is provided we assume we are appending\n index = len(self.nodes) if index is None else index\n for node in nodes:\n if self.is_descendant(node) or node == self:\n # You cannot add a node to its descendant/ child or itself\n continue\n node.remove() # Remove node from whatever parent it belongs to\n self.nodes.insert(index, node)\n index += 1\n node.parent_node = self\n node.depth = self.depth + 1\n node.lift(self.body)\n if self._expanded:\n self.collapse()\n self.expand()\n if len(self.nodes) > 0:\n self._set_expander(self.COLLAPSED_ICON)\n self.expand()",
"def insert_sequence(dna1,dna2,index):\r\n return dna1[:index]+dna2+dna1[index:]",
"def bulk_insert(self, keys):\n for k in keys:\n self.insert(k)",
"def insert(list_it, insert_data, n):\n # Reset iterators\n list_it.reset()\n\n # n = int(n)\n\n # Get to the nth line in the list\n for _ in range(n):\n list_it.next()\n\n # Add elements\n for lines in insert_data:\n list_it.add_here(lines)",
"def insert(self, i, x):",
"def append_tokens(self, tokens):\n if type(tokens) != list:\n raise FatalRuntimeError(\"Tokens type error\")\n self.tokens += tokens\n self.length += len(tokens)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
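A minimal usage sketch for the do_insertions helper above (in Pygments it is importable from pygments.lexer); the token tuples, positions, and values below are made up purely for illustration, and note that the helper as written still uses the Python 2 insertions.next() call:
from pygments.token import Text, Comment

host_tokens = [(0, Text, 'hello '), (6, Text, 'world')]   # output of a host lexer
insertions = [(6, [(0, Comment, '# injected')])]          # splice before 'world'

# do_insertions re-bases positions so the combined stream stays contiguous;
# the Comment token is yielded between the two Text tokens.
combined = list(do_insertions(insertions, host_tokens))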
Generate a subclass of baselexer that accepts the ObjectiveC syntax extensions. | def objective(baselexer):
# Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
# since that's quite common in ordinary C/C++ files. It's OK to match
# JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
#
# The upshot of this is that we CANNOT match @class or @interface
_oc_keywords = re.compile(r'@(?:end|implementation|protocol)')
# Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
# (note the identifier is *optional* when there is a ':'!)
_oc_message = re.compile(r'\[\s*[a-zA-Z_][a-zA-Z0-9_]*\s+'
r'(?:[a-zA-Z_][a-zA-Z0-9_]*\s*\]|'
r'(?:[a-zA-Z_][a-zA-Z0-9_]*)?:)')
class GeneratedObjectiveCVariant(baselexer):
"""
Implements Objective-C syntax on top of an existing C family lexer.
"""
tokens = {
'statements': [
(r'@"', String, 'string'),
(r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'@0[0-7]+[Ll]?', Number.Oct),
(r'@\d+[Ll]?', Number.Integer),
(r'(in|@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@optional)\b', Keyword),
(r'(id|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
Keyword.Type),
(r'@(true|false|YES|NO)\n', Name.Builtin),
(r'(YES|NO|nil)\b', Name.Builtin),
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_classname')),
(r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_forward_classname')),
inherit,
],
'oc_classname' : [
# interface definition that inherits
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
# interface definition for a category
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
bygroups(Name.Class, Text, Name.Label), '#pop'),
# simple interface / implementation
('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
],
'oc_forward_classname' : [
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
bygroups(Name.Class, Text), 'oc_forward_classname'),
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop')
],
'root': [
# methods
(r'^([-+])(\s*)' # method marker
r'(\(.*?\))?(\s*)' # return type
r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)', # begin of method name
bygroups(Keyword, Text, using(this),
Text, Name.Function),
'method'),
inherit,
],
'method': [
include('whitespace'),
# TODO unsure if ellipses are allowed elsewhere, see
# discussion in Issue 789
(r',', Punctuation),
(r'\.\.\.', Punctuation),
(r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
Name.Variable)),
(r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
(';', Punctuation, '#pop'),
('{', Punctuation, 'function'),
('', Text, '#pop'),
],
}
def analyse_text(text):
if _oc_keywords.search(text):
return 1.0
elif '@"' in text: # strings
return 0.8
elif _oc_message.search(text):
return 0.8
return 0
return GeneratedObjectiveCVariant | [
"def create_lexer(self):\n raise NotImplementedError()",
"def test_lexer():\n generator_stream = cStringIO.StringIO()\n generator_stream.write(\"\"\"\n[[:newline:]] NEWLINE\n[[:whitespace:]] IGNORE\n'namespace'[[:whitespace:]]* NAMESPACE\n[a-z][a-z0-9_?!]* ID\n':='[[:whitespace:]]* ASSIGNMENT\n'+'[[:whitespace:]]* PLUS\n'.' DOT\n\"\"\")\n generator_stream.seek(0)\n\n table_1 = toe.symbol.table()\n generator = TLexerGenerator(table_1)\n\n lexer = TLexer()\n lexer.states = generator.load(generator_stream, False)\n\n #for i in range(len(lexer.states)):\n # print generator.string_transitions(i)\n\n assert(len(lexer.states) > 2) # initial, invalid\n yield (\"len(lexer.states)\", len(lexer.states))\n\n test_stream = cStringIO.StringIO()\n test_stream.write(\"\"\"namespace aaa.aaa.aaa\n\n\"\"\")\n test_stream.seek(0)\n lexer.source_stream = test_stream\n\n while not lexer.eof_p:\n yield lexer.token\n lexer.consume()",
"def make_lex(symbols):\n ...",
"def grammar(cls):\n name = pp.Word(pp.alphanums + '_-')\n return (\n pp.Suppress(pp.Literal('@keyframes')) +\n name +\n pp.Suppress(pp.Literal('{')) +\n pp.ZeroOrMore(KeyframeProperties.parser()) +\n pp.Suppress(pp.Literal('}'))\n )",
"def grammar(cls):\n # Todo: Handle keyframe properties where there are more than one\n # keyframe selectors\n keyframe_selector = (\n (\n pp.Word(pp.nums + '.') +\n pp.Suppress(pp.Literal('%')).leaveWhitespace()\n ) |\n pp.Literal('from') |\n pp.Literal('to')\n )\n return (\n keyframe_selector +\n pp.Suppress(pp.Literal('{')) +\n pp.ZeroOrMore(Property.parser()) +\n pp.Suppress(pp.Literal('}'))\n )",
"def generate_syntax(core_grammar_path, extension_grammar_paths):\n\n def load_keywords(core_grammar_path, extension_grammar_paths, keywords):\n \"\"\"Load JSON arammers.\"\"\"\n\n def instruction_exists(instruction):\n \"\"\"Returns True if instruction does, False otherwise.\"\"\"\n for names in keywords.itervalues():\n for inst in names:\n if inst == instruction:\n return True\n return False\n\n with open(core_grammar_path) as grammar_file:\n grammar = json.loads(grammar_file.read())\n\n if 'instructions' in grammar:\n for instruction in grammar['instructions']:\n opname = instruction['opname']\n if not instruction_exists(opname):\n keywords['Instruction'].append(opname)\n\n if 'operand_kinds' in grammar:\n for operand_kind in grammar['operand_kinds']:\n if 'enumerants' in operand_kind:\n for enumerant in operand_kind['enumerants']:\n enumname = enumerant['enumerant']\n if enumname not in keywords['Enumerant']:\n keywords['Enumerant'].append(enumname)\n\n extinst_group_names = []\n for grammar_path in extension_grammar_paths:\n with open(grammar_path) as grammar_file:\n grammar = json.loads(grammar_file.read())\n grammar_name = ''.join(\n word.capitalize()\n for word in os.path.basename(grammar_path).lstrip(\n 'extinst.').rstrip('.grammer.json').split('.'))\n\n if 'instructions' in grammar:\n keywords[grammar_name] = []\n for instruction in grammar['instructions']:\n opname = instruction['opname']\n if not instruction_exists(opname):\n keywords[grammar_name].append(opname)\n\n extinst_group_names.append('Spirv{0}'.format(grammar_name))\n\n return keywords, extinst_group_names\n\n def write(string):\n \"\"\"Append to the content string.\"\"\"\n write.content += string\n\n write.content = ''\n\n keywords, extinst_group_names = load_keywords(\n core_grammar_path, extension_grammar_paths, KEYWORDS)\n\n write('''\" File: spirv.vim\n\" Author: Kenneth Benzie (Benie) <k.benzie83@gmail.com>\n\" Description: Vim syntax file for the Khronos Group's SPIR-V standard.\n\" Last Modified: {0}\n\n\" Don't load the sytnax multiple times\nif exists('b:current_syntax')\n finish\nendif\n\n'''.format(datetime.datetime.now().strftime('%B %d, %Y')))\n\n write(r'''\" Generic matches\nsyn match SpirvSpecialComment contained\n\\ \"\\(SPIR-V\\|\\(Version\\|Generator\\|Bound\\|Schema\\):\\)\"\nsyn match SpirvComment \";.*$\" contains=SpirvSpecialComment\nsyn match SpirvError \"\\w\\+\"\nsyn match SpirvID \"%\\w\\+\"\nsyn region SpirvString start=+\"+ end=+\"+\nsyn match SpirvNumber \"\\s\\zs\\d\\+\"\nsyn match SpirvFloat \"\\s\\zs\\d\\+\\.\\d\\+\"\n''')\n\n for group, group_keywords in keywords.iteritems():\n write('\\n\" %s keywords\\n' % group)\n syn_keyword = 'syn keyword Spirv%s' % group\n write(syn_keyword)\n\n length = len(syn_keyword)\n for keyword in group_keywords:\n opname = ' ' + keyword\n keyword_length = len(opname)\n\n if length + keyword_length > 80:\n write('\\n\\\\')\n length = 1\n\n write(opname)\n length += keyword_length\n write('\\n')\n\n write('\\n\" Define highlight groups\\n')\n for group_name in GROUP_NAMES:\n write('hi default link {0} {1}\\n'.format(group_name[0], group_name[1]))\n\n write('''\n\" Define current ID highlight group\nif exists('g:spirv_enable_current_id') && g:spirv_enable_current_id\n execute 'hi SpirvCurrentID '.g:spirv_current_id_highlight\nendif\n''')\n\n if len(extinst_group_names):\n groups = ([], [])\n for group in extinst_group_names:\n groups[0].append(' hi default link {0} SpirvError'.format(group))\n groups[1].append(' hi default link {0} 
SpirvInstruction'.format(\n group))\n\n write('''\n\" Define extended instruction highlight groups\nif exists('g:spirv_enable_extinst_error') && g:spirv_enable_extinst_error\n{0}\nelse\n{1}\nendif\n'''.format('\\n'.join(groups[0]), '\\n'.join(groups[1])))\n\n return write.content",
"def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")",
"def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok",
"def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\r\n\r\n # A concrete compiler class that does not override compile()\r\n # should implement _compile().\r\n pass",
"def Parse(self, lex):\n\n # The next two variables store a stack of commands the user wants\n # to manually add to the list of stackable instance_commands.\n # (Allowing the users to directly manipulate the transformation stack\n # is an experimental feature as of 2015- Most users don't need this.)\n user_push_left_commands = []\n user_push_right_commands = []\n\n #sys.stdout.write(' -- Parse() invoked --\\n')\n\n # Keep track of the location in the users' input files where this\n # class object is first defined. (Keep in mind that the user might\n # augment their original class definition, adding new content to an\n # existing class. In that case self.srcloc_begin will have already\n # been assigned. We don't want to overwrite it in that case.)\n if self.srcloc_begin is None: # <-- not defined yet?\n self.srcloc_begin = lex.GetSrcLoc()\n\n while True:\n\n cmd_token = lex.get_token()\n\n #print('Parse(): token = \\\"'+cmd_token+'\\\", '+lex.error_leader())\n\n if cmd_token == lex.eof:\n #print('Parse(): EOF encountered\\n')\n break\n\n if (cmd_token in ('write',\n 'write_once',\n 'create_var',\n 'create_static_var',\n 'replace')):\n\n open_paren = lex.get_token()\n\n #print('Parse(): open_paren=\\\"'+open_paren+'\\\"')\n if open_paren == '{':\n # ..then the user neglected to specify the \"dest\" file-name\n # argument. In that case, supply the default, ''.\n # (which is shorthand for the standard out in this case)\n open_curly = open_paren[0]\n open_paren = ''\n close_paren = ''\n tmpl_filename = ''\n srcloc = lex.GetSrcLoc()\n else:\n tmpl_filename = lex.get_token()\n if tmpl_filename == ')':\n tmpl_filename = ''\n close_paren = ')'\n else:\n close_paren = lex.get_token()\n open_curly = lex.get_token()\n srcloc = lex.GetSrcLoc()\n\n if ((cmd_token == 'create_var') or\n (cmd_token == 'create_static_var')):\n tmpl_filename = None\n # This means: define the template without attaching\n # a file name to it. (IE., don't write the contents\n # of what's enclosed in the curly brackets { } to a file.\n # Why?\n # \"create_var\" commands are implemented as \"write() {...}\"\n # commands (containing one or more variables) which\n # never get written to a file or the terminal. 
Parsing\n # the contents of the curly brackets defines the variables \n # inside in the same way as parsing the text inside an\n # ordinary \"write() {...}\" command.\n\n if (cmd_token == 'replace'):\n tmpl_filename = \"ttree_replacements.txt\"\n\n if ((open_curly != '{') or\n ((open_paren == '') and (close_paren != '')) or\n ((open_paren == '(') and (close_paren != ')'))):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in ' + lex.error_leader() + '\\n\\n'\n 'Syntax error at the beginning of the \\\"' + cmd_token + '\\\" command.')\n if tmpl_filename != None:\n tmpl_filename = RemoveOuterQuotes(\n tmpl_filename, lex.quotes)\n # ( The previous line is similar to:\n # tmpl_filename = tmpl_filename.strip(lex.quotes) )\n\n tmpl_contents = lex.ReadTemplate()\n StaticObj.CleanupReadTemplate(tmpl_contents, lex)\n\n #sys.stdout.write(' Parse() after ReadTemplate, tokens:\\n\\n')\n # print(tmpl_contents)\n # sys.stdout.write('\\n----------------\\n')\n\n if (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n\n # Check for a particular bug:\n # Ordinary instance variables (preceded by a '$')\n # should never appear in a write_once() statement.\n for entry in tmpl_contents:\n if (isinstance(entry, VarRef) and\n (entry.prefix[0] == '$')):\n err_msg = ('Error(' + g_module_name + '.StaticObj.Parse()):\\n' +\n ' Error near ' + ErrorLeader(entry.srcloc.infile,\n entry.srcloc.lineno) + '\\n' +\n ' Illegal variable: \\\"' + entry.prefix + entry.descr_str + entry.suffix + '\\\"\\n' +\n ' All variables in a \\\"' + cmd_token + '\\\" statement must be statically\\n' +\n ' defined, and hence they must begin with a \\'@\\' prefix character.\\n' +\n ' (not a \\'$\\' character).\\n')\n if (cmd_token == 'write_once'):\n err_msg += ' Suggestion: Use the \\\"write()\\\" command instead.\\n'\n raise InputError(err_msg)\n\n if cmd_token == 'write':\n commands = self.instance_commands\n elif (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n commands = self.commands\n elif (cmd_token == 'create_var'):\n commands = self.instance_commands\n else:\n assert(False)\n\n command = WriteFileCommand(tmpl_filename,\n tmpl_contents,\n srcloc)\n commands.append(command)\n\n # end of \"if (cmd_token == 'write') or (cmd_token ==\n # 'write_once'):\"\n\n elif cmd_token == 'delete':\n\n instobj_descr_str = lex.get_token()\n instobj_srcloc = lex.GetSrcLoc()\n delete_command = DeleteCommand(instobj_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n self.instance_commands.append(mod_command)\n\n elif cmd_token == 'using':\n\n namespacecom_str = lex.get_token()\n if namespacecom_str != 'namespace':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The \\\"' + cmd_token + '\\\" command must be followed by the \\\"namespace\\\" keyword.')\n namespace_str = lex.get_token()\n\n stnode = StrToNode(namespace_str,\n self,\n lex.GetSrcLoc())\n\n self.namespaces.append(stnode)\n\n elif cmd_token == 'category':\n cat_name = lex.get_token()\n\n cat_count_start = 1\n cat_count_incr = 1\n backup_wordterminators = lex.wordterminators\n lex.wordterminators += ','\n #sys.stderr.write('DEBUG: wordterminators=\"'+str(lex.wordterminators)+'\"\\n')\n\n open_paren = lex.get_token()\n if (open_paren == '('):\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer, float, or 
string\n try:\n cat_count_start = int(token)\n except ValueError:\n try:\n cat_count_start = float(token)\n except ValueError:\n cat_count_start = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer,float,or string\n try:\n cat_count_incr = int(token)\n except ValueError:\n try:\n cat_count_incr = float(token)\n except ValueError:\n cat_count_incr = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token != ')':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '...\\\" has too many arguments,\\n'\n ' or lacks a close-paren \\')\\'.\\n')\n\n else:\n lex.push_token(open_paren)\n\n if (isinstance(cat_count_start, basestring) or\n isinstance(cat_count_incr, basestring)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '(' +\n str(cat_count_start) + ',' +\n str(cat_count_incr) + ')\\\"\\n'\n ' Only numeric counters are currently supported.\\n')\n\n # check for really stupid and unlikely errors:\n if type(cat_count_start) is not type(cat_count_incr):\n if ((isinstance(cat_count_start, int) or\n isinstance(cat_count_start, float))\n and\n (isinstance(cat_count_incr, int) or\n isinstance(cat_count_incr, float))):\n cat_count_start = float(cat_count_start)\n cat_count_incr = float(cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Problem with \\\"' + cmd_token + '\\\" command.\\n')\n\n prefix = cat_name[0]\n cat_name = cat_name[1:]\n # Add this category to the list.\n if prefix == '@':\n self.categories[cat_name] = Category(cat_name)\n self.categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n elif prefix == '$':\n self.instance_categories[cat_name] = Category(cat_name)\n self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' category name = \\\"' + cat_name + '\\\" lacks a \\'$\\' or \\'&\\' prefix.\\n'\n ' This one-character prefix indicates whether the variables in this\\n'\n ' new category will be static or dynamics variables\\n')\n\n\n lex.wordterminators = backup_wordterminators\n\n\n elif (cmd_token == '}') or (cmd_token == ''):\n # a '}' character means we have reached the end of our scope.\n # Stop parsing and let the caller deal with the remaining text.\n # (And a '' means we reached the end of the file... I think.)\n break\n\n # elif (cmd_token == 'include'):\n # \"include filename\" loads a file (adds it to the file stack)\n # The \"TtreeShlex\" class (from which \"lex\" inherits) handles\n # \"include\" statements (ie. \"source\" statements) automatically.\n\n elif ((cmd_token == 'push') or\n (cmd_token == 'push_left') or\n (cmd_token == 'push_right')):\n\n push_cmd_src_loc = lex.GetSrcLoc()\n push_cmd_text = lex.GetParenExpr()\n if ((len(push_cmd_text) < 2) or\n (push_cmd_text[0] != '(') or\n (push_cmd_text[-1] != ')')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Bad \\\"push\\\" command. 
Expected an expression in parenthesis.\\n')\n push_cmd_text = push_cmd_text[1:-1]\n\n if (cmd_token == 'push_right'):\n push_command = PushRightCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_right_commands.append(push_command)\n else:\n push_command = PushLeftCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_left_commands.append(push_command)\n self.instance_commands.append(push_command)\n\n elif ((cmd_token == 'pop') or\n (cmd_token == 'pop_left') or\n (cmd_token == 'pop_right')):\n\n pop_cmd_text = lex.GetParenExpr()\n pop_cmd_src_loc = lex.GetSrcLoc()\n if (cmd_token == 'pop_right'):\n if len(user_push_right_commands) > 0:\n push_command = user_push_right_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many \\\"pop_right\\\" commands.\\n')\n pop_command = PopRightCommand(push_command,\n pop_cmd_src_loc)\n else:\n if len(user_push_left_commands) > 0:\n push_command = user_push_left_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many pop, (or pop_left) commands.\\n')\n pop_command = PopLeftCommand(push_command,\n pop_cmd_src_loc)\n self.instance_commands.append(pop_command)\n\n else:\n\n # Otherwise, 'cmd_token' is not a command at all.\n # Instead it's the name of an object which needs to be\n # defined or instantiated.\n # First, let's figure out which.\n\n # (small detail: The \"class\" keyword is optional\n # and can be skipped.)\n if cmd_token == 'class':\n object_name = lex.get_token()\n else:\n object_name = cmd_token\n\n next_symbol = lex.get_token()\n #print('Parse(): next_token=\\\"'+next_symbol+'\\\"')\n\n class_parents = []\n\n if next_symbol == 'inherits':\n\n # Then read in the list of classes which are parents of\n # of this class. (Multiple inheritance is allowed.)\n # (We don't yet check to insure that these are valid class\n # names. We'll do this later in LookupStaticRefs().)\n\n syntax_err_inherits = False\n\n while True:\n next_symbol = lex.get_token()\n if ((next_symbol == '{') or\n (next_symbol == lex.eof)):\n break\n elif (next_symbol == '='):\n syntax_err_inherits = True\n break\n else:\n class_parents.append(StrToNode(next_symbol,\n self,\n lex.GetSrcLoc()))\n if len(class_parents) == 0:\n syntax_err_inherits = True\n\n if syntax_err_inherits:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"inherits\\\" should be followed by one or more class names.\\n')\n\n if next_symbol == '{':\n child_name = object_name\n\n # Check to see if this class has already been defined.\n # (IE. check if it present in the list of children.)\n # If the name (child_name) matches another class (child),\n # then the contents of the new class will be appended to\n # the old. This way, class definitions can be augmented\n # later. 
(This is the way \"namespaces\" work in C++.)\n child = self.children.get(child_name)\n # If found, we refer to it as \"child\".\n # If not, then we create a new StaticObj named \"child\".\n if child is None:\n child = StaticObj(child_name, self)\n self.children[child_name] = child\n assert(child.name == child_name)\n\n # Either way we invoke child.Parse(), to\n # add contents (class commands) to child.\n child.Parse(lex)\n child.class_parents += class_parents\n\n elif next_symbol == '=':\n next_symbol = lex.get_token()\n if next_symbol == 'new':\n base_name = object_name\n base_srcloc = lex.GetSrcLoc()\n array_slice_str = ''\n if base_name.find('/') != -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n'\n ' (You can not instantiate some other object\\'s members.)\\n'\n ' Invalid instance name: \\\"' + base_name + '\\\"\\n')\n\n elif base_name in self.instname_refs:\n ref_srcloc = self.instname_refs[base_name]\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Duplicate class/array \\\"' + base_name + '\\\"\\n'\n ' This occurs near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n'\n ' and also near:\\n'\n ' ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n')\n else:\n self.instname_refs[base_name] = base_srcloc\n\n # Check for syntax allowing the user to instantiate\n # PART of an array. For example, check for this syntax:\n # \"monomers[20-29] = new ...\". This only fills in a\n # portion of the array from: monomers[20]...monomers[29]\n #\n # We also have to deal with multidimensional syntax\n # like this: \"cells[3][2-3][1][4-7] = new...\"\n # Split the \"cells[3][2-3][2][4-7][2]\" string into\n # \"cells[3][\", \"][1][\", and \"]\".\n # Later, we will instantiate InstanceObjs with names:\n # \"cells[3][2][1][4]\"\n # \"cells[3][2][1][5]\"\n # \"cells[3][2][1][6]\"\n # \"cells[3][2][1][7]\"\n # \"cells[3][3][1][4]\"\n # \"cells[3][3][1][5]\"\n # \"cells[3][3][1][6]\"\n # \"cells[3][3][1][7]\"\n\n p1 = base_name.find('[')\n if p1 == -1:\n p1 = len(base_name)\n else:\n p1 += 1\n array_name_tkns = [base_name[0:p1]]\n array_name_offsets = []\n\n p2 = -1\n p4 = p1\n while p4 < len(base_name):\n p3 = base_name.find(']', p1)\n\n if p3 == -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\']\\' character following:\\n'\n ' \\\"' +\n base_name[0:p1] +\n '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n # Search for a '-', ':', or '*' character between []\n # For example \"monomers[20-29] = \"\n # If present, the user wants us to fill a range\n # inside an array. 
This could be a multi-dimensional\n # array, (eg \"cells[3][2-6][4-11] = \"), so we must\n # figure out which entries in the array the user\n # wants us to fill (in this case, \"[2-6][4-11]\")\n p2 = base_name.find('-', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find(':', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find('*', p1)\n if p2 == -1:\n p2 = len(base_name)\n\n p4 = p3 + 1\n if p4 < len(base_name):\n if base_name[p4] == '[':\n p4 += 1 # skip over it\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\'[\\' character forllowing a \\']\\' character in\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n if p2 > p3:\n # Then no '-', ':', or '*' character was found\n # between '[' and the subsequent ']' character\n # In that case, ignore this token\n\n token = base_name[p1:p4]\n # append all this text to the previous token\n if len(array_name_tkns) == 0:\n array_name_tkns.append(token)\n else:\n array_name_tkns[-1] = array_name_tkns[-1] + token\n array_slice_str = 'slice '\n else:\n\n assert((p1 < p2) and (p2 < p3))\n index_offset_str = base_name[p1:p2]\n if len(index_offset_str) == 0:\n index_offset = 0\n\n elif (not str.isdigit(index_offset_str)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a nonnegative integer preceding the \\'' +\n base_name[\n p2] + '\\' character in:\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n else:\n index_offset = int(index_offset_str)\n token = base_name[p3:p4]\n array_name_tkns.append(token)\n array_name_offsets.append(index_offset)\n\n p1 = p4\n\n # If the statobj_str token contains a ']' character\n # then this means the user wants us to make multiple\n # copies of this template. The number of copies\n # to instantiate is enclosed in the [] characters\n # (Example wat = new Water[3000] creates\n # 3000 instantiations of the Water template\n # named wat[1], wat[2], wat[3], ... wat[3000]).\n\n # Note: Here '[' and ']' have a special meaning.\n # So lex.get_token() should not treat them as\n # ordinary word characters. To prevent this:\n orig_wordterminators = lex.wordterminators\n lex.wordterminators += '[],'\n\n class_name_str = lex.get_token()\n if ((class_name_str == lex.eof) or\n (class_name_str == '}')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n 'Class ends prematurely. 
(Incomplete \\\"new\\\" statement.)')\n\n assert(len(class_name_str) > 0)\n\n if (class_name_str[0] == '['):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' new ' + class_name_str + '\\n'\n 'Bracketed number should be preceeded by a class name.')\n class_names = []\n weights = []\n num_by_type = []\n if class_name_str == 'random':\n class_names, weights, num_by_type = self._ParseRandom(\n lex)\n tmp_token = lex.get_token()\n if len(tmp_token) > 0:\n if tmp_token[0] == '.':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + tmp_token + '\\\" should not follow random()\\n'\n '\\n'\n ' Coordinate transformations and other commands (such as \\\"' +\n tmp_token + '\\\")\\n'\n ' should appear after each class name inside the random() statement,\\n'\n ' not after it. For example, do not use:\\n'\n ' \\\"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\\\"\\n'\n ' Use this instead:\\n'\n ' \\\"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\\\"\\n')\n lex.push_token(tmp_token)\n else:\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n\n array_size = []\n array_suffixes = []\n array_srclocs = []\n\n # A general \"new\" statement could look like this:\n # \"m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)\n # [3].trans(0,0,4.5)\"\n # So far we have processed \"m = new Mol.scale(3)\".\n # Now, we need to deal with:\n # \"[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)\"\n while True:\n new_token = lex.get_token()\n # if ((new_token == '') or (new_token == lex.eof)):\n # break\n if new_token == '[':\n number_str = lex.get_token()\n close_bracket = lex.get_token()\n if ((not str.isdigit(number_str)) or\n (close_bracket != ']')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near ' + lex.error_leader() + '\\n'\n ' A \\'[\\' character should be followed by a number and a \\']\\' character.')\n array_size.append(int(number_str))\n suffix = lex.get_token()\n\n if ((suffix == '') or (suffix == lex.eof)):\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n break\n if suffix[0] == '.':\n lex.push_token(suffix[1:])\n suffix_func = lex.GetParenExpr()\n suffix = '.' 
+ suffix_func\n array_suffixes.append(suffix)\n array_srclocs.append(lex.GetSrcLoc())\n else:\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n lex.push_token(suffix)\n if suffix != '[':\n break\n else:\n lex.push_token(new_token)\n break\n srcloc_final = lex.GetSrcLoc()\n\n lex.wordterminators = orig_wordterminators\n\n assert(len(array_size) == len(array_suffixes))\n\n if len(array_size) > 0:\n if len(array_name_offsets) == 0:\n assert(len(array_name_tkns) == 1)\n array_name_offsets = [0] * len(array_size)\n array_name_tkns[0] = array_name_tkns[0] + '['\n for d in range(0, len(array_size) - 1):\n array_name_tkns.append('][')\n array_name_tkns.append(']')\n\n if len(array_name_offsets) != len(array_size):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near/before ' + lex.error_leader() + '\\n'\n ' Array ' + array_slice_str +\n 'dimensionality on the left side of the \\'=\\' character (' + str(\n len(array_name_offsets)) + ')\\n'\n ' does not match the array dimensionality on the right side (' + str(len(array_size)) + ').\\n')\n\n # If the user wants us to instantiate a\n # multidimensional array of class instances\n # then we must loop through this multidimensional\n # array and create a new instance for each entry.\n # For example fill a 3 dimensional volume\n # with 1000 water molecules\n # Example 1:\n # solvent = new Water [10][10][10]\n # (The coordinates must be read separately.)\n # In this example array_size = [10,10,10]\n # array_suffixes = ['','','']\n # Example 2:\n # solvent = new Water.transcm(0,0,0)\n # [10].trans(0,0,4)\n # [10].trans(0,4,0).rot(45,0,0,1)\n # [10].trans(4,0,0)\n # (This command generates a 10x10x10 lattice\n # simple cubic lattice of regularly spaced\n # water molecules pointing the same direction.)\n # In this example array_size = [10,10,10]\n # and\n # class_suffix = 'transcm(0,0,0)'\n # and\n # array_suffixes = ['trans(0,0,4)',\n # 'trans(0,4,0).rot(45,0,0,1)',\n # 'trans(4,0,0)']\n # Note that tree ignores the \"trans()\"\n # commands, it stores them so that inherited\n # classes can attempt to process them.\n\n D = len(array_size)\n if D > 0:\n\n i_elem = 0 # (used to look up selection_list[])\n if len(num_by_type) > 0:\n selection_list = []\n for i in range(0, len(num_by_type)):\n selection_list += [i] * num_by_type[i]\n random.shuffle(selection_list)\n\n num_elements = 1\n for d in range(0, D):\n num_elements *= array_size[d]\n err_msg_str = str(array_size[0])\n for d in range(1, D):\n err_msg_str += '*' + str(array_size[d])\n if num_elements != len(selection_list):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near or before ' + lex.error_leader() + '\\n'\n ' The sum of the numbers in the \\\"new random([],[])\\\" command (' + str(\n len(selection_list)) + ')\\n'\n ' does not equal the number of elements in the array (' + err_msg_str + ')\\n')\n\n digits = [0 for d in range(0, D)]\n table_filled = False\n pushed_commands = []\n while (not table_filled):\n instance_name = array_name_tkns[0]\n for d in range(0, D):\n i = digits[d]\n instance_name += str(i +\n array_name_offsets[d]) +\\\n array_name_tkns[d + 1]\n\n # Does the user want us to select\n # a class at random?\n if len(class_names) > 0:\n\n if len(num_by_type) > 0:\n class_name_str = class_names[\n selection_list[i_elem]]\n else:\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(\n class_name_str, 
lex)\n\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(instance_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n # Now go to the next entry in the table.\n # The indices of this table are similar to\n # a D-digit integer. We increment this d-digit\n # number now.\n d_carry = D - 1\n while True:\n digits[d_carry] += 1\n if digits[d_carry] >= array_size[d_carry]:\n digits[d_carry] = 0\n if array_suffixes[d_carry] != '':\n for i in range(0, array_size[d_carry] - 1):\n partner = pushed_commands.pop()\n command = PopRightCommand(partner,\n srcloc_final)\n self.instance_commands.append(\n command)\n d_carry -= 1\n else:\n if array_suffixes[d_carry] != '':\n command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),\n array_srclocs[d_carry])\n pushed_commands.append(command)\n self.instance_commands.append(\n command)\n break\n if d_carry < 0:\n table_filled = True\n break\n\n # (used to look up selection_list[])\n i_elem += 1\n pass\n\n else:\n if len(class_names) > 0:\n assert(len(num_by_type) == 0)\n # if len(num_by_type) > 0:\n # class_name_str = class_names[selection_list[i_elem]]\n # else:\n # class_name_str = RandomSelect(class_names,\n # weights)\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(base_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n else:\n\n # Now check for commands using this syntax:\n #\n # \"MolNew = MolOld.rot(45,1,0,0).scale(100.0)\"\n # /|\\ /|\\ `-----------.------------'\n # | | |\n # child_name parent_name optional suffix\n\n child_name = object_name\n parent_name_str = next_symbol\n\n child = StaticObj(child_name, self)\n\n parent_name, suffix, suffix_srcloc = \\\n self._ProcessClassName(parent_name_str, lex)\n\n child.class_parents.append(StrToNode(parent_name,\n self,\n lex.GetSrcLoc()))\n\n if suffix != '':\n # Assume the command is a StackableCommand. 
(This\n # way it will enclose the commands of the parents.)\n # Stackable commands come in (Push...Pop) pairs.\n push_command = PushLeftCommand(suffix,\n suffix_srcloc)\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc)\n push_mod_command = ModCommand(push_command, './')\n pop_mod_command = ModCommand(pop_command, './')\n child.instance_commands_push.append(\n push_mod_command)\n child.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n #sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\\n')\n\n #sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\\n')\n\n # Check to see if this class has already been defined.\n if self.children.get(child_name) is not None:\n if self.children[i].IsDeleted():\n del self.children[child_name]\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The name \\\"' + child_name + '\\\" is already in use.')\n\n self.children[child_name] = child\n\n else:\n\n # Otherwise hopefully this is a post-instance command\n # (a command applied to a class which has been instantiated)\n # In that case, the object_name would be followed by\n # a dot and a function-call containing a '(' paren (which\n # would have ended up stored in the next_symbol variable).\n\n open_paren_encountered = False\n if (next_symbol == '('):\n open_paren_encountered = True\n # put '(' back in the stream\n lex.push_token(next_symbol)\n\n i_dot = object_name.rfind('.')\n i_slash = object_name.rfind('/')\n dot_encountered = ((i_dot != -1) and\n ((i_slash == -1) or (i_slash < i_dot)))\n\n if (open_paren_encountered and dot_encountered and\n (object_name[:1] != '[')):\n\n obj_descr_str, suffix, suffix_srcloc = \\\n self._ExtractSuffix(object_name, lex)\n\n path_tokens = obj_descr_str.split('/')\n\n i_last_ptkn, staticobj = FollowPath(path_tokens,\n self,\n lex.GetSrcLoc())\n instobj_descr_str = './' + \\\n '/'.join(path_tokens[i_last_ptkn:])\n\n # I still support the \"object_name.delete()\" syntax for\n # backwards compatibility. (However newer input files\n # use this equivalent syntax: \"delete object_name\")\n if suffix == 'delete()':\n delete_command = DeleteCommand(suffix_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n staticobj.instance_commands.append(mod_command)\n else:\n push_command = PushLeftCommand(suffix,\n suffix_srcloc,\n '.')\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc,\n '.')\n push_mod_command = ModCommand(push_command,\n instobj_descr_str)\n pop_mod_command = ModCommand(pop_command,\n instobj_descr_str)\n if instobj_descr_str != './':\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands\\n')\n staticobj.instance_commands.append(\n push_mod_command)\n staticobj.instance_commands.append(\n pop_mod_command)\n else:\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands_push\\n')\n # Question: Should I make these PushRight commands and\n # append them in the opposite order?\n # If so I also have to worry about the case\n # above.\n staticobj.instance_commands_push.append(\n push_mod_command)\n staticobj.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n else:\n # Otherwise, the cmd_token is not any of these:\n # \"write\", \"write_once\", \"replace\",\n # \"create_var\", \"create_static_var\",\n # \"delete\", or \"category\".\n # ... 
and it is ALSO not any of these:\n # the name of a class (StaticObj), or\n # the name of an instance (InstanceObj)\n # followed by either a '.' or \"= new\"\n #\n # In that case, it is a syntax error:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Syntax error at or before ' + lex.error_leader() + '\\n'\n ' \\\"' + object_name + ' ' + next_symbol + '\\\".')\n\n # Keep track of the location in the user's input files\n # where the definition of this object ends.\n self.srcloc_end = lex.GetSrcLoc()\n\n # Finally, if there are any remaining user_push_left_commands or\n # user_push_right_commands, deal with them (by popping them).\n for push_command in user_push_left_commands:\n push_command = user_push_left_commands.pop()\n pop_command = PopLeftCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)\n\n for push_command in user_push_right_commands:\n push_command = user_push_right_commands.pop()\n pop_command = PopRightCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)",
"def generate(env):\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n env[\"LEXFLAGS\"] = SCons.Util.CLVar(\"\")\n\n if sys.platform == 'win32':\n # ignore the return - we do not need the full path here\n _ = get_lex_path(env, append_paths=True)\n env[\"LEX\"] = env.Detect(BINS)\n if not env.get(\"LEXUNISTD\"):\n env[\"LEXUNISTD\"] = SCons.Util.CLVar(\"\")\n env[\"LEXCOM\"] = \"$LEX $LEXUNISTD $LEXFLAGS -t $SOURCES > $TARGET\"\n else:\n env[\"LEX\"] = env.Detect(BINS)\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS -t $SOURCES > $TARGET\"",
"def prepare_context(grammar=None, lexer=None, lkt_file=None,\n warning_set=default_warning_set,\n symbol_canonicalizer=None, show_property_logging=False,\n types_from_lkt=False, lkt_semantic_checks=False,\n case_insensitive: bool = False,\n version: Optional[str] = None,\n build_date: Optional[str] = None,\n standalone: bool = False,\n property_exceptions: Set[str] = set()):\n\n # Have a clean build directory\n if P.exists('build'):\n shutil.rmtree('build')\n os.mkdir('build')\n\n # Try to emit code\n ctx = CompileCtx(lang_name='Foo', short_name='foo', lexer=lexer,\n grammar=grammar,\n symbol_canonicalizer=symbol_canonicalizer,\n show_property_logging=show_property_logging,\n lkt_file=lkt_file,\n types_from_lkt=types_from_lkt,\n lkt_semantic_checks=lkt_semantic_checks,\n case_insensitive=case_insensitive,\n version=version,\n build_date=build_date,\n standalone=standalone,\n property_exceptions=property_exceptions)\n ctx.warnings = warning_set\n ctx.pretty_print = pretty_print\n\n return ctx",
"def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n return [chr(ord(start) + i) + str(n)\n for i in range(0, 3)\n for n in range(1, 10)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n (FOR_ALL_SYMBOL, 2),\n (EXISTS_SYMBOL, 2),\n (RELATION_SYMBOL.format(1), 2), # unary-relation\n (RELATION_SYMBOL.format(2), 3), # binary-relation\n ]),\n predicates=make_symbols('p'),\n constants=make_symbols('a'),\n variables=make_symbols('x'),\n )",
"def parse(self, tokens):\n self.logger.debug(\"Parsing some nice C code!\")\n self.init_lexer(tokens)\n self.typedefs = set()\n cu = self.parse_translation_unit()\n self.logger.info(\"Parsing finished\")\n return cu",
"def op_compiler(src: str, style: dict=wsstyle.STL, strict=False):\n ins = []\n ins_buff = None\n for token in Lexer(Reader(src, style), strict):\n if hasattr(token, 'ARGS'):\n if token.ARGS == 0:\n # dealing operators\n ins.append(token())\n else:\n ins_buff = token\n if isinstance(token, WSLiteral):\n ins.append(ins_buff(token))\n ins_buff = None\n return ins",
"def minimal_grammar():\n return _make_grammar('target = letter', {})",
"def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError(\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column//tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n if line[pos] == '#':\r\n comment_token = line[pos:].rstrip('\\r\\n')\r\n nl_pos = pos + len(comment_token)\r\n yield (COMMENT, comment_token,\r\n (lnum, pos), (lnum, pos + len(comment_token)), line)\r\n yield (NL, line[nl_pos:],\r\n (lnum, nl_pos), (lnum, len(line)), line)\r\n else:\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError(\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n newline = NEWLINE\r\n if parenlev > 0:\r\n newline = NL\r\n yield (newline, token, spos, epos, line)\r\n elif initial == '#':\r\n assert not token.endswith(\"\\n\")\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n # This yield is new; needed for better idempotency:\r\n yield (NL, token, spos, (lnum, pos), line)\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')",
"def augment_grammar(g):\n new_start = g.start + \"'\"\n old_start = g.start\n g.start = new_start\n g.nonterm.append(new_start)\n new_rule = grammar.Rule([new_start, [old_start]])\n g.rules.append(new_rule)",
"def compileTerm(self):\n self.current_compile = \"compileTerm\"\n # integerConstant\n if self.currentTokenTagEquals(\"integerConstant\"):\n self.vm_writer.writePush(\"constant\", self.eatTag(\"integerConstant\"))\n # stringConstant\n elif self.currentTokenTagEquals(\"stringConstant\"):\n string = self.eatTag(\"stringConstant\")\n self.vm_writer.writePush(\"constant\", len(string)) \n self.vm_writer.writeCall(\"String.new\", 1)\n for char in string:\n self.vm_writer.writePush(\"constant\", ord(char))\n self.vm_writer.writeCall(\"String.appendChar\", 2)\n # This, True, False, Null\n elif self.currentTokenTagEquals(\"keyword\"):\n keyword = self.eatTag(\"keyword\")\n if keyword in \"this\":\n self.vm_writer.writePush(\"pointer\", 0)\n elif keyword in \"true\":\n self.vm_writer.writePush(\"constant\", 0)\n self.vm_writer.writeArithmetic(\"not\")\n elif keyword in [\"false\", \"null\"]:\n self.vm_writer.writePush(\"constant\", 0)\n else:\n print(f\"\\\"{keyword}\\\" keyword not handled\")\n sys.exit(1)\n # ( expression )\n elif self.currentTokenEquals(\"(\"):\n self.eat(\"(\")\n self.compileExpression()\n self.eat(\")\")\n # unaryOp term\n elif self.currentTokenEquals([\"~\", \"-\"]):\n unary_op = self.eat([\"~\", \"-\"])\n self.compileTerm()\n if unary_op in \"~\":\n self.vm_writer.writeArithmetic(\"not\")\n else:\n self.vm_writer.writeArithmetic(\"neg\")\n else:\n identifier = self.eatTag(\"identifier\")\n\n # varName [ expression ]\n if self.currentTokenEquals(\"[\"):\n self.vm_writer.writePush(self.symbol_table.kindOf(identifier), self.symbol_table.indexOf(identifier))\n self.eat(\"[\")\n self.compileExpression()\n self.eat(\"]\")\n self.vm_writer.writeArithmetic(\"add\")\n self.vm_writer.writePop(\"pointer\", 1)\n self.vm_writer.writePush(\"that\", 0)\n # function call\n elif self.currentTokenEquals(\"(\"):\n self.eat(\"(\")\n arguments = self.compileExpressionList()\n self.eat(\")\")\n self.vm_writer.writePush(\"pointer\", 0)\n self.writeCall(f\"{self.class_name}.{identifier}\", arguments + 1)\n # method call\n elif self.currentTokenEquals(\".\"):\n arguments = 0\n self.eat(\".\")\n method_name = self.eatTag(\"identifier\")\n if self.symbol_table.exists(identifier):\n symbol_segment = self.symbol_table.kindOf(identifier)\n symbol_index = self.symbol_table.indexOf(identifier)\n identifier = self.symbol_table.typeOf(identifier)\n self.vm_writer.writePush(symbol_segment, symbol_index)\n arguments = 1\n self.eat(\"(\")\n arguments = self.compileExpressionList() + arguments\n self.eat(\")\")\n self.vm_writer.writeCall(f\"{identifier}.{method_name}\", arguments)\n # var\n elif self.symbol_table.exists(identifier):\n self.vm_writer.writePush(self.symbol_table.kindOf(identifier), self.symbol_table.indexOf(identifier))\n # oops\n else:\n print(self.symbol_table.class_table)\n print(self.symbol_table.sub_table)\n print(f\"\\\"{identifier}\\\" identifier not handled\")\n sys.exit(1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set an explicit indentation level for a block scalar. | def set_block_scalar_indent(token_class):
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def indent_level(self, indent_level):\n\n self.container['indent_level'] = indent_level",
"def indent(self, lvl=1):\n self.current_level += lvl\n assert self.current_level >= 0, \"Level of indentation cannot become negative\"\"\"",
"def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent",
"def indentTo(level: int):\n OPTIONS.indent = max(0, level)",
"def set_level(self, elem):\n tag_level = int(elem.tag[-1])\n if not self.is_base_level_adjusted:\n self.base_level = self.base_level + 1 - tag_level\n self.is_base_level_adjusted = True\n level = tag_level + self.base_level\n if level > 6:\n level = 6\n elem.tag = \"h%d\" % level",
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)",
"def _indent(self, level: int) -> Text:\n\n return self.indent * level",
"def indent(self, levels=1):\n self._indentation_level += levels\n return self",
"def setIndent(self, *args):\r\n return _osgDB.Output_setIndent(self, *args)",
"def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3",
"def SetLevel(self, level):\n self.level = level",
"def incrementIndent(self, levels: 'int const'=1) -> \"void\":\n return _coin.SoOutput_incrementIndent(self, levels)",
"def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"",
"def elementtree_indent(elem, level=...):\n ...",
"def _indent(self):\n if self._debug:\n self._debug += 1",
"def incrementIndent(self, levels = 1):\n return _coin.SoOutput_incrementIndent(self, levels)",
"def indentation():\n try:\n indent()\n yield\n finally:\n unindent()",
"def indent_level(self):\n return self.container['indent_level']",
"def incIndent():\n if _rootLogger != None:\n curLevel = _rootLogger._logIndentLevel\n _rootLogger.incIndent()\n return curLevel\n else:\n return 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
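
A minimal sketch of how the set_block_scalar_indent callback in the record above is driven. The SimpleNamespace context, the placeholder token string and the header pattern are assumptions standing in for the real lexer context, token type and rule regex; only the callback factory itself comes from the record:

    import re
    from types import SimpleNamespace

    callback = set_block_scalar_indent("Token.Punctuation.Indicator")   # placeholder token type
    context = SimpleNamespace(indent=2, block_scalar_indent=None, pos=0)

    # A block scalar header such as "|2" carries an explicit indentation increment.
    match = re.match(r"[|>]([0-9]*)", "|2")
    print(list(callback(None, match, context)))   # [(0, 'Token.Punctuation.Indicator', '|2')]
    print(context.block_scalar_indent)            # 4 == max(context.indent, 0) + 2
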
Process indentation spaces in a block scalar. | def parse_block_scalar_indent(token_class):
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"",
"def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))",
"def __calculate_for_container_blocks(\n parser_state: ParserState,\n grab_bag: ContainerGrabBag,\n ) -> None:\n grab_bag.current_container_blocks = [\n ind for ind in parser_state.token_stack if ind.is_list\n ]\n grab_bag.block_quote_data = BlockQuoteData(\n 0\n if grab_bag.initial_block_quote_count is None\n else grab_bag.initial_block_quote_count,\n parser_state.count_of_block_quotes_on_stack(),\n )\n\n ContainerBlockProcessor.__calculate_adjusted_whitespace(\n parser_state,\n grab_bag,\n )",
"def indentation():\n try:\n indent()\n yield\n finally:\n unindent()",
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def __calculate_adjusted_whitespace(\n parser_state: ParserState,\n grab_bag: ContainerGrabBag,\n ) -> None:\n\n grab_bag.adj_ws = grab_bag.extracted_whitespace\n assert grab_bag.adj_ws is not None\n\n last_block_stack_index = parser_state.find_last_list_block_on_stack()\n if last_block_stack_index <= 0:\n assert not grab_bag.current_container_blocks\n POGGER.debug(\"PLFCB>>No Started lists\")\n if grab_bag.adjusted_block_index is None:\n POGGER.debug(\"PLFCB>>No Started Block Quote\")\n else:\n POGGER.debug(\"PLFCB>>Started Block Quote\")\n assert grab_bag.extracted_whitespace is not None\n grab_bag.adj_ws = grab_bag.extracted_whitespace[\n grab_bag.adjusted_block_index :\n ]\n else:\n assert grab_bag.current_container_blocks\n POGGER.debug(\n \"PLFCB>>Started list-last stack>>$\",\n parser_state.token_stack,\n )\n POGGER.debug(\n \"PLFCB>>Started list-last stack>>$\",\n parser_state.token_stack[last_block_stack_index],\n )\n\n (\n token_index,\n found_block_quote_token,\n ) = ContainerBlockProcessor.__look_for_any_list_start(parser_state)\n\n assert grab_bag.adj_ws is not None\n ContainerBlockProcessor.__calculate_adjusted_whitespace_kludge(\n parser_state,\n token_index,\n found_block_quote_token,\n grab_bag,\n )\n assert grab_bag.adj_ws is not None",
"def _indent(self):\n if self._debug:\n self._debug += 1",
"def _compute_chunk_indentation(self, i1, i2, j1, j2):\n # We'll be going through all the opcodes in this equals chunk and\n # grouping with adjacent opcodes based on whether they have\n # indentation changes or not. This allows us to keep the lines with\n # indentation changes from being collapsed in the diff viewer.\n indentation_changes = {}\n prev_has_indent = False\n prev_start_i = i1\n prev_start_j = j1\n\n a = self.differ.a\n b = self.differ.b\n\n for i, j in zip(range(i1, i2), range(j1, j2)):\n old_line = a[i]\n new_line = b[j]\n new_indentation_changes = {}\n\n indent_info = self._compute_line_indentation(old_line, new_line)\n has_indent = indent_info is not None\n\n if has_indent:\n key = '%d-%d' % (i + 1, j + 1)\n new_indentation_changes[key] = indent_info\n\n if has_indent != prev_has_indent:\n if prev_start_i != i or prev_start_j != j:\n # Yield the previous group.\n yield prev_start_i, i, prev_start_j, j, indentation_changes\n\n # We have a new group. Set it up, starting with the current\n # calculated state.\n prev_start_i = i\n prev_start_j = j\n prev_has_indent = has_indent\n indentation_changes = new_indentation_changes\n elif has_indent:\n indentation_changes.update(new_indentation_changes)\n\n # Yield the last group, if we haven't already yielded it.\n if prev_start_i != i2 or prev_start_j != j2:\n yield prev_start_i, i2, prev_start_j, j2, indentation_changes",
"def get_block_indent(text):\n lines = text.split('\\n')\n cnt = []\n for i in lines:\n if i != '' and not i.isspace():\n cnt.append(get_indent(i))\n return min(cnt)",
"def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)",
"def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)",
"def test_multipleIndents(self):\n self.parser.push(\"js/\")\n\n def push():\n indent = self.parser._indent\n badLine = \"{indent}{indent}x: a\".format(indent=indent)\n self.parser.push(badLine)\n\n self.assertRaises(config.IndentationError, push)",
"def format_indentation(string):\n return string.replace(\" \", \" \")",
"def getIndentation(self, line):\n\t\n\t\tnonSpace = re.search('\\S', line)\n\t\n\t\tif nonSpace is None:\n\t\t\treturn 0\n\t\t\t\n\t\telse:\n\t\t\tif re.match('^\\t*\\S', line):\n\t\t\t\treturn nonSpace.start()\n\t\t\t\t\n\t\t\telif re.match('^\\ *\\S', line):\n\t\t\t\treturn nonSpace.start() / 4",
"def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3",
"def _check_valid_indentation(self, lineno: int, line: str, left_stripped: str) -> None:\n if linelen := len(line):\n indent = linelen - len(left_stripped)\n expected_ind = 0 if line.startswith(('.', '+', '-', '$')) else self.indent\n if indent != expected_ind:\n diag = self.diags.indentation\n loc = self.make_source_range(' ' * indent, line, lineno)\n mess = f'Invalid indentation ({indent}), all regular (non-empty, non-parameter, non-seealso) text must be indented to {self.indent} columns'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, loc, patch=Patch(loc, ' ' * expected_ind)\n )\n return",
"def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)",
"def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree",
"def test_block_comment_whitespace_signature(self):\n \n inp = '2_3_block_comment.txt'\n self.run_single_file_case(inp)",
"def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
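
A companion sketch for the parse_block_scalar_indent callback above: when the header gave no explicit increment, the first sufficiently indented content line fixes context.block_scalar_indent for the rest of the scalar (same SimpleNamespace stand-in assumption as before; the state names are invented):

    import re
    from types import SimpleNamespace

    callback = parse_block_scalar_indent("Token.Text")   # placeholder token type
    context = SimpleNamespace(indent=0, block_scalar_indent=None,
                              stack=["root", "block-scalar"], pos=0)

    match = re.match(r"[ ]*", "    folded text")   # four leading spaces
    print(list(callback(None, match, context)))    # [(0, 'Token.Text', '    ')]
    print(context.block_scalar_indent)             # 4 -- remembered for the following lines
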
Process indentation spaces in a plain scalar. | def parse_plain_scalar_indent(token_class):
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | [
"def get_spaces(self):\n pass",
"def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))",
"def remove_indent(self) -> None:\n w = abs(self.tab_width)\n if self.result:\n s = self.result[-1]\n if s.isspace():\n self.result.pop()\n s = s.replace('\\t', ' ' * w)\n if s.startswith('\\n'):\n s2 = s[1:]\n self.result.append('\\n' + s2[: -w])\n else:\n self.result.append(s[: -w])",
"def _indent(self, level: int) -> Text:\n\n return self.indent * level",
"def get_trim_whitespace(current_value, new_value):\n return get_convert_paragraphs(current_value, new_value)",
"def _indent(self):\n if self._debug:\n self._debug += 1",
"def test_Indent_zero():\n assert fmt.Indent(0, \"abc\\nd\") == \"abc\\nd\"",
"def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)",
"def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)",
"def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)",
"def indent(self):\r\n return _osgDB.Output_indent(self)",
"def dumped (text, level, indent=2):\n return indented (\"{\\n%s\\n}\" % indented (text, level+1, indent) or \"None\", level, indent) + \"\\n\"",
"def _consume_whitespace(self):\n try:\n while self.s[self.idx] in \" \\t\\n\\r\":\n self.idx += 1\n except IndexError:\n pass",
"def getIndentation(self, line):\n\t\n\t\tnonSpace = re.search('\\S', line)\n\t\n\t\tif nonSpace is None:\n\t\t\treturn 0\n\t\t\t\n\t\telse:\n\t\t\tif re.match('^\\t*\\S', line):\n\t\t\t\treturn nonSpace.start()\n\t\t\t\t\n\t\t\telif re.match('^\\ *\\S', line):\n\t\t\t\treturn nonSpace.start() / 4",
"def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0):\n if style.Get('USE_TABS'):\n if newlines_before > 0:\n indent_before = '\\t' * indent_level + _TabbedContinuationAlignPadding(\n spaces, style.Get('CONTINUATION_ALIGN_STYLE'),\n style.Get('INDENT_WIDTH'))\n else:\n indent_before = '\\t' * indent_level + ' ' * spaces\n else:\n indent_before = (' ' * indent_level * style.Get('INDENT_WIDTH') +\n ' ' * spaces)\n\n if self.is_comment:\n comment_lines = [s.lstrip() for s in self.value.splitlines()]\n self.value = ('\\n' + indent_before).join(comment_lines)\n\n # Update our own value since we are changing node value\n self.value = self.value\n\n if not self.whitespace_prefix:\n self.whitespace_prefix = ('\\n' * (self.newlines or newlines_before) +\n indent_before)\n else:\n self.whitespace_prefix += indent_before",
"def test_Indent_two():\n assert fmt.Indent(2, \"abc\\nd\") == \" abc\\n d\"",
"def word_spacing(computer, name, value):\r\n if value == 'normal':\r\n return 0\r\n else:\r\n return length(computer, name, value, pixels_only=True)",
"def leveled_indent(lvl: int = 0, spaces_per_indent: int = 3) -> str:\n return (\" \" * spaces_per_indent) * lvl",
"def whitespace(self, value: Whitespace) -> 'Tailwind':\n self.element.classes('whitespace-' + value)\n return self",
"def prologue(_indent):\n return \"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
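
For parse_plain_scalar_indent above, the interesting path is the early exit: indentation no deeper than the surrounding context pops two states off the lexer's state stack and ends the plain scalar. The stand-in context and the state names are, again, assumptions for illustration only:

    import re
    from types import SimpleNamespace

    callback = parse_plain_scalar_indent("Token.Text")   # placeholder token type
    context = SimpleNamespace(indent=4, stack=["root", "block-line", "plain-scalar"], pos=0)

    match = re.match(r"[ ]*", "  key:")            # only two spaces of indentation
    print(list(callback(None, match, context)))    # [] -- nothing emitted
    print(context.stack)                           # ['root'] -- two states popped
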
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position. | def scan(self, pattern):
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True | [
"def match(self, text, pos=0):\n error = ParseError(text)\n node = self.match_core(text, pos, defaultdict(dict), error)\n if node is None:\n raise error\n return node",
"def update_matches(self, begin, end):\n if self.entry != None:\n self.__get_matches(self.entry.get_text(), begin, end)",
"def find_changed(self, find_text):\n # Make the search string global. \n self.find_text = find_text\n \n if len(find_text) < 2:\n return\n \n self.match_total = 0\n self.match_position = 0\n \n # Move cursor to Start. - Test with KeepAnchor\n self.textedit.moveCursor(QtGui.QTextCursor.MoveOperation.Start, \n QtGui.QTextCursor.MoveMode.MoveAnchor) \n\n # Count the total number of matches.\n #position_list = [] # For testing the matching. \n while True:\n\n if self.perform_search(find_text, self.case_sensitive):\n self.match_total += 1\n #position_list.append(self.textedit.textCursor().position()) \n else:\n # Reached last match at bottom of the text\n break\n\n # Return to start / top of text.\n self.textedit.moveCursor(QtGui.QTextCursor.MoveOperation.Start, \n QtGui.QTextCursor.MoveMode.MoveAnchor) \n\n # Update the matches label. match_position is 0\n self.sb_label.setText(\"{} of {} matches\".format(self.match_position, \n self.match_total)) \n #print(\"Position list:\", position_list) # Position list: [160, 294]\n\n # Find first match, going forward from start / top of text. \n if self.perform_search(find_text, self.case_sensitive):\n self.match_position += 1 \n \n # Update the matches label. match_position should be 1\n self.sb_label.setText(\"{} of {} matches\".format(self.match_position, \n self.match_total))",
"def re_search(self, pattern): # noqa\n # Converting pattern to regex\n pattern = re.compile(pattern)\n if pattern.search(self.data):\n return True\n else:\n return False",
"def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)",
"def _cfa_find_next_match(self, p: Position) -> bool:\n # Called only from unit tests.\n table = []\n if self.search_headline:\n table.append(p.h)\n if self.search_body:\n table.append(p.b)\n for s in table:\n self.reverse = False\n pos, newpos = self.inner_search_helper(s, 0, len(s), self.find_text)\n if pos != -1:\n return True\n return False",
"def indexOf(self, data, pattern, caseSensitive, from_pos, to_pos):\n # type: (bytearray, bytearray, bool, int, int) -> int\n # Original signature had \"from\" and \"to\", from is a reserved keyword in python",
"def getMatch(self, text, pattern):\n\n return re.search(pattern,text,re.MULTILINE + re.DOTALL)",
"def is_match_pattern(self, pattern):\n if len(pattern.pattern) != len(self.pattern):\n return False\n else:\n return self.is_match(range(len(pattern.pattern)), pattern.pattern)",
"def searchLine(self, pattern, count=1, start_from=0, match=0):\n count = self.total_lines if count == 'max' else count\n if start_from > 0:\n self.goToLine(start_from)\n else:\n self.goToStart()\n isdone = False\n eof = False\n output = []\n match_pat = re.compile(pattern)\n lineno = self.line_no\n while not eof and not isdone:\n prev = self.file_obj.tell()\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n lineno += 1\n self.offset = self.file_obj.tell()\n if match:\n ismatched = match_pat.match(line)\n else:\n ismatched = match_pat.search(line)\n if ismatched:\n count -= 1\n output.append((line, lineno - 1, prev))\n if count == 0:\n isdone = True\n\n return output",
"def matchPos(self):\n\t\tif self.expressionStateNode: \n\t\t\tself.expressionStateNode.setPos(self.pos)",
"def get_pattern_position(pattern,in_text):\n if in_text.find(pattern) == -1 :\n return in_text.find(pattern)\n else:\n return in_text.find(pattern)+1",
"def patMatch(seq, pat, notDegPos=None):\n assert(len(seq)==len(pat))\n for x in range(0, len(pat)):\n patChar = pat[x]\n nuc = seq[x]\n\n assert(patChar in \"MKYRACTGNWSDVB\")\n assert(nuc in \"MKYRACTGNWSDX\")\n\n if notDegPos!=None and x==notDegPos and patChar!=nuc:\n #print x, seq, pat, notDegPos, patChar, nuc, \"<br>\"\n return False\n\n if nuc==\"X\":\n return False\n if patChar==\"N\":\n continue\n if patChar==\"D\" and nuc in [\"AGT\"]:\n continue\n if patChar==\"B\" and nuc in [\"CGT\"]:\n continue\n if patChar==\"V\" and nuc in [\"ACG\"]:\n continue\n if patChar==\"W\" and nuc in [\"A\", \"T\"]:\n continue\n if patChar==\"S\" and nuc in [\"G\", \"C\"]:\n continue\n if patChar==\"M\" and nuc in [\"A\", \"C\"]:\n continue\n if patChar==\"K\" and nuc in [\"T\", \"G\"]:\n continue\n if patChar==\"R\" and nuc in [\"A\", \"G\"]:\n continue\n if patChar==\"Y\" and nuc in [\"C\", \"T\"]:\n continue\n if patChar!=nuc:\n return False\n return True",
"def matches_seq(self, seq):\n # check matches to start of sequence\n for start_len in range(1, min(len(seq) + 1, self.motif_len)):\n try:\n start_pat, start_mod_pos = self._partial_pats[\n 'start'][start_len]\n except KeyError:\n continue\n if start_pat.match(seq[:start_len]):\n return True\n\n # check central sequence overlaps\n if len(seq) < self.motif_len:\n for short_pat, mod_pos in self._partial_pats['short'][len(seq)]:\n if short_pat.match(seq):\n return True\n else:\n if self.motif_pat.search(seq):\n return True\n\n # check end of seq matches\n for end_len in range(1, min(len(seq) + 1, self.motif_len)):\n try:\n end_pat, end_mod_pos = self._partial_pats['end'][end_len]\n except KeyError:\n continue\n if end_pat.match(seq[-end_len:]):\n return True\n\n return False",
"def search_cpp (self, pattern, body=None,\r\n headers=None, include_dirs=None, lang=\"c\"):\r\n\r\n self._check_compiler()\r\n (src, out) = self._preprocess(body, headers, include_dirs, lang)\r\n\r\n if type(pattern) is StringType:\r\n pattern = re.compile(pattern)\r\n\r\n file = open(out)\r\n match = 0\r\n while 1:\r\n line = file.readline()\r\n if line == '':\r\n break\r\n if pattern.search(line):\r\n match = 1\r\n break\r\n\r\n file.close()\r\n self._clean()\r\n return match",
"def search(self):\n self.search_text.tag_remove(\"match\", \"0.0\", tk.END)\n\n regex = self.regex_entry.get()\n search_text = self.search_text.get(\"0.0\", tk.END)\n\n matches = re.finditer(regex, search_text)\n for match in matches:\n index1 = self.search_text.index(\"0.0 + \" + str(match.span()[0]) + \" chars\")\n index2 = self.search_text.index(\"0.0 + \" + str(match.span()[1]) + \" chars\")\n self.search_text.tag_add(\"match\", index1, index2)",
"def find_next_match(self, p: Position) -> tuple[Position, int, int]:\n if not self.search_headline and not self.search_body: # pragma: no cover\n return None, None, None\n if not self.find_text: # pragma: no cover\n return None, None, None\n attempts = 0\n u = self.c.undoer\n if self.pattern_match:\n ok = self.compile_pattern()\n if not ok:\n return None, None, None\n while p:\n pos, newpos = self._fnm_search(p)\n if pos is not None:\n # Success.\n if self.mark_finds and not p.isMarked(): # pragma: no cover\n undoType = 'Mark Finds'\n bunch = u.beforeMark(p, undoType)\n p.setMarked()\n p.setDirty()\n u.afterMark(p, undoType, bunch)\n return p, pos, newpos\n # Searching the pane failed: switch to another pane or node.\n if self._fnm_should_stay_in_node(p):\n # Switching panes is possible. Do so.\n self.in_headline = not self.in_headline\n s = p.h if self.in_headline else p.b\n ins = len(s) if self.reverse else 0\n self.work_s = s\n self.work_sel = (ins, ins, ins)\n else:\n # Switch to the next/prev node, if possible.\n attempts += 1\n p = self._fnm_next_after_fail(p)\n if p: # Found another node: select the proper pane.\n self.in_headline = self._fnm_first_search_pane()\n s = p.h if self.in_headline else p.b\n ins = len(s) if self.reverse else 0\n self.work_s = s\n self.work_sel = (ins, ins, ins)\n return None, None, None",
"def matchPositionConn():\n with ar_qui.ar_undoChunkOpen('Match Position'):\n ar_gen.ar_matchPosition()",
"def GetMatch(self, start):\n raise NotImplementedError",
"def findPat(seq, pat):\n seq = seq.upper()\n pat = pat.upper()\n for i in range(0, len(seq)-len(pat)+1):\n #print \"new pos\", i, seq[i:i+len(pat)],\"<br>\"\n found = True\n for x in range(0, len(pat)):\n #print \"new step\", x, \"<br>\"\n if pat[x]==\"N\":\n #print \"N\",\"<br>\"\n continue\n seqPos = i+x\n if seqPos == len(seq):\n found = False\n break\n if not matchNuc(pat[x], seq[seqPos]):\n #if not patMatch(seq[seqPos], pat[x]):\n #print i, x, pat[x], seq[seqPos], \"no match<br>\"\n found = False\n break\n #print \"match\", i, x, found, \"<br>\"\n if found:\n #print \"yielding\", i, \"<br>\"\n yield i"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
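
The scan method above expects to live on a Scanner-like object that holds the cursor state. A quick sketch that calls the unbound function against a SimpleNamespace standing in for such an instance (eos is faked as a plain attribute here; on the real class it would be derived from pos and the data length):

    import re
    from types import SimpleNamespace

    s = SimpleNamespace(data="answer = 42", pos=0, flags=0, _re_cache={},
                        match=None, last=None, start_pos=0, eos=False)

    print(scan(s, r"\w+"), repr(s.match), s.pos)          # True 'answer' 6
    print(scan(s, r"\s*=\s*\d+"), repr(s.match), s.pos)   # True ' = 42' 11
    print(repr(s.last))                                   # 'answer' -- the previous match is kept
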
Return True if ``ttype`` is a subtype of ``other``. Exists for backwards compatibility; use ``ttype in other`` now. | def is_token_subtype(ttype, other):
return ttype in other | [
"def is_subtype_of(self, other):\n # pylint: disable=protected-access\n if type(self) is not type(\n other) or self._callable_params != other._callable_params:\n return False\n\n try:\n tf.nest.assert_same_structure(self._comparable[:-1],\n other._comparable[:-1])\n except (TypeError, ValueError):\n return False\n\n self_elements = tf.nest.flatten(self._comparable[:-1])\n other_elements = tf.nest.flatten(other._comparable[:-1])\n\n def is_subtype_or_equal(a, b):\n try:\n return a.is_subtype_of(b)\n except AttributeError:\n return a == b\n\n return all(\n is_subtype_or_equal(self_element, other_element)\n for (self_element, other_element) in zip(self_elements, other_elements))",
"def is_equivalent_to(self, other: 'Type') -> bool:\n return self.is_assignable_from(other) and other.is_assignable_from(self)",
"def comparable_with(self, other: 'FieldType') -> bool:\n # https://cloud.google.com/spanner/docs/reference/standard-sql/data-types#comparable_data_types\n return type(self) == type(other)",
"def __eq__(self, other: Any) -> bool:\n return isinstance(other, self.__class__)",
"def check_equivalent_to(self, other: 'Type') -> None:\n if not self.is_equivalent_to(other):\n raise TypesNotEquivalentError(self, other)",
"def __le__(self, other):\n return type(self) == type(other) or type(other) == TAny",
"def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:\n left = mypy.types.get_proper_type(left)\n right = mypy.types.get_proper_type(right)\n if (\n isinstance(left, mypy.types.LiteralType)\n and isinstance(left.value, int)\n and left.value in (0, 1)\n and isinstance(right, mypy.types.Instance)\n and right.type.fullname == \"builtins.bool\"\n ):\n # Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.\n return True\n with mypy.state.strict_optional_set(True):\n return mypy.subtypes.is_subtype(left, right)",
"def isSubtype(firstType: str, secondType: str) -> bool:\n if secondType == firstType:\n return True\n\n if firstType == 'Element':\n return secondType in ('Element', 'Image', 'Feature',\n 'Collection', 'ImageCollection', 'FeatureCollection')\n elif firstType in ('FeatureCollection', 'Collection'):\n return secondType in ('Collection', 'ImageCollection', 'FeatureCollection')\n elif firstType == object:\n return True\n else:\n return False",
"def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoField_isOfType(self, type)",
"def __eq__(self, other):\n return (isvariadic(other) and\n set(self.variadic_type) == set(other.variadic_type))",
"def check_identical_to(self, other: 'Type') -> None:\n if not self.is_identical_to(other):\n raise TypesNotIdenticalError(self, other)",
"def __eq__(self, other):\r\n if not isinstance(self, type(other)):\r\n return False\r\n\r\n for ftr_type in FeatureType:\r\n if not deep_eq(getattr(self, ftr_type.value), getattr(other, ftr_type.value)):\r\n return False\r\n\r\n return self.bbox == other.bbox and self.timestamp == other.timestamp",
"def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoBase_isOfType(self, type)",
"def _issubclass(a, b):\n try:\n return issubclass(a, b)\n except:\n pass\n\n return False",
"def isinstance(self, cls):\n return self.cls.issubclass(cls)",
"def __eq__(self, other):\n return (isinstance(other, self.__class__)\\\n and (self._ontosers == other._ontosers) )",
"def subtype(self, type1_uri, type2_uri):\n # log.info(\"FieldComparison.subtype(%s, %s)\"%(type1_uri, type2_uri))\n if not type2_uri or (type1_uri == type2_uri):\n return True\n if not type1_uri:\n return False\n type1_info = self.get_uri_type_info(type1_uri)\n type1_supertype_uris = (type1_info and type1_info.get_all_type_uris()) or []\n # log.info(\"FieldComparison.subtype: type1_uris (supertypes) %r\"%(type1_uris,))\n return type2_uri in type1_supertype_uris",
"def type_equals(a: drgn.Type, b: drgn.Type) -> bool:\n return type_canonical_name(a) == type_canonical_name(b)",
"def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.ScXMLObject_isOfType(self, type)",
"def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoError_isOfType(self, type)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
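
The one-liner above simply defers to the token type's ``in`` operator. With pygments token types (assuming the function is in scope next to pygments.token) the equivalence looks like this:

    from pygments.token import Name, Keyword

    print(is_token_subtype(Name.Function, Name))      # True  -- Name.Function is a subtype of Name
    print(is_token_subtype(Name.Function, Keyword))   # False
    print(Name.Function in Name)                      # the preferred modern spelling
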
Return a static text analysis function that returns float values. | def make_analysator(f):
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse) | [
"def eval_texts_calc_wrapper(line):\n # These signals are needed to run the functions as written in eval texts.\n signals = {\n 'signal0': np.array([1, 1, 1, 0, 0]),\n 'signal1': np.array([1, 1, 1, 0, 0]),\n 'time': np.array([0, 1, 2, 3, 4])}\n\n # These variable names are needed to run the functions as written in\n # eval texts.\n extra_globals = {\n 'value': 1,\n 'shift_value': 1,\n 'window_size': 3,\n 'min_length': 1,\n 'max_length': 2,\n 'from_value': 1,\n 'to_value': 0,\n 'reset_array': np.array([False, True, False, True, False])}\n\n return calc_wrapper(line, signals, extra_globals)",
"def evaluate(self, text):\n normalized = normalize_text(text)\n\n tokens = text_to_ngram(normalized, self.n, self.generate_char_model)\n\n likelihood = 0.0\n\n for ngram in tokens:\n prob = self.smooth(self.delimiter.join(ngram))\n likelihood += math.log(prob,self.log_base)\n return -math.inf if likelihood == 0.0 else likelihood",
"def float_func(self, fl, meta):\n fl = fl[0]\n constAddr = self.compiler.addConst(fl.value, fl.type)\n self.compiler.pushOperando(constAddr)\n self.compiler.pushTipo(fl.type)\n return fl",
"def make_function(text):\n\n try:\n exec 'f = lambda x: ' + text\n 1+f(2.0) ## test to see if there are any errors in the definition\n except ZeroDivisionError: ## ignore zero division errors\n pass\n except:\n raise FunctionError()\n return f",
"def calculation(self):\n\n screen_value = str(self.screen.text()).split(' ')\n screen_text = str(self.screen.text())\n #x = screen_value.split(' ')\n x = (eval(str(screen_text)))\n # val1 = float(screen_value[0])\n # operator = screen_value[1]\n # val2 = float(screen_value[2])\n # result = self.maths(val1, val2, operator)\n self.screen.setText(str(x))",
"def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text",
"def getFloatFromTC(objTextControl, default = None):\n try:\n return float(objTextControl.GetValue())\n except:\n return default",
"def _search_float(self, pattern, arg=None):\n string = self._search(pattern)\n if string:\n try:\n return float(string)\n except: pass\n raise WeatherParseError(text=self.text, arg=arg)",
"def test__get_value_types_float(self):\n value, m_type = formatters._get_value_types(1.1)\n assert value == 1.1\n assert m_type == 'float'",
"def parse_function(self, text):\n m = re.match('(triangulo|trapecio): (.*)', text)\n if not m:\n raise Exception('Error parseando la funcion: %s' % text)\n\n func_type = m.group(1)\n points_raw = m.group(2)\n\n points = []\n\n pattern = re.compile(r'\\(([\\d\\.]+),\\s*([\\d\\.]+)\\)')\n m = pattern.search(points_raw)\n\n while m:\n points.append(Point(float(m.group(1)), float(m.group(2))))\n m = pattern.search(points_raw, m.end())\n\n if func_type == 'triangulo':\n cls = TriangularFunction\n else:\n cls = TrapezoidalFunction\n return cls(*points)",
"def parse_text(text):\n\n def tofloats(lst):\n return (float(t) for t in lst)\n\n try:\n text = text.replace(\",\", \"\") # 25,350.10 MB\n if \"--\" in text:\n return None\n if \"/\" in text: # \"6.19/0.88\" total/avg\n return tuple(tofloats(text.split(\"/\")))\n if \":\" in text: # 11:14 hr:mn\n hour, mins = tofloats(text.split(\":\"))\n return timedelta(hours=hour, minutes=mins)\n return float(text)\n except ValueError:\n _LOGGER.error(\"Error parsing traffic meter stats: %s\", text)\n return None",
"def parseDouble(text):\n return float(text or 0)",
"def cal_f1(self,base,comp):\n\n if type(base)==type(\"string\"):\n base=word_tokenize(base)\n base = [w.lower() for w in base]\n else:\n base = [w.lower() for w in base]\n if type(comp)==type(\"string\"):\n comp=word_tokenize(comp)\n comp = [w.lower() for w in comp]\n else:\n comp = [w.lower() for w in comp]\n precision=0\n for item in comp:\n if item in base:\n precision=precision+1\n precision=precision/len(comp)\n\n recall=0\n for item in base:\n if item in comp:\n recall=recall+1\n recall=recall/len(base)\n\n try:\n F1=2 * (precision * recall) / (precision + recall)\n except ZeroDivisionError:\n F1=0\n\n return F1,precision,recall",
"def floats(draw):\n number = draw(st.floats(allow_nan=False, allow_infinity=False))\n fmt = draw(\n st.sampled_from(\n [\"{:.20f}\", \"{:.20e}\", \"{:.20E}\", \"{:+.20f}\", \"{:+.20e}\", \"{:+.20E}\"]\n )\n )\n return Strategy(fmt.format(number), number)",
"def Evaluate(self, , *float):\n ...",
"def test__get_value_types_float_str(self):\n value, m_type = formatters._get_value_types('1.1')\n assert value == 1.1\n assert m_type == 'float'",
"def _parse_float(self, float_element):\n if float_element.text is not None:\n return float(float_element.text)\n else:\n return float()",
"def check_text_recognition(text_rec: str, text_check: str) -> float:\n if len(text_rec) == len(text_check):\n match = re.search(text_check, text_rec)\n if match:\n return 1.0\n else:\n return 0.0\n else:\n match = re.search(r'^[a-zA-Zа-яА-Я]\\d*$', text_check)\n if match:\n text_match = re.search(r'^[a-zA-Zа-яА-Я]\\d*$', text_rec)\n if text_match and text_match[0] == text_check:\n return float(len(text_check) / len(text_rec))\n else:\n return 0.0\n else:\n text_match = re.search(r'\\d*$', text_rec)\n if text_match and text_match[0] == text_check:\n return float(len(text_check) / len(text_rec))\n else:\n return 0.0",
"def transform_floating_literal(self, node):\n try:\n value = next(node.get_tokens()).spelling\n except (StopIteration, ValueError):\n # No tokens\n value = node.literal\n return float(value)",
"def testFloat(self):\n idx = self.d.GetHeaderNames().index('Float')\n \n query = 'Float == 0.10'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('0.1', result[0][idx])\n \n query = 'Float == 1.0'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('1.0', result[0][idx])\n \n query = 'Float < 0'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('-1.5', result[0][idx])\n \n query = 'Float >= 4.3'\n result, ind = self.d.RunQuery(query)\n floats = []\n for i in range(len(result)):\n floats.append(result[i][idx])\n self.assertEqual(['4.3','7.1'], floats)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
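
A usage sketch for make_analysator above: it wraps a scoring heuristic so the result is always a float clamped to 0.0–1.0 and exceptions are swallowed. The looks_like_ini heuristic is invented for illustration; the wrapped callable is normally assigned as a lexer class attribute, which is why a staticmethod comes back (unwrapped here via __func__ so it can be called directly):

    def looks_like_ini(text):
        # Crude heuristic: reward texts whose first line looks like a [section] header.
        first = text.splitlines()[0]
        return 0.8 if first.startswith('[') and first.rstrip().endswith(']') else 0

    analyse_text = make_analysator(looks_like_ini)

    print(analyse_text.__func__('[server]\nport = 80'))   # 0.8
    print(analyse_text.__func__(''))                       # 0.0 -- the IndexError is swallowed
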
Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/pythonruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/startsomethingwith python', ... r'python(2\.\d)?') True | def shebang_matches(text, regex):
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False | [
"def rewrite_shebang(data, target, prefix):\n shebang_match = re.match(SHEBANG_REGEX, data, re.MULTILINE)\n prefix_b = prefix.encode('utf-8')\n\n if shebang_match:\n if data.count(prefix_b) > 1:\n # More than one occurrence of prefix, can't fully cleanup.\n return data, False\n\n shebang, executable, options = shebang_match.groups()\n\n if executable.startswith(prefix_b):\n # shebang points inside environment, rewrite\n executable_name = executable.decode(\"utf-8\").split(\"/\")[-1]\n new_shebang = \"#!/usr/bin/env {}{}\".format(\n executable_name, options.decode(\"utf-8\")\n )\n data = data.replace(shebang, new_shebang.encode(\"utf-8\"))\n\n return data, True\n\n return data, False",
"def _ExtractInterpFromShebang(data):\n firstline = data.splitlines()[:1]\n if not firstline:\n return None\n\n # The format here can be tricky.\n shebang = firstline[0].strip()\n m = re.match(r\"^#!\\s*([^\\s]+)(?:\\s+([^\\s]+))?\", shebang)\n if not m:\n return None\n\n # If the using `env`, find the target program.\n interp = m.group(1)\n if os.path.basename(interp) == \"env\":\n interp = m.group(2)\n\n return interp",
"def test_shebang_test(self):\n with open(\"tests/test_models/test_engine/test_file_storage.py\\\n\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')",
"def path_is_partial_match(regex, path):\n for i in range(1, len(regex) + 1):\n partial_regex = regex[:i].rstrip('$') + '$'\n try:\n if re.match(partial_regex, path):\n return True\n except re.error:\n pass\n return False",
"def guess_language_by_shebang(line: str) -> int:\n pattern = re.compile(r\"#!/(?:\\S+/)+(\\S+)\")\n matched = pattern.match(line)\n if not matched: return 0\n\n language = matched.group(1).lower()\n for supp_language in SUPPORTED_LANGUAGES.keys():\n if language == supp_language:\n return list(SUPPORTED_LANGUAGES.keys()).index(language)\n return 0",
"def resolve_shebang(path, ignoreshell=False):\n try:\n f = file(path)\n try:\n # At most 80 characters in the first line\n header = f.read(80).splitlines()[0]\n finally:\n f.close()\n \n m = _RE_SHEBANG.search(header)\n if not m:\n return []\n cmd, arg = m.group(1,2)\n if os.path.isfile(cmd):\n # Keep this one, the hg script for instance contains a weird windows\n # shebang referencing the current python install.\n cmdfile = os.path.basename(cmd).lower()\n if cmdfile == 'python.exe':\n cmd = 'python'\n pass\n elif cmd not in _SHEBANG_CMDS:\n raise CommandNotFound('Unknown interpreter \"%s\" referenced in '\\\n 'shebang' % header)\n cmd = _SHEBANG_CMDS.get(cmd)\n if cmd is None or (ignoreshell and cmd == 'pysh'):\n return []\n if arg is None:\n return [cmd, win32_to_unix_path(path)]\n return [cmd, arg, win32_to_unix_path(path)]\n except IOError, e:\n if e.errno!=errno.ENOENT and \\\n (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM\n raise\n return []",
"def shebang(self):\n try:\n first_line = self.stripped_lines()[0]\n if first_line.startswith(\"#!\"):\n return first_line[2:].strip()\n except IndexError:\n pass\n return \"\"",
"def shebang(path):\n return get(path)",
"def test_shebang(self):\n with open(\"models/engine/file_storage.py\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')",
"def command_line_regex(pattern):\n def _pred(process):\n cmdline = process.properties.get('CmdLine', None)\n return cmdline is not None and re.match(pattern, cmdline)\n return _pred",
"def expr_match( expr, text):\n\tif expr[0] == '/':\n\t\tif re.match( expr[1:], text):\n\t\t\treturn True\n\telse:\n\t\tif expr[0:2] == '\\/':\n\t\t\treturn text == expr[1:]\n\t\telse:\n\t\t\treturn text == expr\n\treturn False",
"def _is_python_file(filename):\n if filename.endswith('.py'):\n return True\n else:\n with open(filename, 'r') as file_handle:\n first_line = file_handle.readline()\n return 'python' in first_line and '#!' in first_line",
"def _is_end_comment(line):\n return bool((line.endswith(\"'''\") or line.endswith('\"\"\"')))",
"def test_ends_at(line):\n return TEST_END_RE.match(line)",
"def contains_fragment(path):\r\n return path.count('#') != 0",
"def regex_is_found(response, regex_str):\n if not regex_str:\n return None\n return bool(re.search(regex_str, response.text))",
"def is_regex(self):\n return True",
"def is_regex(self):\n return False",
"def contains_any_py_chars(input_str):\n # return any(c in PYTHON for c in list(input_str.lower()))\n return re.search(r'[python]', input_str.lower()) # good example of search()",
"def detect(source):\r\n source = source.replace(' ', '')\r\n if re.search(r'eval\\(function\\(h,u,n,t,e,r', source):\r\n return True\r\n else:\r\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
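
A few direct calls to shebang_matches above (assuming split_path_re from pygments.util is in scope, as it is in the original module):

    print(shebang_matches('#!/usr/bin/env python3\nprint("hi")', r'python(3(\.\d+)?)?'))   # True
    print(shebang_matches('#!/usr/bin/python2.7 -u\n', r'python(2\.\d)?'))                 # True -- the "-u" option is skipped
    print(shebang_matches('#!/bin/sh\necho hi', r'python(2\.\d)?'))                        # False
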
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. | def doctype_matches(text, regex):
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex).match(doctype.strip()) is not None | [
"def parse_doctype(self):\n if self.seen_doctype == 1:\n xmlproc.XMLProcessor.parse_doctype(self)\n else:\n arizonareport.send_out(4, str(\"Ignoring DOCTYPE (%s,%d)\" % (self.get_current_sysid(), self.get_line())) )\n self.scan_to(\"]>\")\n self.seen_doctype = 1",
"def doctype(self, irc, msg, args, url):\n size = conf.supybot.protocols.http.peekSize()\n s = utils.web.getUrl(url, size=size)\n m = self._doctypeRe.search(s)\n if m:\n s = utils.str.normalizeWhitespace(m.group(0))\n irc.reply(s)\n else:\n irc.reply('That URL has no specified doctype.')",
"def _document_is_type_1(text):\n type_1 = re.compile('Document ')\n for line in text:\n if type_1.match(line):\n return True\n return False",
"def return_doctype(self, document_id):\n if not isinstance(document_id, str):\n raise Exception(f\"document_id not a string\")\n for doctype in self.doctypes.values():\n if doctype.is_valid(document_id):\n return doctype\n raise Exception(\"No associated doctype\")",
"def _end_of_type_1_document(text):\n end_of_document = re.compile('Document ')\n if end_of_document.match(text):\n return True\n else:\n return False",
"def is_mimetype(v):\n return rx_mimetype.match(v) is not None",
"def is_stylesheet(self):\n\n if self.typ == 'stylesheet':\n return True\n elif self.typ == 'generic':\n if self.validfilename:\n extn = ((os.path.splitext(self.validfilename))[1]).lower()\n if extn in self.stylesheet_extns:\n return True\n \n return False",
"def validate_mime_type(mimetype):\n valid_prefixes = [\n 'application', 'audio', 'font', 'example', 'image',\n 'message', 'model', 'mulitpart', 'text', 'video'\n ]\n validated = False\n for prefix in valid_prefixes:\n if prefix + '/' == mimetype[:len(prefix)+1]:\n validated = True\n return validated",
"def _test_re(string):\n try:\n x = re.compile(string)\n return True\n except re.error:\n return False",
"def is_assessor_good_type(assessor_obj, types_list, full_regex=False):\n atype = assessor_obj.attrs.get('xsiType')\n proctype = assessor_obj.attrs.get('%s/proctype' % atype)\n for exp in types_list:\n regex = extract_exp(exp, full_regex)\n if regex.match(proctype):\n return True\n return False",
"def is_valid_pdf(fname):\n try:\n pdfrw.PdfReader(fname)\n except pdfrw.PdfParseError:\n return False\n except Exception:\n return True\n return True",
"def is_re(v):\n try:\n re.compile(v)\n return True\n except Exception:\n return False",
"def is_regex(self):\n return True",
"def match(string):\n # Avoid circular dependencies by importing here.\n # pylint: disable=import-outside-toplevel\n from fparser.two.Fortran2008 import Component_Attr_Spec_List\n\n return Type_Declaration_StmtBase.match(\n Declaration_Type_Spec, Component_Attr_Spec_List, Component_Decl_List, string\n )",
"def check_regex(self,regexp) :\n return re.compile(regexp).match(self.name)",
"def isRegularDirective(self):\n return self._isRegularDirective",
"def is_valid_xml(medline_xml, parser=None, tree=None):\n if parser is None:\n parser = etree.XMLParser(load_dtd=True, no_network=False)\n if tree is None:\n tree = etree.parse(medline_xml, parser)\n dtd = tree.docinfo.externalDTD\n return dtd.validate(tree)",
"def accepts_html(accept_header):\r\n return (accept_header is not None\r\n and _accepts_html_re.search(accept_header) is not None)",
"def is_regex(self):\n return False",
"def validate(self):\n\n for line in self.htmlstring.split(\"\\n\"):\n if re.search(r\"<(\\\"[^\\\"]*\\\"|'[^']*'|[^'\\\">])*>\",line):\n print(\"Valid html string!\")\n else:\n print(\"Invalid html string - {}\".format(line))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
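
Assuming doctype_lookup_re from pygments.util is in scope (capturing the doctype name in group 2, as the code above expects), doctype_matches can be exercised like this:

    print(doctype_matches('<!DOCTYPE html>\n<html></html>', 'html'))   # True
    print(doctype_matches('<html>no doctype here</html>', 'html'))     # False -- the lookup regex finds nothing
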
Parse block attributes. >>> t = Textile() >>> t.pba(r'\3') '' >>> t.pba(r'\\3', element='td') ' colspan="3"' >>> t.pba(r'/4', element='td') ' rowspan="4"' >>> t.pba(r'\\3/4', element='td') ' colspan="3" rowspan="4"' >>> t.pba('^', element='td') | def pba(self, block_attributes, element=None):
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
block_id = ''
if not block_attributes:
return ''
matched = block_attributes
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vertical_align_re, matched)
if m:
style.append("vertical-align:%s;" % self.vAlign[m.group(1)])
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.horizontal_align_re, matched)
if m:
style.append("text-align:%s;" % self.hAlign[m.group(1)])
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
block_id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang:
return ' lang="%s"' % lang
else:
return ''
result = []
if style:
result.append(' style="%s"' % "".join(style))
if aclass:
result.append(' class="%s"' % aclass)
if lang:
result.append(' lang="%s"' % lang)
if block_id:
result.append(' id="%s"' % block_id)
if colspan:
result.append(' colspan="%s"' % colspan)
if rowspan:
result.append(' rowspan="%s"' % rowspan)
return ''.join(result) | [
"def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encoding) == 'b':\n text = b64decode(raw_text)\n elif str.lower(encoding) == 'q':\n text = quopri.decodestring(raw_text)\n else:\n raise ValueError(f\"Unknown encoding '{encoding}'\") from None\n exit(1)\n\n decoded = text.decode(charset)\n return decoded",
"def test_pi_with_non_attribute_data(self):\n pi_data = u\"\"\" \\t keyword att1=\"value1\" \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"keyword\": None, u\"att1\": u\"value1\"})",
"def get_attributes(html):\n\n for i, c in enumerate(html):\n if c == '>':\n if USE_BUFFER:\n html = buffer(html, 0, i)\n else:\n html = html[:i]\n break\n return dict((name.lower().strip(), value.strip('\\'\" ')) for (name, value) in attributes_regex.findall(html))",
"def get_attribute(self):\n data = self.data\n # Step 1 (skip chars)\n c = data.skip(skip1)\n assert c is None or len(c) == 1\n # Step 2\n if c in (b\">\", None):\n return None\n # Step 3\n attr_name = []\n attr_value = []\n # Step 4 attribute name\n while True:\n if c == b\"=\" and attr_name:\n break\n elif c in space_chars_bytes:\n # Step 6!\n c = data.skip()\n break\n elif c in (b\"/\", b\">\"):\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_name.append(c)\n # Step 5\n c = next(data)\n # Step 7\n if c != b\"=\":\n data.previous()\n return b\"\".join(attr_name), b\"\"\n # Step 8\n next(data)\n # Step 9\n c = data.skip()\n # Step 10\n if c in (b\"'\", b'\"'):\n # 10.1\n quote_char = c\n while True:\n # 10.2\n c = next(data)\n # 10.3\n if c == quote_char:\n next(data)\n return b\"\".join(attr_name), b\"\".join(attr_value)\n # 10.4\n else:\n attr_value.append(c)\n elif c == b\">\":\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_value.append(c)\n # Step 11\n while True:\n c = next(data)\n if c in spaces_angle_brackets:\n return b\"\".join(attr_name), b\"\".join(attr_value)\n elif c is None:\n return None\n else:\n attr_value.append(c)",
"def parse_attrs(buf):\n attrs = []\n while buf:\n t = ord(buf[0])\n l = ord(buf[1])\n if l < 2:\n break\n d, buf = buf[2:l], buf[l:]\n attrs.append((t, d))\n return attrs",
"def handleBlock(block):\n mlines = filter(lambda line : line.startswith('-'), block)\n plines = filter(lambda line : line.startswith('+'), block)\n mcount = len(mlines)\n pcount = len(plines)\n if mcount > pcount:\n plines.extend([''] * (mcount - pcount))\n elif pcount > mcount:\n mlines.extend([''] * (pcount - mcount))\n count = max(mcount, pcount)\n return [(mlines[i],plines[i]) for i in range(count)]",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def findAttributes(self, tagName, tagLine):\n\t\n\t\tattributes = {}\n\t\t\n\t\t# Try to find links\n\t\tlinkMatch = re.search('till\\s(BILD|SIDA)?\\s?\\'([a-zåäö\\.\\/\\-\\_]+)\\'', tagLine)\n\t\n\t\t# If link is found\n\t\tif linkMatch:\n\t\t\t\n\t\t\t# Modifier can be BILD or SIDA\n\t\t\tmodifier = linkMatch.group(1)\n\t\t\thref = linkMatch.group(2)\n\t\t\n\t\t\t# If image\n\t\t\tif modifier == 'BILD':\n\t\t\t\thref = 'bilder/'+href\n\t\t\t\t\n\t\t\telif modifier == 'SIDA':\n\t\t\t\thref = 'sidor/'+href\n\t\t\t\t\n\t\t\t\t# If .html is missing\n\t\t\t\tif href[-5:] != '.html':\n\t\t\t\t\thref += '.html'\n\t\t\t\t\t\n\t\t\t# Find www. links\n\t\t\telif href[:4] == 'www':\n\t\t\t\thref = 'http://'+href\n\t\t\t\t\n\t\t\t\n\t\t\t# Remove href part from tagName\n\t\t\ttagLine = tagLine[:linkMatch.start()] + tagLine[linkMatch.end():]\n\t\t\t\n\t\t\t# Set link attribute\n\t\t\tattributes['href'] = href\n\t\t\n\t\t# If image, set src\n\t\tif tagName == 'img':\n\t\t\tsrcMatch = re.search('\\'([a-zåäö\\.\\/\\-\\_]+)\\'', tagLine)\n\t\t\n\t\t\tif srcMatch:\n\t\t\t\tsrc = 'bilder/'+srcMatch.group(1)\n\t\t\t\n\t\t\t\t# Remove href part from tagName\n\t\t\t\ttagLine = tagLine[:srcMatch.start()] + tagLine[srcMatch.end():]\n\t\t\t\n\t\t\tattributes['src'] = src\n\t\t\n\t\t# Find classes\n\t\tclassMatch = re.findall('\\.([a-zåäö\\-]+)', tagLine)\n\t\n\t\tif classMatch:\n\t\n\t\t\tattributes['class'] = ' '.join(classMatch)\n\t\n\t\treturn attributes",
"def _parse_bead(line):\n\n bead_index = int(line.split(\":\")[0].strip())\n bead_name = line.split(\":\")[1].strip()\n mapping_indices = eval(line.split(\":\")[2])\n bead_mapping = BeadMapping(bead_name, mapping_indices)\n return (bead_index, bead_mapping)",
"def _process_attributes(self, attributes_element):\n for element in list(attributes_element):\n if element.tag != \"attribute\":\n raise AglyphError(\n \"unexpected element: attributes/%s\" % element.tag)\n name = element.get(\"name\")\n if not name:\n raise AglyphError(\n \"attribute/@name is required and cannot be empty\")\n value = self._unserialize_element_value(element)\n yield (name, value)",
"def glyphparse(self, addr):\n s = struct.Struct('<bbbbL')\n width, height, bytesperline, somea, pixptr = s.unpack_from(self.mem, addr - self.addr)\n # somea seems to be padding; always 0 in v2.32\n\n # sys.stdout.write('glyphparse: address 0x%x wid=%d height=%d bpl=%d somea=%d pixptr=0x%x\\n' % (addr, width, height, bytesperline, somea, pixptr) )\n if height <= 8:\n height *= 2 # CN characters are right height, latin are reported 1/2 height\n\n img = {'address': addr, 'width': width, 'height': height, 'palette': None, 'pixels': []}\n\n for y in range(height):\n linebits = self.readbits(pixptr + y * bytesperline, width)\n line = [int(color) for color in linebits]\n img['pixels'].append(line)\n\n img['checksum'] = self.gfxchecksum(img)\n return img",
"def parse_PAUP_log(branch_lengths):\n BL_table = get_BL_table(branch_lengths)\n BL_dict = {}\n for line in BL_table:\n info = find_fields(line)\n parent = info[\"parent\"]\n bl = float(info[\"bl\"])\n taxa = parse_taxa(info[\"taxa\"])\n\n BL_dict[taxa] = (parent, bl)\n\n return BL_dict",
"def extract_dynamic_tag_attributes(line, source, syntax, inside_parentheses=False):\r\n if not line.startswith(DYNAMIC_ATTRIBUTES_PREFIX):\r\n return None\r\n line = line[len(DYNAMIC_ATTRIBUTES_PREFIX):]\r\n\r\n terminators = {\r\n WHITESPACE,\r\n NEWLINE,\r\n LITERAL_CONTENT_PREFIX,\r\n LITERAL_CONTENT_SPACE_PREFIX,\r\n # we want to terminate extract_identifier() by DYNAMIC_ATTRIBUTES_PREFIX,\r\n # but it contains two characters, whereas the function checks only one character.\r\n # Therefore, we use a single asterisk terminator here instead of DYNAMIC_ATTRIBUTES_PREFIX.\r\n '*',\r\n INLINE_TAG_SEPARATOR,\r\n LINE_BREAK\r\n }\r\n if inside_parentheses:\r\n terminators.add(CLOSE_BRACE)\r\n\r\n result = extract_identifier(line, source, '', terminators)\r\n if result is None:\r\n return None\r\n\r\n expr, tail, source = result\r\n attributes = u(\r\n '\\n%for __plim_key__, __plim_value__ in {expr}.items():\\n'\r\n '{var_start}__plim_key__{var_end}=\"{var_start}__plim_value__{var_end}\"\\n'\r\n '%endfor\\n'\r\n ).format(\r\n expr=expr,\r\n var_start=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,\r\n var_end=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE\r\n )\r\n return attributes, tail, source",
"def buildBlock(self, b):\n \"\"\"\n s = self.style\n colClass = self.getColClass(s.colWidth)\n b.block(self)\n b.div(class_=colClass, marginright=s.columnMarginRight, width=s.colWidth,\n marginleft=s.columnMarginLeft, margintop=s.columnMarginTop,\n paddingleft=s.columnPaddingLeft, float=s.columnFloat,\n display=s.columnDisplay,\n media=(\n \tMedia(width=s.columnWidthMobile,\n\t\t\t\tdisplay=s.columnDisplayMobile,\n float=s.columnFloatMobile,\n marginleft=s.columnMarginLeftMobile,\n marginright=s.columnMarginRightMobile,\n paddingleft=s.columnPaddingLeftMobile,\n paddingright=s.columnPaddingRightMobile,),\n ))\n \"\"\"\n self.buildColumn(b)\n \"\"\"\n b._div(comment=colClass)\n b._block(self)\n \"\"\"",
"def readBpseq(bpseq_fn):\n content = open(bpseq_fn).readlines()\n seq = [-1] * len(content)\n struct = [-1] * len(content)\n for i, entry in enumerate(content):\n pos, base, pair = entry.strip().split()\n seq[i] = base\n p = int(pair)\n struct[i] = [1, p][p == 0]\n return \"\".join(seq), struct",
"def compute_bb_properties(md):\n image_widths = md.apply(lambda row: row['bbx2'] - row['bbx1'], axis=1)\n image_heights = md.apply(lambda row: row['bby2'] - row['bby1'], axis=1)\n image_area = image_widths * image_heights\n image_properties = pd.concat([image_widths, image_heights, image_area], axis = 1)\n image_properties.columns = ['Width', 'Height', 'Area']\n return image_properties",
"def test_placement(self):\n p = url_block_pattern\n \n self.assertEqual(_re_match(p, '[text](page)'), '[text](page)')\n self.assertEqual(_re_match(p, 'Lorem [text](page) ipsum.'),\n '[text](page)')\n self.assertEqual(_re_match(p, 'Lorem ipsum [text.](page)'), \n '[text.](page)')\n self.assertEqual(_re_match(p, '[Text](page) lorem ipsum.'), \n '[Text](page)')\n \n self.assertEqual(_re_match(p, 'Lorem\\n[text](page)\\nipsum'),\n '[text](page)')",
"def test_padding(self):\n for pad in [\"pad_first\", \"pad_before_eq\", \"pad_after_eq\"]:\n node = Attribute(wraptext(\"id\"), wraptext(\"foo\"), **{pad: \"\\n\"})\n self.assertEqual(\"\\n\", getattr(node, pad))\n setattr(node, pad, \" \")\n self.assertEqual(\" \", getattr(node, pad))\n setattr(node, pad, None)\n self.assertEqual(\"\", getattr(node, pad))\n self.assertRaises(ValueError, setattr, node, pad, True)",
"def parse_cell_parameters(txt):\n # Define re for the card block.\n cell_parameters_block_re = re.compile(r\"\"\"\n ^ [ \\t]*\n CELL_PARAMETERS [ \\t]*\n [{(]? \\s* (?P<units>[a-z]*) \\s* [)}]? \\s* [\\n]\n (?P<block>\n (\n (\n \\s* # White space in front of the element spec is ok\n (\n # First number\n (\n [-|+]? # Plus or minus in front of the number (optional)\n (\\d* # optional decimal in the beginning .0001 is ok, for example\n [\\.] # There has to be a dot followed by\n \\d+) # at least one decimal\n | # OR\n (\\d+ # at least one decimal, followed by\n [\\.]? # an optional dot\n \\d*) # followed by optional decimals\n ([E|e|d|D][+|-]?\\d+)? # optional exponents E+03, e-05, d0, D0\n \n (\n \\s+ # White space between numbers\n [-|+]? # Plus or minus in front of the number (optional)\n (\\d* # optional decimal in the beginning .0001 is ok, for example\n [\\.] # There has to be a dot followed by\n \\d+) # at least one decimal\n | # OR\n (\\d+ # at least one decimal, followed by\n [\\.]? # an optional dot\n \\d*) # followed by optional decimals\n ([E|e|d|D][+|-]?\\d+)? # optional exponents E+03, e-05, d0, D0\n ){2} # I expect three float values\n )\n |\n \\#\n |\n ! # If a line is commented out, that is also ok\n )\n .* # I do not care what is after the comment or the vector\n | # OR\n \\s* # A line only containing white space\n )\n [\\n] # line break at the end\n ){3} # I need exactly 3 vectors\n )\n \"\"\", RE_FLAGS)\n \n \n cell_vector_regex = re.compile(r\"\"\"\n ^ # Linestart\n [ \\t]* # Optional white space\n (?P<x> # Get x\n [\\-|\\+]? ( \\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<y> # Get y\n [\\-|\\+]? (\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<z> # Get z\n [\\-|\\+]? (\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n \"\"\", re.X | re.M) \n #~ cell_parameters_block_re = re.compile(r\"\"\"\n #~ ^ [ \\t]* CELL_PARAMETERS [ \\t]*\n #~ [{(]? [ \\t]* (?P<units>\\S+?)? [ \\t]* [)}]? [ \\t]* $\\n\n #~ (?P<block>\n #~ (?:\n #~ ^ [ \\t]* \\S+ [ \\t]+ \\S+ [ \\t]+ \\S+ [ \\t]* $\\n?\n #~ ){3}\n #~ )\n #~ \"\"\", RE_FLAGS)\n # Define re for the info contained in the block.\n #~ atomic_species_re = re.compile(r\"\"\"\n #~ ^ [ \\t]* (\\S+) [ \\t]+ (\\S+) [ \\t]+ (\\S+) [ \\t]* $\\n?\n #~ \"\"\", RE_FLAGS)\n # Find the card block and extract units and the lines of the block.\n match = cell_parameters_block_re.search(txt)\n if not match:\n return None\n # Use specified units or None if not specified.\n units = match.group('units')\n if units is not None:\n units = units.lower()\n # Get the string containing the lines of the block.\n if match.group('block') is None:\n raise ParsingError(\n 'The CELL_PARAMETER card block was parsed as empty in\\n' + txt\n )\n else:\n blockstr = match.group('block')\n # Define a small helper function to convert strings of fortran-type floats.\n fortfloat = lambda s: float(s.replace('d', 'e').replace('D', 'E'))\n # Now, extract the lattice vectors.\n lattice_vectors = []\n for match in cell_vector_regex.finditer(blockstr):\n lattice_vectors.append(map(fortfloat, (match.group('x'),match.group('y'),match.group('z'))))\n info_dict = dict(units=units, cell=lattice_vectors)\n return info_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks whether the text has text not already enclosed by a block tag >>> t = Textile() >>> t.hasRawText('<p>foo bar biz baz</p>') False >>> t.hasRawText(' why yes, yes it does') True | def hasRawText(self, text):
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>',
re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r | [
"def has_text(self):",
"def is_html_like(text):\n if isinstance(text, str):\n text = text.strip()\n if text.startswith(\"<\"):\n return True\n return False\n return False",
"def has_text_content(element):\n return element.string is not None",
"def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False",
"def is_unstructured_text(self):\r\n\r\n return not self.label.isupper()",
"def has_richtext_widget(self):\n return self.has_field([self.rich_text_heading, strip_tags(self.rich_text)])",
"def has_text(self, *args, **kwargs):\n\n return self.assert_text(*args, **kwargs)",
"def mightRender(self, text):\r\n return True",
"def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False",
"def isnewblock(self):\r\n # First get read of the leading empty lines:\r\n string = re.sub(r\"\\A([\\t ]*\\n)*\", \"\", self.string)\r\n if re.match(r\"elif|else|finally|except| |\\t\", string):\r\n return False\r\n else:\r\n return True",
"def wait_for_text(self, text):\n self.wait_for(lambda: text in self.currentFrame().toPlainText(),\n 'Can\\'t find \"%s\" in current frame' % text)\n return True",
"def has_dirty_blocktrans(path):\n with io.open(path, encoding='utf-8') as infile:\n for line in infile:\n if '{% blocktrans' in line and '{% endblocktrans' not in line:\n if 'trimmed' not in line:\n return True\n return False",
"def verify(self, plain_text):",
"def _isTextValid(self, strText):\n clusterLanguageId = self.getLanguageId()\n\n #Some regex\n for regex, regexLanguageId in self.document.regex_filter_list:\n regexLanguageId = int(regexLanguageId)\n #Does it match the text language\n if regexLanguageId != clusterLanguageId and \\\n regexLanguageId != 0:\n continue\n #Ignore case available\n #if re.search(regex, strText, re.IGNORECASE) != None:\n if re.search(regex, strText, flags=re.UNICODE) != None:\n TextCluster.logger.info(\"Discard:%s\\n%s\" % (regex.encode(\"utf-8\"), strText.encode(\"utf-8\")))\n return False\n\n return True",
"def _isTextValid(self, strText):\n clusterLanguageId = self.getLanguageId()\n\n # Some regex\n for regex, regexLanguageId in self.document.regex_filter_list:\n regexLanguageId = int(regexLanguageId)\n # Does it match the text language\n if regexLanguageId != clusterLanguageId and \\\n regexLanguageId != 0:\n continue\n # Ignore case available\n # if re.search(regex, strText, re.IGNORECASE) != None:\n if re.search(regex, strText, flags=re.UNICODE) != None:\n TextCluster.logger.info(\"Discard:%s\\n%s\" % (\n regex, strText))\n return False\n\n return True",
"def any_text_contains(\n self, text: str, deep: bool = True, separator: str = \"\", strip: bool = False\n ) -> bool:\n ...",
"def paragraph_is_text_like(p):\n return not isinstance(p, pyth.document.Image)",
"def has_raw(self):\n return self.__has_volume(\"/volumes/raw\")",
"def check_text_slots(response_dict : Dict) -> bool:\n if re.findall(\"(?<=\\{)(.*?)(?=\\})\", response_dict['text']):\n return True\n else:\n False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" >>> t = Textile() >>> t.table('(rowclass). |one|two|three|\n|a|b|c|') '\t\n\t\t\n\t\t\tone\n\t\t\ttwo\n\t\t\tthree\n\t\t\n\t\t\n\t\t\ta\n\t\t\tb\n\t\t\tc\n\t\t\n\t\n\n' | def table(self, text):
text = text + "\n\n"
pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n'
% {'s': self.table_span_re,
'a': self.align_re,
'c': self.c},
re.S | re.M | re.U)
return pattern.sub(self.fTable, text) | [
"def test_multi_line(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),\n ('|', ' Column One ', '|', ' ', '|', ' ', '|'),\n ]\n assert actual == expected",
"def split_tables(text):\n tables = RE_TABLE.findall(text)\n text = RE_TABLE.sub(\"\", text)\n return text, tables",
"def tabulate(self, separator=' ', filler=None):\n\n if filler is None:\n filler = separator\n\n split_regex = re.compile(separator)\n\n def do_split(line):\n return (cell for cell in split_regex.split(line) if cell)\n\n # \"Transpose\" the list of lines into a list of columns\n table_cols = list(itertools.zip_longest(\n *(do_split(line) for line in self._line_list),\n fillvalue=''\n ))\n\n # Compute the width of each column according to the longest cell in it\n table_cols_width = (\n # +1 to keep at least one filler string between columns\n max(len(cell) for cell in col) + 1\n for col in table_cols\n )\n\n # Reformat all cells to fit the width of their column\n table_cols = [\n [\n '{cell:{filler}<{w}}'.format(cell=cell, filler=filler, w=width)\n for cell in col\n ]\n for width, col in zip(table_cols_width, table_cols)\n ]\n # Transpose back the columns to lines\n table_lines = (\n ''.join(cells).rstrip(filler)\n for cells in zip(*table_cols)\n )\n\n return MLString(lines=table_lines)",
"def test_text_outside_table(self) -> None:\n assert OUTPUT.body[3] == [\n [\n [\n \"Text outside table\",\n \"Reference footnote 1----footnote1----\",\n \"Reference footnote 2----footnote2----\",\n \"Reference endnote 1----endnote1----\",\n \"Reference endnote 2----endnote2----\",\n \"\",\n \"----media/image2.jpg----\",\n ]\n ]\n ]",
"def test_single_line(style):\n row = ['Row One Column One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [18, 3, 5], 1)]\n expected = [\n ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'),\n ]\n assert actual == expected",
"def test_two_rows_equal_size(self):\n tab = tabl.Tabl()\n string = tab.to_table([['ab', 'c', 'def'], ['gh', 'i', 'jkl']])\n self.assertEqual('+--+-+---+\\n' + \\\n '|ab|c|def|\\n' + \\\n '+--+-+---+\\n' + \\\n '|gh|i|jkl|\\n' + \\\n '+--+-+---+\\n', string)",
"def _enclose_in_table (text):\n\treturn tag_with_contents (\n\t\t'table',\n\t\ttext,\n\t\tclass_='revi_formtable',\n\t)",
"def textile(text, head_offset=0, html_type='xhtml', auto_link=False,\r\n encoding=None, output=None):\r\n return Textile(auto_link=auto_link).textile(text, head_offset=head_offset,\r\n html_type=html_type)",
"def test_two_rows_first_longer(self):\n tab = tabl.Tabl()\n string = tab.to_table([['abb', 'c', 'def'], ['gh', 'i', 'jkl']])\n self.assertEqual('+---+-+---+\\n' + \\\n '|abb|c|def|\\n' + \\\n '+---+-+---+\\n' + \\\n '|gh |i|jkl|\\n' + \\\n '+---+-+---+\\n', string)",
"def test_custom_hor_split(self):\n tab = tabl.Tabl()\n tab.set_hor('~')\n string = tab.to_table([['a']])\n self.assertEqual('+~+\\n' + \\\n '|a|\\n' + \\\n '+~+\\n', string)",
"def TableExtract(self):\n\n Regex = r\"\\\\begin\\{table\\}.*?\\\\end\\{table\\}\" # no closing brace on purpose -- this is so that table* is included\n self.TableRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n Regex = r\"\\\\begin\\{table\\*\\}.*?\\\\end\\{table\\*}\"\n self.TableStarRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n TableExtracted = self.TableRegex.findall(self.ParsedText) + self.TableStarRegex.findall(self.ParsedText)\n\n for TableText in TableExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedTables[ThisUID] = Table(TableText, ThisUID)",
"def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)",
"def render_text(table, data):\n print(render(renderers.FancyRenderer, table, data))",
"def create_table_element(self):\n element = etree.Element('table')\n element.text = '\\n'\n element.tail = '\\n'\n return element",
"def test_biolink_tsv(self):\n\n def filtr(s: str) -> str:\n return s.replace(\"\\r\\n\", \"\\n\")\n\n self.single_file_generator(\"tsv\", CsvGenerator, format=\"tsv\", filtr=filtr)",
"def test_make_quoted_table(): # ***Incomplete test\n ##########################\n # Arrange.\n intablepath = \"intablepath\"\n outtablepath = \"outtablepath\"\n\n ##########################\n # Act.\n #x = make_quoted_table(intablepath,\n #\t\touttablepath)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht",
"def make_table(self, a, b, adesc=None, bdesc=None, context=5):\n adesc = six.ensure_text(adesc) or ''\n bdesc = six.ensure_text(bdesc) or ''\n diff = difflib._mdiff(a, b, context=context)\n lines = [self._make_line(d) for d in diff]\n return h.really_unicode(\n self.table_tmpl % (adesc, bdesc, '\\n'.join(lines)))",
"def table(self, header, body):\r\n return (\r\n '<table%s>\\n<thead>%s</thead>\\n'\r\n '<tbody>\\n%s</tbody>\\n</table>\\n'\r\n ) % (self._table, header, body)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> t = Textile() >>> t.fBlock("bq", "", None, "", "Hello BlockQuote") ('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>') | def fBlock(self, tag, atts, ext, cite, content):
atts = self.pba(atts)
o1 = o2 = c2 = c1 = ''
m = re.search(r'fn(\d+)', tag)
if m:
tag = 'p'
if m.group(1) in self.fn:
fnid = self.fn[m.group(1)]
else:
fnid = m.group(1)
atts = atts + ' id="fn%s"' % fnid
if atts.find('class=') < 0:
atts = atts + ' class="footnote"'
content = ('<sup>%s</sup>' % m.group(1)) + content
if tag == 'bq':
cite = self.checkRefs(cite)
if cite:
cite = ' cite="%s"' % cite
else:
cite = ''
o1 = "\t<blockquote%s%s>\n" % (cite, atts)
o2 = "\t\t<p%s>" % atts
c2 = "</p>"
c1 = "\n\t</blockquote>"
elif tag == 'bc':
o1 = "<pre%s>" % atts
o2 = "<code%s>" % atts
c2 = "</code>"
c1 = "</pre>"
content = self.shelve(self.encode_html(content.rstrip("\n") +
"\n"))
elif tag == 'notextile':
content = self.shelve(content)
o1 = o2 = ''
c1 = c2 = ''
elif tag == 'pre':
content = self.shelve(self.encode_html(content.rstrip("\n") +
"\n"))
o1 = "<pre%s>" % atts
o2 = c2 = ''
c1 = '</pre>'
else:
o2 = "\t<%s%s>" % (tag, atts)
c2 = "</%s>" % tag
content = self.graf(content)
return o1, o2, content, c2, c1 | [
"def makeNewBlock(self):\n\n block = textlayout.Block(\n width=self._propertyToPoints(\"width\"),\n lineHeight=self._propertyToPoints(\"line_height\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n textAlign=self._property(\"text_align\"),\n maxLines=self._propertyToInt(\"max_lines\"),\n ellipsify=self._propertyToBool(\"ellipsify\")\n )\n\n return block",
"def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))",
"def test_nested_three_block_nl_block_nl_block_no_bq1():\n\n # Arrange\n source_markdown = \"\"\">\n> >\n > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::>]\",\n \"[BLANK(1,2):]\",\n \"[block-quote(2,1)::> >\\n > > ]\",\n \"[BLANK(2,4):]\",\n \"[para(3,7):]\",\n \"[text(3,7):list:]\",\n \"[end-para:::True]\",\n \"[block-quote(4,1)::> > > ]\",\n \"[para(4,7):]\",\n \"[text(4,7):item:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<p>list</p>\n<blockquote>\n<p>item</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def test_nested_three_block_block_block():\n\n # Arrange\n source_markdown = \"\"\"> > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::]\",\n \"[block-quote(1,3)::]\",\n \"[block-quote(1,5)::> > > \\n> > > ]\",\n \"[para(1,7):\\n]\",\n \"[text(1,7):list\\nitem::\\n]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list\nitem</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def test_block_comment_whitespace_signature(self):\n \n inp = '2_3_block_comment.txt'\n self.run_single_file_case(inp)",
"def codeblock(text):\n import textwrap # this is a slow import, do it lazy\n return textwrap.dedent(text).strip('\\n')",
"def f_blocks(self, f_blocks):\n \n self._f_blocks = f_blocks",
"def executeblock(self, block):\r\n \r\n block_text = \"\\n\\n\" + block.string\r\n line_number = block.start_row\r\n #self.options._update_loose(block.options)\r\n out_value = \"\"\r\n \r\n # This import should not be needed, but it works around a very\r\n # strange bug I encountered once.\r\n import cStringIO\r\n # create file-like string to capture output\r\n code_out = cStringIO.StringIO()\r\n code_err = cStringIO.StringIO()\r\n \r\n captured_exception = None\r\n # capture output and errors\r\n sys.stdout = code_out\r\n sys.stderr = code_err\r\n try:\r\n exec block_text in self.namespace\r\n except Exception, captured_exception:\r\n if isinstance(captured_exception, KeyboardInterrupt):\r\n raise captured_exception\r\n print >> sys.stderr, format_exc() \r\n \r\n # restore stdout and stderr\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__\r\n \r\n out_value = code_out.getvalue()\r\n error_value = code_err.getvalue()\r\n \r\n code_out.close()\r\n code_err.close()\r\n\r\n if captured_exception: \r\n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \r\n print >> sys.stderr, error_value\r\n self.namespace = globals()\r\n self.namespace.update(locals())\r\n\r\n if out_value and not self.options.noecho:\r\n if self.options.outfilename == \"-\" :\r\n print >> sys.stderr, out_value\r\n else:\r\n print out_value\r\n if self.myshow:\r\n self.current_figure_list = self.myshow.figure_list[\r\n len(self.total_figure_list):]\r\n self.total_figure_list = self.myshow.figure_list\r\n\r\n #if self.options.silent:\r\n # error_value = \"\"\r\n \r\n return (block.start_row, block.string, out_value, error_value, \r\n self.current_figure_list)",
"def make_block(text, blocksize=60, spaces=False, newlines=False):\n if not spaces:\n # Simple block by chars.\n return (text[i:i + blocksize] for i in range(0, len(text), blocksize))\n if newlines:\n # Preserve newlines\n lines = []\n for line in text.split('\\n'):\n lines.extend(make_block(line, blocksize=blocksize, spaces=True))\n return lines\n\n # Wrap on spaces (ignores newlines)..\n words = text.split()\n lines = []\n curline = ''\n for word in words:\n possibleline = ' '.join((curline, word)) if curline else word\n\n if len(possibleline) > blocksize:\n lines.append(curline)\n curline = word\n else:\n curline = possibleline\n if curline:\n lines.append(curline)\n return lines",
"def format_string_block(self):\n if self._trim:\n return self._form_trimmed_format_string()\n else:\n return self._form_format_string()",
"def testBlock(self):\n\t\t\n\t\tbo = DBSBlock(self.logger, self.dbi, self.owner)\n\t\tbinput = {'block_name': u'/QCD_BCtoMu_Pt20/Summer08_IDEAL_V9_v1/GEN-SIM-RAW#f930d82a-f72b-4f9e-8351-8a3cb0c43b79', 'file_count': u'100', \n\t\t\t 'origin_site': u'cmssrm.fnal.gov', 'last_modification_date': u'1263231733', \n\t\t\t 'create_by': u'/DC=org/DC=doegrids/OU=People/CN=Ajit Kumar Mohapatra 867118', 'block_size': u'228054411650', \n\t\t\t 'open_for_writing': 1, 'last_modified_by': u'anzar@cmssrv49.fnal.gov', 'creation_date': u'1228050132'}\n\t\tbo.insertBlock(binput)",
"def test_already_formatted_block_comment(self):\n \n inp = '2_5_block_comment.txt'\n self.run_single_file_case(inp)",
"def executeblock(self, block):\n \n block_text = \"\\n\\n\" + block.string\n line_number = block.start_row\n #self.options._update_loose(block.options)\n out_value = \"\"\n \n # This import should not be needed, but it works around a very\n # strange bug I encountered once.\n import cStringIO\n # create file-like string to capture output\n code_out = cStringIO.StringIO()\n code_err = cStringIO.StringIO()\n \n captured_exception = None\n # capture output and errors\n sys.stdout = code_out\n sys.stderr = code_err\n try:\n exec block_text in self.namespace\n except Exception, captured_exception:\n if isinstance(captured_exception, KeyboardInterrupt):\n raise captured_exception\n print >> sys.stderr, format_exc() \n \n # restore stdout and stderr\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n out_value = code_out.getvalue()\n error_value = code_err.getvalue()\n \n code_out.close()\n code_err.close()\n\n if captured_exception: \n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \n print >> sys.stderr, error_value\n self.namespace = globals()\n self.namespace.update(locals())\n\n if out_value and not self.options.noecho:\n if self.options.outfilename == \"-\" :\n print >> sys.stderr, out_value\n else:\n print out_value\n if self.myshow:\n self.current_figure_list = self.myshow.figure_list[\n len(self.total_figure_list):]\n self.total_figure_list = self.myshow.figure_list\n\n #if self.options.silent:\n # error_value = \"\"\n \n return (block.start_row, block.string, out_value, error_value, \n self.current_figure_list)",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def code_block(name, arguments, options, content, lineno,\r\n content_offset, block_text, state, state_machine):\r\n language = arguments[0]\r\n highlighter = get_highlighter(language)\r\n if highlighter is None:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" directive does not support language \"%s\".' % (name, language),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n\r\n if not content:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" block is empty; content required.' % (name),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n return [error]\r\n\r\n include_text = highlighter(\"\\n\".join(content))\r\n html = '<div class=\"syntax %s\">\\n%s\\n</div>\\n' % (language, include_text)\r\n raw = nodes.raw('',html, format='html')\r\n return [raw]",
"def block(self, dataset: 'Dataset', function_: Callable = None, property_: str = None,\n block: Block = None, block_black_list: BlockBlackList = None, base_on: Block = None):\n block = BlockGenerator._block_args_check(function_, property_, block)\n return block",
"def buildBlock(self, b):\n \"\"\"\n s = self.style\n colClass = self.getColClass(s.colWidth)\n b.block(self)\n b.div(class_=colClass, marginright=s.columnMarginRight, width=s.colWidth,\n marginleft=s.columnMarginLeft, margintop=s.columnMarginTop,\n paddingleft=s.columnPaddingLeft, float=s.columnFloat,\n display=s.columnDisplay,\n media=(\n \tMedia(width=s.columnWidthMobile,\n\t\t\t\tdisplay=s.columnDisplayMobile,\n float=s.columnFloatMobile,\n marginleft=s.columnMarginLeftMobile,\n marginright=s.columnMarginRightMobile,\n paddingleft=s.columnPaddingLeftMobile,\n paddingright=s.columnPaddingRightMobile,),\n ))\n \"\"\"\n self.buildColumn(b)\n \"\"\"\n b._div(comment=colClass)\n b._block(self)\n \"\"\"",
"def test_nested_three_block_max_block_max_block_max_empty_no_bq2():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > \\n > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >]\",\n \"[BLANK(1,15):]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[icode-block(2,10): :]\",\n \"[text(2,10):\\a>\\a>\\a list: ]\",\n \"[end-icode-block:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n</blockquote>\n</blockquote>\n<pre><code> > list\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> t = Textile() >>> t.glyphs("apostrophe's") 'apostrophe&#8217;s' >>> t.glyphs("back in '88") 'back in &#8217;88' >>> t.glyphs('foo ...') 'foo &#8230;' >>> t.glyphs('--') '&#8212;' >>> t.glyphs('FooBar[tm]') 'FooBar&#8482;' >>> t.glyphs("Cat's Cradle by Vonnegut") 'Cat&#8217;s Cradle by Vonnegut' | def glyphs(self, text):
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
glyph_search = (
# apostrophe's
re.compile(r"(\w)\'(\w)"),
# back in '88
re.compile(r'(\s)\'(\d+\w?)\b(?!\')'),
# single closing
re.compile(r'(\S)\'(?=\s|' + self.pnct + '|<|$)'),
# single opening
re.compile(r'\'/'),
# double closing
re.compile(r'(\S)\"(?=\s|' + self.pnct + '|<|$)'),
# double opening
re.compile(r'"'),
# 3+ uppercase acronym
re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'),
# 3+ uppercase
re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'),
# ellipsis
re.compile(r'\b(\s{0,1})?\.{3}'),
# em dash
re.compile(r'(\s?)--(\s?)'),
# en dash
re.compile(r'\s-(?:\s|$)'),
# dimension sign
re.compile(r'(\d+)( ?)x( ?)(?=\d+)'),
# trademark
re.compile(r'\b ?[([]TM[])]', re.I),
# registered
re.compile(r'\b ?[([]R[])]', re.I),
# copyright
re.compile(r'\b ?[([]C[])]', re.I),
)
glyph_replace = [x % dict(self.glyph_defaults) for x in (
r'\1%(txt_apostrophe)s\2', # apostrophe's
r'\1%(txt_apostrophe)s\2', # back in '88
r'\1%(txt_quote_single_close)s', # single closing
r'%(txt_quote_single_open)s', # single opening
r'\1%(txt_quote_double_close)s', # double closing
r'%(txt_quote_double_open)s', # double opening
r'<acronym title="\2">\1</acronym>', # 3+ uppercase acronym
r'<span class="caps">\1</span>', # 3+ uppercase
r'\1%(txt_ellipsis)s', # ellipsis
r'\1%(txt_emdash)s\2', # em dash
r' %(txt_endash)s ', # en dash
r'\1\2%(txt_dimension)s\3', # dimension sign
r'%(txt_trademark)s', # trademark
r'%(txt_registered)s', # registered
r'%(txt_copyright)s', # copyright
)]
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
for s, r in zip(glyph_search, glyph_replace):
line = s.sub(r, line)
result.append(line)
return ''.join(result) | [
"def get_glyphs(self, text):\n glyph_renderer = None\n glyphs = [] # glyphs that are committed.\n for c in get_grapheme_clusters(str(text)):\n # Get the glyph for 'c'. Hide tabs (Windows and Linux render\n # boxes)\n if c == '\\t':\n c = ' '\n if c not in self.glyphs:\n if not glyph_renderer:\n glyph_renderer = self.glyph_renderer_class(self)\n self.glyphs[c] = glyph_renderer.render(c)\n glyphs.append(self.glyphs[c])\n return glyphs",
"def SoGlyph_getGlyph(*args) -> \"SoGlyph const *\":\n return _coin.SoGlyph_getGlyph(*args)",
"def load_glyphs(self): \n self.glyphs = {}\n for id, glyph in self.font_spec[\"char\"].iteritems(): \n g = Glyph(**glyph)\n if id<256:\n self.glyphs[chr(id)] = g",
"def get_glyphs(fig):\n return [x for x in fig.renderers if isinstance(x, GlyphRenderer)]",
"def textCurves(string, font=\"string\", text=\"string\", name=\"string\", object=bool):\n pass",
"def getGlyph(self, char):\n return FontGlyph(char, self, self.cairoContext)",
"def add_glyphs(self, directory):\n space = self.font.createMappedChar(ord(\" \"))\n space.width = 500\n\n for k in self.config[\"glyphs\"]:\n # Create character glyph\n g = self.font.createMappedChar(k)\n self.unicode_mapping.setdefault(k, g.glyphname)\n # Get outlines\n src = \"{}/{}.svg\".format(k, k)\n src = directory + os.sep + src\n g.importOutlines(src, (\"removeoverlap\", \"correctdir\"))\n g.removeOverlap()",
"def _get_text_glyphs(\n font, text,\n direction, line_direction, base_direction,\n missing='raise'\n ):\n if isinstance(text, str) and direction not in ('top-to-bottom', 'bottom-to-top'):\n # reshape Arabic glyphs to contextual forms\n try:\n text = reshape(text)\n except ImportError as e:\n # check common Arabic range - is there anything to reshape?\n if any(ord(_c) in range(0x600, 0x700) for _c in text):\n logging.warning(e)\n # put characters in visual order instead of logical\n if direction == 'normal':\n # decide direction based on bidi algorithm\n base_dir = {\n 'left-to-right': 'L',\n 'right-to-left': 'R'\n }[base_direction]\n text = get_display(text, base_dir=base_dir)\n lines = text.splitlines()\n if direction in ('right-to-left', 'bottom-to-top'):\n # reverse glyph order for rendering\n lines = tuple(_row[::-1] for _row in lines)\n if line_direction in ('right-to-left', 'bottom-to-top'):\n # reverse line order for rendering\n lines = lines[::-1]\n return tuple(\n tuple(_iter_labels(font, _line, missing))\n for _line in lines\n )",
"def _pdfmark_unicode(string):\n try:\n string.encode('ascii')\n except UnicodeEncodeError:\n b = codecs.BOM_UTF16_BE + string.encode('utf-16-be')\n return '<{}>'.format(''.join('{:02X}'.format(byte) for byte in b))\n else:\n # escape special characters\n for a, b in [('\\\\', '\\\\\\\\'), ('(', '\\\\('), (')', '\\\\)'),\n ('\\n', '\\\\n'), ('\\t', '\\\\t')]:\n string = string.replace(a, b)\n return '({})'.format(string)",
"def unicode(ctx, text):\n text = ' '.join(text)\n final_text = ''\n for char in text:\n final_text += f\"U+{ord(char):06x} {char} {unicodedata.name(char)}\\n\"\n chat(ctx).send_file(final_text.encode('utf8'), filename='UnicodeAnalysis.txt', title='Unicode', filetype='txt')",
"def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append(u'&%s;' % codepoint2name[ord(c)])\r\n else:\r\n s.append(u'&#%s;' % ord(c))\r\n return ''.join(s), text.end\r\n else:\r\n raise TypeError(\"Can't handle %s\" % text.__name__)",
"def test_non_varying_glyphs_bug356():\n actual_path = get_temp_file_path()\n font_path = get_input_path('bug356.otf')\n stderr_path = runner(CMD + ['-s', '-e', '-a', '-o', 'cff',\n '-f', font_path, actual_path])\n expected_path = get_expected_path('bug356.txt')\n assert differ([expected_path, stderr_path, '-l', '1'])",
"def get_glyph_as_png(fontfile, cp):\n xcp = '%04X' % cp\n fn = \"/tmp/U+%s.png\" % xcp\n tfn = \"/tmp/U+%s.tmp.png\" % xcp\n subprocess.call([\n \"convert\", \"-background\", \"none\", \"-gravity\", \"center\",\n \"-size\", \"16x16\", \"-fill\", \"black\", \"-font\", fontfile,\n \"-pointsize\", \"16\", \"label:\"+unichr(cp).encode(\"UTF-8\"), tfn])\n subprocess.call([\"pngcrush\", \"-q\", \"-rem\", \"alla\", tfn, fn])\n os.unlink(tfn)\n with open(fn) as fo:\n result = b64encode(fo.read())\n os.unlink(fn)\n return result",
"def fromFonttoolsGlyph(klass,font,glyphname):\n glyphset = font.getGlyphSet()\n from beziers.utils.pens import BezierPathCreatingPen\n pen = BezierPathCreatingPen(glyphset)\n glyph = font.getGlyphSet()[glyphname]\n glyph.draw(pen)\n return pen.paths",
"def copy_character_glyphs(self, chars):\n if not self.should_copy_character_glyphs:\n return\n print(\" ...copying %d character glyphs...\" % (len(chars)))\n\n for char in chars:\n self.liga_font.selection.none()\n self.liga_font.selection.select(char)\n self.liga_font.copy()\n self.font.selection.none()\n self.font.selection.select(char)\n self.font.paste()\n self.correct_character_width(self.font[ord(char_dict[char])])",
"def set_font_name(text):\r\n try:\r\n text.encode(encoding='utf-8').decode('ascii')\r\n except UnicodeDecodeError:\r\n return \"ARIALUNI.TTF\"\r\n return \"impact.ttf\"",
"def text_image(self, char):\r\n if char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':\r\n _index = ord(char)\r\n if _index >= 97:\r\n _index -= 97\r\n else:\r\n _index -= 38\r\n else:\r\n _index = 26\r\n\r\n return alpha_image[_index]",
"def check_font_chars(ttf, charset):\n #chars = chain.from_iterable([y + (Unicode[y[0]],) for y in x.cmap.items()] for x in ttf[\"cmap\"].tables)\n try:\n chars_int=set()\n for table in ttf['cmap'].tables:\n for k,v in table.cmap.items():\n chars_int.add(k)\n\n unsupported_chars = []\n supported_chars = []\n for c in charset:\n if ord(c) not in chars_int:\n unsupported_chars.append(c)\n else:\n supported_chars.append(c)\n\n ttf.close()\n return unsupported_chars, supported_chars\n except:\n return False",
"def font(obj):\n return match(obj, font_matchers)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Capture and store URL references in self.urlrefs. >>> t = Textile() | def getRefs(self, text):
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text | [
"def RefExtract(self):\n Regex = r\"\\\\ref\\{.*?\\}\"\n self.RefRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n RefExtracted = self.RefRegex.findall(self.ParsedText)\n\n for Reference in RefExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedRef[ThisUID] = Ref(Reference, ThisUID)",
"def add_reference(self, uri, text):\n el = SubElement(self.get_element_person(), 'ref')\n el.set('target', uri)\n el.text = text\n return el",
"def _reference(self):\n\t\tpass",
"def migrate_url_to_reference_url(connection):\n migration_user_id = migrator.get_migration_user_id(connection)\n doc_ids = connection.execute(\n text(\"SELECT d.id FROM documents d WHERE d.kind='URL'\")).fetchall()\n\n doc_ids = [d.id for d in doc_ids]\n utils.add_to_objects_without_revisions_bulk(connection, doc_ids, \"Document\")\n\n sql = \"\"\"\n UPDATE documents SET\n kind='REFERENCE_URL',\n modified_by_id=:modified_by_id,\n updated_at=NOW()\n WHERE kind='URL'\n \"\"\"\n connection.execute(text(sql),\n modified_by_id=migration_user_id)\n\n connection.execute(text(\"\"\"\n ALTER TABLE documents MODIFY\n kind enum('FILE','REFERENCE_URL') NOT NULL DEFAULT 'REFERENCE_URL';\n \"\"\"))",
"def listLinks(self):\n\t\tfor _id in self.refs:\n\t\t\tref = self.refs[_id]\n\t\t\tprint(ref.path)",
"def get_referenced_objects(self):\n refs = set()\n for tile_uuid in self.list_tiles():\n tile = self.get_tile(tile_uuid)\n uuid = tile.data.get(\"uuid\", None)\n if uuid is not None:\n refs |= set([uuidToObject(uuid)])\n if IListTile.providedBy(tile):\n uuids = tile.data.get(\"uuids\", [])\n if uuids is None:\n continue\n for uuid in uuids:\n refs |= set([uuidToObject(uuid)])\n elif IRichTextTile.providedBy(tile):\n value = tile.data.get(\"text\")\n if value is None:\n continue\n value = value.raw\n links = extractLinks(value)\n refs |= getObjectsFromLinks(self, links)\n return refs",
"def upload_reference(self, task, file_object, filename):\n import shutil\n\n ############################################################\n # ORIGINAL\n ############################################################\n file_path = os.path.join(os.path.join(task.absolute_path), self.reference_path)\n\n # upload it\n reference_file_full_path = self.upload_file(file_object, file_path, filename)\n\n reference_file_file_name = os.path.basename(reference_file_full_path)\n reference_file_base_name = os.path.splitext(reference_file_file_name)[0]\n\n # create a Link instance and return it.\n # use a Repository relative path\n repo = task.project.repository\n\n from stalker import Repository, Link\n\n assert isinstance(repo, Repository)\n relative_full_path = repo.make_relative(reference_file_full_path)\n\n link = Link(full_path=relative_full_path, original_filename=filename)\n\n # create a thumbnail for the given reference\n # don't forget that the first thumbnail is the Web viewable version\n # and the second thumbnail is the thumbnail\n\n ############################################################\n # WEB VERSION\n ############################################################\n web_version_temp_full_path = self.generate_media_for_web(\n reference_file_full_path\n )\n web_version_extension = os.path.splitext(web_version_temp_full_path)[-1]\n\n web_version_file_name = \"%s%s\" % (\n reference_file_base_name,\n web_version_extension,\n )\n web_version_full_path = os.path.join(\n os.path.dirname(reference_file_full_path), \"ForWeb\", web_version_file_name\n )\n web_version_repo_relative_full_path = repo.make_relative(web_version_full_path)\n web_version_link = Link(\n full_path=web_version_repo_relative_full_path,\n original_filename=web_version_file_name,\n )\n\n # move it to repository\n try:\n os.makedirs(os.path.dirname(web_version_full_path))\n except OSError: # path exists\n pass\n shutil.move(web_version_temp_full_path, web_version_full_path)\n\n ############################################################\n # THUMBNAIL\n ############################################################\n # finally generate a Thumbnail\n thumbnail_temp_full_path = self.generate_thumbnail(reference_file_full_path)\n thumbnail_extension = os.path.splitext(thumbnail_temp_full_path)[-1]\n thumbnail_file_name = \"%s%s\" % (reference_file_base_name, thumbnail_extension)\n\n thumbnail_full_path = os.path.join(\n os.path.dirname(reference_file_full_path), \"Thumbnail\", thumbnail_file_name\n )\n thumbnail_repo_relative_full_path = repo.make_relative(thumbnail_full_path)\n thumbnail_link = Link(\n full_path=thumbnail_repo_relative_full_path,\n original_filename=thumbnail_file_name,\n )\n\n # move it to repository\n try:\n os.makedirs(os.path.dirname(thumbnail_full_path))\n except OSError: # path exists\n pass\n shutil.move(thumbnail_temp_full_path, thumbnail_full_path)\n\n ############################################################\n # LINK Objects\n ############################################################\n # link them\n # assign it as a reference to the given task\n task.references.append(link)\n link.thumbnail = web_version_link\n web_version_link.thumbnail = thumbnail_link\n\n return link",
"def _process_biblio_ref(self, item):\n a_ref = nodes.Reference()\n logger = logging.getLogger(self.__class__.__name__)\n\n a_ref.set_name(item[\"name\"])\n a_ref.set_title(item.get(\"title\", \"\"))\n a_ref.set_organization(item.get(\"organization\", \"\"))\n a_ref.set_category(item.get(\"category\", \"\"))\n a_ref.set_date(item.get(\"date\", \"\"))\n\n logger.debug(\"Processing Bibliography Reference: \\\"{}\\\"\".format(a_ref.get_name()))\n\n if \"hyperlink\" in item:\n if isinstance(item[\"hyperlink\"], list):\n for hyperlink_item in item[\"hyperlink\"]:\n a_ref.add_hyperlink(hyperlink_item)\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(hyperlink_item))\n else:\n a_ref.add_hyperlink(item[\"hyperlink\"])\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(item[\"hyperlink\"]))\n\n return a_ref",
"def text_ref(self, ref):\n self._text_ref(ref)",
"def update_reference(self, index, uri, text):\n el = self.xpath('./person/ref')[index]\n assert el.tag == 'ref' #check sanity\n el.set('target', uri)\n el.text = text\n return el",
"def create_link(self):\n #Access filepath created from capture method\n file_path = App.get_running_app().root.ids.camera_screen.filepath\n #create FileShare object and assign the filepath parameter as file_path and upload it\n #to the web using the api key parameter of FileShare class\n filesharer = FileShare(filepath = file_path)\n #extract url using share method\n #add self. to url so it can be access by other methods in the ImageScreen class\n self.url = filesharer.share()\n #set text of Label widget to url\n self.ids.link.text = self.url",
"def references(self):\n out = []\n fields = 'position id doi title authors sourcetitle publicationyear '\\\n 'volume issue first last text fulltext'\n ref = namedtuple('Reference', fields)\n items = self._tail.get('bibliography', {}).get('reference', [])\n if not isinstance(items, list):\n items = [items]\n for item in items:\n info = item['ref-info']\n volisspag = info.get('ref-volisspag', {})\n try:\n auth = info['ref-authors']['author']\n if not isinstance(auth, list):\n auth = [auth]\n authors = [', '.join([d['ce:surname'], d['ce:initials']])\n for d in auth]\n except KeyError: # No authors given\n authors = None\n ids = info['refd-itemidlist']['itemid']\n if not isinstance(ids, list):\n ids = [ids]\n try:\n doi = [d['$'] for d in ids if d['@idtype'] == 'DOI'][0]\n except IndexError:\n doi = None\n new = ref(position=item.get('@id'),\n id=[d['$'] for d in ids if d['@idtype'] == 'SGR'][0],\n doi=doi, authors=authors,\n title=info.get('ref-title', {}).get('ref-titletext'),\n sourcetitle=info.get('ref-sourcetitle'),\n publicationyear=info.get('ref-publicationyear', {}).get('@first'),\n volume=volisspag.get('voliss', {}).get('@volume'),\n issue=volisspag.get('voliss', {}).get('@issue'),\n first=volisspag.get('pagerange', {}).get('@first'),\n last=volisspag.get('pagerange', {}).get('@last'),\n text=info.get('ref-text'),\n fulltext=item.get('ref-fulltext'))\n out.append(new)\n return out or None",
"def __init__(self, url):\n\n self._url = url",
"def _ref_path(self, name):\n assert name.startswith('refs/')\n return posixpath.join(self._url, name)",
"def __init__(self, fullurl):\n self.fullurl = fullurl\n self.urls = []\n self.last_modified = ''",
"def add_url(\n self, value, tags=[], context={}, description=\"\", source=\"API\", **kwargs\n ):\n return self.__observable_add(\n value,\n type_obs=\"Url\",\n tags=tags,\n context=context,\n description=description,\n source=source,\n **kwargs\n )",
"def test_pathurl_argument_is_working_properly(self):\n f = File(pathurl='shot2')\n self.assertEqual('file://localhost/shot2', f.pathurl)",
"def __handle_link_reference_definition_token(\n cls, output_html, next_token, transform_state\n ):\n _ = (transform_state, next_token)\n\n return output_html",
"def _put_link(self, url, height, txt):\n self.set_text_color(0, 0, 255)\n self._set_style(\"U\", True)\n self.write(height, txt, url)\n self._set_style(\"U\", False)\n self.set_text_color(0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> t = Textile() >>> t.span(r"hello %(bob)span strong and bold% goodbye") 'hello <span class="bob">span strong and bold</span> goodbye' | def span(self, text):
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__',
r'_', r'%', r'\+', r'~', r'\^')
pnct = ".,\"'?!;:("
for qtag in qtags:
pattern = re.compile(r"""
(?:^|(?<=[\s>%(pnct)s])|([\[{]))
(%(qtag)s)(?!%(qtag)s)
(%(c)s)
(?::\(([^)]+?)\))?
([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
([%(pnct)s]*)
%(qtag)s
(?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
""" % {'qtag': qtag, 'c': self.c, 'pnct': pnct,
'selfpnct': self.pnct}, re.X)
text = pattern.sub(self.fSpan, text)
return text | [
"def _span_word(tag: Callable, text: Callable, word: str, score: float,\n colormap: Callable):\n bg = colormap(score)\n style = \"color:\" + _get_rgb(bg) + \";font-weight:bold;background-color: \" \\\n \"#ffffff\"\n with tag(\"span\", style=style):\n text(\" \" + word + \" \")\n text(\" \")",
"def tags_to_spans(tag_sequence: List[str], tag_regex: Pattern) -> List[TypedStringSpan]:\n\n spans: Set[Tuple[str, Tuple[int, int]]] = set()\n span_start = 0\n span_end = 0\n active_tag = None\n for index, string_tag in enumerate(tag_sequence):\n # entity label should math {S,B,I,E}-Entity:Property template\n m = tag_regex.match(string_tag)\n if m is not None:\n span_tag = m.groupdict()['span_tag']\n entity = m.groupdict()['entity']\n if span_tag == 'B':\n # entering new span\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n active_tag = entity\n span_start = index\n span_end = index\n elif span_tag == 'S':\n # entity with one token\n if active_tag is not None:\n # add existing span\n spans.add((active_tag, (span_start, span_end)))\n # also add current one-token entity\n active_tag, span_start, span_end = entity, index, index\n spans.add((active_tag, (span_start, span_end)))\n active_tag = None\n elif span_tag == 'E':\n # end of span\n if active_tag == entity:\n # finish current span\n span_end = index\n spans.add((active_tag, (span_start, span_end)))\n else:\n # unexpected: just make span with one token\n if active_tag is not None:\n # add existing span\n spans.add((active_tag, (span_start, span_end)))\n # also add current entity\n active_tag, span_start, span_end = entity, index, index\n spans.add((active_tag, (span_start, span_end)))\n active_tag = None\n elif span_tag == 'I':\n if active_tag == entity:\n # inside span\n span_end += 1\n else:\n # unexpected: assume that this is begin of another span\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n active_tag = entity\n span_start = index\n span_end = index\n else:\n assert False, \"Unexpected case\"\n else:\n # The span has ended\n if active_tag is not None:\n spans.add((active_tag, (span_end, span_end)))\n active_tag = None\n\n # Last token might have been a part of a valid span.\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n\n return list(spans)",
"def span_to_whitespace(html_string, span):\n start = \"<span class=\\\"%s\\\">\" % span\n stop = \"</span>\"\n while True:\n try:\n s = html_string.index(start)\n f = html_string.index(stop, s) + 7\n except ValueError:\n # No more occurances of this span exist in the file.\n break\n\n strip = html_string[s:f]\n stripped = strip_tags(strip)\n chars = whitespacegen(get_spacing(stripped, \"times new roman\"))\n html_string = html_string.replace(strip, chars)\n return html_string",
"def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:\n stack: List[int] = [] # our curly paren stack\n i = 0\n while i < len(s):\n if s[i] == \"{\":\n # if we're in a string part of the f-string, ignore escaped curly braces\n if not stack and i + 1 < len(s) and s[i + 1] == \"{\":\n i += 2\n continue\n stack.append(i)\n i += 1\n continue\n\n if s[i] == \"}\":\n if not stack:\n i += 1\n continue\n j = stack.pop()\n # we've made it back out of the expression! yield the span\n if not stack:\n yield (j, i + 1)\n i += 1\n continue\n\n # if we're in an expression part of the f-string, fast forward through strings\n # note that backslashes are not legal in the expression portion of f-strings\n if stack:\n delim = None\n if s[i : i + 3] in (\"'''\", '\"\"\"'):\n delim = s[i : i + 3]\n elif s[i] in (\"'\", '\"'):\n delim = s[i]\n if delim:\n i += len(delim)\n while i < len(s) and s[i : i + len(delim)] != delim:\n i += 1\n i += len(delim)\n continue\n i += 1",
"def makespan(sol={}):\n return sol[\"makespan\"]",
"def lex_span(self, name=None):\n if name:\n return self.get(name+self._span_suffix)\n else:\n return self._lex_span",
"def make_text(text: str) -> SubAnnotation:\n return SubAnnotation(\"text\", text)",
"def highlight(text, phrase, hilighter='<strong class=\"hilight\">\\\\1</strong>'):\n if not phrase or not text:\n return text\n return re.sub(re.compile('(%s)' % re.escape(phrase)), hilighter, text, re.I)",
"def pos_to_span(pos: List[str]) -> Span:\n (start, path) = pos[0].split(\":\")\n (end, _) = pos[-1].split(\":\")\n return Span(int(start), int(end), path)",
"def bio_to_spans(text: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:\n pointer = 0\n starts = []\n for (\n i,\n t,\n ) in enumerate(tags):\n if t.startswith(\"B-\"):\n starts.append((i, pointer))\n pointer += len(text[i]) + 1\n\n spans = []\n for s_i, s_char in starts:\n label_str = tags[s_i][2:]\n e = 0\n e_char = len(text[s_i + e])\n while len(tags) > s_i + e + 1 and tags[s_i + e + 1].startswith(\"I-\"):\n e += 1\n e_char += 1 + len(text[s_i + e])\n spans.append((s_char, s_char + e_char, label_str))\n return spans",
"def text_before_after(txt: str, span: Tuple[int, int], nb_words: int) -> Tuple[str, int, int]:\n start, end = span\n before_txt = txt[:start]\n span_txt = txt[start:end]\n after_txt = txt[end:]\n\n before_txt = ' '.join(before_txt.split(' ')[-nb_words:])\n after_txt = ' '.join(after_txt.split(' ')[:nb_words])\n\n total_txt = ''.join([before_txt, span_txt, after_txt])\n return total_txt, len(before_txt), len(after_txt)",
"def helper_fn_sub(text, *params):\n return Sub(\n text.format(*[\"${\" + param.title + \"}\" for param in params]),\n {\n param.title: Ref(param)\n for param in params\n }\n )",
"def convert_span(span):\n p = span.getparent()\n\n style = span.get('style')\n if style is None:\n return\n\n builders = []\n if 'bold' in style:\n builders.append(builder.STRONG)\n if 'italic' in style:\n builders.append(builder.EM)\n\n if builders:\n children = []\n if span.text is not None:\n children.append(span.text)\n for c in span.getchildren():\n children.append(c)\n if c.tail is not None and c.tail.strip():\n # Have to wrap the tail text in a span tag,\n # or else it won't get added.\n children.append(builder.SPAN(c.tail))\n\n # Recursively apply the builders.\n el = builders[0](*children)\n for b in builders[1:]:\n el = b(el)\n\n # Replace the old element with the new one.\n p.replace(span, el)",
"def markup_text(text, pos=None, trg_pos=None, start_pos=None):\n positions_and_markers = []\n if pos is not None: positions_and_markers.append(( pos, '<|>'))\n if trg_pos is not None: positions_and_markers.append(( trg_pos, '<+>'))\n if start_pos is not None: positions_and_markers.append((start_pos, '<$>'))\n positions_and_markers.sort()\n\n m_text = \"\"\n m_pos = 0\n for position, marker in positions_and_markers:\n m_text += text[m_pos:position] + marker\n m_pos = position\n m_text += text[m_pos:]\n return m_text",
"def makeTextCell(table, span, widths, heights, use_headers):\n width = getTotalSpanWidth(span, widths)\n height = getTotalSpanHeight(span, heights)\n text_row = span[0][0]\n text_column = span[0][1]\n text = table[text_row][text_column]\n\n lines = text.split(\"\\n\")\n for i in range(len(lines)):\n width_difference = width - len(lines[i])\n lines[i] = lines[i] + lineBreak(width_difference, \" \")\n\n height_difference = height - len(lines)\n empty_lines = []\n for i in range(0, height_difference):\n empty_lines.append(lineBreak(width, \" \"))\n lines.extend(empty_lines)\n\n output = [\"+\" + lineBreak(width, \"-\") + \"+\"]\n for i in range(0, height):\n output.append(\"|\" + lines[i] + \"|\")\n\n if use_headers and span[0][0] == 0:\n symbol = \"=\"\n else:\n symbol = \"-\"\n output.append(\"+\" + lineBreak(width, symbol) + \"+\")\n\n text = \"\\n\".join(output)\n row_count = getSpanRowCount(span)\n column_count = getSpanColumnCount(span)\n cell = Cell(text, text_row, text_column, row_count, column_count)\n\n return cell",
"def styleText(self, start: Any, end: Any) -> None:",
"def to_spans(tags: List[str], tokens: List[str], probs: List[float]) -> List[Span]:\n assert len(tags) == len(tokens) == len(probs)\n spans = []\n idx = 0\n while idx < len(tags):\n if tags[idx].startswith(\"B-\"):\n label = tags[idx].split(\"-\")[1]\n span = Span(label, idx, idx, tokens[idx], probs[idx])\n idx += 1\n while idx < len(tags) and tags[idx].startswith(\"I-\"):\n span.end += 1\n span.text += tokens[idx]\n span.confidence += probs[idx]\n idx += 1\n span.confidence /= (span.end - span.start + 1)\n spans.append(span)\n else:\n idx += 1\n\n return spans",
"def markupSeq(seq, ulPosList, boldPosList, annots = {}):\n annotStarts = {}\n annotEnds = defaultdict(set)\n for (start, end), aDict in annots.iteritems():\n annotStarts[start] = aDict\n aDict[\"end\"] = end\n\n ulStarts = set([x[0] for x in ulPosList])\n ulEnds = set([x[1] for x in ulPosList])\n boldStarts = set([x[0] for x in boldPosList])\n boldEnds = set([x[1] for x in boldPosList])\n ret = []\n openAnnots = defaultdict(int) # current number of open spans, per cssString\n openTags = set()\n for i, nucl in enumerate(seq):\n if i in annotEnds:\n for tagStr in annotEnds[i]:\n if tagStr in openAnnots:\n openAnnots[tagStr]-=1\n if openAnnots[tagStr]==0:\n ret.append(\"</span>\")\n del openAnnots[tagStr]\n\n if i in annotStarts:\n aDict = annotStarts[i]\n cssParts = []\n for key, val in aDict[\"css\"].iteritems():\n cssParts.append(\"%s:%s\" % (key, val))\n cssStr = \";\".join(cssParts)\n tagStr = \"<span style='%s'>\" % cssStr\n if not tagStr in openAnnots:\n ret.append(tagStr)\n openAnnots[tagStr]+=1\n annotEnds[aDict[\"end\"]].add(tagStr)\n\n if i in ulStarts:\n ret.append(\"<u>\")\n openTags.add(\"u\")\n if i in ulEnds:\n ret.append(\"</u>\")\n if \"u\" in openTags:\n openTags.remove(\"u\")\n if i in boldStarts:\n ret.append(\"<b>\")\n openTags.add(\"b\")\n if i in boldEnds:\n ret.append(\"</b>\")\n if \"strong\" in openTags:\n openTags.remove(\"b\")\n ret.append(nucl)\n if (i+1) % 80==0:\n ret.append(\"<br>\")\n for tag in openTags:\n ret.append(\"</%s>\" % tag)\n return \"\".join(ret)\n #return seq[:start]+\"<u>\"+seq[start:end]+\"</u>\"+seq[end:]",
"def highlight_sentence(sent, target, tag_s=\"<span class='target-highlight'>\", tag_e=\"</span>\"):\n\n # case insensitive sub, replaces original casing\n # sent_ = re.sub(target, \"%s%s%s\" % (tag_s, target, tag_e), sent, flags=re.IGNORECASE)\n\n # Case insensitive detection, case-preserving substitution.\n sent_ = re.sub(r\"(?=%s)\" % target, tag_s, sent, flags=re.IGNORECASE)\n sent_ = re.sub(r\"(?<=%s)\" % target, tag_e, sent_, flags=re.IGNORECASE)\n return sent_"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply Textile to a block of text. | def textile(text, head_offset=0, html_type='xhtml', auto_link=False,
encoding=None, output=None):
return Textile(auto_link=auto_link).textile(text, head_offset=head_offset,
html_type=html_type) | [
"def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text",
"def highlightBlock(self, text):\r\n self.highlight_function(text)",
"def apply_to_fig_text(fig: mpl.figure.Figure, fn: Callable[[str], str]):\n for text in fig.findobj(match=plt.Text):\n text.set_text(fn(text.get_text()))",
"def text(self, new_text):\n if isinstance(new_text, str):\n self._text = list(new_text[row * self._columns:self._columns] for row in range(self._rows))\n self._update()\n elif isinstance(new_text, list):\n self._text = [''] * self._rows\n for i in range(min(self._rows, len(new_text))):\n self._text[i] = new_text[i][:self._columns]\n self._update()\n else:\n self.text = str(new_text)",
"def pushText(self, text):\n for line in text.splitlines():\n self.pushLine(line)",
"def write_on_canvas (self, text, line = 0):\n for ind, char in enumerate(text):\n if ind >= self.canvas.size().x:\n return\n self.canvas.__setitem__(coord.Coord(ind, line), char)",
"def get_text_blocks(self, method=Ocr.DILATION, params=None):\n if len(self.textBlocks) != 0:\n raise ValueError('self.textLines already achieved!')\n\n block_boxes = []\n blocks = []\n if method == Ocr.DILATION:\n block_boxes = self._get_text_block_by_dilation(params)\n else:\n raise ValueError('Invalid method in get_text_blocks: ' + str(method))\n\n for block_box in block_boxes:\n crop_img = self.img[block_box.y: block_box.y + block_box.h, block_box.x: block_box.x + block_box.w]\n crop_bin_img = self.bin_img[block_box.y: block_box.y + block_box.h, block_box.x: block_box.x + block_box.w]\n blocks.append(TextBlock(crop_img, crop_bin_img, block_box))\n\n if TESTING:\n text_image_copy = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)\n for l in block_boxes:\n cv2.rectangle(text_image_copy, (l.x, l.y), (l.x + l.w, l.y + l.h), (0, 255, 0), 1)\n cv2.imshow('test_blocks', text_image_copy)\n cv2.waitKey(0)\n\n # Assign text block inside:\n self.textBlocks = blocks",
"def format_contents(self, text, col_text=COL_TEXT, col_frame=COL_FRAME):\n\n contents = []\n lines = text.splitlines()\n\n for line in lines:\n if len(line) < 1:\n line = ' ' * (self.INNER)\n else:\n line = self.center_text(line, self.INNER)\n\n contents.append(f\"{col_frame}{FRAME['boxV']}{col_text}{line}{col_frame}{FRAME['boxV']}\")\n\n return contents",
"def wrap(self, text, width=None, **kwargs):\n width = self.width if width is None else width\n lines = []\n for line in text.splitlines():\n lines.extend(\n (_linewrap for _linewrap in SequenceTextWrapper(\n width=width, term=self, **kwargs).wrap(text))\n if line.strip() else (u'',))\n\n return lines",
"def process_text(self, live_post, message_text):\n\n text = message_text[\"text\"]\n len_text = len(text)\n entities = message_text[\"entities\"]\n\n # Process the entities in reversed order to be able to edit the text in place.\n for entity in reversed(entities):\n url = \"\"\n start = entity[\"offset\"]\n end = start + entity[\"length\"]\n\n if entity[\"type\"] == \"url\":\n url = description = text[start:end]\n\n if is_embed(url):\n # Check if this can match an embed block, if so no conversion happens.\n # It matches an embed block if it has a line in the text for itself.\n if end == len_text or text[end] == \"\\n\":\n if start == 0 or text[start - 1] == \"\\n\":\n # This is an embed block, skip to the next entity\n continue\n\n if entity[\"type\"] == \"text_link\":\n url = entity[\"url\"]\n description = text[start:end]\n\n if url:\n url = format_url(url)\n link = f'<a href=\"{url}\">{description}</a>'\n text = text[:start] + link + text[end:]\n\n return super().process_text(live_post=live_post, message_text=text)",
"def apply_cipher(func):\n text = args.in_file.read()\n changed_text = func(text)\n args.out_file.write(changed_text)",
"def _prep_text(self, text):\n self.text_image = self.font.render(text, True, self.text_color, self.button_color)\n self.text_image_rect = self.text_image.get_rect()\n self.text_image_rect.center = self.rect.center",
"def add_text(self, text, *args, **kwargs):\n # Pull down some kwargs.\n section_name = kwargs.pop('section', None)\n\n # Actually do the formatting.\n para, sp = self._preformat_text(text, *args, **kwargs)\n\n # Select the appropriate list to update\n if section_name is None:\n relevant_list = self.story\n else:\n relevant_list = self.sections[section_name]\n\n # Add the new content to list.\n relevant_list.append(para)\n relevant_list.append(sp)\n return",
"def textManip(visible=bool):\n pass",
"def add_text(self, text):\n if text.startswith(nl):\n text = text[1:]\n\n cls = ''\n prefix = ''\n is_code = False\n is_output = False\n interp_line = False\n after_blank = False # state 'after blank line'\n blank = False\n bullets = 0\n code_indent = 0\n output_indent = 0\n\n for line in text.split(nl):\n sline = line.strip()\n if sline.startswith('#'):\n continue\n\n # handle <ul> <li> ...\n if sline == '*':\n bullets = 1\n elif bullets == 1 and sline.startswith('*'):\n bullets = 2\n elif bullets == 2 and not sline.startswith('*'):\n bullets = 0\n self.commands.append( dict(cmd=\"text\", arg=\"</ul>\", indent=indent, cls=cls, prefix=prefix) )\n\n line = line.rstrip()\n blank = bool(not line)\n indent = len(line) - len(line.lstrip()) + 1\n\n if interp_typecmd and line.strip().startswith(\">>>\"):\n self.commands.append(dict(cmd=\"type\", arg=None))\n cls = \"code\"\n prefix = escape(\">>>\") + nbsp\n is_code = True\n interp_line = True\n # interp.prompt, space, 1 level of block indent\n code_indent = indent + 3+1\n output_indent = code_indent - 4\n\n # blank line; next line at code indent: still code; ELSE reset code\n # non-blank line; next line at code indent - 4: output\n\n # shorter indent than code should be means end of code block; ignore blank lines\n if not interp_line and indent < code_indent and not blank:\n is_code = False; cls = ''\n\n if not interp_line and after_blank and indent != code_indent and not blank:\n is_code = False; cls = ''\n\n if indent==output_indent and not interp_line:\n is_output = True; cls = \"output\"\n\n if is_output and indent < output_indent:\n is_output = False; cls = ''\n\n # ugly hack: force bigger indent on lines of code except for interp lines\n if is_code and not interp_line:\n indent += 4\n\n line = line.lstrip(\"> \")\n arg = escape(line)\n arg = arg.replace(space, nbsp).replace(\"--\", \"—\")\n if is_code or is_output:\n for name, fn, tag in images:\n arg = arg.replace(name+\"png\", fn)\n arg = arg.replace(fn, tag)\n\n if bullets == 1:\n self.commands.append( dict(cmd=\"text\", arg=\"<ul>\", indent=indent, cls=cls, prefix=prefix) )\n elif bullets == 2:\n arg = \"<li>%s</li>\" % arg.lstrip('*')\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n else:\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n\n prefix = ''\n interp_line = False\n after_blank = bool(not line.strip())",
"def texCutContext(*args, **kwargs):\n\n pass",
"def replace_text_in_cell(cell, text):\n for paragraph in cell.paragraphs[1:]:\n delete_paragraph(paragraph)\n cell.paragraphs[0].runs[0].text = text\n del cell.paragraphs[0].runs[1:]",
"def wrap_text(surface, text, size=20, color=WHITE_SMOKE, rect=(), aa=True):\n font = pygame.font.Font(STYLE, size)\n frame = pygame.Rect(rect)\n y = frame.top\n line_spacing = -2\n font_height = font.size(\"Tg\")[1]\n\n while text:\n i = 1\n # Determines if the row of text will be outside our area.\n if y + font_height > frame.bottom:\n break\n # Determine maximum width of line.\n while font.size(text[:i])[0] < frame.width and i < len(text):\n i += 1\n # If text is wrapped, then adjust the wrap to the last word.\n if i < len(text):\n i = text.rfind(\" \", 0, i) + 1\n # Blit.\n image = font.render(text[:i], aa, color).convert_alpha()\n surface.blit(image, (frame.left, y))\n y += font_height + line_spacing\n text = text[i:]\n return text",
"def styleText(self, start: Any, end: Any) -> None:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempts to determine an image's width and height, and returns a string suitable for use in an <img> tag, or an empty string in case of failure. Requires that PIL is installed. | def getimagesize(url):
try:
from PIL import ImageFile
import urllib2
except ImportError:
return ''
try:
p = ImageFile.Parser()
f = urllib2.urlopen(url)
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
if p.image:
return 'width="%i" height="%i"' % p.image.size
except (IOError, ValueError):
return '' | [
"def info(img):\n if hasattr(aq_base(img), 'meta_type') and img.meta_type == 'Image':\n ct, w, h = img.content_type, img.width, img.height\n # Zope Image object can be buggy (tiff)\n if isinstance(w, int) and isinstance(h, int) and ct.startswith('image/'):\n return ct, w, h\n\n if isinstance(img, File):\n img = ofsFileHandler(img)\n\n # now we are working either on python file-like object or on str\n if hasattr(img, 'seek'):\n img_header = img.read(30)\n img.seek(0)\n elif isinstance(img, str):\n img_header = img[:30]\n img = StringIO(img)\n else:\n raise ValueError(\"%r\" % img)\n\n format, width, height = zopeGetImageInfo(img_header)\n if (width < 0 or height < 0) and PIL_OK:\n try:\n img = PIL.Image.open(img)\n width, height = img.size\n if not format:\n format = PIL.Image.MIME.get(img.format,\n 'application/octet-stream')\n except IOError, e: # TODO a lot\n format = 'application/octet-stream'\n width, height = -1, -1\n\n if width < 0:\n width = None\n if height < 0:\n height = None\n\n return format, width, height",
"def get_format_image(filename):\n imagename = \"app/static/img/\" + str(filename)\n image = Picture.open(imagename)\n\n width,height=image.size \n\n if width >= height :\n return \"horizontal\"\n return \"vertical\"",
"def get_image_size(image):\n pil_image = PIL.Image.open(image)\n exif = _image_exif_data(pil_image)\n # check if image has exif data\n if (\n exif\n and \"Orientation\" in exif.keys()\n and (exif[\"Orientation\"] == 6 or exif[\"Orientation\"] == 8)\n ):\n # (height, width)\n return (pil_image.size[0], pil_image.size[1])\n\n return (pil_image.size[1], pil_image.size[0])",
"def imgsize(path):\n pict = Image.open(path) #Too easy. Thanks Fredrik!\n width, height = pict.size\n return width, height",
"def get_image_size(fname):\n try:\n with open(fname, \"rb\") as fhandle:\n head = fhandle.read(24)\n if len(head) != 24:\n return -1, -1\n if imghdr.what(fname) == \"png\":\n check = struct.unpack(\">i\", head[4:8])[0]\n if check != 0x0D0A1A0A:\n return\n width, height = struct.unpack(\">ii\", head[16:24])\n elif imghdr.what(fname) == \"gif\":\n width, height = struct.unpack(\"<HH\", head[6:10])\n elif imghdr.what(fname) == \"jpeg\":\n try:\n fhandle.seek(0) # Read 0xff next\n size = 2\n ftype = 0\n while not 0xC0 <= ftype <= 0xCF:\n fhandle.seek(size, 1)\n byte = fhandle.read(1)\n while ord(byte) == 0xFF:\n byte = fhandle.read(1)\n ftype = ord(byte)\n size = struct.unpack(\">H\", fhandle.read(2))[0] - 2\n # We are at a SOFn block\n fhandle.seek(1, 1) # Skip `precision' byte.\n height, width = struct.unpack(\">HH\", fhandle.read(4))\n except Exception: # IGNORE:W0703\n return\n else:\n return\n return width, height\n except EnvironmentError:\n return -1, -1",
"def get_image_size(self, fname):\n import struct\n import imghdr\n\n with open(fname, \"rb\") as fhandle:\n head = fhandle.read(24)\n if len(head) != 24:\n return\n if imghdr.what(fname) == \"png\":\n check = struct.unpack(\">i\", head[4:8])[0]\n if check != 0x0D0A1A0A:\n return\n width, height = struct.unpack(\">ii\", head[16:24])\n elif imghdr.what(fname) == \"gif\":\n width, height = struct.unpack(\"<HH\", head[6:10])\n elif imghdr.what(fname) == \"jpeg\":\n try:\n fhandle.seek(0) # Read 0xff next\n size = 2\n ftype = 0\n while not 0xC0 <= ftype <= 0xCF:\n fhandle.seek(size, 1)\n byte = fhandle.read(1)\n while ord(byte) == 0xFF:\n byte = fhandle.read(1)\n ftype = ord(byte)\n size = struct.unpack(\">H\", fhandle.read(2))[0] - 2\n # We are at a SOFn block\n fhandle.seek(1, 1) # Skip `precision' byte.\n height, width = struct.unpack(\">HH\", fhandle.read(4))\n except Exception: # IGNORE:W0703\n return\n else:\n return\n return width, height",
"def get_image_size(fname):\n with open(fname, 'rb') as fhandle:\n head = fhandle.read(24)\n if len(head) != 24:\n return\n if imghdr.what(fname) == 'png':\n check = struct.unpack('>i', head[4:8])[0]\n if check != 0x0d0a1a0a:\n return\n width, height = struct.unpack('>ii', head[16:24])\n elif imghdr.what(fname) == 'gif':\n width, height = struct.unpack('<HH', head[6:10])\n elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':\n try:\n # Read 0xff next\n fhandle.seek(0)\n size = 2\n ftype = 0\n while not 0xc0 <= ftype <= 0xcf:\n fhandle.seek(size, 1)\n byte = fhandle.read(1)\n while ord(byte) == 0xff:\n byte = fhandle.read(1)\n ftype = ord(byte)\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\n # We are at a SOFn block\n # Skip `precision' byte.\n fhandle.seek(1, 1)\n height, width = struct.unpack('>HH', fhandle.read(4))\n # IGNORE:W0703\n except Exception:\n return\n else:\n return\n return width, height",
"def get_size_for_img(src):\n path = src.replace(app.static_url_path, app.static_folder, 1)\n with Image.open(path) as im:\n return im.size",
"def _get_img_size(self, path: str):\n self._init_img_lib()\n if self._can_create_tmb():\n try:\n im = self._im.open(path)\n return f\"{im.size[0]}x{im.size[1]}\"\n except:\n pass\n\n return False",
"def get_image_size(path):\n img = Image.open(path)\n return img.size",
"def get_imgsize(self):\n return self.img_pil.size",
"def get_image_dim(topic_name=topicname('camera_info')):\n cam_info = rospy.wait_for_message(topic_name, CameraInfo, None)\n return (cam_info.width, cam_info.height)",
"def _get_size(self):\n if 'height' not in self or 'width' not in self:\n return None\n width = int(float(self.params['width'])) # allow float sizes (100.0), but truncate decimals\n height = int(float(self.params['height']))\n return (width, height)",
"def itkImageBase2_GetImageDimension() -> \"unsigned int\":\n return _itkImagePython.itkImageBase2_GetImageDimension()",
"def get_image_size(scale):\n image_width = int(config.camera_image['width']*float(scale))\n image_height = int(config.camera_image['height']*float(scale))\n return image_width, image_height",
"def test_width(self):\n img_div = pq_img(self.p, '[[Image:img.png|width=10]]')\n img = img_div('img')\n\n eq_('10', img.attr('width'))",
"def itkImageBase4_GetImageDimension() -> \"unsigned int\":\n return _itkImagePython.itkImageBase4_GetImageDimension()",
"def get_h(img):\n return len(img[1])",
"def get_w(img):\n return len(img[1][0])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a string of wiki markup and outputs a list of genshi Fragments (Elements and strings). This recursive function, with help from the WikiElement objects, does almost all the parsing. When no WikiElement objects are supplied, escapes are removed from ``text`` (unless remove_escapes is False) and it is returned as-is. This is the only way for recursion to stop. | def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True):
while wiki_elements:
# If the first supplied wiki_element is actually a list of elements, \
# search for all of them and match the closest one only.
if isinstance(wiki_elements[0],(list,tuple)):
x = None
mos = None
for element in wiki_elements[0]:
mo = element.regexp.search(text)
if mo:
if x is None or mo.start() < x:
x,wiki_element,mos = mo.start(),element,[mo]
else:
wiki_element = wiki_elements[0]
mos = [mo for mo in wiki_element.regexp.finditer(text)]
if mos:
frags = wiki_element._process(mos, text, wiki_elements, element_store, environ)
break
else:
wiki_elements = wiki_elements[1:]
# remove escape characters
else:
if remove_escapes:
text = esc_to_remove.sub('',text)
frags = fill_from_store(text,element_store)
return frags | [
"def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enumerate(elements):\n elements[seq] = format % (seq, txt)\n \n return elements",
"def parseText(self, text):\n results = []\n for tag in self.iterTags(text):\n results.append(self.tagToMarkdown(tag, \n self.cards))\n return '\\n\\n'.join(results)",
"def cleanup_mediawiki(text):\n # This tag was probably setup via SyntaxHighlight GeSHi for biopython.org's wiki\n #\n # <python>\n # import antigravity\n # </python>\n #\n # Replacing it with the following makes pandoc happy,\n #\n # <source lang=python>\n # import antigravity\n # </source>\n #\n # Conversion by pandoc to GitHub Flavour Markdown gives:\n #\n # ``` python\n # import antigravity\n # ```\n #\n # Which is much nicer.\n #\n # =================================================\n #\n # I may have been misled by old links, but right now I don't\n # think there is an easy way to get a table-of-contents with\n # (GitHub Flavoured) Markdown which works on GitHub pages.\n #\n # Meanwhile the MediaWiki __TOC__ etc get left in the .md\n # so I'm just going to remove them here.\n #\n new = []\n for line in text.split(\"\\n\"):\n # line is already unicode\n line = line.replace(\"\\xe2\\x80\\x8e\".decode(\"utf-8\"), \"\") # LEFT-TO-RIGHT\n if line.rstrip() == \"<python>\":\n line = \"<source lang=python>\"\n elif line.rstrip() == \"<perl>\":\n line = \"<source lang=perl>\"\n elif line.rstrip() in [\"</python>\", \"</perl>\"]:\n line = \"</source>\"\n undiv = un_div(line)\n if undiv in [\"__TOC__\", \"__FORCETOC__\", \"__NOTOC__\"]:\n continue\n elif undiv.startswith(\"[[Image:\") and undiv.endswith(\"]]\"):\n # Markdown image wrapped in a div does not render on Github Pages,\n # remove the div and any attempt at styling it (e.g. alignment)\n line = undiv\n new.append(line)\n return \"\\n\".join(new)",
"def inline_markup_to_html(astr):\n\n markup_to_elem = [(r'\\*', '<b>', '</b>'),\n (r'\\/', '<i>', '</i>'),\n (r'`', '<code>', '</code>')]\n\n def replace(matched):\n \"\"\" Take matched, add opening & closing tags, cgi escape if code \"\"\"\n\n matched_str = matched.groups()[0]\n if match == '`':\n matched_str = cgi.escape(matched_str)\n return opener + matched_str + closer\n\n for match, opener, closer in markup_to_elem:\n astr = wrap_match(match).sub(replace, astr)\n\n return fu.pipe(astr, [convert_markup_links, convert_raw_links])",
"def parse(self, text, strip_ansi=False):\r\n # parse everything to ansi first\r\n text = parse_ansi(text, strip_ansi=strip_ansi, xterm256=False)\r\n # convert all ansi to html\r\n result = re.sub(self.re_string, self.do_sub, text)\r\n result = self.re_color(result)\r\n result = self.re_bold(result)\r\n result = self.re_underline(result)\r\n result = self.remove_bells(result)\r\n result = self.convert_linebreaks(result)\r\n result = self.remove_backspaces(result)\r\n result = self.convert_urls(result)\r\n # clean out eventual ansi that was missed\r\n #result = parse_ansi(result, strip_ansi=True)\r\n\r\n return result",
"def clean_text_with_lxml(text, **kwargs):\n ###############\n# pdb.set_trace()\n ###############\n cleaner = LXMLCleaner(**kwargs)\n text = cleaner.clean_html(text)\n return text",
"def texts(self):\n for _, element in etree.iterparse(self.wiki_dump_file):\n if 'text' in element.tag and type(element.text) == str:\n yield self.tokenize_lemmatize(element.text)\n element.clear()\n else:\n element.clear()",
"def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text",
"def process_tag(text):\n if not isinstance(text, basestring):\n raise TypeError(\"string format required: got %r\" % type(text))\n\n try:\n text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n\n # replace `[text](http://...) or [text](https://...)`\n # with `text`\n text = re.sub(r\"\"\"\\[([^\\]]+)\\] # [text] parenthesis-captured group \\1\n \\(http\\S+\\) # (http://...)\"\"\", r\"\\1\", text, flags=re.X)\n\n # replace `http://... or https://...`\n # with ``\n text = re.sub(r\"https?://[^\\s\\\"\\']+\", \"\", text)\n\n # replace `(...) ... (...) ...`\n # with ``\n regex = r\"\"\"\\( # left (\n ([^\\(\\)]+) # captured text, parenthesis-captured group \\1\n \\) # right )\"\"\"\n text = re.sub(regex, \"\", text, flags=re.X)\n\n text = text.replace(\" \", \"\")\n text = text.replace(\">\", \"\")\n text = text.replace(\"<\", \"\")\n text = text.replace(\"&\", \"\")\n text = text.replace(\""\", \"\")\n text = text.replace(\"'\", \"\")\n text = text.replace(\"¢\", \"\")\n text = text.replace(\"£\", \"\")\n text = text.replace(\"¥\", \"\")\n text = text.replace(\"€\", \"\")\n text = text.replace(\"©\", \"\")\n text = text.replace(\"®\", \"\")\n text = re.sub(\"[*\\[\\]\\(\\)&%\\$#@\\^]\", \"\", text)\n text = re.sub(\"\\.{2,}\", \" \", text)\n except Exception as inst:\n print \"process_tag: %s\\ninput: %r\" % (inst, text)\n sys.exit(1)\n\n return text",
"def collapse_tags(self, my_etree):\n chars = []\n is_tag_start = False # True if inside tag\n tag_start_node = None # Pointer to current node. \n tag_start_char = '['\n tag_end_char = ']'\n\n # For every node with text\n for node,text in self._itertext(my_etree):\n # Go through each node's text character by character\n for i,c in enumerate(text):\n if c == tag_start_char: # Tag is starting!\n assert not is_tag_start # Better not already be inside a tag!\n is_tag_start = True \n tag_start_node = node \n chars = []\n elif c == tag_end_char: # Tag is ending\n assert is_tag_start # Better have seen a tag start!\n is_tag_start = False\n # If tag_start_node is the same as current node, then we don't need to do anything\n # But otherwise:\n if node != tag_start_node:\n # Tag started in different node, so move all the chars we've encountered since then\n # to the tag_start_node\n chars.append(c)\n tag_start_node.text += ''.join(chars)\n node.text = text[i+1:] # Remove characters from this node\n else:\n # Normal text character\n if is_tag_start and node != tag_start_node:\n # Need to save these chars to append to text in openbrac_node\n chars.append(c)\n\n # If we're here, that means we've consumed all the text in the current node.\n # Check if this node was part of a tag, yet did not start the tag\n if is_tag_start and node!= tag_start_node:\n # Need to remove this text completely as we've saved all of it inside chars for moving\n # into the start_node\n node.text = \"\"",
"def extract_xml_content_from_text(text_data):\n doc_tree = etree.fromstring(text_data, parser=parser)\n # List of strings to filter lines within the XML content by since we \n # only want usable data that can be parsed into TSV easily. This list \n # was generated based off of testing funds list for different tickers\n # and aggregating text that would likely invalidate the final TSV results.\n filter_vals = ['S REPORT SUMMARY',\n 'FORM 13F INFORMATION TABLE',\n 'SHARES/ SH/ PUT/ INVSTMT',\n 'Total ( '\n ]\n if doc_tree is not None:\n xml_results = []\n for node in doc_tree.iter():\n if str(node.tag).lower() == \"table\": # Get the table element\n vals = ''.join(node.itertext())\n # Filter out invalid lines found\n lines = vals.split('\\n')\n for line in lines:\n filter_found = False\n for each in filter_vals:\n if each in line:\n filter_found = True\n break\n if not filter_found:\n xml_results.append(line)\n return '\\n'.join(xml_results)\n else:\n return None",
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l",
"def fix_tags(input, removeEmptyTags = False, changeTagsNameCase = 0,\n unNestTags = None, check = False, verbose = False):\n\n if verbose:\n def assume(cond, msg):\n if not cond: print('tagsoupfixer: Parser bug:', msg)\n else:\n def assume(cond, msg): pass\n\n # Tags name comparator\n if changeTagsNameCase == 0: tagNameEqual = lambda a, b: a.lower() == b.lower()\n else: tagNameEqual = lambda a, b: a == b\n # Normalize tags to unNest\n if unNestTags:\n if changeTagsNameCase > 0: unNestTags = map(str.upper, unNestTags)\n else: unNestTags = map(str.lower, unNestTags)\n unNestTags = set(unNestTags)\n\n # Tokenize input\n tokens = _reTag.split(input)\n\n # Debugging\n #~ f = open('pat.txt', mode='w'); f.write(_patTag); f.close()\n #~ print(str(tokens).encode('cp1252'))\n\n # Initialize parser state\n # -- text output\n output = ''\n # -- tags stack; format: [(name, textBefore, markup)*]\n # example: [('div', '... blah <b>di dum</b> ...', '<div class=\"main\">'), ...]\n stack = []\n TAG_NAME = 0; TEXT_BEFORE = 1; MARKUP = 2; ATTRIBUTES = 3\n # -- contextual boolean states\n markupComplete = inTag = endTag = emptyElementTag = False\n # -- buffers for tag name and attributes\n curTagName = curTagAttributes = ''\n\n # http://www.w3.org/TR/2008/REC-xml-20081126/#sec-starttags\n for tok in tokens:\n\n # Simplistic XML parser (don't parse attributes)\n # Open StartTag / EmptyElementTag\n if tok == '<':\n assume(not inTag, 'Unexpected \"<\" inside markup.')\n inTag = True\n # Open EndTag\n elif tok == '</':\n assume(not inTag, 'Unexpected \"</\" inside markup.')\n inTag = endTag = True\n # Close StartTag / EndTag\n elif tok == '>':\n assume(inTag, 'Unexpected \">\" outside markup.')\n markupComplete = True\n # Close EmptyElementTag\n elif tok == '/>':\n assume(inTag, 'Unexpected \"/>\" outside markup.')\n markupComplete = emptyElementTag = True\n # Continue *Tag\n elif inTag:\n # Tag name\n if not curTagName:\n if changeTagsNameCase > 0: curTagName = tok.upper()\n elif changeTagsNameCase < 0: curTagName = tok.lower()\n else: curTagName = tok\n # Tag attributes\n else: curTagAttributes = tok\n # Text\n else:\n output += tok\n\n # We parsed a complete tag (StartTag, EndTag or EmptyElementTag)\n if markupComplete:\n # Quick'n'dirty hack to deal with BRs\n if tagNameEqual(curTagName, 'br'):\n emptyElementTag = True\n # Produce current tag\n curTag = \"<{}{}{}{}>\".format(\n '/' if endTag else '',\n curTagName,\n curTagAttributes,\n '/' if emptyElementTag else ''\n )\n # Process current tag\n # -- EmptyElementTag\n if emptyElementTag:\n # No text to process, output the markup\n output += curTag\n # -- StartTag\n elif not endTag:\n # Push current tag on the stack with current output as textBefore\n # and reset output.\n if unNestTags and curTagName in unNestTags:\n attrs = parse_attributes(curTagAttributes)\n # 20/01/2011: we HAVE to merge the parent's attributes if any\n if len(stack) and stack[-1][TAG_NAME] == curTagName and stack[-1][ATTRIBUTES] and attrs:\n tmp = stack[-1][ATTRIBUTES].copy()\n tmp.update(attrs)\n attrs = tmp\n tag = [curTagName, output, curTag, attrs]\n else: tag = [curTagName, output, curTag]\n output = ''\n stack.append(tag)\n # -- EndTag, try to match a StartTag\n else:\n if len(stack) == 0:\n # Drop this tag\n if verbose: print('tagsoupfixer: '+curTag+': End tag with no match, tag dropped.')\n elif tagNameEqual(stack[-1][TAG_NAME], curTagName):\n # Unnest of the poor (with the parent)\n if unNestTags and len(stack) > 1 and curTagName in unNestTags and stack[-2][TAG_NAME] == 
curTagName:\n attrs = stack[-1][ATTRIBUTES]\n # 20/01/2011: already done at StartTag\n #attrs.update(stack[-2][ATTRIBUTES])\n attrs = build_attributes(attrs)\n stack[-1][MARKUP] = '</' + curTagName + '>' + '<' + curTagName + attrs + '>'\n #if verbose: print('tagsoupfixer: '+curTag+': rewrote parent: '+stack[-1][MARKUP])\n curTag += stack[-2][MARKUP]\n # Properly nested tags\n if not removeEmptyTags or len(output.strip()) > 0:\n # Tag is not empty / We don't have to strip empty tags\n output = stack[-1][TEXT_BEFORE] + stack[-1][MARKUP] + output + curTag\n else:\n # Tag is empty and we have to strip its nasty markup\n output = stack[-1][TEXT_BEFORE] + output\n if verbose: print('tagsoupfixer: '+curTag+': Removed empty tag.')\n stack.pop()\n elif len(stack) > 1:\n # Detect improperly nested tags\n overlap = None\n for i in reversed(range(len(stack)-1)):\n # Overlapping tags !!\n if tagNameEqual(stack[i][TAG_NAME], curTagName):\n overlap = i; break\n if overlap is not None:\n if verbose:\n print('tagsoupfixer: ['+curTagName+','+stack[overlap-1][TAG_NAME]+']: Overlapping tags.')\n # Fix overlapping by properly closing the tag\n tag = stack[overlap]\n for i in range(overlap+1, len(stack)):\n stack[i][MARKUP] = '</'+tag[TAG_NAME]+'>'+stack[i][MARKUP]+tag[MARKUP]\n output += curTag\n stack[overlap+1][TEXT_BEFORE] = tag[TEXT_BEFORE] + tag[MARKUP] + stack[overlap+1][TEXT_BEFORE]\n stack.pop(overlap)\n # Reset tag parser state\n markupComplete = inTag = endTag = emptyElementTag = False\n curTagName = curTagAttributes = ''\n\n # Output remaining elements on the stack\n for i in reversed(range(len(stack))):\n output = stack[i][TEXT_BEFORE] + stack[i][MARKUP] + output\n\n # Cludgy hack to fix empty tags when unnesting\n if unNestTags and removeEmptyTags:\n output = fix_tags(output, removeEmptyTags=True)\n\n if check:\n oh = strip_tags(input)\n my = strip_tags(output)\n if oh != my:\n print('tagsoupfixer: Sorry, I stripped out some text, aaaaaaargh.\\n', oh, '\\n', my)\n\n return output",
"def render_markup(text):\n if flaskbb_config['MARKUP_TYPE'] == 'bbcode':\n return render_bbcode(text)\n elif flaskbb_config['MARKUP_TYPE'] == 'markdown':\n return render_markdown(text, extras=['tables'])\n return text",
"def iter_text(el):\n if el.text is not None:\n yield (el.text,el)\n\n for child in el:\n for part in iter_text(child):\n yield part\n\n if el.tail is not None:\n yield (el.tail,el)",
"def fragments_fromstring(html, no_leading_text=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n if parser is None:\n parser = html_parser\n\n children = parser.parseFragment(html, 'div', useChardet=guess_charset)\n if children and isinstance(children[0], _strings):\n if no_leading_text:\n if children[0].strip():\n raise etree.ParserError('There is leading text: %r' %\n children[0])\n del children[0]\n return children",
"def generate_parse_trees():\n result = []\n for s in ALL_SENTENCES:\n result.append('\"\"\"' + str(get_chunks(s)) + '\"\"\", ')\n return result",
"def _replaceNestedElementText(self, names, text, escapeAmpLtGt=False):\n openingTagsPattern = r\"\"\n closingTagsPattern = r\"\"\n firstLevel = True\n while names:\n nextName = names.pop(0)\n if not firstLevel:\n openingTagsPattern = openingTagsPattern + r\".*?\"\n closingTagsPattern = r\".*?\" + closingTagsPattern\n openingTagsPattern = openingTagsPattern + r\"<\" + nextName + r\"(?:\\s*|\\s+.*?)>\"\n closingTagsPattern = r\"</\" + nextName + r\"\\s*>\" + closingTagsPattern\n firstLevel = False\n patternString = r\"(?s)(\" + openingTagsPattern + r\")(.*?)(\" + closingTagsPattern + r\")\"\n if text is not None:\n if escapeAmpLtGt:\n text = escape(text)\n #replacementString = r\"\\g<1>\" + text + r\"\\g<3>\"\n replacementFunction = lambda match: match.group(1) + text + match.group(3)\n else:\n #replacementString = r\"\"\n replacementFunction = lambda match: r\"\"\n self._string = re.sub(patternString, replacementFunction, self._string)\n return self",
"def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This re_string is for finding generic block elements like lists (ordered, unordered, and definition) that start with a single token. | def re_string(self):
leading_whitespace = r'^([ \t]*'
only_one_token = re.escape(self.token)+ '(?!' + re.escape(self.token) + ')'
rest_of_list = r'.*?(?:\n|\Z))'
only_one_stop_token = '([' + re.escape(self.stop_tokens) + r'])(?!\3)'
look_ahead = '(?=([ \t]*' + only_one_stop_token + '|$))'
return leading_whitespace + only_one_token + rest_of_list + \
look_ahead | [
"def begin_token(self) -> str:",
"def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))",
"def test_parse_token_single_element_name(self):\n\n # Parse the token.\n list = parse_token('G')\n\n # Check the list elements.\n self.assertEqual(len(list), 1)\n self.assertEqual(list[0], 'G')",
"def find_iters(template_string):\n\n # {{% match + any number of spaces + whatever + any number of spaces + %}}\n pattern = re.compile('{{%(.*?)\\s+.*\\s+%}}')\n tags = re.findall(pattern, template_string)\n \n return tags",
"def get_definition(text, startswith):\n return [\n re.split('[ ()]', line.strip())[1]\n for line in [line.strip() for line in text.splitlines()]\n if line.startswith(startswith)\n ]",
"def word_start_finder(ignore_subword=False, is_joiner=False) -> Callable:\n if not ignore_subword:\n if is_joiner:\n return _subword_start_by_joiner\n else:\n return _subword_start_by_spacer\n else:\n return lambda tokens: [True] * len(tokens)",
"def _subword_start_by_spacer(tokens: Sequence[str]) -> Sequence[bool]:\n flag = [x.startswith(SubwordMarker.SPACER) for x in tokens]\n flag[0] = True\n return flag",
"def first_character(self, from_end: bool = False) -> \"Regex\":",
"def get_lexer(\n tag_start_string: str = r\"{%\",\n tag_end_string: str = r\"%}\",\n statement_start_string: str = r\"{{\",\n statement_end_string: str = r\"}}\",\n) -> Callable[[str], Iterator[Token]]:\n rules = compile_liquid_rules(\n tag_start_string,\n tag_end_string,\n statement_start_string,\n statement_end_string,\n )\n return partial(_tokenize_template, rules=rules)",
"def first_strings():\n pass",
"def _find_block_starts(self):\n node_headers = []\n element_headers = []\n element_set_headers = []\n for i, line in enumerate(self._abq_file):\n node_header_match = self._node_header_pattern.match(line)\n element_header_match = self._element_header_pattern.match(line)\n elementset_header_match = self._elementset_header_pattern.match(line)\n if node_header_match:\n node_headers.append(i)\n elif element_header_match:\n element_headers.append(i)\n elif elementset_header_match:\n element_set_headers.append(i)\n self._node_block_start = node_headers[0]\n self._element_block_start = element_headers[0]\n self._elementset_block_start = element_set_headers[0]",
"def match_first(cls, pattern : str) -> Optional[\"ConduitBlock\"]:\n return cls._blocks.match_first(pattern)",
"def __find_word_start(self, iterator):\n pattern = re.compile(\"[a-z|A-Z|0-9|<|>|/]\")\n symbols = ('!', '@', '#', '$', '%', '&', '*',\n '(', ')', '-' ,'+', '.', ',', '~', '^')\n iterator.backward_char()\n if iterator.get_char() in symbols:\n return\n while True:\n char = iterator.get_char()\n if not(re.match(pattern, char)):\n iterator.forward_char()\n return\n elif iterator.starts_line():\n return\n else:\n iterator.backward_char()",
"def tokenize(self):",
"def test_starttag_simple():\n inst = _encoder.TextEncoder('foo')\n\n result = inst.starttag(b'xx', iter([]), False)\n assert result == b'[xx]'\n\n result = inst.starttag(b'yy', iter([(b'aa', None), (b'bb', b'cc')]),\n False)\n assert result == b'[yy aa bb=cc]'",
"def isStartOfBlock(self, line):\n line = line.strip()\n if line.startswith(\"----\"):\n return True\n\n if line.startswith(\"=\"):\n return True\n if line.startswith(\"[[\") and line.endswith(\"]]\"):\n return True\n\n return False",
"def lexer(it):\n tokens = []\n token = \"\"\n for c in it:\n if c == \"{\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n elif c == \"}\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n else:\n token += c\n if token:\n tokens.append(token)\n return tokens",
"def test_starts_at(line):\n return TEST_START_RE.match(line)",
"def str_const_type(self):\n return bool(re.fullmatch(\"\\\".*?\\\"\", self.current_token)) # \"....\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set `self.reader` by name. | def set_reader(self, reader_name, parser, parser_name):
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser | [
"def set_reader(self, fd, on_readable):\n raise NotImplementedError",
"def setName(self, name):\n self.content = name",
"def setScanner(self, scannerName):\n self.scanner = self.sourceManager.OpenSource(scannerName)",
"def set_name(self, name):\n if self._status == \"lock\":\n raise QiitaAnalysisError(\"analysis can't be changed. It's locked\")\n self._name = name",
"def set_scene_by_name(self, name):\n id = self.extract_scene_id_by_name(name)\n if id:\n self.set_scene(id)",
"def setName(self, new_name):\n self.__NAME = new_name\n self.__file = self.deepCopy(self.__NAME, self.__DIR).__file",
"def __init__(self, file_name, encoding):\n self.reader = codecs.getreader(encoding)(file_name)",
"def setRawName(*args, **kwargs):\n \n pass",
"def set_name(self,name):\r\n if not len(name):\r\n raise Exception(\"The specified morphism name is empty\")\r\n self.name = name",
"def set_name(self, name):\n self.recipe_proto[\"name\"] = name",
"def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name",
"def set_dataset_name(self, dataset_name):\n self.name = dataset_name",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def from_name(self, from_name):\n self._from_name = from_name",
"def initread(self, idfname):\n with open(idfname, \"r\") as _:\n # raise nonexistent file error early if idfname doesn't exist\n pass\n iddfhandle = StringIO(iddcurrent.iddtxt)\n if self.getiddname() == None:\n self.setiddname(iddfhandle)\n self.idfname = idfname\n try:\n self.idfabsname = os.path.abspath(self.idfname)\n except TypeError as e:\n pass # it is file handle. the code can handle that\n self.read()",
"def read(self, read: SmartSsdReadLookahead):\n\n self._read = read",
"def get_reader(self):\n raise NotImplementedError()",
"def set_name(self, name):\n self.pattern.name = name",
"def set_name(self, new_name):\n self.name = new_name",
"def open_file_in_reader(file_name):\n global user_configurations\n command = user_configurations['READER'].replace('%f', '\"%s\"' % file_name)\n try:\n subprocess.call([command], shell=True)\n except OSError:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set `self.writer` by name. | def set_writer(self, writer_name):
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class() | [
"def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]",
"def set_writer(self, fd, on_writable):\n raise NotImplementedError",
"def set_writer_position(self, name, timestamp):\n self.cursor.execute('REPLACE INTO gauged_writer_history '\n '(id, timestamp) VALUES (?, ?)',\n (name, timestamp))",
"def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def set_name(self, name):\n if self._status == \"lock\":\n raise QiitaAnalysisError(\"analysis can't be changed. It's locked\")\n self._name = name",
"def setName(self, name):\n self.content = name",
"def set_output_name(name):\n settings[\"outputName\"] = name",
"def set_name(self, room_name):\n self.name = room_name",
"def setName(self, new_name):\n self.__NAME = new_name\n self.__file = self.deepCopy(self.__NAME, self.__DIR).__file",
"def add_writer(self, writer: IEvaluatorWriter):\n\n self.writers.append(writer)\n self.is_header_written = False # re-write header",
"def set_player_name(self, player):\r\n self.__name = player",
"def change_name(self, name):\n self._player_name = name",
"def set_name(self, name):\n self.pattern.name = name",
"def set_name(self, new_name):\n self.name = new_name",
"def _setName(self,name,value):\n\n if name in SDS['COP']:\n self.COP.__dict__[name] = value\n else:\n self.__dict__[name] = value",
"def set_name(self,name):\r\n if not len(name):\r\n raise Exception(\"The specified morphism name is empty\")\r\n self.name = name",
"def set_player_name(name):\n\n player[\"player_name\"] = name",
"def setName(self, name):\r\n self._thread.setName(name)",
"def write_filename(self, record, pattern, filename):\n\n # Are we currently writing to this file? If not, open/create it.\n if not filename == self.current_filename.get(pattern, None):\n logging.info('LogfileWriter opening new file: %s', filename)\n self.current_filename[pattern] = filename\n self.writer[pattern] = FileWriter(filename=filename,\n header=self.header,\n header_file=self.header_file,\n flush=self.flush)\n # Now, if our logic is correct, should *always* have a matching_writer\n matching_writer = self.writer.get(pattern)\n matching_writer.write(record)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set and return default settings (overrides in `defaults` dict). Set components first (`self.set_reader` & `self.set_writer`). Explicitly setting `self.settings` disables command line option processing from `self.publish()`. | def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings | [
"def __init__(self, tool_name=None, file_name=None, path=None, defaults={},\n logger=None):\n self._settings = {}\n self._logger = logger or (lambda x: None)\n SettingsIO.__init__(self, tool_name, file_name, path)\n self.update(defaults, save=False)\n self._defaults = copy(self._settings)\n self.load()",
"def default_settings() -> Settings:\n return Settings()",
"def make_default_settings(self):\n if not os.path.exists(self.settings_dir):\n os.makedirs(self.settings_dir)\n # end\n self._set_settings(self.settings_default)",
"def default(**kwargs):\n for name, default_val in kwargs.items():\n try:\n getattr(SETTINGS, name)\n except AttributeError:\n setattr(SETTINGS, name, default_val)",
"def _load_settings(self):\n with open(DEFAULT_PATH, 'rb') as file_:\n default_settings = yaml.load(file_)\n LOG.info('Loaded defaults: %s', default_settings)\n\n user_settings = {}\n if os.path.isfile(USERSETTINGS_PATH) and os.access(USERSETTINGS_PATH, os.R_OK):\n try:\n with open(USERSETTINGS_PATH, 'rb') as file_:\n user_settings = yaml.load(file_)\n LOG.info('Loaded user settings %s from path %s', user_settings,\n USERSETTINGS_PATH)\n except Exception:\n LOG.exception('Exception during loading of user settings')\n # FIXME check user_settings keys\n else:\n LOG.info('No user settings found, file %s does not exist or is not readable',\n USERSETTINGS_PATH)\n\n self.__class__.settings = ChainMap(user_settings, default_settings)\n self.__class__.settings_names = list(self.settings.keys())",
"def restore_defaults(self):\n\n # Set default values for each of the pysat provided values. Set\n # all but the last parameter directly. Set last using __setitem__\n # to trigger a file write.\n keys = list(self.defaults.keys())\n for key in keys:\n self.data[key] = self.defaults[key]\n\n # Trigger a file write\n self.store()\n\n return",
"def configure(**kwargs):\n global settings\n settings = Settings(**kwargs)",
"def _get_settings(pargs):\n settings = DEFAULT_SETTINGS.copy()\n filepaths = DEFAULT_SETTINGS_FILEPATHS[:]\n if pargs.settings:\n filepaths.append(pargs.settings)\n for filepath in filepaths:\n try:\n settings = read_settings(filepath, settings=settings)\n _verbose(pargs, \"Settings read from file\", filepath)\n except IOError:\n _verbose(pargs, \"Warning: no settings file\", filepath)\n except (ValueError, TypeError):\n sys.exit(\"Error: bad settings file\", filepath)\n for key in [\"SERVER\", \"DATABASE\", \"USERNAME\", \"PASSWORD\"]:\n try:\n settings[key] = os.environ[key]\n except KeyError:\n pass\n if pargs.server:\n settings[\"SERVER\"] = pargs.server\n if pargs.database:\n settings[\"DATABASE\"] = pargs.database\n if pargs.username:\n settings[\"USERNAME\"] = pargs.username\n if pargs.password:\n settings[\"PASSWORD\"] = pargs.password\n if pargs.verbose:\n s = dict()\n for key in [\"SERVER\", \"DATABASE\", \"USERNAME\"]:\n s[key] = settings[key]\n if settings[\"PASSWORD\"] is None:\n s[\"PASSWORD\"] = None\n else:\n s[\"PASSWORD\"] = \"***\"\n print(\"Settings:\", _jsons(s, indent=2))\n return settings",
"def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]",
"def _loadSettings(self):\n self.logger.debug(\"Loading default settings\")\n\n # Set the audio devices\n default_input_device = int(self.measurement_settings[\"input device\"])\n input_index = self.inputDevices.findData(default_input_device)\n self.inputDevices.setCurrentIndex(input_index)\n\n default_output_device = int(self.measurement_settings[\"output device\"])\n output_index = self.outputDevices.findData(default_output_device)\n self.outputDevices.setCurrentIndex(output_index)\n\n # Set Excitation Signal\n default_signal = str(self.measurement_settings[\"signal type\"])\n signal_index = self.signalType.findText(default_signal)\n self.signalType.setCurrentIndex(signal_index)\n\n # Set swept sine signal settings\n upper_frequency = int(self.measurement_settings[\"upper frequency\"])\n self.upperFreq.setValue(upper_frequency)\n\n # Signal length is in seconds, convert to ms\n signal_length = float(self.measurement_settings[\"signal length\"])\n signal_length *= 1000\n self.signalLength.setValue(int(signal_length))\n\n # Set MLS / IRS settings\n num_taps = int(self.measurement_settings[\"mls taps\"])\n print self.measurement_settings\n self.numTaps.setValue(num_taps)\n\n num_bursts = int(self.measurement_settings[\"mls reps\"])\n self.numBursts.setValue(num_bursts)\n\n signal_reps = int(self.measurement_settings[\"signal reps\"])\n self.numRepititions.setValue(signal_reps)\n\n # Set filter options\n lpf_cutoff = int(self.measurement_settings[\"lpf cutoff\"])\n lpf_order = int(self.measurement_settings[\"lpf order\"])\n hpf_cutoff = int(self.measurement_settings[\"hpf cutoff\"])\n hpf_order = int(self.measurement_settings[\"hpf order\"])\n\n # Band Pass Filter\n self.freqLow.setValue(lpf_cutoff)\n self.orderLow.setValue(lpf_order)\n self.freqHigh.setValue(hpf_cutoff)\n self.orderHigh.setValue(hpf_order)\n\n # Low Pass Filter\n self.freqLPF.setValue(lpf_cutoff)\n self.orderLPF.setValue(lpf_order)\n\n # High Pass Filter\n self.freqHPF.setValue(hpf_cutoff)\n self.orderHPF.setValue(hpf_order)\n\n # Set the correct drop-down options\n lpf_enabled = int(self.measurement_settings[\"lpf enabled\"])\n hpf_enabled = int(self.measurement_settings[\"hpf enabled\"])\n\n if lpf_enabled == 1 and hpf_enabled == 1:\n # Band pass filter\n bpf_index = self.filterType.findText(\"Bandpass Filter\")\n self.filterType.setCurrentIndex(bpf_index)\n elif lpf_enabled == 1:\n # Low pass filter\n lpf_index = self.filterType.findText(\"Low Pass Filter\")\n self.filterType.setCurrentIndex(lpf_index)\n elif hpf_enabled == 1:\n # High pass filter\n hpf_index = self.filterType.findText(\"High Pass Filter\")\n self.filterType.setCurrentIndex(hpf_index)\n else:\n # Disabled\n index = self.filterType.findText(\"Disabled\")\n self.filterType.setCurrentIndex(index)\n\n # Set window options\n window_length = int(self.measurement_settings[\"window length\"])\n taper_length = int(self.measurement_settings[\"taper length\"])\n\n self.winLength.setValue(window_length)\n self.taperLength.setValue(taper_length)",
"def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }",
"def init_settings():\n logger = logging.getLogger('app.utils')\n default_filepath = os.path.abspath(os.path.join(config.BASE_DIR, '..', config.DEFAULT_SETTINGS_FILENAME))\n filepath = os.path.abspath(os.path.join(config.BASE_DIR, '..', config.SETTINGS_FILENAME))\n\n if os.path.isfile(filepath) is True:\n logger.debug('loading existing settings file')\n with open(filepath) as f:\n settings = json.load(f)\n else:\n logger.debug('creating new settings file')\n with open(default_filepath) as f:\n settings = json.load(f)\n with open(filepath, 'w') as f:\n json.dump(settings, f)\n\n return settings",
"def apply_defaults(self):\n # reset window geometry\n self.parent.update_idletasks()\n w = self.parent.winfo_screenwidth()\n h = self.parent.winfo_screenheight()\n rootsize = (self.DEFAULT_GUI_MIN_WIDTH, self.DEFAULT_GUI_MIN_HEIGHT)\n x = w / 2 - rootsize[0] / 2\n y = h / 2 - rootsize[1] / 2\n self.prefs[\"window_geometry\"] = \"%dx%d+%d+%d\" % (rootsize + (x, y))\n # reset tags\n self.prefs[\"tags\"] = self.DEFAULT_TAGS\n self.prefs[\"mo_class\"] = self.DEFAULT_MO_CLASS\n self.prefs[\"nomo_class\"] = self.DEFAULT_NOMO_CLASS\n self.prefs[\"id_regex\"] = self.DEFAULT_ID_REGEX\n self.prefs[\"id_format\"] = self.DEFAULT_ID_FORMAT\n self.prefs[\"existing_ids_only\"] = self.DEFAULT_EXISTING_IDS_ONLY\n self.prefs[\"save_directory\"] = self.DEFAULT_SAVE_DIRECTORY",
"def setDefaults():\n user_defaults = NSUserDefaults.standardUserDefaults()\n pref_dict = {\n Preferences.ordering_key: Preferences.ordering_default,\n Preferences.update_interval_key: Preferences.update_interval_default,\n Preferences.subreddit_key: Preferences.subreddit_default,\n Preferences.limit_key: Preferences.limit_default\n\n }\n nspref_dict = NSDictionary.dictionaryWithDictionary_(pref_dict)\n user_defaults.registerDefaults_(nspref_dict)",
"def make_default_settings():\n default_settings = {\n 'height': 24, \n 'width': 24, \n 'max_box_height': 7,\n 'max_box_width': 7,\n 'max_container_height': 5,\n 'max_container_width': 9,\n 'default_num_samples': 20,\n 'fixed_floor': False,\n 'floor_height': 3,\n 'infinite_position_domain': False,\n 'frame': False, # indicates presence of PixelWorld frame\n 'frame_color': PURPLE,\n 'padding': 0, # padding around outside edge\n 'colors': COLORS.values(), \n 'check_overlap': True,\n 'allow_pushable': False, # Whether to allow objects the option of being pushable\n 'allow_targets': False, # Whether to allow use of the is_target attribute\n 'add_self': True,\n 'make_self_red_pixel': True,\n 'self_color_is_unique': False,\n 'objects_are_white': False,\n 'objects_are_small_blobs': False,\n 'self_grips': False, # True if the self can grip/ungrip other objects\n }\n return default_settings",
"def initialize_plugin_settings(args: argparse.Namespace) -> None:\n # This is a sanity check to ensure we don't override any current settings.\n if get_settings().plugins:\n return\n\n # We initialize the `settings` variable here, but we can't save it to the global object\n # yet, since the contextmanager will revert those changes. As such, we quit the context\n # first, then set it to the global namespace.\n with default_settings() as settings:\n pass\n\n get_settings().set(settings)",
"def test_cli_defaults(self):\n argv = ['neurotic']\n args = neurotic.parse_args(argv)\n app = mkQApp()\n win = neurotic.win_from_args(args)\n\n # should match factory defaults because setUp() explicitly reset the\n # defaults to the factory defaults\n factory_defaults = neurotic._global_config_factory_defaults['defaults']\n self.assertEqual(win.do_toggle_debug_logging.isChecked(),\n factory_defaults['debug'],\n 'debug setting has unexpected default')\n self.assertEqual(win.lazy, factory_defaults['lazy'],\n 'lazy setting has unexpected default')\n self.assertEqual(win.support_increased_line_width,\n factory_defaults['thick_traces'],\n 'thick traces setting has unexpected default')\n self.assertEqual(win.show_datetime, factory_defaults['show_datetime'],\n 'show_datetime has unexpected default')\n self.assertEqual(win.ui_scale, factory_defaults['ui_scale'],\n 'ui_scale has unexpected default')\n self.assertEqual(win.theme, factory_defaults['theme'],\n 'theme has unexpected default')\n self.assertEqual(win.metadata_selector.file, self.example_file,\n 'file has unexpected default')\n self.assertEqual(win.metadata_selector._selection,\n self.example_dataset,\n 'dataset has unexpected default')",
"def correct_settings(self):\n # Create a new collection of settings, based on the defaults\n new_settings = ConfigParser.RawConfigParser()\n new_settings.read(self.default_filename)\n\n # Loop through all the current settings and add values to the new settings as needed.\n for section in self.config.sections():\n # Check if section exists in new settings. If not, create it.\n if section not in new_settings.sections():\n new_settings.add_section(section)\n\n # Check each existing option in the section and write it to the new settings.\n for option in self.config.options(section):\n value = self.config.get(section, option)\n new_settings.set(section, option, value)\n\n # Write our new collection of settings to the user's custom settings.ini file.\n with open(self.settings_file, 'w') as my_new_settings:\n new_settings.write(my_new_settings)",
"def configure_settings(localSettings=None, windmill_settings={}):\n\n \n globalSettings, localSettings = _load(localSettings=localSettings)\n\n mergeSettings(windmill_settings, globalSettings, localSettings)\n\n return windmill_settings"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`'s output. | def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit as error:
exit = 1
exit_status = error.code
except Exception as error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = True
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output | [
"def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(\r\n argv, usage, description, settings_spec, config_section,\r\n **(settings_overrides or {}))\r\n self.set_io()\r\n self.document = self.reader.read(self.source, self.parser,\r\n self.settings)\r\n self.apply_transforms()\r\n output = self.writer.write(self.document, self.destination)\r\n self.writer.assemble_parts()\r\n except SystemExit, error:\r\n exit = 1\r\n exit_status = error.code\r\n except Exception, error:\r\n if not self.settings: # exception too early to report nicely\r\n raise\r\n if self.settings.traceback: # Propagate exceptions?\r\n self.debugging_dumps()\r\n raise\r\n self.report_Exception(error)\r\n exit = True\r\n exit_status = 1\r\n self.debugging_dumps()\r\n if (enable_exit_status and self.document\r\n and (self.document.reporter.max_level\r\n >= self.settings.exit_status_level)):\r\n sys.exit(self.document.reporter.max_level + 10)\r\n elif exit:\r\n sys.exit(exit_status)\r\n return output",
"def main(cmdlineargs=None, trimmed_outfile=sys.stdout):\n\tparser = get_option_parser()\n\tif cmdlineargs is None:\n\t\tcmdlineargs = sys.argv[1:]\n\toptions, args = parser.parse_args(args=cmdlineargs)\n\n\tif len(args) == 0:\n\t\tparser.error(\"At least one parameter needed: name of a FASTA or FASTQ file.\")\n\telif len(args) > 2:\n\t\tparser.error(\"Too many parameters.\")\n\n\tinput_filename = args[0]\n\tquality_filename = None\n\tpe_filename = None\n\tif len(args) == 2:\n\t\tif args[1].endswith('.qual'):\n\t\t\tquality_filename = args[1]\n\t\telse:\n\t\t\tpe_filename = args[1]\n\t\t\tif not options.paired_output:\n\t\t\t\tparser.error('you must use --paired-output when trimming paired-end reads')\n\n\tif len(args) == 1 and options.paired_output:\n\t\tparser.error(\"You specified a --paired-output file, but gave only one input file.\")\n\tif input_filename.endswith('.qual') and quality_filename.endswith('fasta'):\n\t\tparser.error(\"FASTA and QUAL file given, but the FASTA file must be first.\")\n\n\tif options.format is not None and options.format.lower() not in ['fasta', 'fastq', 'sra-fastq']:\n\t\tparser.error(\"The input file format must be either 'fasta', 'fastq' or 'sra-fastq' (not '{0}').\".format(options.format))\n\n\t# TODO should this really be an error?\n\tif options.format is not None and quality_filename is not None:\n\t\tparser.error(\"If a pair of .fasta and .qual files is given, the -f/--format parameter cannot be used.\")\n\n\t# default output files (overwritten below)\n\ttoo_short_outfile = None # too short reads go here\n\ttoo_long_outfile = None # too long reads go here\n\tpe_outfile = None\n\tif options.output is not None:\n\t\ttrimmed_outfile = xopen(options.output, 'w')\n\tuntrimmed_outfile = trimmed_outfile # reads without adapters go here\n\tif options.untrimmed_output is not None:\n\t\tuntrimmed_outfile = xopen(options.untrimmed_output, 'w')\n\tif options.too_short_output is not None:\n\t\ttoo_short_outfile = xopen(options.too_short_output, 'w')\n\tif options.too_long_output is not None:\n\t\ttoo_long_outfile = xopen(options.too_long_output, 'w')\n\tif options.paired_output:\n\t\tpe_outfile = xopen(options.paired_output, 'w')\n\n\tif options.maq:\n\t\toptions.colorspace = True\n\t\toptions.double_encode = True\n\t\toptions.trim_primer = True\n\t\toptions.strip_suffix.append('_F3')\n\t\toptions.suffix = \"/1\"\n\t\toptions.zero_cap = True\n\tif options.trim_primer and not options.colorspace:\n\t\tparser.error(\"Trimming the primer makes only sense in color space.\")\n\tif options.double_encode and not options.colorspace:\n\t\tparser.error(\"Double-encoding makes only sense in color space.\")\n\tif options.anywhere and options.colorspace:\n\t\tparser.error(\"Using --anywhere with color space reads is currently not supported (if you think this may be useful, contact the author).\")\n\tif not (0 <= options.error_rate <= 1.):\n\t\tparser.error(\"The maximum error rate must be between 0 and 1.\")\n\tif options.overlap < 1:\n\t\tparser.error(\"The overlap must be at least 1.\")\n\n\tif options.rest_file is not None:\n\t\toptions.rest_file = xopen(options.rest_file, 'w')\n\t\trest_writer = RestFileWriter(options.rest_file)\n\telse:\n\t\trest_writer = None\n\tif options.info_file is not None:\n\t\toptions.info_file = xopen(options.info_file, 'w')\n\tif options.wildcard_file is not None:\n\t\toptions.wildcard_file = xopen(options.wildcard_file, 'w')\n\n\tadapters = []\n\n\tdef parse_adapter_name(seq):\n\t\t\"\"\"\n\t\tParse an adapter given as 'name=adapt' into 'name' 
and 'adapt'.\n\t\t\"\"\"\n\t\tfields = seq.split('=', 1)\n\t\tif len(fields) > 1:\n\t\t\tname, seq = fields\n\t\t\tname = name.strip()\n\t\telse:\n\t\t\tname = None\n\t\tseq = seq.strip()\n\t\treturn name, seq\n\n\tADAPTER_CLASS = ColorspaceAdapter if options.colorspace else Adapter\n\tdef append_adapters(adapter_list, where):\n\t\tfor seq in adapter_list:\n\t\t\tname, seq = parse_adapter_name(seq)\n\t\t\tw = where\n\t\t\tif w == FRONT and seq.startswith('^'):\n\t\t\t\tseq = seq[1:]\n\t\t\t\tw = PREFIX\n\t\t\telif not options.indels:\n\t\t\t\tparser.error(\"Not allowing indels is currently supported only for anchored 5' adapters.\")\n\t\t\tif not seq:\n\t\t\t\tparser.error(\"The adapter sequence is empty\")\n\t\t\tadapter = ADAPTER_CLASS(seq, w, options.error_rate,\n\t\t\t\toptions.overlap, options.match_read_wildcards,\n\t\t\t\toptions.match_adapter_wildcards, name=name, indels=options.indels)\n\t\t\tadapters.append(adapter)\n\n\tappend_adapters(options.adapters, BACK)\n\tappend_adapters(options.anywhere, ANYWHERE)\n\tappend_adapters(options.front, FRONT)\n\n\t# make sure these aren't used by accident\n\tdel options.adapters\n\tdel options.anywhere\n\tdel options.front\n\n\tif not adapters and options.quality_cutoff == 0 and options.cut == 0:\n\t\tparser.error(\"You need to provide at least one adapter sequence.\")\n\n\tmodifiers = []\n\tif options.cut:\n\t\tmodifiers.append(UnconditionalCutter(options.cut))\n\tif options.quality_cutoff > 0:\n\t\tmodifiers.append(QualityTrimmer(options.quality_cutoff, options.quality_base))\n\tif adapters:\n\t\tadapter_cutter = RepeatedAdapterCutter(adapters, options.times,\n\t\t\t\toptions.wildcard_file, options.info_file, options.trim,\n\t\t\t\trest_writer, options.mask_adapter)\n\t\tmodifiers.append(adapter_cutter)\n\telse:\n\t\tadapter_cutter = None\n\tif options.length_tag:\n\t\tmodifiers.append(LengthTagModifier(options.length_tag))\n\tif options.strip_f3:\n\t\toptions.strip_suffix.append('_F3')\n\tfor suffix in options.strip_suffix:\n\t\tmodifiers.append(SuffixRemover(suffix))\n\tif options.prefix or options.suffix:\n\t\tmodifiers.append(PrefixSuffixAdder(options.prefix, options.suffix))\n\tif options.double_encode:\n\t\tmodifiers.append(DoubleEncoder())\n\tif options.zero_cap:\n\t\tmodifiers.append(ZeroCapper(quality_base=options.quality_base))\n\tif options.trim_primer:\n\t\tmodifiers.append(PrimerTrimmer)\n\n\treadfilter = ReadFilter(options.minimum_length, options.maximum_length,\n\t\ttoo_short_outfile, too_long_outfile, options.discard_trimmed,\n\t\toptions.discard_untrimmed)\n\tstart_time = time.clock()\n\ttry:\n\t\treader = read_sequences(input_filename, quality_filename, colorspace=options.colorspace, fileformat=options.format)\n\t\tif pe_filename:\n\t\t\tpe_reader = read_sequences(pe_filename, None, colorspace=options.colorspace, fileformat=options.format)\n\t\telse:\n\t\t\tpe_reader = None\n\t\tstats = process_reads(reader, pe_reader, modifiers, readfilter, trimmed_outfile, untrimmed_outfile, pe_outfile)\n\texcept IOError as e:\n\t\tif e.errno == errno.EPIPE:\n\t\t\tsys.exit(1)\n\t\traise\n\texcept seqio.FormatError as e:\n\t\tprint(\"Error:\", e, file=sys.stderr)\n\t\tsys.exit(1)\n\t# close open files\n\tfor f in [options.rest_file, options.wildcard_file, options.info_file,\n\t\t\ttoo_short_outfile, too_long_outfile, options.info_file]:\n\t\tif f is not None:\n\t\t\tf.close()\n\t# send statistics to stderr if result was sent to stdout\n\tstat_file = sys.stderr if options.output is None else None\n\n\tprint_statistics(adapters, 
time.clock() - start_time, stats,\n\t\toptions.trim, adapter_cutter.reads_matched if adapter_cutter else 0,\n\t\toptions.error_rate, readfilter.too_short, readfilter.too_long, cmdlineargs, file=stat_file)",
"def doCommand():\n \n import time\n import sys\n from swap import myStore\n\n # These would just be attributes if this were an object\n global _store\n global workingContext\n option_need_rdf_sometime = 0 # If we don't need it, don't import it\n # (to save errors where parsers don't exist)\n \n option_pipe = 0 # Don't store, just pipe though\n option_inputs = []\n option_reify = 0 # Flag: reify on output (process?)\n option_flat = 0 # Flag: reify on output (process?)\n option_crypto = 0 # Flag: make cryptographic algorithms available\n setTracking(0)\n option_outURI = None\n option_outputStyle = \"-best\"\n _gotInput = 0 # Do we not need to take input from stdin?\n option_meta = 0\n option_normalize_iri = 0\n \n option_flags = { \"rdf\":\"l\", \"n3\":\"\", \"think\":\"\", \"sparql\":\"\"}\n # RDF/XML serializer can't do list (\"collection\") syntax.\n \n option_quiet = 0\n option_with = None # Command line arguments made available to N3 processing\n option_engine = \"llyn\"\n option_why = \"\"\n \n _step = 0 # Step number used for metadata\n _genid = 0\n\n hostname = \"localhost\" # @@@@@@@@@@@ Get real one\n \n # The base URI for this process - the Web equiv of cwd\n _baseURI = uripath.base()\n \n option_format = \"n3\" # set the default format\n option_first_format = None\n \n _outURI = _baseURI\n option_baseURI = _baseURI # To start with - then tracks running base\n \n # First pass on command line - - - - - - - P A S S 1\n \n for argnum in range(1,len(sys.argv)): # options after script name\n arg = sys.argv[argnum]\n if arg.startswith(\"--\"): arg = arg[1:] # Chop posix-style -- to -\n# _equals = string.find(arg, \"=\")\n _lhs = \"\"\n _rhs = \"\"\n try:\n [_lhs,_rhs]=arg.split('=',1)\n try:\n _uri = join(option_baseURI, _rhs)\n except ValueError:\n _uri = _rhs\n except ValueError: pass\n if arg == \"-ugly\": option_outputStyle = arg\n elif _lhs == \"-base\": option_baseURI = _uri\n elif arg == \"-rdf\":\n option_format = \"rdf\"\n if option_first_format == None:\n option_first_format = option_format \n option_need_rdf_sometime = 1\n elif _lhs == \"-rdf\":\n option_format = \"rdf\"\n if option_first_format == None:\n option_first_format = option_format \n option_flags[\"rdf\"] = _rhs\n option_need_rdf_sometime = 1\n elif arg == \"-n3\":\n option_format = \"n3\"\n if option_first_format == None:\n option_first_format = option_format \n elif _lhs == \"-n3\":\n option_format = \"n3\"\n if option_first_format == None:\n option_first_format = option_format \n option_flags[\"n3\"] = _rhs\n elif _lhs == \"-mode\":\n option_flags[\"think\"] = _rhs\n elif _lhs == \"-closure\":\n if \"n\" in _rhs:\n option_normalize_iri = 1\n #elif _lhs == \"-solve\":\n # sys.argv[argnum+1:argnum+1] = ['-think', '-filter=' + _rhs]\n elif _lhs == \"-language\":\n option_format = _rhs\n if option_first_format == None:\n option_first_format = option_format\n elif _lhs == \"-languageOptions\":\n option_flags[option_format] = _rhs\n elif arg == \"-quiet\": option_quiet = 1\n elif arg == \"-pipe\": option_pipe = 1\n elif arg == \"-crypto\": option_crypto = 1\n elif _lhs == \"-why\":\n diag.tracking=1\n diag.setTracking(1)\n option_why = _rhs\n elif arg == \"-why\":\n diag.tracking=1\n diag.setTracking(1)\n option_why = \"\"\n elif arg == \"-track\":\n diag.tracking=1\n diag.setTracking(1)\n elif arg == \"-bySubject\": option_outputStyle = arg\n elif arg == \"-no\": option_outputStyle = \"-no\"\n elif arg == \"-debugString\": option_outputStyle = \"-debugString\"\n elif arg == \"-strings\": option_outputStyle = 
\"-no\"\n elif arg == \"-sparqlResults\": option_outputStyle = \"-no\"\n elif arg == \"-triples\" or arg == \"-ntriples\":\n option_format = \"n3\"\n option_flags[\"n3\"] = \"bravestpun\"\n option_outputStyle = \"-bySubject\"\n option_quiet = 1\n elif _lhs == \"-outURI\": option_outURI = _uri\n elif _lhs == \"-chatty\":\n setVerbosity(int(_rhs))\n elif arg[:7] == \"-apply=\": pass\n elif arg[:7] == \"-patch=\": pass\n elif arg == \"-reify\": option_reify = 1\n elif arg == \"-flat\": option_flat = 1\n elif arg == \"-help\":\n print doCommand.__doc__\n print notation3.ToN3.flagDocumentation\n print toXML.ToRDF.flagDocumentation\n try:\n from swap import sax2rdf # RDF1.0 syntax parser to N3 RDF stream\n print sax2rdf.RDFXMLParser.flagDocumentation\n except:\n pass\n return\n elif arg == \"-revision\":\n progress( \"cwm=\",cvsRevision, \"llyn=\", llyn.cvsRevision)\n return\n elif arg == \"-with\":\n option_with = sys.argv[argnum+1:] # The rest of the args are passed to n3\n break\n elif arg[0] == \"-\": pass # Other option\n else :\n option_inputs.append(join(option_baseURI, arg))\n _gotInput = _gotInput + 1 # input filename\n \n\n # Between passes, prepare for processing\n setVerbosity(0)\n\n if not option_normalize_iri:\n llyn.canonical = lambda x: x\n\n # Base defauts\n if option_baseURI == _baseURI: # Base not specified explicitly - special case\n if _outURI == _baseURI: # Output name not specified either\n if _gotInput == 1: # But input file *is*, \n _outURI = option_inputs[0] # Just output to same URI\n option_baseURI = _outURI # using that as base.\n if diag.tracking:\n _outURI = RDFSink.runNamespace()[:-1]\n option_baseURI = _outURI\n option_baseURI = splitFrag(option_baseURI)[0]\n\n # Fix the output sink\n if option_format == \"rdf\":\n _outSink = toXML.ToRDF(sys.stdout, _outURI, base=option_baseURI, flags=option_flags[\"rdf\"])\n elif option_format == \"n3\" or option_format == \"sparql\":\n _outSink = notation3.ToN3(sys.stdout.write, base=option_baseURI,\n quiet=option_quiet, flags=option_flags[\"n3\"])\n elif option_format == \"trace\":\n _outSink = RDFSink.TracingRDFSink(_outURI, base=option_baseURI,\n flags=option_flags.get(\"trace\",\"\"))\n if option_pipe:\n # this is really what a parser wants to dump to\n _outSink.backing = llyn.RDFStore( _outURI+\"#_g\",\n argv=option_with, crypto=option_crypto) \n else:\n # this is really what a store wants to dump to \n _outSink.backing = notation3.ToN3(sys.stdout.write,\n base=option_baseURI, quiet=option_quiet,\n flags=option_flags[\"n3\"])\n\n # hm. why does TimBL use sys.stdout.write, above? performance at the \n else:\n raise NotImplementedError\n\n version = \"$Id: cwm.py,v 1.198 2012/01/30 09:30:20 timbl Exp $\"\n if not option_quiet and option_outputStyle != \"-no\":\n _outSink.makeComment(\"Processed by \" + version[1:-1]) # Strip $ to disarm\n _outSink.makeComment(\" using base \" + option_baseURI)\n\n if option_flat:\n _outSink = notation3.Reifier(_outSink, _outURI+ \"#_formula\", flat=1)\n\n if diag.tracking: \n myReason = BecauseOfCommandLine(`sys.argv`)\n # @@ add user, host, pid, pwd, date time? 
Privacy!\n else:\n myReason = None\n\n if option_pipe:\n _store = _outSink\n workingContext = _outSink #.newFormula()\n else:\n if \"u\" in option_flags[\"think\"]:\n _store = llyn.RDFStore(argv=option_with, crypto=option_crypto)\n else:\n _store = llyn.RDFStore( _outURI+\"#_g\",\n argv=option_with, crypto=option_crypto)\n myStore.setStore(_store)\n\n\n if _gotInput: \n workingContext = _store.newFormula(option_inputs [0]+\"#_work\")\n newTopLevelFormula(workingContext)\n else: # default input\n if option_first_format is None: option_first_format = option_format\n ContentType={ \"rdf\": \"application/xml+rdf\", \"n3\":\n \"text/n3\", \"sparql\":\n \"x-application/sparql\"}[option_first_format]\n workingContext = _store.load(\n # asIfFrom = join(_baseURI, \".stdin\"),\n asIfFrom = _baseURI,\n contentType = ContentType,\n flags = option_flags[option_first_format],\n remember = 0,\n referer = \"\",\n why = myReason, topLevel=True)\n workingContext.reopen()\n workingContext.stayOpen = 1 # Never canonicalize this. Never share it.\n \n\n # ____________________________________________________________________\n # Take commands from command line:- - - - - P A S S 2\n\n option_format = \"n3\" # Use RDF/n3 rather than RDF/XML \n option_flags = { \"rdf\":\"l\", \"n3\":\"\", \"think\": \"\", \"sparql\":\"\" } \n option_quiet = 0\n _outURI = _baseURI\n option_baseURI = _baseURI # To start with\n \n def filterize():\n \"\"\"implementation of --filter\n for the --filter command, so we don't have it printed twice\n \"\"\"\n global workingContext\n global r\n workingContext = workingContext.canonicalize()\n _store._formulaeOfLength = {}\n filterContext = _store.newFormula()\n newTopLevelFormula(filterContext)\n _store.load(_uri, openFormula=filterContext,\n why=myReason, referer=\"\")\n _newContext = _store.newFormula()\n newTopLevelFormula(_newContext)\n applyRules(workingContext, filterContext, _newContext)\n workingContext.close()\n workingContext = _newContext\n\n sparql_query_formula = None\n\n \n for arg in sys.argv[1:]: # Command line options after script name\n if verbosity()>5: progress(\"Processing %s.\" % (arg))\n if arg.startswith(\"--\"): arg = arg[1:] # Chop posix-style -- to -\n _equals = string.find(arg, \"=\")\n _lhs = \"\"\n _rhs = \"\"\n if _equals >=0:\n _lhs = arg[:_equals]\n _rhs = arg[_equals+1:]\n try:\n _uri = join(option_baseURI, _rhs)\n except ValueError:\n _uri =_rhs\n if arg[0] != \"-\":\n _inputURI = join(option_baseURI, splitFrag(arg)[0])\n assert ':' in _inputURI\n ContentType={ \"rdf\": \"application/xml+rdf\", \"n3\":\n \"text/n3\",\n \"sparql\": \"x-application/sparql\"}[option_format]\n\n if not option_pipe: workingContext.reopen()\n try:\n load(_store, _inputURI,\n openFormula=workingContext,\n contentType =ContentType,\n flags=option_flags[option_format],\n referer=\"\",\n why=myReason)\n except:\n progress(_inputURI)\n raise\n\n _gotInput = 1\n\n elif arg == \"-help\":\n pass # shouldn't happen\n elif arg == \"-revision\":\n pass\n elif _lhs == \"-base\":\n option_baseURI = _uri\n if verbosity() > 10: progress(\"Base now \"+option_baseURI)\n\n elif arg == \"-ugly\":\n option_outputStyle = arg \n\n elif arg == \"-crypto\": pass\n elif arg == \"-pipe\": pass\n elif _lhs == \"-outURI\": option_outURI = _uri\n\n elif arg == \"-rdf\": option_format = \"rdf\"\n elif _lhs == \"-rdf\":\n option_format = \"rdf\"\n option_flags[\"rdf\"] = _rhs\n elif _lhs == \"-mode\":\n option_flags[\"think\"] = _rhs\n elif _lhs == \"-closure\":\n workingContext.setClosureMode(_rhs)\n elif arg 
== \"-n3\": option_format = \"n3\"\n elif _lhs == \"-n3\":\n option_format = \"n3\"\n option_flags[\"n3\"] = _rhs\n elif _lhs == \"-language\":\n option_format = _rhs\n if option_first_format == None:\n option_first_format = option_format\n elif _lhs == \"-languageOptions\":\n option_flags[option_format] = _lhs\n elif arg == \"-quiet\" : option_quiet = 1 \n elif _lhs == \"-chatty\": setVerbosity(int(_rhs))\n elif arg[:7] == \"-track=\":\n diag.tracking = int(_rhs)\n \n elif option_pipe: ############## End of pipable options\n print \"# Command line error: %s illegal option with -pipe\", arg\n break\n\n elif arg == \"-triples\" or arg == \"-ntriples\":\n option_format = \"n3\"\n option_flags[\"n3\"] = \"spartan\"\n option_outputStyle = \"-bySubject\"\n option_quiet = 1\n\n elif arg == \"-bySubject\":\n option_outputStyle = arg\n\n elif arg == \"-debugString\":\n option_outputStyle = arg\n\n elif arg[:7] == \"-apply=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n applyRules(workingContext, filterContext);\n\n elif arg[:7] == \"-apply=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n applyRules(workingContext, filterContext);\n\n elif arg[:7] == \"-patch=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n patch(workingContext, filterContext);\n\n elif _lhs == \"-filter\":\n filterize()\n\n elif _lhs == \"-query\":\n workingContext = workingContext.canonicalize()\n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n _newContext = _store.newFormula()\n applyQueries(workingContext, filterContext, _newContext)\n workingContext.close()\n workingContext = _newContext\n\n elif _lhs == \"-sparql\":\n workingContext.stayOpen = False\n workingContext = workingContext.canonicalize()\n filterContext = _store.load(_uri, why=myReason,\n referer=\"\", contentType=\"x-application/sparql\")\n _newContext = _store.newFormula()\n _newContext.stayOpen = True\n sparql_query_formula = filterContext\n applySparqlQueries(workingContext, filterContext, _newContext)\n# workingContext.close()\n workingContext = _newContext\n\n elif _lhs == \"-why\" or arg == \"-why\":\n workingContext.stayOpen = False\n workingContext = workingContext.close()\n workingContext = explainFormula(workingContext, option_why)\n # Can't prove proofs\n diag.tracking=0\n diag.setTracking(0)\n\n elif arg == \"-dump\":\n \n workingContext = workingContext.canonicalize()\n progress(\"\\nDump of working formula:\\n\" + workingContext.debugString())\n \n elif arg == \"-purge\":\n workingContext.reopen()\n _store.purge(workingContext)\n \n elif arg == \"-purge-rules\" or arg == \"-data\":\n \n workingContext.reopen()\n _store.purgeExceptData(workingContext)\n\n elif arg == \"-rules\":\n \n workingContext.reopen()\n applyRules(workingContext, workingContext)\n\n elif arg[:7] == \"-think=\":\n \n filterContext = _store.load(_uri, referer=\"\", why=myReason, topLevel=True)\n if verbosity() > 4:\n progress( \"Input rules to --think from \" + _uri)\n workingContext.reopen()\n think(workingContext, filterContext, 
mode=option_flags[\"think\"])\n\n elif arg[:7] == \"-solve=\":\n # --solve is a combination of --think and --filter.\n think(workingContext, mode=option_flags[\"think\"])\n filterize()\n \n elif _lhs == \"-engine\":\n option_engine = _rhs\n \n elif arg == \"-think\":\n workingContext.isWorkingContext = True\n think(workingContext, mode=option_flags[\"think\"])\n\n elif arg == '-rete':\n from swap import pycwmko \n pythink = pycwmko.directPychinkoQuery(workingContext)\n #return\n #pythink()\n \"\"\"\n from pychinko import interpreter\n from swap.set_importer import Set, ImmutableSet\n pyf = pycwmko.N3Loader.N3Loader()\n conv = pycwmko.ToPyStore(pyf)\n conv.statements(workingContext)\n interp = interpreter.Interpreter(pyf.rules[:])\n interp.addFacts(Set(pyf.facts), initialSet=True)\n interp.run()\n pyf.facts = interp.totalFacts\n workingContext = workingContext.store.newFormula()\n reconv = pycwmko.FromPyStore(workingContext, pyf)\n reconv.run()\n \"\"\"\n\n elif arg == '-sparqlServer':\n from swap.sparql import webserver\n from swap import cwm_sparql\n sandBoxed(True)\n workingContext.stayOpen = False\n workingContext = workingContext.canonicalize()\n def _handler(s):\n return cwm_sparql.sparql_queryString(workingContext, s)\n webserver.sparql_handler = _handler\n webserver.run()\n\n elif arg == \"-lxkbdump\": # just for debugging\n raise NotImplementedError\n\n elif arg == \"-lxfdump\": # just for debugging\n raise NotImplementedError \n\n elif _lhs == \"-prove\":\n\n # code copied from -filter without really being understood -sdh\n _tmpstore = llyn.RDFStore( _outURI+\"#_g\", metaURI=_metaURI, argv=option_with, crypto=option_crypto)\n\n tmpContext = _tmpstore.newFormula(_uri+ \"#_formula\")\n _newURI = join(_baseURI, \"_w_\"+`_genid`) # Intermediate\n _genid = _genid + 1\n _newContext = _tmpstore.newFormula(_newURI+ \"#_formula\")\n _tmpstore.loadURI(_uri)\n\n print targetkb\n\n elif arg == \"-flatten\":\n #raise NotImplementedError\n from swap import reify\n workingContext = reify.flatten(workingContext)\n\n elif arg == \"-unflatten\":\n from swap import reify\n workingContext = reify.unflatten(workingContext)\n #raise NotImplementedError\n \n elif arg == \"-reify\":\n from swap import reify\n workingContext = reify.reify(workingContext)\n \n\n elif arg == \"-dereify\":\n from swap import reify\n workingContext = reify.dereify(workingContext) \n \n\n elif arg == \"-size\":\n progress(\"Size: %i statements in store, %i in working formula.\"\n %(_store.size, workingContext.size()))\n\n elif arg == \"-strings\": # suppress output\n workingContext.outputStrings() \n option_outputStyle = \"-no\"\n\n elif arg == '-sparqlResults':\n from cwm_sparql import outputString, SPARQL_NS\n ns = _store.newSymbol(SPARQL_NS)\n if not sparql_query_formula:\n raise ValueError('No query')\n else:\n sys.stdout.write(outputString(sparql_query_formula, workingContext)[0].encode('utf_8'))\n option_outputStyle = \"-no\"\n \n \n elif arg == \"-no\": # suppress output\n option_outputStyle = arg\n \n elif arg[:8] == \"-outURI=\": pass\n elif arg == \"-with\": break\n else:\n progress( \"cwm: Unknown option: \" + arg)\n sys.exit(-1)\n\n\n\n # Squirt it out if not piped\n\n workingContext.stayOpen = 0 # End its use as an always-open knoweldge base\n if option_pipe:\n workingContext.endDoc()\n else:\n if hasattr(_outSink, \"serializeKB\"):\n raise NotImplementedError\n else:\n if verbosity()>5: progress(\"Begining output.\")\n workingContext = workingContext.close()\n assert workingContext.canonical != None\n\n if 
option_outputStyle == \"-ugly\":\n _store.dumpChronological(workingContext, _outSink)\n elif option_outputStyle == \"-bySubject\":\n _store.dumpBySubject(workingContext, _outSink)\n elif option_outputStyle == \"-no\":\n pass\n elif option_outputStyle == \"-debugString\":\n print workingContext.debugString()\n else: # \"-best\"\n _store.dumpNested(workingContext, _outSink,\n flags=option_flags[option_format])",
"def asCommandLine(self, args):\n try:\n inFile = args[\"xmlfile\"]\n outFile = args[\"out\"]\n except:\n raise PeachException(\"XmlAnalyzer requires two parameters, xmlfile and out.\")\n xml = _Xml2Peach().xml2Peach(\"file:\" + inFile)\n with open(outFile, \"wb+\") as fo:\n fo.write(xml)",
"def main(self): # just put into if __name__ ...\n parser = self.get_parser()\n args = parser.parse_args()\n self.run(args)",
"def main():\n # set up the program to take in arguments from the command line",
"def add_standard_args(self):\n self.add_argument(\"-v\", \"--verbose\",\n help=\"Set log verbosity to True, nominal debug level.\", action=\"store_true\")\n self.add_argument(\"--verbosity\",\n help=\"Set log verbosity to a specific level: 0..100.\", type=int, default=0)\n self.add_argument(\"--dump-cmdline\", action=\"store_true\",\n help=\"Dump the command line parameters used to start the script to the log.\")\n self.add_argument(\"-R\", \"--readonly-cache\", action=\"store_true\",\n help=\"Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.\")\n self.add_argument('-I', '--ignore-cache', action='store_true', dest=\"ignore_cache\",\n help=\"Download required files even if they're already in the cache.\")\n self.add_argument(\"-V\", \"--version\",\n help=\"Print the software version and exit.\", action=\"store_true\")\n self.add_argument(\"-J\", \"--jwst\", dest=\"jwst\", action=\"store_true\",\n help=\"Force observatory to JWST for determining header conventions.\"\"\")\n self.add_argument(\"-H\", \"--hst\", dest=\"hst\", action=\"store_true\",\n help=\"Force observatory to HST for determining header conventions.\"\"\")\n self.add_argument(\"--roman\", dest=\"roman\", action=\"store_true\",\n help=\"Force observatory to Roman for determining header conventions.\"\"\")\n self.add_argument(\"--stats\", action=\"store_true\",\n help=\"Track and print timing statistics.\")\n self.add_argument(\"--profile\",\n help=\"Output profile stats to the specified file.\", type=str, default=\"\")\n self.add_argument(\"--log-time\", action=\"store_true\",\n help=\"Add date/time to log messages.\")\n self.add_argument(\"--pdb\",\n help=\"Run under pdb.\", action=\"store_true\")\n self.add_argument(\"--debug-traps\",\n help=\"Bypass exception error message traps and re-raise exception.\", action=\"store_true\")",
"def main():\n args = docopt(__doc__, version='recipy v%s' % __version__)\n\n if args['--debug']:\n print('Command-line arguments: ')\n print(args)\n print('DB path: ', config.get_db_path())\n print('')\n print('Full config file (as interpreted):')\n print('----------------------------------')\n conf = config.read_config_file()\n s = six.StringIO()\n conf.write(s)\n print(s.getvalue())\n print('----------------------------------')\n\n\n if args['search']:\n search(args)\n elif args['latest']:\n latest(args)\n elif args['gui']:\n gui(args)\n elif args['annotate']:\n annotate(args)",
"def run_parser(self, parser: ArgumentParser):",
"def run_with(self, runner):\n runner([self.path] + self.arguments)",
"def process_commandline():\n parser = optparse.OptionParser(__doc__.strip())\n if os.getuid() == 0:\n support_path = \"/Library/\"\n else:\n support_path = os.path.expanduser(\"~/Library/\")\n preference_file = os.path.join(support_path, \"Preferences\",\n \"com.googlecode.pymacadmin.crankd.plist\")\n module_path = os.path.join(support_path, \"Application Support/crankd\")\n\n if os.path.exists(module_path):\n sys.path.append(module_path)\n else:\n print(\n \"Module directory %s does not exist: \"\n \"Python handlers will need to use absolute pathnames\" % module_path,\n file=sys.stderr)\n\n parser.add_option(\n \"-f\",\n \"--config\",\n dest=\"config_file\",\n help=\"Use an alternate config file instead of %default\",\n default=preference_file)\n parser.add_option(\n \"-l\",\n \"--list-events\",\n action=\"callback\",\n callback=list_events,\n help=\"List the events which can be monitored\")\n parser.add_option(\n \"-d\",\n \"--debug\",\n action=\"count\",\n default=False,\n help=\"Log detailed progress information\")\n (options, args) = parser.parse_args()\n\n if args:\n parser.error(\"Unknown command-line arguments: %s\" % args)\n\n options.support_path = support_path\n options.config_file = os.path.realpath(options.config_file)\n\n # This is somewhat messy but we want to alter the command-line to use full\n # file paths in case someone's code changes the current directory or the\n sys.argv = [\n os.path.realpath(sys.argv[0]),\n ]\n\n if options.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n sys.argv.append(\"--debug\")\n\n if options.config_file:\n sys.argv.append(\"--config\")\n sys.argv.append(options.config_file)\n\n return options",
"def run_one(options, args, stem_prefix='', input_file=None):\n if input_file is None:\n input_file = options.input_file\n stem = stem_prefix + '-'.join(args)\n data_filename = output_file_name(options.output_directory, stem, 'h')\n stdout_filename = output_file_name(options.output_directory, stem, 'out')\n stderr_filename = output_file_name(options.output_directory, stem, 'err')\n status_filename = output_file_name(options.output_directory, stem, 'status')\n shutil.copy(input_file, data_filename)\n # Pass only the file basename, not the full path, to avoid getting the\n # directory name in error messages, which would make comparisons\n # between output directories more difficult.\n cmd = [os.path.abspath(options.script),\n '-f', os.path.basename(data_filename)]\n with open(stdout_filename, 'wb') as out:\n with open(stderr_filename, 'wb') as err:\n status = subprocess.call(cmd + args,\n cwd=options.output_directory,\n stdin=subprocess.DEVNULL,\n stdout=out, stderr=err)\n with open(status_filename, 'w') as status_file:\n status_file.write('{}\\n'.format(status))\n return stem + \"+\", data_filename",
"def main(myCommandLine=None):\n myCommandLine = CommandLine()\n\n if myCommandLine.args.hiConfOut:\n hiConfOut = myCommandLine.args.hiConfOut\n\n if myCommandLine.args.bedFile:\n bedFile = myCommandLine.args.bedFile\n\n if myCommandLine.args.speciesName:\n speciesName = myCommandLine.args.speciesName\n else:\n speciesName = ''\n\n if myCommandLine.args.cactusPath:\n cactusPath = myCommandLine.args.cactusPath\n else:\n cactusPath = ''\n\n if myCommandLine.args.modFile:\n modFile = myCommandLine.args.modFile\n else:\n modFile = ''\n\n if myCommandLine.args.chromLengths:\n chromLengths = myCommandLine.args.chromLengths\n else:\n chromLengths = ''\n\n if myCommandLine.args.outPath:\n outPath = myCommandLine.args.outPath\n\n myFileConverter = fileConverter(hiConfOut, bedFile, speciesName, cactusPath, modFile, chromLengths, outPath)\n myFileConverter.makeHiConfBeds()",
"def setup(cls, subparser):\n # creates the parser for options\n parser = subparser.add_parser(cls.__command__, help=cls.__help__)\n\n # adds the arguments\n cls.args(parser)\n\n # sets the default function to invoke\n parser.set_defaults(func=cls.run)\n cls._parser = parser",
"def main(args):\n \n if (len(args) > 2):\n print('Incorrect amount of arguements, run file-extractor.py help for usage')\n exit()\n \n if(args[1] == 'run'):\n fh = FileHandler(\n **{'file' : args[0], })\n\n fh.verify()\n fh.parse_filename()\n fh.byte_counter()\n fh.sha1_digest()\n fh.md5_digest()\n \n fh.print_results()\n\n else:\n print(\n \"\"\"\n Need Command Usage\n Start run file-extractor.py <PathtoFile> run\n \"\"\"\n )\n exit()",
"def main():\n parser = OptionParser()\n parser.add_option('-p', '--population', action='append',\n dest=\"populations\", help='population_files')\n parser.add_option('-a', '--arguments-selection-pipelines',\n dest=\"extra_args\", help=('Arguments to the selection'\n 'pipeline script'))\n parser.add_option('-l', '--log-file', dest=\"log_file\", help=\"Log file\")\n parser.add_option('-i', '--vcf-input-file', dest=\"vcf_input\",\n help=\"VCF Input File\")\n parser.add_option('-c', '--chromosome', dest=\"chromosome\",\n help=(\"Chromosome label doesn't actually have to\"\n \"correspond to the real chromosome but is required\"\n \" to determine what output files to make\"))\n parser.add_option('--config-file', dest='config_file',\n help='Configuration File')\n parser.add_option('--fst-window-size', dest=\"fst_window_size\",\n help=\"FST window size (kb)\")\n parser.add_option('--fst-window-step', dest=\"fst_window_step\",\n help=\"FST window step size (kb)\")\n parser.add_option('--no-clean-up', dest=\"no_clean_up\",\n action=\"store_true\",\n help=\"Do not clean up intermediate datafiles\")\n parser.add_option('--cores', dest=\"cores\", help=(\"Overrides number of \"\n \"cores avaliable as provided in the config file\"))\n parser.add_option('--no-rsb',dest=\"no_rsb\", action=\"store_true\",\n help=\"Do not calculate RSB\")\n (options, args) = parser.parse_args()\n print(options.extra_args)\n assert options.vcf_input is not None, \\\n \"no VCF file has been specified as input\"\n assert os.path.isfile(options.vcf_input), \\\n \"Cannot locate vcf file at path = {0)\".format(options.vcf_input)\n assert options.chromosome is not None, \\\n \"no chromosome has been specified to the script\"\n assert options.populations is not None and \\\n len(options.populations) >= 2, \\\n \"At least two population files are required\"\n if options.config_file is None:\n options.config_file = 'defaults.cfg'\n if not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n elif not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n config = parse_config(options)\n if options.log_file is None:\n options.log_file = 'multi_population.log'\n logging.basicConfig(format='%(asctime)s %(message)s',\n filename=options.log_file, filemode='w',\n level=logging.INFO)\n if not (check_executables_and_scripts_exist(options, config)):\n sys.exit(CANNOT_FIND_EXECUTABLE)\n if options.no_clean_up is None:\n options.clean_up_files = False\n if options.fst_window_step is None:\n options.fst_window_step = str(1000)\n else:\n options.fst_window_step = str(\n float(options.fst_window_step) * 1e3)\n if options.fst_window_size is None:\n options.fst_window_size = str(1000)\n else:\n options.fst_window_size = str(\n float(options.fst_window_size) * 1e3)\n if options.no_rsb is None:\n options.no_rsb = False\n if options.cores is not None:\n config['system']['cores_avaliable'] = options.cores\n set_environment(config['environment'])\n options.vcf_input = os.path.abspath(options.vcf_input)\n populations = get_populations(options.populations)\n populations = OrderedDict(sorted(populations.items(), key=lambda t: t[0]))\n fst_vcf(options.vcf_input, config, options, populations)\n output_vcfs = subset_vcf(options.vcf_input, config, populations)\n run_selection_pipeline(output_vcfs, options, populations, config)\n # TODO move FST to here on filtered dataset\n if not (options.no_rsb):\n rsb(config, options, populations)\n if not os.path.exists('logs'):\n os.mkdir('logs')\n 
os.rename(options.log_file, 'logs/' + options.log_file)\n if not options.no_clean_up:\n keep = [os.path.basename(options.vcf_input),os.path.basename(options.config_file)]\n keep.extend(options.populations)\n clean_folder('.', keep=keep)\n logger.info(\"Multi_population Complete\")\n logger.info(\"Goodbye :\")\n print(\"Multi-population selection pipeline completed successfully !:)\")",
"def run():\n parser = argparse.ArgumentParser(\n prog='twitter-scraper', description=\"Scrape twitter public pages without an API key\",\n )\n parser.add_argument('account', type=str, help=\"twitter account\")\n parser.add_argument('-f', '--filename', type=str, help=\"Output filename\")\n parser.add_argument('-p', '--pages', type=int, help=\"Number of pages to download\", default=10)\n parser.add_argument('-v', '--verbose', action='count', help=\"Enable logging\", default=0)\n args = parser.parse_args()\n\n # Enable logging\n if args.verbose > 0:\n args.verbose = min(args.verbose, 3)\n level = {1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}[args.verbose]\n enable_logging(level)\n\n write_tweets_to_csv(account=args.account, filename=args.filename, page_limit=args.pages)",
"def console(self):\r\n parser = argparse.ArgumentParser(\r\n prog=\"dexofuzzy\",\r\n description=(\"Dexofuzzy - Dalvik EXecutable Opcode Fuzzyhash\"),\r\n add_help=True)\r\n\r\n parser.add_argument(\r\n \"-f\", \"--file\", metavar=\"SAMPLE_FILENAME\",\r\n help=\"the sample to extract dexofuzzy\")\r\n parser.add_argument(\r\n \"-d\", \"--directory\", metavar=\"SAMPLE_DIRECTORY\",\r\n help=\"the directory of samples to extract dexofuzzy\")\r\n\r\n parser.add_argument(\r\n \"-m\", \"--method-fuzzy\", action=\"store_true\",\r\n help=\"extract the fuzzyhash based on method of the sample\"\r\n + \"(must include the -f or -d option by default)\")\r\n\r\n parser.add_argument(\r\n \"-g\", \"--clustering\", metavar=(\"N\", \"M\"), nargs=2, type=int,\r\n help=\"N-Gram Tokenizer and M-Partial Matching clustering\"\r\n + \" based on the sample's dexofuzzy \"\r\n + \"(must include the -d option by default)\")\r\n\r\n parser.add_argument(\r\n \"-s\", \"--score\", metavar=\"DEXOFUZZY\", nargs=2,\r\n help=\"score the dexofuzzy of the sample\")\r\n\r\n parser.add_argument(\r\n \"-c\", \"--csv\", metavar=\"CSV_FILENAME\",\r\n help=\"output as CSV format\")\r\n parser.add_argument(\r\n \"-j\", \"--json\", metavar=\"JSON_FILENAME\",\r\n help=\"output as json format \" +\r\n \"(include method fuzzy or clustering)\")\r\n parser.add_argument(\r\n \"-l\", \"--error-log\", metavar=\"LOG_FILENAME\",\r\n help=\"output the error log\")\r\n\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n return None\r\n\r\n self.args = parser.parse_args()\r\n dexofuzzy_list = []\r\n\r\n if self.args.score:\r\n print(self.__get_dexofuzzy_compare(self.args.score[0], self.args.score[1]))\r\n\r\n if self.args.directory:\r\n for result in self.__search_directory(self.args.directory):\r\n if result is not None:\r\n print(f'{result[\"file_name\"]},{result[\"file_sha256\"]},'\r\n f'{result[\"file_size\"]},{result[\"dexohash\"]},'\r\n f'{result[\"dexofuzzy\"]}')\r\n\r\n if self.args.method_fuzzy:\r\n print(json.dumps(result[\"method_fuzzy\"], indent=4))\r\n\r\n dexofuzzy_list.append(result)\r\n\r\n if self.args.file:\r\n result = self.__search_file(self.args.file)\r\n if result is not None:\r\n print(f'{result[\"file_name\"]},{result[\"file_sha256\"]},'\r\n f'{result[\"file_size\"]},{result[\"dexohash\"]},'\r\n f'{result[\"dexofuzzy\"]}')\r\n\r\n if self.args.method_fuzzy:\r\n print(json.dumps(result[\"method_fuzzy\"], indent=4))\r\n\r\n dexofuzzy_list.append(result)\r\n\r\n if self.args.clustering:\r\n if not self.args.directory:\r\n print(\"must include the -d option by default\")\r\n return None\r\n\r\n dexofuzzy_list = self.__clustering_dexofuzzy(dexofuzzy_list,\r\n self.args.clustering[0],\r\n self.args.clustering[1])\r\n print(json.dumps(dexofuzzy_list, indent=4))\r\n\r\n if self.args.csv:\r\n try:\r\n with open(self.args.csv, \"w\", encoding=\"UTF-8\", newline=\"\") as csv_file:\r\n fieldnames = [\"file_name\", \"file_sha256\", \"file_size\",\r\n \"dexohash\", \"dexofuzzy\"]\r\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n\r\n for output in dexofuzzy_list:\r\n row = {}\r\n row[\"file_name\"] = output[\"file_name\"]\r\n row[\"file_sha256\"] = output[\"file_sha256\"]\r\n row[\"file_size\"] = output[\"file_size\"]\r\n row[\"dexohash\"] = output[\"dexohash\"]\r\n row[\"dexofuzzy\"] = output[\"dexofuzzy\"]\r\n writer.writerow(row)\r\n\r\n except IOError:\r\n print(f\"{inspect.stack()[0][3]} : {traceback.format_exc()}\")\r\n return False\r\n\r\n if self.args.json:\r\n try:\r\n with 
open(self.args.json, \"w\", encoding=\"UTF-8\") as json_file:\r\n json.dump(dexofuzzy_list, json_file, indent=4)\r\n\r\n except IOError:\r\n print(f\"{inspect.stack()[0][3]} : {traceback.format_exc()}\")\r\n return False",
"def run_from_argv(self, argv):\n parser = self.create_arg_parser(argv)\n self.options = parser.parse_args(argv[2:])\n\n args = self.options.args\n\n # Check that the proper number of arguments have been provided.\n argspec = inspect.getargspec(self.main)\n minargs = len(argspec[0]) - 1\n maxargs = minargs\n\n # Arguments that have a default value are considered optional.\n if argspec[3] is not None:\n minargs -= len(argspec[3])\n\n if argspec[1] is not None:\n maxargs = None\n\n if len(args) < minargs or (maxargs is not None and\n len(args) > maxargs):\n parser.error('Invalid number of arguments provided')\n sys.exit(1)\n\n self.initialize()\n log_command_line('Command line: %s', argv)\n\n try:\n exit_code = self.main(*args) or 0\n except CommandError as e:\n if isinstance(e, ParseError):\n parser.error(e)\n elif self.options.debug:\n raise\n\n logging.error(e)\n exit_code = 1\n except CommandExit as e:\n exit_code = e.exit_code\n except Exception as e:\n # If debugging is on, we'll let python spit out the\n # stack trace and report the exception, otherwise\n # we'll suppress the trace and print the exception\n # manually.\n if self.options.debug:\n raise\n\n logging.critical(e)\n exit_code = 1\n\n cleanup_tempfiles()\n sys.exit(exit_code)",
"def process_cli_args():\n args = parse_cli_args()\n\n # delete empty args\n if not args[\"debug\"]:\n del args[\"debug\"]\n for arg_name in list(args.keys()):\n if args[arg_name] in [None, tuple()]:\n del args[arg_name]\n\n # validate\n validate_cli_args(args)\n\n # --write-config\n if args.pop(\"write_config\"):\n config_values = {}\n if args.get(\"command\"):\n config_values[\"command\"] = \" \".join(\n shlex.quote(subval) for subval in args[\"command\"]\n )\n if args.get(\"watch\"):\n config_values[\"watch\"] = \"\\n\".join(args[\"watch\"])\n if args.get(\"output\"):\n config_values[\"output\"] = \", \".join(args[\"output\"])\n for arg_name in [\"delay\", \"max_execs\", \"name\", \"start\", \"watcher\"]:\n if arg_name in args:\n config_values[arg_name] = args[arg_name]\n\n write_config_file(args, config_values)\n sys.exit(0)\n\n return args"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up & run a `Publisher` for command-line-based file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. This is just like publish_cmdline, except that it uses io.BinaryFileOutput instead of io.FileOutput. | def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output | [
"def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(\r\n argv, usage, description, settings_spec, config_section,\r\n **(settings_overrides or {}))\r\n self.set_io()\r\n self.document = self.reader.read(self.source, self.parser,\r\n self.settings)\r\n self.apply_transforms()\r\n output = self.writer.write(self.document, self.destination)\r\n self.writer.assemble_parts()\r\n except SystemExit, error:\r\n exit = 1\r\n exit_status = error.code\r\n except Exception, error:\r\n if not self.settings: # exception too early to report nicely\r\n raise\r\n if self.settings.traceback: # Propagate exceptions?\r\n self.debugging_dumps()\r\n raise\r\n self.report_Exception(error)\r\n exit = True\r\n exit_status = 1\r\n self.debugging_dumps()\r\n if (enable_exit_status and self.document\r\n and (self.document.reporter.max_level\r\n >= self.settings.exit_status_level)):\r\n sys.exit(self.document.reporter.max_level + 10)\r\n elif exit:\r\n sys.exit(exit_status)\r\n return output",
"def main():\n \n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-d\", \"--decode\", action=\"store_true\")\n group.add_argument(\"-e\", \"--encode\", action=\"store_true\")\n parser.add_argument(\"-s\")\n parser.add_argument(\"-file\")\n parser.add_argument(\"-out\")\n args = parser.parse_args()\n\n if(args.decode):\n if(args.file):\n print(\"Reading\", args.file)\n file = open(args.file, \"r\")\n print(\"Result: \", decode(file.readline()))\n else:\n print(\"Result: \", decode(args.s))\n if(args.out):\n file = open(args.out, \"w+\")\n if(args.s):\n file.write(decode(args.s))\n if(args.file):\n encoded_file = open(args.file, \"r\")\n file.write(decode(encoded_file.readline()))\n print(\"Successfull write to\", args.out)\n elif(args.encode):\n if(args.file):\n file = open(args.file, \"r\")\n string = file.readline()\n else:\n string = args.s\n \n run = res_list_to_string(\n tricky(\n convert_bits_to_int_to_char(\n convert_string_binary_to_list(\n convert_list_to_string(\n convert_list_to_binary(\n convert_list_to_int(\n from_string_to_list(string))\n ))))))\n if(args.out):\n file = open(args.out, \"w+\")\n file.write(run)\n print(\"Successfull write in\", args.out)\n else:\n print(\"Result: \", run)\n\n\n else:\n print(\"Nothing to do.\")",
"def run_from_cmd():\n\n input_filepath, output_filepath = parse_command_line_args()\n\n if input_filepath:\n input = open(input_filepath, 'r')\n else:\n input = sys.stdin\n\n if output_filepath:\n output = open(output_filepath, 'wb')\n else:\n output = sys.stdout.buffer\n\n convert(input, output)",
"def main():\n file_reader = Text_Processor()\n publishers = file_reader.read_files()\n json_exporter(publishers)\n run()",
"def write_command(*args, **kwargs):\n encoding = 'default'\n if 'encoding' in kwargs:\n encoding = kwargs['encoding']\n # TODO: should we delete it from kwargs?\n stdin = kwargs['stdin']\n if encoding is None or encoding == 'default':\n stdin = encode(stdin)\n else:\n stdin = encode(stdin, encoding=encoding)\n if _capture_stderr and 'stderr' not in kwargs.keys():\n kwargs['stderr'] = PIPE\n process = feed_command(*args, **kwargs)\n unused, stderr = process.communicate(stdin)\n if encoding is not None:\n unused = _make_unicode(unused, encoding)\n stderr = _make_unicode(stderr, encoding)\n returncode = process.poll()\n if _capture_stderr and returncode:\n sys.stderr.write(stderr)\n return handle_errors(returncode, returncode, args, kwargs)",
"def embedded_pipeline():\n return \"\\n\".join(args['--cmd'])",
"def asCommandLine(self, args):\n try:\n inFile = args[\"xmlfile\"]\n outFile = args[\"out\"]\n except:\n raise PeachException(\"XmlAnalyzer requires two parameters, xmlfile and out.\")\n xml = _Xml2Peach().xml2Peach(\"file:\" + inFile)\n with open(outFile, \"wb+\") as fo:\n fo.write(xml)",
"def convert(inpipe):\r\n temp_args = split(\r\n \"convert -monitor -compress {} - pdf:-\".format(ARGS.compression))\r\n call(temp_args, stdin=inpipe)",
"def _add_input_output(input_files=None, output_file=None, pipe=True):\n\n input_files =\\\n input_files if isinstance(input_files, (list, tuple)) else [input_files]\n\n cmd = ''\n for input_file in input_files:\n if input_file:\n cmd += ' {}'.format(input_file)\n\n if output_file:\n cmd += ' > {}'.format(output_file)\n elif pipe:\n cmd += ' | '\n\n return cmd",
"def generateFile( self ):\n\n with temp_out_file( \"this\", ext( self.to_filename ) ) as outfile:\n args = ( \"pepper\", \"-o\", outfile.name, self.from_filename )\n ret, out, err = run_cmd( args, self.path )\n contents = read_file( outfile.name )\n\n assert ret == 0, ( '\"%s\" should return 0 but returned %d' % (\n cmd_desc( args, repl = ( 2, self.to_filename ) ), ret ) )\n\n expected_contents = read_file( os.path.join(\n self.path, self.to_filename ) )\n\n assert contents == expected_contents, (\n '\"%s\" should result in a file containing:\\n%s' +\n '\\nbut instead it gave:\\n%s' ) % (\n cmd_desc( args, repl = ( 2, self.to_filename ) ),\n expected_contents,\n contents )",
"def run_one(options, args, stem_prefix='', input_file=None):\n if input_file is None:\n input_file = options.input_file\n stem = stem_prefix + '-'.join(args)\n data_filename = output_file_name(options.output_directory, stem, 'h')\n stdout_filename = output_file_name(options.output_directory, stem, 'out')\n stderr_filename = output_file_name(options.output_directory, stem, 'err')\n status_filename = output_file_name(options.output_directory, stem, 'status')\n shutil.copy(input_file, data_filename)\n # Pass only the file basename, not the full path, to avoid getting the\n # directory name in error messages, which would make comparisons\n # between output directories more difficult.\n cmd = [os.path.abspath(options.script),\n '-f', os.path.basename(data_filename)]\n with open(stdout_filename, 'wb') as out:\n with open(stderr_filename, 'wb') as err:\n status = subprocess.call(cmd + args,\n cwd=options.output_directory,\n stdin=subprocess.DEVNULL,\n stdout=out, stderr=err)\n with open(status_filename, 'w') as status_file:\n status_file.write('{}\\n'.format(status))\n return stem + \"+\", data_filename",
"def test_file_write(self):\n\n args = self.parser.parse_args([self.str_len, '--file', '--raw-output'])\n\n self.randstr_output(args).process_parsed_args()\n output = sys.stdout.getvalue()\n\n filename = os.path.join(self.test_dir, args.file)\n with open(filename, 'r') as f:\n random_string = f.read()\n\n self.assertIn(random_string, output)",
"def generate_output(output, out = \".dvi\"):\n print 'hi', output\n # Standard tex inputs required for compiling .tex file\n filename = os.path.join(\"c:\",\"output\")\n tex = \".tex\"; pdf = \".pdf\"; dvi = \".dvi\"; ps = \".ps\"\n begin = [\"\\documentclass[12pt]{article}\\n\",\n \"\\usepackage{amsmath,url}\\n\",\n \"\\\\begin{document}\\n\",\n \"\\section{Cross-Section}\\n\\n\"]\n end = [\"\\end{document}\"]\n \n pieces = []\n # Crappy method to find out the type of the input, and then LaTeXify it\n if not isinstance(output, str):\n \n # Input is a list. Break it up and try to LaTeXify each piece\n if isinstance(output, list):\n try:\n print 'list'\n for i in range(len(output)):\n pieces.append(sp.latex(output[i]))\n except: e\n # Input is probably just a sympy expression\n else:\n try:\n output = sp.latex(output)+\"\\n\"\n except: \n e\n print e\n \n # Input is a string\n else: output = output+\"\\n\\n\"\n\n # If the input was a list, join all the pieces into one string with 2 spaces between them. \n if pieces != []:\n output = '\\n\\n'.join(pieces)\n # If the LaTeXifed input has any commas in it, split the expression at those commas and put some blank lines in between\n else:\n if output.find(',') > 0:\n output = '\\n'.join(output.split(','))\n\n print output\n # Create file and write to it\n FILE = open(filename+tex, \"w\")\n FILE.writelines(begin)\n FILE.writelines(output)\n FILE.writelines(end)\n FILE.close()\n\n if 1:\n # Create commands\n compile = [\"latex\",filename+tex]\n disdvi = [\"yap\", filename+dvi]\n \n # Process commands\n a = sub.Popen(compile,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n a.communicate()\n a.wait()\n \n # BROKEN\n if out == \"pdf\":\n tops = [\"dvips\", filename+dvi]\n topdf = [\"ps2pdf\", filename+ps]\n dispdf = [\"C:/Program Files/Adobe/Reader 9.0/Reader/AcroRd32\", filename+pdf]\n c = sub.check_call(tops)\n # c = sub.Popen(tops,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n # c.communicate\n # c.wait()\n d = sub.Popen(topdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n d.communicate\n d.wait()\n e = sub.Popen(dispdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n e.communicate\n else:\n b = sub.Popen(disdvi,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n b.communicate()",
"def _perform_transform(self, data, **kwargs):\n # Ensure that kaleido subprocess is running\n self._ensure_kaleido()\n\n # Perform export\n export_spec = self._json_dumps(dict(kwargs, data=data)).encode('utf-8')\n\n # Write to process and read result within a lock so that can be\n # sure we're reading the response to our request\n with self._proc_lock:\n # Reset _std_error buffer\n self._std_error = io.BytesIO()\n\n # Write and flush spec\n self._proc.stdin.write(export_spec)\n self._proc.stdin.write(\"\\n\".encode('utf-8'))\n self._proc.stdin.flush()\n response = self._proc.stdout.readline()\n\n response_string = response.decode('utf-8')\n if not response_string:\n message = (\n \"Transform failed. Error stream:\\n\\n\" +\n self._get_decoded_std_error()\n )\n raise ValueError(message)\n try:\n response = json.loads(response_string)\n except JSONDecodeError:\n print(\"Invalid JSON: \" + repr(response_string))\n raise\n\n return response",
"def to_stream(stream_name: str) -> IO[Any]:\n if stream_name == \"<stdout>\":\n return sys.__stdout__\n\n if stream_name == \"<stderr>\":\n return sys.__stderr__\n\n stream_file = Path(stream_name)\n if not stream_file.exists() or not stream_file.is_file():\n raise argparse.ArgumentTypeError(f\"{stream_name} is not a file\")\n\n try:\n return stream_file.open(\"a\")\n except:\n raise argparse.ArgumentTypeError(f\"could not open {stream_name} for writing\")",
"def test_save(self):\n\n out,err=Popen(\"cat testdata/mail-001 | python mail2json.py save x.out\", \n stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True).communicate()\n assert not err, err\n output=[ l.split('/x.out/')[1] for l in out.strip('\\n').split('\\n') ]\n # \n # output: json file representing the mime structure of email and any attachments are printed.\n # \n self.assertEqual(output, \n ['%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/img_1871.mov',\n '%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/md.json'])\n # \n # output directory layout\n # \n self.assertEqual(\n list(os.walk('./x.out')),\n [('./x.out', \n ['%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E'], \n []), \n ('./x.out/%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E', \n [], \n ['md.json', 'img_1871.mov'])])\n # \n # json-ified mime message looks like this\n # \n self.assertEqual(\n json.load(file('x.out/%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/md.json')),\n {u'content': [{u'content': u'\\n\\n',\n u'header': {u'content-transfer-encoding': u'7bit',\n u'content-type': u'text/plain;\\n\\tcharset=us-ascii'}},\n {u'content': {u'encoding': u'base64',\n u'md5': u'762bc5d5715b6102111346c6069c23e5',\n u'media': True,\n u'name': u'img_1871.mov',\n u'suffix': u'.mov'},\n u'header': {u'content-disposition': u'attachment;\\n\\tfilename=IMG_1871.MOV',\n u'content-transfer-encoding': u'base64',\n u'content-type': u'video/quicktime;\\n\\tname=IMG_1871.MOV'}}],\n u'from': [u'tengu@example.com'],\n u'header': {u'content-transfer-encoding': u'7bit',\n u'content-type': u'multipart/mixed; boundary=Apple-Mail-E670757C-566F-46A7-82A7-DEADBEEF',\n u'date': u'Fri, 7 Feb 2014 09:07:23 +0900',\n u'delivered-to': u'skydog@example.com',\n u'from': {u'addr': u'tengu@example.com', u'name': u'Tengu'},\n u'message-id': u'<DEADBEEF-F52B-4B36-85D0-A85CF7B02C40@i.example.com>',\n u'mime-version': u'1.0 (1.0)',\n u'received': u'from [10.0.1.4] ([100.100.100.100] [100.100.100.100])\\n by hoge.i.example.com with ESMTP\\n id <20140207000724308.PHJN.36465.hoge.i.example.com@hoge.mailsv.example.com>\\n for <skydog@example.com>; Fri, 7 Feb 2014 09:07:24 +0900',\n u'return-path': u'<tengu@example.com>',\n u'to': u'skydog@example.com',\n u'x-mailer': u'iPhone Mail (11B554a)',\n u'x-original-to': u'skydog@example.com',\n u'x-sb-service': u'Virus-Checked'},\n u'media': [{u'encoding': u'base64',\n u'md5': u'762bc5d5715b6102111346c6069c23e5',\n u'media': True,\n u'name': u'img_1871.mov',\n u'suffix': u'.mov'}],\n u'message-id': [u'<DEADBEEF-F52B-4B36-85D0-A85CF7B02C40@i.example.com>'],\n u'text': [u'\\n\\n']}\n )",
"def dummy_runner(output):\n if isinstance(output, str):\n output = output.encode(ENCODING)\n def runner(*args, **kwargs):\n return output\n\n return runner",
"def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:\n for out in clt.outputs:\n if out.type == \"stdout\":\n if out.outputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify outputBinding when using stdout shortcut.\"\n )\n if clt.stdout is None:\n clt.stdout = str(\n hashlib.sha1( # nosec\n json_dumps(clt.save(), sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n )\n out.type = \"File\"\n out.outputBinding = cwl.CommandOutputBinding(glob=clt.stdout)\n elif out.type == \"stderr\":\n if out.outputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify outputBinding when using stderr shortcut.\"\n )\n if clt.stderr is None:\n clt.stderr = str(\n hashlib.sha1( # nosec\n json_dumps(clt.save(), sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n )\n out.type = \"File\"\n out.outputBinding = cwl.CommandOutputBinding(glob=clt.stderr)\n for inp in clt.inputs:\n if inp.type == \"stdin\":\n if inp.inputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify unputBinding when using stdin shortcut.\"\n )\n if clt.stdin is not None:\n raise ValidationException(\n \"Not allowed to specify stdin path when using stdin type shortcut.\"\n )\n else:\n clt.stdin = (\n \"$(inputs.%s.path)\"\n % cast(str, inp.id).rpartition(\"#\")[2].split(\"/\")[-1]\n )\n inp.type = \"File\"",
"def make_bin_outputs(expt_dir, outputs_fname, cutoff):\n outputs = proc.load_outputs(outputs_fname)\n bin_outputs = proc.bin_transform_outputs(outputs, cutoff)\n bin_out_fname = expt_dir + \"/process/bin_outputs.cutoff_{0}.txt\".format(cutoff)\n proc.write_outputs(bin_outputs, bin_out_fname)",
"def main():\r\n\r\n parser = argparse.ArgumentParser(description=\"LSB Steganography Toy\")\r\n\r\n parser.add_argument(\"-i\",\"--inputfile\", action= \"store\", dest = \"inputfile\", help=\"Stores the input image file\", required= True)\r\n parser.add_argument(\"-m\",\"--message\", action= \"store\", dest=\"message\",help=\"the string you want to encode into an image\")\r\n parser.add_argument(\"-e\", \"--encode\", action = \"store_true\", default = False, dest = \"boolean_switch_encode\", help = \"set switch to true that you want to encode the image\")\r\n parser.add_argument(\"-d\",\"--decode\", action = \"store_true\", default = False, dest = \"boolean_switch_decode\", help = \"set switch to true that you want to decode the image\")\r\n\r\n results = parser.parse_args()\r\n\r\n inputfile = results.inputfile\r\n message = results.message\r\n boolean_switch_encode = results.boolean_switch_encode\r\n boolean_switch_decode = results.boolean_switch_decode\r\n\r\n if (boolean_switch_encode == True):\r\n encode(message, inputfile)\r\n print(\"Encode is successful\")\r\n print(message)\r\n\r\n else:\r\n print(\"Encode unsuccessful, message must be longer.\")\r\n\r\n if (boolean_switch_decode == True):\r\n decode(inputfile)\r\n print (\"Decoded file is saved in images/decoded.png\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client. | def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=True,
initial_header_level=1):
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts | [
"def direct_from_string(text: str) -> dict:\n return MarkdownTextObject(text=text).to_dict()",
"def _parse_fragment(fragment_string: str) -> Dict[str, str]:\n fragment_string = fragment_string.lstrip('#')\n\n try:\n return dict(\n cast(Tuple[str, str], tuple(key_value_string.split('=')))\n for key_value_string in fragment_string.split('&')\n )\n except ValueError:\n raise ValueError(f'Invalid fragment string {fragment_string}')",
"def direct_from_string(text: str) -> dict:\n return PlainTextObject(text=text).to_dict()",
"def headerDict(header_string):\n\t# >7244:002ce8 FBpp0236088 gene=FBgn0208790 orthodb8_OG=EOG8MGTH1 orthodb8_level=32281 organism_name=`Drosophila virilis` uniprot_de=`GJ21671`\n\t# Handling awful cases like uniprot_de=`Probable tRNA 2`-O-ribose methyltransferase`\n\theader_string = header_string.replace(\"`-\", \"'-\")\n\tquote_split = header_string.split(\"`\")\n\tdef garble(x):\n\t\treturn x.replace(\" \", \"@*#/*\")\n\tdef degarble(x):\n\t\treturn x.replace(\"@*#/*\", \" \")\n\treform = quote_split[0]\n\txi = 1\n\twhile xi < len(quote_split):\n\t\t# string in quotes\n\t\treform += garble(quote_split[xi])\n\t\t# next string\n\t\treform += quote_split[xi+1]\n\t\txi = xi+2\n\t# Split \n\n\td = {}\n\tfor entry in reform.split():\n\t\tif '=' in entry:\n\t\t\tsp = entry.split('=')\n\t\t\td[sp[0]] = degarble(sp[1])\n\t\n\treturn d",
"def parse_resource_document(self, content):\n\n content = content.strip()\n\n if not content.startswith('<html>'):\n # this is not a full HTML doc, probably content w/o title, tags, etc\n return dict(body=content)\n\n result = {}\n if '<title>' in content and '</title>' in content:\n result['subject'] = content[content.find('<title>') + 7:content.find('</title>')].strip()\n result['body'] = content[content.find('<body>') + 6:content.find('</body>')].strip()\n\n return result",
"def tags_from_string(tag_string):\n return dict(map(lambda kv: kv.split('='), tag_string.split(';')))",
"def split_html(html_string):\n\n try:\n i = html_string.index(\"<body\")\n j = html_string.index(\">\", i) + 1\n k = html_string.index(\"</body\")\n except ValueError:\n raise Exception(\"This is not a full html document.\")\n start = html_string[:j]\n body = html_string[j:k]\n ending = html_string[k:]\n return start, body, ending",
"def create_chapter_from_string(self, html_string, url=None, title=None, request_object=None):\n if request_object:\n # Test case: https://new.qq.com/omn/20180816/20180816A0A0D0.html which return headers \"content-type: text/html; charset=GB2312\"\n # ... shouldn't make it utf-8\n if not request_object.encoding: # just in case, default depends on header content-type(alternative to html meta)\n request_object.encoding = 'utf-8'\n html_string = request_object.text\n else:\n # test case(ISO-8859-1): http://castic.xiaoxiaotong.org/2019/studentDetails.html?77061\n try:\n html_string = request_object.text.encode(request_object.encoding).decode('utf-8')\n except UnicodeDecodeError:\n # test case: https://www.dawuxia.net/forum.php?mod=viewthread&tid=1034211\n html_string = request_object.text\n elif not html_string: #if 404, request_object will None\n html_string = '<html></html>'\n #print(html_string)\n clean_html_string = self.clean_function(html_string)\n #print(clean_html_string)\n clean_xhtml_string = clean.html_to_xhtml(clean_html_string)\n if title:\n pass\n else:\n try:\n if request_object:\n root = BeautifulSoup(html_string, 'html.parser')\n meta_encoding = hole_meta_encoding(root)\n #print(meta_encoding)\n if meta_encoding and (meta_encoding.lower() != 'utf-8'):\n print('Encoding to meta encoding: ' + repr(meta_encoding))\n request_object.encoding = meta_encoding\n html_string = request_object.text\n root = BeautifulSoup(html_string, 'html.parser')\n clean_html_string = self.clean_function(html_string)\n clean_xhtml_string = clean.html_to_xhtml(clean_html_string)\n \n else:\n root = BeautifulSoup(html_string, 'html.parser')\n\n title_node = root.title\n if title_node is not None:\n #title = unicode(title_node.string)\n title = title_node.string\n if title == None:\n title = 'Unknown title'\n else:\n raise ValueError\n except (IndexError, ValueError):\n title = 'Ebook Chapter'\n #print(clean_xhtml_string)\n return Chapter(clean_xhtml_string, title, url)",
"def split(self, string):\n\n # I prefer the Jekyll front matter format\n # but for compatibility keep the Flask-FlatPages one:\n\n lines = iter(string.split('\\n'))\n\n meta = '\\n'.join(itertools.takewhile(str.strip, lines))\n body = '\\n'.join(lines)\n\n return meta, body",
"def parseString(input_string):\r\n elements = {}\r\n while input_string:\r\n result = re.match(r\"[{}][{}]?\".format(CAP_CHARS, LOW_CHARS), input_string)\r\n \r\n # for fear that the parser cannot recognize the material string\r\n try:\r\n ele_str = result.group(0)\r\n pos_ele = result.span()[1]\r\n except AttributeError:\r\n return {}\r\n\r\n if pos_ele < len(input_string) and input_string[pos_ele].isdigit():\r\n result = re.match(r\"\\d+\\.?\\d*\", input_string[pos_ele:])\r\n pos_num = result.span()[1]\r\n number = float(result.group(0))\r\n else:\r\n pos_num = 0\r\n number = 1.0\r\n \r\n try:\r\n ele_index = lookupEle(ele_str)\r\n except KeyError:\r\n raise NoSuchElementError(ele_str)\r\n # one element could appear multiple times in one material string\r\n if ele_index not in elements: \r\n elements[ele_index] = 0.0\r\n \r\n elements[ele_index] += number\r\n input_string = input_string[(pos_num+pos_ele):]\r\n \r\n return elements",
"def hl7_str_to_dict(s, use_long_name=True):\n #s = s.replace(\"\\n\", \"\\r\")\n print(s)\n try:\n m = parse_message(s)\n return hl7_message_to_dict(m, use_long_name=use_long_name)\n except ParserError:\n return dict()",
"def read_bibstring(instring, string_dict={}): ###parses a bibtex string into a list of dictionaries\n\tdlist = []\n\tlines = []\n\n# ADDED PARAMETER FOR string_dict\n#\tstring_dict = {}\n\n#\tprint instring\n\tfor line in string.split(instring,'\\n'):\n\t\tif string.find(line,'--BREAK--') >= 0: \n\t\t\tbreak\n\t\telse: lines = lines + [string.strip(line)]\n\tinstring = string.join(lines,'\\n')\n\titems = string.split('\\n'+ instring,'\\n@')\n\t #### must add the leading '\\n' in case string starts with an '@'\n\tfor item in items[1:]:\n\t\t\t(d,string_dict) = read_bibitem(item,string_dict)\n\t\t\tdlist = dlist + [d]\t\n\treturn dlist",
"def _load_root_from_string(text):\n\n root = html.fromstring(text)\n return root",
"def get_form_as_dict(response):\n html_parser = etree.HTMLParser()\n root = etree.fromstring(response.get_data(), html_parser)\n input_elements = CSSSelector(\"input\")(root)\n form = {ie.attrib[\"name\"].replace(\"-input\", \"\"): ie for ie in input_elements}\n form[\"description\"] = CSSSelector(\"textarea#description-textarea\")(root)[0]\n return form",
"def html_to_dict(string):\n soup = BeautifulSoup(string, 'html.parser')\n recipe = {}\n recipe['title'] = soup.find(\"h1\").string\n try:\n recipe['text'] = soup.select(\"h1 + p\")[0].string.strip()\n except IndexError:\n recipe['text'] = None\n try:\n # Case one, author still exists\n recipe['author'] = soup.find(\"div\", class_=\"recipe-author\").find_all(\"span\")[-1].text.strip()\n except IndexError:\n # author was deleted\n recipe['author'] = soup.find(\"div\", class_=\"recipe-author\").find(\"div\", class_=\"ds-mb-right\").text.strip()\n ingredients = soup.find(\"table\", class_=\"ingredients\").find_all(\"tr\")\n recipe['ingredients'] = [(i.find('td', class_=\"td-right\").text.strip(),\n i.find('td', class_=\"td-left\").text.strip().split()\n )\n for i in ingredients\n if i.find(\"th\") is None]\n recipe[\"servings\"] = soup.find(\"div\", class_=\"recipe-servings\").find(\"input\").attrs['value']\n recipe[\"rating\"] = soup.find(\"div\", class_=\"ds-rating-avg\").find(\"strong\").text\n recipe[\"rates\"] = soup.find(\"div\", class_=\"ds-rating-count\").find(\"strong\").text\n # any and all of these might actually be optional. Currently it only looked\n # like kcal is optional.\n recipe[\"preptime\"] = soup.find(\"span\", class_=\"recipe-preptime\").find(text=True, recursive=False).strip()\n recipe[\"difficulty\"] = soup.find(\"span\", class_=\"recipe-difficulty\").find(text=True, recursive=False).strip()\n recipe[\"date\"] = soup.find(\"span\", class_=\"recipe-date\").find(text=True, recursive=False).strip()\n try:\n recipe[\"kcal\"] = soup.find(\"span\", class_=\"recipe-kcalories\").find(text=True, recursive=False).strip()\n except AttributeError:\n recipe[\"kcal\"] = None\n # instruction_meta = soup.find(\"small\", class_=\"ds-recipe-meta\")\n # instruction_meta doesn't seem reliable.\n instructions = soup.select(\"small.ds-recipe-meta + div.ds-box\")[0]\n recipe[\"instructions\"] = instructions.text.strip()\n recipe[\"comment_count\"] = soup.find(\"button\", class_=\"recipe-comments-anchor\").find(\"span\").text\n comments = soup.find(\"article\", class_=\"recipe-comments\")\n comments = comments.find_all(\"div\", class_=\"comment-item\")\n recipe['comments'] = []\n for comment in comments:\n comment = {\n \"user\": comment.find(\"strong\").text.strip(),\n \"text\": comment.find(\"p\").text.strip(),\n \"date\": comment.find(\"div\", class_=\"comment-date\").text.strip(),\n }\n recipe[\"comments\"].append(comment)\n # Get the categories\n recipe['categories'] = [tag.text.strip() for tag in soup.select(\"div > a.ds-tag\")]\n return recipe",
"def find_pdfdocencoding(encoding):\n\n if encoding != 'pdfdocencoding':\n return\n\n # Create the decoding map based on the table in section D.2 of the\n # PDF 1.7 manual\n\n # Start off with the characters with 1:1 correspondence\n decoding_map = set(range(0x20, 0x7F)) | set(range(0xA1, 0x100))\n decoding_map.update((0x09, 0x0A, 0x0D))\n decoding_map.remove(0xAD)\n decoding_map = dict((x, x) for x in decoding_map)\n\n # Add in the special Unicode characters\n decoding_map.update(zip(range(0x18, 0x20), (\n 0x02D8, 0x02C7, 0x02C6, 0x02D9, 0x02DD, 0x02DB, 0x02DA, 0x02DC)))\n decoding_map.update(zip(range(0x80, 0x9F), (\n 0x2022, 0x2020, 0x2021, 0x2026, 0x2014, 0x2013, 0x0192, 0x2044,\n 0x2039, 0x203A, 0x2212, 0x2030, 0x201E, 0x201C, 0x201D, 0x2018,\n 0x2019, 0x201A, 0x2122, 0xFB01, 0xFB02, 0x0141, 0x0152, 0x0160,\n 0x0178, 0x017D, 0x0131, 0x0142, 0x0153, 0x0161, 0x017E)))\n decoding_map[0xA0] = 0x20AC\n\n # Make the encoding map from the decoding map\n encoding_map = codecs.make_encoding_map(decoding_map)\n\n # Not every PDF producer follows the spec, so conform to Postel's law\n # and interpret encoded strings if at all possible. In particular, they\n # might have nulls and form-feeds, judging by random code snippets\n # floating around the internet.\n decoding_map.update(((x, x) for x in range(0x18)))\n\n def encode(input, errors='strict'):\n return codecs.charmap_encode(input, errors, encoding_map)\n\n def decode(input, errors='strict'):\n return codecs.charmap_decode(input, errors, decoding_map)\n\n return codecs.CodecInfo(encode, decode, name='pdfdocencoding')",
"def parse_html(html_string):\n return BeautifulSoup(html_string, \"html.parser\")",
"def _str2dict(self, istring):\n retDict = {}\n if istring == EMPTY_FIELD:\n return retDict\n for feat in istring.split(FEAT_SEP):\n # feature format changed in MATE\n if FEAT_VALUE_SEP_RE.search(feat):\n retDict.update((feat.split(FEAT_VALUE_SEP),))\n else:\n retDict.update([self._new2old(feat)])\n return retDict",
"def get_dict(string: str) -> Dict[str, int]:\n splited = string[1:-1].split(\", \")\n my_dict = {}\n for i in splited:\n key, value = i.split(\":\")\n if key[0] == \"'\" and key[-1] == \"'\":\n key = key[1:-1]\n if value[0] == \"'\" and value[-1] == \"'\":\n value = value[1:-1]\n my_dict[key] = value\n return my_dict",
"def parse_form_encoded_body(form):\n dict = {}\n\n fields = form.split('&')\n for field in fields:\n key_value = field.split('=')\n key = key_value[0]\n value = None if len(key_value) == 1 else key_value[1]\n dict[key] = value\n\n return dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input string, returns an HTML fragment as a string. The return value is the contents of the element. | def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=True, initial_header_level=1):
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment | [
"def fragment_fromstring(html, create_parent=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n accept_leading_text = bool(create_parent)\n\n elements = fragments_fromstring(\n html, guess_charset=guess_charset, parser=parser,\n no_leading_text=not accept_leading_text)\n\n if create_parent:\n if not isinstance(create_parent, _strings):\n create_parent = 'div'\n new_root = Element(create_parent)\n if elements:\n if isinstance(elements[0], _strings):\n new_root.text = elements[0]\n del elements[0]\n new_root.extend(elements)\n return new_root\n\n if not elements:\n raise etree.ParserError('No elements found')\n if len(elements) > 1:\n raise etree.ParserError('Multiple elements found')\n result = elements[0]\n if result.tail and result.tail.strip():\n raise etree.ParserError('Element followed by text: %r' % result.tail)\n result.tail = None\n return result",
"def innerhtml(el: Element, encoding: str=\"utf-8\") -> str:\n children = [_ for _ in el.iterchildren()]\n if not len(children):\n return el.text_content()\n text = \"%s\" % el.text if el.text else \"\"\n return \"%s%s\" % (text, \"\".join([tostring(c).decode(encoding) for\n c in el.iterchildren()]))",
"def HTML(html): # pylint: disable=invalid-name\n return markupsafe.Markup(html)",
"def html(s):\n pattern = s.strip()\n pattern = re.sub(r'\\s*<\\s*', '<', pattern)\n pattern = re.sub(r'\\s*>\\s*', '>', pattern)\n pattern = re.escape(pattern)\n pattern = re.sub(r'(?:\\\\\\s)+', r'\\\\s+', pattern)\n pattern = re.sub(r'<', r'\\\\s*<\\\\s*', pattern)\n pattern = re.sub(r'>', r'\\\\s*>\\\\s*', pattern)\n pattern = '\\\\s*' + pattern + '\\\\s*'\n pattern_object = re.compile(pattern, flags=re.IGNORECASE)\n return lambda c, x: \\\n [(True, c, x[len(pattern_object.match(x)[0]):])] if pattern_object.match(x) \\\n else [(False, s.strip(), x)]",
"def toString(self):\n\n return self.openTagToString() + self.innerHTMLtoString() + self.endTagToString()",
"def fragments_fromstring(html, no_leading_text=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n if parser is None:\n parser = html_parser\n\n children = parser.parseFragment(html, 'div', useChardet=guess_charset)\n if children and isinstance(children[0], _strings):\n if no_leading_text:\n if children[0].strip():\n raise etree.ParserError('There is leading text: %r' %\n children[0])\n del children[0]\n return children",
"def create_test_html():\n return lxml.html.fromstring(\"\"\"<html>\n <head>\n </head>\n <body>\n <div class=\"test\">Some <em>text</em></div>\n <img src=\"some_location\" alt=\"Alt text\" width=540>\n More <b>text</b>\n </body>\n </html>\"\"\")",
"def rst2html(rst_string):\r\n overrides = {'output_encoding': 'latin1', 'initial_header_level': 1}\r\n html_string = docCore.publish_string(\r\n source=rst_string, \r\n writer_name='html', settings_overrides=overrides)\r\n return html_string",
"def innerHTML(self):\n return self._innerHTML",
"def clean_html5lib(input):\n from html5lib import treebuilders, treewalkers, serializer, sanitizer\n\n p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder(\"dom\"))\n\n dom_tree = p.parseFragment(input)\n\n walker = treewalkers.getTreeWalker(\"dom\")\n\n stream = walker(dom_tree)\n\n s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)\n\n return \"\".join(s.serialize(stream))",
"def parse_html(html_string):\n return BeautifulSoup(html_string, \"html.parser\")",
"def textContent(node):\n return ''.join(node.itertext())",
"def textstring(el):\n strval = u''\n strval += (el.etree_element.text or u'')\n for elem in el.iter_children():\n strval += textstring(elem)\n strval += (el.etree_element.tail or u'')\n return strval",
"def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)",
"def tostring(element):\n rv = []\n\n def serializeElement(element):\n if not hasattr(element, \"tag\"):\n if element.docinfo.internalDTD:\n if element.docinfo.doctype:\n dtd_str = element.docinfo.doctype\n else:\n dtd_str = \"<!DOCTYPE %s>\" % element.docinfo.root_name\n rv.append(dtd_str)\n serializeElement(element.getroot())\n\n elif element.tag == comment_type:\n rv.append(\"<!--%s-->\" % (element.text,))\n\n else:\n # This is assumed to be an ordinary element\n if not element.attrib:\n rv.append(\"<%s>\" % (element.tag,))\n else:\n attr = \" \".join([\"%s=\\\"%s\\\"\" % (name, value)\n for name, value in element.attrib.items()])\n rv.append(\"<%s %s>\" % (element.tag, attr))\n if element.text:\n rv.append(element.text)\n\n for child in element:\n serializeElement(child)\n\n rv.append(\"</%s>\" % (element.tag,))\n\n if hasattr(element, \"tail\") and element.tail:\n rv.append(element.tail)\n\n serializeElement(element)\n\n return \"\".join(rv)",
"def html_to_safe_dom(html_string):\n\n tag_bindings = get_tag_bindings()\n\n node_list = safe_dom.NodeList()\n if not html_string:\n return node_list\n\n def _process_html_tree(elt):\n node_list = safe_dom.NodeList()\n\n tail = elt.tail\n\n if elt.tag in tag_bindings:\n elt = tag_bindings[elt.tag]().render(elt)\n\n out_elt = safe_dom.Element(elt.tag)\n out_elt.add_attribute(**elt.attrib)\n if elt.text:\n out_elt.add_text(elt.text)\n for child in elt:\n out_elt.add_children(_process_html_tree(child))\n node_list.append(out_elt)\n if tail:\n node_list.append(safe_dom.Text(tail))\n return node_list\n\n parser = html5lib.HTMLParser(\n tree=html5lib.treebuilders.getTreeBuilder('etree', cElementTree),\n namespaceHTMLElements=False)\n root = parser.parseFragment('<div>%s</div>' % html_string)[0]\n\n if root.text:\n node_list.append(safe_dom.Text(root.text))\n\n for elt in root:\n node_list.append(_process_html_tree(elt))\n\n return node_list",
"def unescape_html(s):\n return HTMLParser.unescape.__func__(HTMLParser, s)",
"def pango_markup(text: str, tag: str = \"span\", **attrib) -> str:\n e = Tree.Element(tag, attrib=attrib)\n e.text = text\n return Tree.tostring(e, encoding=\"unicode\")",
"def get_inner_html(self, locator):\n js = \"this.browserbot.findElement('%s').innerHTML\" % locator\n return self.execute_javascript(js).strip()",
"def escape_html(s):\n return cgi.escape(s, quote = True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Store multiple values in `parser.values`. (Option callback.) Store `None` for each attribute named in `args`, and store the value for each key (attribute name) in `kwargs`. | def store_multiple(option, opt, value, parser, *args, **kwargs):
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in list(kwargs.items()):
setattr(parser.values, key, value) | [
"def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)",
"def processArguments(self, args = None):\n\n if hasattr(sys, \"argv\") and args == sys.argv:\n args = sys.argv[1:]\n\n max = len(args) # maximum index + 1\n self.freeValues = [] # array to hold return values\n self.optionValues= {}\n index = 0 # initial index\n self.terminator = None\n self.termValues = []\n\n while index < max:\n # obtain argument\n arg = args[index]\n # increment index -- REMEMBER; it is NOW incremented\n index = index + 1\n\n # terminate immediately if option terminator encountered\n if self._isTerminator(arg):\n self.freeValues = self.freeValues + args[index:]\n self.termValues = args[index:]\n return\n\n # is this possibly an option?\n match = self.optionStartExpr.match(arg)\n if match is None:\n # not an option-- add to freeValues\n self.freeValues = self.freeValues + [arg]\n if not self.orderMixed:\n # mixing not allowed; add rest of args as freeValues\n self.freeValues = self.freeValues + args[index:]\n # return to caller\n return\n else:\n continue\n\n # grab name\n optName = match.group('option')\n\n # obtain next argument-- index has already been incremented\n nextArg = match.group('arg')\n if nextArg:\n nextArg = nextArg[1:]\n index = index - 1 # put it back\n else:\n try:\n nextArg = args[index]\n except:\n nextArg = None\n\n # transpose to lower case, if necessary\n if self.ignoreCase:\n optName = string.lower(optName)\n\n # obtain defining tuple\n tuples = self._getArgTuple(optName)\n\n if tuples == None:\n raise ArgumentError('Illegal option \\'' + arg + '\\'')\n elif len(tuples) > 1:\n raise ArgumentError('Ambiguous option \\'' + arg +\n '\\'; matches ' +\n repr(map(lambda x: x[0], tuples)))\n else:\n config = tuples[0]\n\n # config is now set to the configuration tuple for the\n # argument\n (fullName, spec, realName) = config\n (optType, optMode, optDefault, optMultiple) = spec\n\n # if opt mode required, but nextArg is none, raise an error\n if (optMode == ArgRequired):\n if (not nextArg) or self._isTerminator(nextArg):\n# print nextArg\n raise ArgumentError('Option \\'' + arg +\n '\\' requires an argument of type ' +\n optType)\n\n if (not optMode == None) and nextArg and (not self._isTerminator(nextArg)):\n # nextArg defined, option configured to possibly consume arg\n try:\n # grab conversion function-- the try is more for internal diagnostics\n func = ConversionFunctions[optType]\n try:\n optionValue = func(nextArg)\n index = index + 1\n except:\n # only raise conversion error if REQUIRED to consume argument\n if optMode == ArgRequired:\n raise ArgumentError('Invalid argument to option \\''\n + arg + '\\'; should be \\'' +\n optType + '\\'')\n else:\n optionValue = optDefault\n except ArgumentError:\n raise\n except:\n raise ArgumentError('(' + arg +\n ') Conversion function for \\'' +\n optType + '\\' not found.')\n else:\n optionValue = optDefault\n\n # add value to options dictionary\n if optMultiple:\n # can be multiple values\n try:\n # try to append element\n self.optionValues[realName] = self.optionValues[realName] + [optionValue]\n except:\n # failed-- must not exist; add it\n self.optionValues[realName] = [optionValue]\n else:\n # only one value per\n if self.isPosixCompliant and realName in self.optionValues:\n raise ArgumentError('Argument \\'' + arg +\n '\\' occurs multiple times.')\n\n self.optionValues[realName] = optionValue",
"def get_dictionary_of_values(self, *args):\n values = {}\n for data in args:\n values[data] = self.get_value(data)\n\n return values",
"def _parse_args(parser, argv):\n i = 0\n args = []\n kwargs = {}\n\n for action in parser._option_string_actions.values():\n if action.dest != \"help\":\n kwargs[action.dest] = action.default\n\n positionals = parser._get_positional_actions()\n if len(positionals) == 0:\n wildcard = None\n elif len(positionals) == 1:\n action = positionals[0]\n if action.nargs != argparse.REMAINDER:\n raise api.api_utils.NoTracebackError(\n f\"Cannot parse position argument: {action} with nargs={action.nargs}\"\n )\n wildcard = action.dest\n kwargs[wildcard] = []\n else:\n raise api.api_utils.NoTracebackError(\n f\"Cannot handle multiple positional arguments: {positionals}\"\n )\n\n while i < len(argv):\n arg = argv[i]\n if arg.startswith(\"--\"):\n if \"=\" in arg:\n key, value = arg.split(\"=\", 1)\n else:\n key = arg\n value = None\n try:\n action = parser._option_string_actions[key]\n except KeyError:\n if wildcard is None:\n raise api.api_utils.NoTracebackError(f\"Unknown argument: {arg}\")\n kwargs[wildcard].append(arg)\n if i + 1 < len(argv) and not argv[i + 1].startswith(\"--\"):\n kwargs[wildcard].append(argv[i + 1])\n i += 1\n i += 1\n continue\n\n if isinstance(action, argparse._StoreAction):\n if value is None:\n value = argv[i + 1]\n i += 1\n elif isinstance(action, argparse._StoreConstAction):\n if value is not None:\n raise api.api_utils.NoTracebackError(\n f\"--{key} accepts no arguments, but got: {repr(value)}\"\n )\n value = action.const\n else:\n raise api.api_utils.NoTracebackError(f\"Unknown action: {action}\")\n kwargs[action.dest] = value\n else:\n if wildcard is not None:\n kwargs[wildcard].append(arg)\n else:\n args.append(arg)\n i += 1\n\n if wildcard is not None:\n kwargs[wildcard] = \" \".join(shlex.quote(a) for a in kwargs[wildcard])\n\n return args, kwargs",
"def _apply_kwargs(args, kwargs):\n\n for arg_name in kwargs:\n arg_value = str(kwargs[arg_name])\n args.append(\"--%s\" % (arg_name))\n if arg_value:\n args.append(arg_value)",
"def filter_args(**kwargs) -> Iterator[Tuple[str, Union[int, str]]]:\n for name, value in kwargs.items():\n if value is None:\n try:\n value = FILTER_DEFAULTS[name]()\n except KeyError:\n continue\n yield full_filter_name(name), value",
"def get_option_values(self):\n \n class CommandLineOptions(object):\n def __getattr__(self, name):\n # if an attribute can not be found, this is the last function called\n all_option_names=\", \".join(vars(self).keys())\n error_message=\"Unable to find option '{0}' in command line options.\\n\".format(name)\n error_message+=\"The available options are: {0}\".format(all_option_names)\n raise AttributeError(error_message)\n \n # get arguments from the command line (will not run again if already parsed)\n if not self._user_asked:\n self.ask_user()\n \n args=CommandLineOptions()\n for option in list(self._user_arguments.keys()) + list(self._arguments.keys()):\n option = re.sub(r'-', '_', option)\n value = self.get(option)\n setattr(args,option,value)\n \n return args",
"def get_args(self, argset):\n args = []\n kwargs = {}\n for element in argset or []:\n if isinstance(element, dict):\n kwargs.update(element)\n else:\n args.append(element)\n return args, kwargs",
"def set_attributes_from_kwargs(self, kwargs):\n for val in self.valid_kwargs:\n if val in kwargs:\n setattr(self, val, kwargs[val])",
"def set_arg_attributes(self):\n arg_spec = inspect.getfullargspec(self.func)\n\n self.args = [a for a in arg_spec.args if not a.startswith('default') and not a.startswith('_')]\n self.unpack_args = arg_spec.varargs\n self.unpack_kwargs = arg_spec.varkw\n\n if arg_spec.defaults:\n zipped = zip(reversed(arg_spec.args), reversed(arg_spec.defaults))\n self.default_args = {e[0]: e[1] for e in list(zipped)}",
"def pair_to_args(self, *args, **kwargs) -> Tuple:\n return [*args, *kwargs.values()]",
"def apply_kwargs_parser(parser):\n def inner_decorator(handler):\n def wrapped(**kwargs):\n parser_result = parser(**kwargs)\n kwargs.update(parser_result)\n handler(**kwargs)\n return wrapped\n return inner_decorator",
"def _parse(cls, node, path):\n kwargs = cls._parse_simple_attribs(node)\n kwargs.update(cls._parse_simple_elements(node, path))\n return kwargs",
"def _individual_args(args) -> None:\n if args is None:\n return\n\n if not isinstance(args, list):\n raise PluginValidationError(\n f\"Invalid {ConfigKeys.PLUGIN_ARGS.name} entry '{args}': must be a list\"\n )\n\n for arg in args:\n if not isinstance(arg, str):\n raise PluginValidationError(\n f\"Invalid plugin argument '{arg}': must be a string\"\n )",
"def parse_arglist(args):\n # per https://stackoverflow.com/a/49723227/318857\n\n args = 'f({})'.format(args)\n tree = ast.parse(args)\n funccall = tree.body[0].value\n\n args = [ast.literal_eval(arg) for arg in funccall.args]\n kwargs = {arg.arg: ast.literal_eval(arg.value)\n for arg in funccall.keywords}\n\n if len(args) > 2:\n raise TypeError(\n \"Expected at most 2 positional args but {} were given\".format(len(args)))\n\n if len(args) >= 1:\n kwargs['width'] = int(args[0])\n if len(args) >= 2:\n kwargs['height'] = int(args[1])\n\n return kwargs",
"def parse(self, cli_args=str(_sys.argv)[1:-1]):\n if cli_args is not _sys.argv:\n cli_args = cli_args.split()\n for i in range(len(cli_args)):\n cli_args[i] = cli_args[i].split('=')\n cli_args = sum(cli_args, [])\n\n self._ensure_required(cli_args)\n self._ensure_exclusive(cli_args)\n self._ensure_and_assign_values(cli_args)",
"def xs(name, parser_args, list_args):\n for args, kwargs in list_args:\n if len(set(args) & parser_args) > 0:\n yield args, kwargs\n\n else:\n if 'dest' in kwargs:\n if kwargs['dest'] == name:\n yield args, kwargs",
"def _init_kwargs(self, kwargs, kws):\n for k in kws:\n if k in kwargs:\n setattr(self, k, kwargs[k])",
"def parse_args_kwargs(parser, token):\n bits = token.contents.split(' ')\n\n if len(bits) <= 1:\n raise template.TemplateSyntaxError(\"'%s' takes at least one argument\" % bits[0])\n\n if token.contents[13] == '\"':\n end_quote = token.contents.index('\"', 14) + 1\n args = [template.Variable(token.contents[13:end_quote])]\n kwargs_start = end_quote\n else:\n try:\n next_space = token.contents.index(' ', 14)\n kwargs_start = next_space + 1\n except ValueError:\n next_space = None\n kwargs_start = None\n args = [template.Variable(token.contents[13:next_space])]\n\n kwargs = {}\n kwargs_list = token.contents[kwargs_start:].split(',')\n for kwargs_item in kwargs_list:\n if '=' in kwargs_item:\n k, v = kwargs_item.split('=', 1)\n k = k.strip()\n kwargs[k] = template.Variable(v)\n return args, kwargs",
"def fill_args(args, kwargs):\n for name, param in args.items():\n \n value = request.args.get(name)\n \n if value is None:\n value = request.form.get(name)\n \n if value is None:\n try:\n value = request.get_json(silent=True).get(name)\n except AttributeError:\n pass\n \n if value is None:\n if param.required:\n raise ApiError(message=f'Parameter {name} is required.', fields=name, what=REQUIRED)\n \n else:\n try:\n value = param.converter(value)\n except Exception as e:\n raise UnprocessableEntity(fields=name, what=BAD_VALUE,\n message=f'Failed to validate parameter {name}: {str(e)}')\n \n kwargs[name] = value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |