Dataset columns:
  query       string    (lengths 9 to 9.05k)
  document    string    (lengths 10 to 222k)
  negatives   sequence  (lengths 19 to 20)
  metadata    dict
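Each row pairs one natural-language query with the code snippet it describes, a list of hard-negative snippets, and a metadata dict declaring the training objective. A minimal sketch of loading and inspecting such a dataset with the Hugging Face `datasets` library; the repository path below is a placeholder, not the actual dataset name:

from datasets import load_dataset

# Placeholder path -- substitute the real dataset repository.
ds = load_dataset("your-org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # docstring-style natural-language query
print(row["document"][:200])    # the matching code snippet
print(len(row["negatives"]))    # 19-20 non-matching code snippets
print(row["metadata"])          # declared training objective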
Write `data` to self.stream. Ignored if self.stream is False. `data` can be a `string`, `unicode`, or `Exception` instance.
def write(self, data):
    if self.stream is False:
        return
    if isinstance(data, Exception):
        data = str(SafeString(data, self.encoding,
                              self.encoding_errors, self.decoding_errors))
    try:
        self.stream.write(data)
    except UnicodeEncodeError:
        self.stream.write(data.encode(self.encoding, self.encoding_errors))
    except TypeError:  # in Python 3, stderr expects unicode
        if self.stream in (sys.stderr, sys.stdout):
            self.stream.buffer.write(data)  # write bytes to raw stream
        else:
            self.stream.write(str(data, self.encoding, self.decoding_errors))
[ "def write(self, data):\r\n if self.stream is False:\r\n return\r\n if isinstance(data, Exception):\r\n data = unicode(SafeString(data, self.encoding,\r\n self.encoding_errors, self.decoding_errors))\r\n try:\r\n self.stream.write(data)\r\n except UnicodeEncodeError:\r\n self.stream.write(data.encode(self.encoding, self.encoding_errors))\r\n except TypeError: # in Python 3, stderr expects unicode\r\n if self.stream in (sys.stderr, sys.stdout):\r\n self.stream.buffer.write(data) # write bytes to raw stream\r\n else:\r\n self.stream.write(unicode(data, self.encoding,\r\n self.decoding_errors))", "def write(self, data):\n if self.finished:\n raise SinkException(\"The AudioData is already finished writing.\")\n try:\n self.file.write(data)\n except ValueError:\n pass", "def write(self, data):\n try:\n with open (self.filename, 'w') as file:\n dump(data, file)\n return True\n except Exception as e:\n return False", "def send(self, data):\n ret = libvirtmod.virStreamSend(self._o, data)\n if ret == -1: raise libvirtError ('virStreamSend() failed')\n return ret", "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError as e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError) as err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data", "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError, e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError), err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. 
output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data", "def safe_write(self, data):\n\t\ttry:\n\t\t\tself.ser.write(data)\n\t\t\treturn\n\t\texcept serial.SerialTimeoutException:\n\t\t\tprint('SerialProtocol: write timeout, attempting reset..')\n\t\t\tprint('WARN: Serial write timed out, attempting reset')\n\t\t\tself.reset()\n\t\t\tprint('SerialProtocol: retrying send of {} bytes'.format(len(data)))\n\t\t\tself.ser.write(data)", "def write_raw_file(self, data: bytes) -> None:\n pass", "def write(self, data):\n try:\n # Hack to support unicode under Python 2.x\n if isinstance(data, str) or (sys.version_info < (3,) and isinstance(data, unicode)):\n data = data.encode('utf-8')\n\n self._device.write(data)\n\n except serial.SerialTimeoutException:\n pass\n\n except serial.SerialException as err:\n raise CommError('Error writing to device.', err)\n\n else:\n self.on_write(data=data)", "def write(self,data):\r\n if not self.has_started:\r\n self.write_headers()\r\n self.has_started = True\r\n if self.is_chunked:\r\n self._write(hex(len(data))[2:])\r\n self._write(\"\\r\\n\")\r\n self._write(data)\r\n self._write(\"\\r\\n\")\r\n else:\r\n self._write(data)", "def _writeSomeData(self, data):\n sent = self.transport._originalWriteSomeData(data)\n self.dataSentEvent(sent)\n return sent", "def write(self, data, metadata):\n raise NotImplementedError", "def write(self, data):\n self.logger.debug(data)", "def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))", "def write_bytes(self, data):\n # type-check for the buffer interface before truncating the file\n view = memoryview(data)\n with self.open(mode='wb') as f:\n return f.write(view)", "def write(self, data, pack=struct.pack, eol=struct.pack('!b', 0)):\n send = self.send\n if data == 0:\n send(eol)\n else:\n for char in data:\n if sys.version_info[0] > 2:\n char = char.encode('utf-8')\n send(pack('!c', char))", "def write(self, data, flushing=False):\n self._assert_mode(\"w-\")\n\n if \"a\" in self.mode:\n self.seek(0, Seek.end)\n\n if not isinstance(data, binary_type):\n if isinstance(data, bytearray):\n data = bytes(data)\n elif isinstance(data, text_type):\n data = data.encode(self.encoding, self.errors)\n\n statmsg, res = self._file.write(data, offset=self._ipp)\n\n if not statmsg.ok:\n self._raise_status(self.path, statmsg, \"writing\")\n\n self._ipp += len(data)\n self._size = max(self.size, self.tell())\n if flushing:\n self.flush()", "def write_file(self, data) -> None:\n pass", "def write(self, data):\r\n assert self.open, \"*%s not open, call begin() method before writing\" %\\\r\n UART[self.config][0]\r\n\r\n if (type(data) == float): data = int(data)\r\n if (type(data) == int): data = chr(data & 0xff)\r\n\r\n elif ((type(data) == list) or (type(data) == tuple)):\r\n bytes_written = 0\r\n for i in data:\r\n bytes_written += self.write(i) \r\n return bytes_written\r\n\r\n elif (type(data) != str):\r\n # Type not supported by write, e.g. 
dict; use prints().\r\n return 0\r\n\r\n written = self.ser_port.write(data)\r\n # Serial.write() returns None if no bits written, we want 0:\r\n return written if written else 0", "def write(self, data):\n # PEP-3333 states:\n #\n # The server or gateway must transmit the yielded bytestrings to the\n # client in an unbuffered fashion, completing the transmission of\n # each bytestring before requesting another one.\n #\n # This write() method is used for the imperative and (indirectly) for\n # the more familiar iterable-of-bytestrings WSGI mechanism. It uses\n # C{blockingCallFromThread} to schedule writes. This allows exceptions\n # to propagate up from the underlying HTTP implementation. However,\n # that underlying implementation does not, as yet, provide any way to\n # know if the written data has been transmitted, so this method\n # violates the above part of PEP-3333.\n #\n # PEP-3333 also says that a server may:\n #\n # Use a different thread to ensure that the block continues to be\n # transmitted while the application produces the next block.\n #\n # Which suggests that this is actually compliant with PEP-3333,\n # because writes are done in the reactor thread.\n #\n # However, providing some back-pressure may nevertheless be a Good\n # Thing at some point in the future.\n\n def wsgiWrite(started):\n if not started:\n self._sendResponseHeaders()\n self.request.write(data)\n\n try:\n return blockingCallFromThread(\n self.reactor, wsgiWrite, self.started)\n finally:\n self.started = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the error-output stream. Ignored if the stream is `sys.stderr` or `sys.stdout` or has no close() method.
def close(self):
    if self.stream in (sys.stdout, sys.stderr):
        return
    try:
        self.stream.close()
    except AttributeError:
        pass
[ "def close_stream(self, input_stream):\n # type: (object) -> None\n # see https://docs.oracle.com/javase/7/docs/api/java/io/FilterInputStream.html#close()\n input_stream.close()", "def close(self):\n self._output_fh.close()", "def close(self):\n \n self.stream.close()", "def close(self):\n self.process.stdout.close()\n self.process.stderr.close()\n self.running = False", "def stop_stream(self):\n if self._stream is not None:\n self._stream.close()\n self._stream = None", "def output_stream(stream, formatter: Formatter):\n global output\n\n # create output before opening the stream, so file outputs can prompt on existing output\n output = create_output(formatter)\n\n success_open = False\n for i in range(args.retry_open):\n try:\n stream_fd, prebuffer = open_stream(stream)\n success_open = True\n break\n except StreamError as err:\n log.error(f\"Try {i + 1}/{args.retry_open}: Could not open stream {stream} ({err})\")\n\n if not success_open:\n return console.exit(f\"Could not open stream {stream}, tried {args.retry_open} times, exiting\")\n\n try:\n output.open()\n except OSError as err:\n if isinstance(output, PlayerOutput):\n console.exit(f\"Failed to start player: {args.player} ({err})\")\n elif output.filename:\n console.exit(f\"Failed to open output: {output.filename} ({err})\")\n else:\n console.exit(f\"Failed to open output ({err}\")\n return\n\n try:\n with closing(output):\n log.debug(\"Writing stream to output\")\n show_progress = args.progress == \"force\" or args.progress == \"yes\" and sys.stderr.isatty()\n if args.force_progress:\n show_progress = True\n warnings.warn(\n \"The --force-progress option has been deprecated in favor of --progress=force\",\n StreamlinkDeprecationWarning,\n stacklevel=1,\n )\n # TODO: finally clean up the global variable mess and refactor the streamlink_cli package\n # noinspection PyUnboundLocalVariable\n stream_runner = StreamRunner(stream_fd, output, show_progress=show_progress)\n # noinspection PyUnboundLocalVariable\n stream_runner.run(prebuffer)\n except OSError as err:\n # TODO: refactor all console.exit() calls\n console.exit(str(err))\n\n return True", "def _on_end_of_stream(self, input_stream):\n # By default, this function closes the output stream.\n self.output_stream.end_stream()", "def convert_stream_closed_error(obj, exc):\n if exc.real_error is not None:\n # The stream was closed because of an underlying OS error\n exc = exc.real_error\n if ssl and isinstance(exc, ssl.SSLError):\n if \"UNKNOWN_CA\" in exc.reason:\n raise FatalCommClosedError(\n \"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc)\n )\n raise CommClosedError(\"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc))\n else:\n raise CommClosedError(\"in %s: %s\" % (obj, exc))", "def output_closed(self):\n outread = self.stdout.readable() if self.stdout is not None else False\n errread = self.stderr.readable() if self.stderr is not None else False\n return not (outread or errread)", "def set_stdout_stderr():\n\n class Writer(object):\n def write(self, msg):\n log.debug(msg)\n if verbose:\n chunk_send(msg)\n\n def flush(self):\n pass\n\n orig_stds = sys.stdout, sys.stderr\n w = Writer()\n sys.stdout = w\n sys.stderr = w\n\n def cleanup():\n \"\"\"\n Restores stdout and stderr\n \"\"\"\n sys.stdout = orig_stds[0]\n sys.stderr = orig_stds[1]\n client_sock.close()\n\n return cleanup", "def stderr_pipe(self):\r\n return self.stderr(PIPE)", "def close(self, err_str=None, config_rollback=True):\n logger.info(\"entering close()\")\n if err_str:\n print(err_str)\n if (\n 
self.use_shell\n and self.sshshell._chan is not None\n and not self.sshshell._chan.closed\n or not self.use_shell\n and self.sshshell._transport is not None\n and self.sshshell._transport.active\n ):\n if self.rm_remote_tmp:\n self.remote_cleanup()\n if config_rollback and self.command_list:\n self.limits_rollback()\n print(f\"\\r{pad_string('closing device connection')}\")\n self.sshshell.close()\n if self.hard_close:\n try:\n shutil.rmtree(self.local_tmpdir)\n except PermissionError:\n # windows can throw this error, silence it for now\n print(\n f\"{self.local_tmpdir} may still exist, please delete manually if so\"\n )\n raise os._exit(1)\n else:\n raise SystemExit(1)", "def p2p_stream_close(self, **kwargs):\n endpoint = 'p2p/stream/close'\n args = []\n return self.client.get(endpoint, args, kwargs)", "def retrieve_stderr():\n with closing(StringIO()) as sio, replace_stderr(sio):\n oldprint = builtins.print\n try:\n # Overriding stderr doesn't work with libraries, this ensures even\n # cached variables take this up. Well... it works.\n def newprint(*args, **kwargs):\n kwargs['file'] = sio\n oldprint(*args, **kwargs)\n\n builtins.print = newprint\n yield sio\n finally:\n builtins.print = oldprint", "def close_active_stream(self):\n if self.stream is not None:\n if self.stream.is_active():\n self.stream.stop_stream()\n self.stream.close()\n self.stream = None\n self.start_time = 0.\n self.end_time = 0.", "def __shutdown_streams(self):\n if self.temp_sensor is not None:\n logger.debug('Closing temp sensor.')\n self.temp_sensor.close()\n if self.humid_sensor is not None:\n logger.debug('Closing humid sensor.')\n self.humid_sensor.close()", "def output_verbose(errors: List[Error], stream: TextIO) -> None:\n for err in errors:\n if err.lineno is not None:\n stream.write(\"{}:{}: {} ({}){}\".format(err.filename, err.lineno, err.description, err.identifier.value,\n os.linesep))\n else:\n stream.write(\"{}: {} ({}){}\".format(err.filename, err.description, err.identifier.value, os.linesep))", "def close(self):\n if not self.sink:\n return\n LOGGER.info('Closing connection with result sink server.')\n # Reset to default logging level of test runner scripts.\n logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.DEBUG)\n self._session.close()", "def redirect_stderr(new_stderr=None):\n if new_stderr is None:\n new_stderr = cStringIO.StringIO()\n old_stderr = sys.stderr\n sys.stderr = new_stderr\n try:\n yield new_stderr\n finally:\n sys.stderr = old_stderr", "def send_error_response(self, text) -> None:\n self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': text})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
append(child) -> element. Appends child and returns self if self is not full, or the first non-full parent otherwise.
def append(self, child):
    assert not self.full()
    self.children.append(child)
    child.parent = self
    node = self
    while node.full():
        node = node.parent
    return node
[ "def add_child(self, e):\n if self.children == None:\n raise TypeError('this element cannot have a child: %r' % self)\n if isinstance(e, basestring) and self.children and isinstance(self.children[-1], basestring):\n self.children[-1] += e\n else:\n self.children.append(e)\n if isinstance(e, HTMLElement):\n e.parent = self\n return self", "def add_or_fetch_child(self, element):\n\n child = self.child_by_element(element)\n if not child:\n child = Node(element, self)\n self._child_nodes.append(child)\n return child", "def append_child(elt: Element, child: Element) -> Any:\n raise NotImplementedError", "def appendOptionalChild(self, child):\n if child is not None:\n self.appendChild(child)", "def append_to(self, parent: HTMLNode) -> HTMLNode:\n parent.append(self)\n\n return parent # for chaining", "def append_child(self, child):\r\n child.parent = self\r\n self.children.append(child)\r\n self.changed()", "def add_child(self, parent, child):\n if parent is None:\n raise NoParentError(\"Parent can't be None, use 'tree.add_root(child)' instead.\")\n elif child in self._nodes:\n if child in self.children_of(parent):\n logging.debug(\"[add_child] tried to add a child already in the tree, but parent matched -> kept already existing child node\")\n return self # the child is already there -> keep existing node\n else:\n logging.debug(\"[add_child] tried to add a child already in the tree, under another parent -> kept already existing child node and added it to the parents children\")\n self._node(parent).add_child_node(self._node(child)) # the child already exists -> keep existing child and add it to the parent's children\n return self\n else:\n try:\n parent_node = self._node(parent)\n except NotInTreeError:\n # parent is not in the tree, try to make it root.\n parent_node = self.add_root(parent) # raises MultipleRootsError if there is already a root\n\n # add the child\n child_node = self._create_node(parent=parent_node, data=child)\n parent_node.add_child_node(child_node)\n self._nodes[child] = child_node\n return self", "def append_to(self, parent_element):\n\n xml = self.get_xml()\n parent_element.append(xml)", "def add_child(self, child: 'Node'):\n if child in self.children:\n return\n self.children.append(child)\n child.set_parent(self)", "def append(self, element: \"Element\") -> None:", "def appendChild(self, item):\n self.children.append(item)", "def add_child(self, child):\n if isinstance(child, list):\n seq = Sequence(child)\n seq.parent = self\n self.__children.append(seq)\n else:\n self.__children.append(child)\n child.parent = self", "def add_child(self, child):\n raise NotImplementedError", "def appendElement(self, e):\n eParent = e.parent\n if not eParent is None: \n eParent.removeElement(e) # Remove from current parent, if there is one.\n self._elements.append(e) # Possibly add to self again, will move it to the top of the element stack.\n e.setParent(self) # Set parent of element without calling this method again.\n if e.eId: # Store the element by unique element id, if it is defined.\n self._eIds[e.eId] = e\n # If this is a text box, then set self.lastTextBox\n if e.isTextBox:\n self.lastTextBox = e\n return len(self._elements)-1 # Answer the element index for e.", "def test_auto_append():\n r = R()\n r.foo\n assert len(r._children_) == 1\n\n # this makes another foo child, not append to it\n r.foo.bar\n assert len(r._children_) == 2\n assert len(r._children_[1]._children_) == 1", "def append(self, node: 'SoNode') -> \"void\":\n return _coin.SoChildList_append(self, node)", 
"def add_child(self, element: DOMElement, index: int=None) -> 'DOMLayout':\n if index is None:\n index = len(self._children)\n\n self._children.insert(index, element)\n element._set_parent(self, index)\n element.add_observer(DOMEventType.RESIZE, self._on_child_resize)\n element.add_global_observer(self._on_child_event)\n\n self._rerender(resize=(True, True))\n return self", "def addedChild(self, child):\n pass", "def attach_to(child, parent, position=None):\n # this is essentially a shorthand function\n # NOTE notice the only difference in return value\n parent.add_child(child, position)\n return parent", "def addChild(self, child, index=None):\n\n if self._children: # already have some children\n if index is None or index == len(self._children):\n n = self[-1]\n n.insertAfter(child)\n self.tail = child\n else:\n self[index].insertBefore(child)\n if index == 0:\n self.head = child\n else: # this is self's first child\n child.parent = self\n # child.user = self.user\n child.notebook = self.notebook\n self.head = self.tail = child\n if isinstance(child, Section):\n for node in child.nodes:\n node.notebook = self.notebook\n child.touchModified()\n self.touchModified()\n return child" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
delete_child() -> child. Delete the last child and return it.
def delete_child(self):
    child = self.children[-1]
    del self.children[-1]
    return child
[ "def delete_child(child_id):\n Child.objects(id=child_id).delete()\n return {'success': True, 'data': \"Data Deleted\"}, 200", "def _delete(self, node):\n if self.num_children(node) == 2:\n raise ValueError('Position has two children')\n child = node._left if node._left else node._right # might be None\n if child is not None:\n child._parent = node._parent # child's grandparent becomes parent\n if node is self._root:\n self._root = child # child becomes root\n else:\n parent = node._parent\n if node is parent._left:\n parent._left = child\n else:\n parent._right = child\n self._size -= 1\n return node._element", "def delete(self):\n parent = self.parent\n if not parent:\n return None\n parent.childList.remove(self)\n self.parent = None\n globalref.docRef.modified = True\n return parent", "def remove_child(self, **attrs) -> Optional[SlashCommandChild]:\n child = utils.get(self._children, **attrs)\n if child:\n self._children.remove(child)\n\n return child", "def DeleteChild(self, child):\n if self.IsEmpty():\n raise XMLUnknownChild(child.xmlname)\n factoryName = self._FindFactory(child.__class__)\n if factoryName:\n factory = getattr(self, factoryName)\n if isinstance(factory, MethodType):\n deleteFactory = getattr(self, \"Delete_\" + factoryName)\n deleteFactory(child)\n elif isinstance(factory, NoneType):\n raise XMLUnknownChild(child.xmlname)\n elif isinstance(factory, ListType):\n match = False\n for i in xrange(len(factory)):\n if factory[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del factory[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)\n elif factory is child:\n # Single allowable child is replaced with None\n child.DetachFromDocument()\n child.parent = None\n setattr(self, factoryName, None)\n else:\n raise TypeError\n else:\n match = False\n for i in xrange(len(self._children)):\n if self._children[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del self._children[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)", "def last_child(self) -> None | \"Node\":\n ...", "def getLastChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[-1]\n return None", "def delete(self, value):\r\n deleted_node = self.search(value)\r\n if deleted_node == None:\r\n return 0\r\n deleted_node_original_color = deleted_node.color\r\n if deleted_node.right == self.null_node: #right child is null\r\n node_to_fix = deleted_node.left\r\n self.transplant(deleted_node, deleted_node.left)\r\n del deleted_node\r\n elif deleted_node.left == self.null_node: #left child is null\r\n node_to_fix = deleted_node.right\r\n self.transplant(deleted_node, deleted_node.right)\r\n del deleted_node\r\n else: #no child is null\r\n if deleted_node.right.left == self.null_node: #if the right child has no left child\r\n node_to_fix = deleted_node.right.right\r\n node_to_fix.parent = deleted_node.right\r\n deleted_node_original_color = node_to_fix.color\r\n self.transplant(deleted_node, deleted_node.right)\r\n deleted_node.left.parent = node_to_fix.parent\r\n node_to_fix.parent.left = deleted_node.left\r\n node_to_fix.parent.color = deleted_node.color\r\n del deleted_node\r\n else:\r\n trans_node = self.minium(deleted_node.right) #if the right child has left child\r\n deleted_node.key = trans_node.key\r\n deleted_node.satellite_data = trans_node.satellite_data\r\n node_to_fix = trans_node.right\r\n deleted_node_original_color = trans_node.color\r\n self.transplant(trans_node, 
trans_node.right)\r\n del trans_node\r\n\r\n if node_to_fix != self.null_node:\r\n node_to_fix.satellite_data = node_to_fix.left.satellite_data[0] + node_to_fix.right.satellite_data[0] + 1\r\n original_node_to_fix = node_to_fix\r\n while node_to_fix.parent != self.null_node:\r\n node_to_fix.parent.satellite_data[0] -= 1\r\n node_to_fix = node_to_fix.parent\r\n if deleted_node_original_color == \"BLACK\":\r\n self.delete_fixup(original_node_to_fix)", "def deleteLast(self):\n rv = self.last\n new_last = self.last.prev\n new_last.next = None;\n self.last = new_last\n return rv", "def moveLast(self):\n if self.parent:\n self.parent.childList.remove(self)\n self.parent.childList.append(self)\n globalref.docRef.modified = True", "def delete_child(self, child: Union[NodeStat, 'DirectoryStat']):\n child_dirs, child_files = self._scan_result()\n\n if isinstance(child, DirectoryStat):\n rm_items = -child.total_items\n rm_size = -child.total_size\n child_dirs.remove(child)\n shutil.rmtree(str(child.path))\n elif isinstance(child, NodeStat):\n child.path.unlink()\n rm_items = -1\n rm_size = -child.size\n child_files.remove(child)\n else:\n raise TypeError(f\"The type {type(child)} is not supported!\")\n self.total_items += rm_items\n self.total_size += rm_size\n if self._on_stats_change is not None:\n self._on_stats_change(rm_items, rm_size, self.finished.is_set())", "def delete_nth_node(self):\n nth_leaf = self.get_nth_node()\n if not nth_leaf:\n raise Exception(\"Can't delete empty heap\")\n # The last should be the right child.\n\n # if the n_th node is not the root :\n if nth_leaf is not self.root:\n if nth_leaf.parent.right_child:\n nth_leaf.parent.right_child = None\n else:\n nth_leaf.parent.left_child = None\n\n # update the nth\n self.n -= 1\n self.nth_binary_representing = format(self.n, \"b\")\n return nth_leaf.data", "def remove(self):\r\n self.child = None", "def delete_child(self, name):\n if name not in self.children:\n return False\n else:\n del self.children[name]", "def _delete_max(self):\n assert not self.parent, 'self should be root.'\n\n if not self.right:\n # self is max, so delete self.\n self_left = self.left\n self._cut('left')\n return self_left\n\n grand = self\n parent = grand.right\n child = parent.right\n while child:\n grand = grand.right\n parent = parent.right\n child = child.right\n\n # parent is max, so delete parent.\n #\n # grand\n # \\\n # --- cut\n # \\\n # parent\n # / \\\n # cut --- \\\n # / \\\n # parent_left child(None)\n #\n parent_left = parent.left\n grand._cut('right')\n parent._cut('left')\n grand._connect('right', parent_left)\n return self", "def test_delete_node_only_child(bst_long_branch_right):\n bst_long_branch_right.delete(2)\n assert bst_long_branch_right.root.right.data == 3", "def __delete_last_node(\n self\n ):\n size_of_list = self.size()\n node_before_last_node = self.get_node(size_of_list - 2)\n node_before_last_node.set_next_node(None)\n self.last_node.set_previous_node(None)\n self.last_node = node_before_last_node", "def remove_child_at(parent, position=None):\n if position is None:\n child = parent._children.pop()\n else:\n child = parent._children.pop(position)\n object.__setattr__(child, '_parent', None)\n\n # invalidate all ancestor nodes' length\n p = parent\n while p is not None:\n object.__setattr__(p, '_len', None)\n p = p._parent\n\n return child", "def remove(self, child):\n try:\n if self.element == child.traversal_parent:\n self._remove_from_traversal_index(child)\n else:\n self._remove_from_index(child)\n 
self.list.remove(child)\n except:\n raise", "def __delitem__(self, id):\n child = self[id]\n child.parent = None\n self.child_dict.pop(id)\n self.child_list.remove(child)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
close() -> parent. Close element and return the first non-full element.
def close(self):
    parent = self.parent
    while parent.full():
        parent = parent.parent
    return parent
[ "def end_child(self):\n if self.cur_child is not None and not self.cur_child.closed:\n self.cur_child.end()\n self.cur_child = None", "def lastDescendant(self, inclClosed=False):\n item = self\n while True:\n if item.childList and (item.open or inclClosed):\n item = item.childList[-1]\n else:\n return item", "def closer(x):\n x.close()", "def closest_parent(self):\n # type: () -> Optional[Tag]\n parent = self.parent\n while parent:\n if parent.name in self.PARENT_TAGS:\n return parent\n parent = parent.parent\n return None # pragma: no cover", "def remove_one_from_stack(self):\n stack = self.opened_inventorystack\n stack.reverse()\n for inventory in stack:\n if inventory.is_closable_by_escape():\n self.hide(inventory)\n return inventory", "def get_parent(self) -> 'Element':\n\n return self._parent", "def parent(self):\n parent_elem = self.element_info.parent\n\n if parent_elem:\n return self.backend.generic_wrapper_class(parent_elem)\n else:\n return None", "def close_tags_to(self, elem_cp):\n while len(self._stack) and self._stack[-1] != elem_cp:\n self.end_tag()\n if len(self._stack):\n self.end_tag()", "def test_parent(self):\n button = self.dlg.Alpha.find()\n self.assertEqual(button.parent(), self.dlg.find())", "def delete(self):\n parent = self.parent\n if not parent:\n return None\n parent.childList.remove(self)\n self.parent = None\n globalref.docRef.modified = True\n return parent", "def click_button_close(self):\n # AutoGen method click_link: None\n self.click_element(self.BUTTON_CLOSE)", "def get_parent ( self ):\n return self.parent_ref.deref_safe()", "def get_parent(self):\n return self.__return(self.node.parent())", "def close(self, close_all=False):\n if close_all or self.parent is None:\n self.game.close_modal()\n else:\n self.parent.submodal = None", "def get_parent(self, it):\n return self._parent_array[it]", "def del_parent(self):\n self.parent = None", "def getLastChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[-1]\n return None", "def remove(self):\r\n if self.parent:\r\n for i, node in enumerate(self.parent.children):\r\n if node is self:\r\n self.parent.changed()\r\n del self.parent.children[i]\r\n self.parent = None\r\n return i", "def _cur_close(self):\n open = self._prices.open[self._offset]\n rel_close = self._prices.close[self._offset] # so close is rel ?\n return open * (1.0 + rel_close)", "def lastElementChild(self):\n try:\n return self.args[len(self.args) - 1]\n except Exception:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse_latex_math(string [, inline]) -> MathML tree. Returns a MathML tree parsed from string. inline=True is for inline math and inline=False is for displayed math. tree is the whole tree and node is the current element.
def parse_latex_math(string, inline=True):
    # Normalize white-space:
    string = ' '.join(string.split())

    if inline:
        node = mrow()
        tree = math(node, inline=True)
    else:
        node = mtd()
        tree = math(mtable(mtr(node)), inline=False)

    while len(string) > 0:
        n = len(string)
        c = string[0]
        skip = 1  # number of characters consumed
        if n > 1:
            c2 = string[1]
        else:
            c2 = ''
##      print n, string, c, c2, node.__class__.__name__
        if c == ' ':
            pass
        elif c == '\\':
            if c2 in '{}':
                node = node.append(mo(c2))
                skip = 2
            elif c2 == ' ':
                node = node.append(mspace())
                skip = 2
            elif c2 == ',':  # TODO: small space
                node = node.append(mspace())
                skip = 2
            elif c2.isalpha():
                # We have a LaTeX-name:
                i = 2
                while i < n and string[i].isalpha():
                    i += 1
                name = string[1:i]
                node, skip = handle_keyword(name, node, string[i:])
                skip += i
            elif c2 == '\\':
                # End of a row:
                entry = mtd()
                row = mtr(entry)
                node.close().close().append(row)
                node = entry
                skip = 2
            else:
                raise SyntaxError(r'Syntax error: "%s%s"' % (c, c2))
        elif c.isalpha():
            node = node.append(mi(c))
        elif c.isdigit():
            node = node.append(mn(c))
        elif c in "+-*/=()[]|<>,.!?':;@":
            node = node.append(mo(c))
        elif c == '_':
            child = node.delete_child()
            if isinstance(child, msup):
                sub = msubsup(child.children, reversed=True)
            elif isinstance(child, mo) and child.data in sumintprod:
                sub = munder(child)
            else:
                sub = msub(child)
            node.append(sub)
            node = sub
        elif c == '^':
            child = node.delete_child()
            if isinstance(child, msub):
                sup = msubsup(child.children)
            elif isinstance(child, mo) and child.data in sumintprod:
                sup = mover(child)
            elif (isinstance(child, munder) and
                  child.children[0].data in sumintprod):
                sup = munderover(child.children)
            else:
                sup = msup(child)
            node.append(sup)
            node = sup
        elif c == '{':
            row = mrow()
            node.append(row)
            node = row
        elif c == '}':
            node = node.close()
        elif c == '&':
            entry = mtd()
            node.close().append(entry)
            node = entry
        else:
            raise SyntaxError(r'Illegal character: "%s"' % c)
        string = string[skip:]
    return tree
[ "def parse_latex_math(string, inline=True):\r\n\r\n # Normalize white-space:\r\n string = ' '.join(string.split())\r\n\r\n if inline:\r\n node = mrow()\r\n tree = math(node, inline=True)\r\n else:\r\n node = mtd()\r\n tree = math(mtable(mtr(node)), inline=False)\r\n\r\n while len(string) > 0:\r\n n = len(string)\r\n c = string[0]\r\n skip = 1 # number of characters consumed\r\n if n > 1:\r\n c2 = string[1]\r\n else:\r\n c2 = ''\r\n## print n, string, c, c2, node.__class__.__name__\r\n if c == ' ':\r\n pass\r\n elif c == '\\\\':\r\n if c2 in '{}':\r\n node = node.append(mo(c2))\r\n skip = 2\r\n elif c2 == ' ':\r\n node = node.append(mspace())\r\n skip = 2\r\n elif c2 == ',': # TODO: small space\r\n node = node.append(mspace())\r\n skip = 2\r\n elif c2.isalpha():\r\n # We have a LaTeX-name:\r\n i = 2\r\n while i < n and string[i].isalpha():\r\n i += 1\r\n name = string[1:i]\r\n node, skip = handle_keyword(name, node, string[i:])\r\n skip += i\r\n elif c2 == '\\\\':\r\n # End of a row:\r\n entry = mtd()\r\n row = mtr(entry)\r\n node.close().close().append(row)\r\n node = entry\r\n skip = 2\r\n else:\r\n raise SyntaxError(ur'Syntax error: \"%s%s\"' % (c, c2))\r\n elif c.isalpha():\r\n node = node.append(mi(c))\r\n elif c.isdigit():\r\n node = node.append(mn(c))\r\n elif c in \"+-*/=()[]|<>,.!?':;@\":\r\n node = node.append(mo(c))\r\n elif c == '_':\r\n child = node.delete_child()\r\n if isinstance(child, msup):\r\n sub = msubsup(child.children, reversed=True)\r\n elif isinstance(child, mo) and child.data in sumintprod:\r\n sub = munder(child)\r\n else:\r\n sub = msub(child)\r\n node.append(sub)\r\n node = sub\r\n elif c == '^':\r\n child = node.delete_child()\r\n if isinstance(child, msub):\r\n sup = msubsup(child.children)\r\n elif isinstance(child, mo) and child.data in sumintprod:\r\n sup = mover(child)\r\n elif (isinstance(child, munder) and\r\n child.children[0].data in sumintprod):\r\n sup = munderover(child.children)\r\n else:\r\n sup = msup(child)\r\n node.append(sup)\r\n node = sup\r\n elif c == '{':\r\n row = mrow()\r\n node.append(row)\r\n node = row\r\n elif c == '}':\r\n node = node.close()\r\n elif c == '&':\r\n entry = mtd()\r\n node.close().append(entry)\r\n node = entry\r\n else:\r\n raise SyntaxError(ur'Illegal character: \"%s\"' % c)\r\n string = string[skip:]\r\n return tree", "def parse_mathml(s):\n import xml.dom.minidom\n x = xml.dom.minidom.parseString(s)\n return parse_mathml_rhs(dom_child(x))", "def compile_math(math):\n if isinstance(math, str):\n math = (\n math\n .replace('&&', 'and')\n .replace('||', 'or')\n .replace('^', '**')\n )\n\n model = evalidate.base_eval_model.clone()\n model.nodes.extend(VALID_MATH_EXPRESSION_NODES)\n model.allowed_functions.extend(MATHEMATICAL_FUNCTIONS.keys())\n\n math_node = evalidate.Expr(math, model=model)\n compiled_math = compile(math_node.node, '<math>', 'eval')\n return compiled_math", "def build_tree(math_exp_string):\n if not validate_math_exp(math_exp_string):\n raise InvalidInput('Validation Error, one or more parenthesis are not closed properly')\n \n exp_list = filter_exp_list(math_exp_string)\n stack = Stack()\n current_node = Tree()\n\n for token in exp_list:\n\n if token == '(':\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n elif token == ')':\n if stack.size():\n current_node = stack.pop()\n\n elif token in operator_map.keys():\n if current_node.get_val():\n if current_node.get_val() == token:\n current_node.add_child()\n stack.push(current_node)\n 
current_node = current_node.get_newborn_child()\n else:\n parent = Tree(token)\n parent.update_child(current_node)\n parent.add_child()\n stack.push(parent)\n current_node = parent.get_newborn_child()\n else:\n current_node.set_val(token)\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n else:\n try:\n current_node.set_val(float(token))\n except ValueError, e:\n logging.info(e.message)\n current_node.set_val(token)\n current_node = stack.pop()\n\n return current_node", "def parse_mathml_rhs(node, var_table=None, logger=None,\n number_post_processor=None, derivative_post_processor=None):\n def parsex(node):\n \"\"\"\n Parses a mathml expression.\n \"\"\"\n def chain(kind, node, unary=None):\n \"\"\"\n Parses operands for chained operators (for example plus, minus,\n times and division).\n\n The argument ``kind`` must be the myokit expression type being\n parsed, ``node`` is a DOM node and ``unary``, if given, should be\n the unary expression type (unary Plus or unary Minus).\n \"\"\"\n ops = []\n node = dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n n = len(ops)\n if n < 1:\n raise MathMLError('Operator needs at least one operand.')\n if n < 2:\n if unary:\n return unary(ops[0])\n else:\n raise MathMLError('Operator needs at least two operands')\n ex = kind(ops[0], ops[1])\n for i in xrange(2, n):\n ex = kind(ex, ops[i])\n return ex\n # Start parsing\n name = node.tagName\n if name == 'apply':\n # Brackets, can be ignored in an expression tree.\n return parsex(dom_child(node))\n elif name == 'ci':\n # Reference\n var = str(node.firstChild.data).strip()\n if var_table:\n try:\n var = var_table[var]\n except KeyError:\n logger.warn('Unable to resolve reference to <' + str(var)\n + '>.')\n return myokit.Name(var)\n elif name == 'diff':\n # Derivative\n # Check time variable\n bvar = dom_next(node, 'bvar')\n if derivative_post_processor:\n derivative_post_processor(parsex(dom_child(bvar, 'ci')))\n # Check degree, if given\n d = dom_child(bvar, 'degree')\n if d is not None:\n d = parsex(dom_child(d, 'cn')).eval()\n if not d == 1:\n raise MathMLError('Only derivatives of degree one are'\n ' supported.')\n # Create derivative and return\n x = dom_next(node, 'ci')\n if x is None:\n raise MathMLError('Derivative of an expression found: only'\n ' derivatives of variables are supported.')\n return myokit.Derivative(parsex(x))\n elif name == 'cn':\n # Number\n number = parse_mathml_number(node, logger)\n if number_post_processor:\n return number_post_processor(node, number)\n return number\n #\n # Algebra\n #\n elif name == 'plus':\n return chain(myokit.Plus, node, myokit.PrefixPlus)\n elif name == 'minus':\n return chain(myokit.Minus, node, myokit.PrefixMinus)\n elif name == 'times':\n return chain(myokit.Multiply, node)\n elif name == 'divide':\n return chain(myokit.Divide, node)\n #\n # Functions\n #\n elif name == 'exp':\n return myokit.Exp(parsex(dom_next(node)))\n elif name == 'ln':\n return myokit.Log(parsex(dom_next(node)))\n elif name == 'log':\n if dom_next(node).tagName != 'logbase':\n return myokit.Log10(parsex(dom_next(node)))\n else:\n return myokit.Log(\n parsex(dom_next(dom_next(node))),\n parsex(dom_child(dom_next(node))))\n elif name == 'root':\n # Check degree, if given\n next = dom_next(node)\n if next.tagName == 'degree':\n # Degree given, return x^(1/d) unless d is 2\n d = parsex(dom_child(next))\n x = parsex(dom_next(next))\n if d.is_literal() and d.eval() == 2:\n return myokit.Sqrt(x)\n 
return myokit.Power(x, myokit.Divide(myokit.Number(1), d))\n else:\n return myokit.Sqrt(parsex(next))\n elif name == 'power':\n n2 = dom_next(node)\n return myokit.Power(parsex(n2), parsex(dom_next(n2)))\n elif name == 'floor':\n return myokit.Floor(parsex(dom_next(node)))\n elif name == 'ceiling':\n return myokit.Ceil(parsex(dom_next(node)))\n elif name == 'abs':\n return myokit.Abs(parsex(dom_next(node)))\n elif name == 'quotient':\n n2 = dom_next(node)\n return myokit.Quotient(parsex(n2), parsex(dom_next(n2)))\n elif name == 'rem':\n n2 = dom_next(node)\n return myokit.Remainder(parsex(n2), parsex(dom_next(n2)))\n #\n # Trigonometry\n #\n elif name == 'sin':\n return myokit.Sin(parsex(dom_next(node)))\n elif name == 'cos':\n return myokit.Cos(parsex(dom_next(node)))\n elif name == 'tan':\n return myokit.Tan(parsex(dom_next(node)))\n elif name == 'arcsin':\n return myokit.ASin(parsex(dom_next(node)))\n elif name == 'arccos':\n return myokit.ACos(parsex(dom_next(node)))\n elif name == 'arctan':\n return myokit.ATan(parsex(dom_next(node)))\n #\n # Redundant trigonometry (CellML includes this)\n #\n elif name == 'csc':\n # Cosecant: csc(x) = 1 / sin(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Sin(parsex(dom_next(node))))\n elif name == 'sec':\n # Secant: sec(x) = 1 / cos(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Cos(parsex(dom_next(node))))\n elif name == 'cot':\n # Contangent: cot(x) = 1 / tan(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Tan(parsex(dom_next(node))))\n elif name == 'arccsc':\n # ArcCosecant: acsc(x) = asin(1/x)\n return myokit.ASin(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arcsec':\n # ArcSecant: asec(x) = acos(1/x)\n return myokit.ACos(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arccot':\n # ArcCotangent: acot(x) = atan(1/x)\n return myokit.ATan(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n #\n # Hyperbolic trigonometry (CellML again)\n #\n elif name == 'sinh':\n # Hyperbolic sine: sinh(x) = 0.5 * (e^x - e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'cosh':\n # Hyperbolic cosine: cosh(x) = 0.5 * (e^x + e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'tanh':\n # Hyperbolic tangent: tanh(x) = (e^2x - 1) / (e^2x + 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Minus(e2x, myokit.Number(1)),\n myokit.Plus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic trigonometry (CellML...)\n #\n elif name == 'arcsinh':\n # Inverse hyperbolic sine: asinh(x) = log(x + sqrt(1 + x*x))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Sqrt(myokit.Plus(\n myokit.Number(1), myokit.Multiply(x, x)))))\n elif name == 'arccosh':\n # Inverse hyperbolic cosine:\n # acosh(x) = log(x + sqrt(x + 1) * sqrt(x - 1))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Multiply(myokit.Sqrt(\n myokit.Plus(x, myokit.Number(1))), myokit.Sqrt(\n myokit.Minus(x, myokit.Number(1))))))\n elif name == 'arctanh':\n # Inverse hyperbolic tangent:\n # atanh(x) = 0.5 * (log(1 + x) - log(1 - x))\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), x)), myokit.Log(\n myokit.Minus(myokit.Number(1), x))))\n 
#\n # Hyperbolic redundant trigonometry (CellML...)\n #\n elif name == 'csch':\n # Hyperbolic cosecant: csch(x) = 2 / (exp(x) - exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'sech':\n # Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'coth':\n # Hyperbolic cotangent:\n # coth(x) = (exp(2*x) + 1) / (exp(2*x) - 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Plus(e2x, myokit.Number(1)),\n myokit.Minus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic redundant trigonometry (CellML has a lot to answer\n # for...)\n #\n elif name == 'arccsch':\n # Inverse hyperbolic cosecant:\n # arccsch(x) = log(sqrt(1 + 1/x^2) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Sqrt(myokit.Number(1),\n myokit.Power(xi, myokit.Number(2))), xi))\n elif name == 'arcsech':\n # Inverse hyperbolic secant:\n # arcsech(x) = log(sqrt(1/x - 1) * sqrt(1/x + 1) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Multiply(\n myokit.Sqrt(myokit.Minus(xi, myokit.Number(1))),\n myokit.Sqrt(myokit.Plus(xi, myokit.Number(1)))), xi))\n elif name == 'arccoth':\n # Inverse hyperbolic cotangent:\n # arccoth(x) = 0.5 * (log(1 + 1/x) - log(1 - 1/x))\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), xi)),\n myokit.Log(myokit.Minus(myokit.Number(1), xi))))\n #\n # Logic\n #\n elif name == 'and':\n return chain(myokit.And, node)\n elif name == 'or':\n return chain(myokit.Or, node)\n elif name == 'not':\n return chain(None, node, myokit.Not)\n elif name == 'eq' or name == 'equivalent':\n n2 = dom_next(node)\n return myokit.Equal(parsex(n2), parsex(dom_next(n2)))\n elif name == 'neq':\n n2 = dom_next(node)\n return myokit.NotEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'gt':\n n2 = dom_next(node)\n return myokit.More(parsex(n2), parsex(dom_next(n2)))\n elif name == 'lt':\n n2 = dom_next(node)\n return myokit.Less(parsex(n2), parsex(dom_next(n2)))\n elif name == 'geq':\n n2 = dom_next(node)\n return myokit.MoreEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'leq':\n n2 = dom_next(node)\n return myokit.LessEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'piecewise':\n # Piecewise contains at least one piece, optionally contains an\n # \"otherwise\". 
Syntax doesn't ensure this statement makes sense.\n conds = []\n funcs = []\n other = None\n piece = dom_child(node)\n while piece:\n if piece.tagName == 'otherwise':\n if other is None:\n other = parsex(dom_child(piece))\n elif logger:\n logger.warn('Multiple <otherwise> tags found in'\n ' <piecewise> statement.')\n elif piece.tagName == 'piece':\n n2 = dom_child(piece)\n funcs.append(parsex(n2))\n conds.append(parsex(dom_next(n2)))\n elif logger:\n logger.warn('Unexpected tag type in <piecewise>: '\n + '<' + piece.tagName + '>.')\n piece = dom_next(piece)\n if other is None:\n other = myokit.Number(0)\n # Create string of if statements\n args = []\n f = iter(funcs)\n for c in conds:\n args.append(c)\n args.append(f.next())\n args.append(other)\n return myokit.Piecewise(*args)\n #\n # Constants\n #\n elif name == 'pi':\n return myokit.Number('3.14159265358979323846')\n elif name == 'exponentiale':\n return myokit.Exp(myokit.Number(1))\n elif name == 'true':\n # This is corrent, even in Python True == 1 but not True == 2\n return myokit.Number(1)\n elif name == 'false':\n return myokit.Number(0)\n #\n # Unknown/unhandled elements\n #\n else:\n if logger:\n logger.warn('Unknown element: ' + name)\n ops = []\n node = dom_child(node) if dom_child(node) else dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n return myokit.UnsupportedFunction(name, ops)\n # Remove math node, if given\n if node.tagName == 'math':\n node = dom_child(node)\n #TODO: Check xmlns?\n return parsex(node)", "def calc(mystring):\n return(evaltree(buildtree(tokenize(mystring))))", "def is_mathml(self):\n return '<math ' in self.expr", "def make_sympy(self, xml=None): # lint-amnesty, pylint: disable=too-many-statements\n\n if self.the_sympy:\n return self.the_sympy\n\n if xml is None:\t # root\n if not self.is_mathml():\n return my_sympify(self.expr)\n if self.is_presentation_mathml():\n cmml = None\n try:\n cmml = self.cmathml\n xml = etree.fromstring(str(cmml))\n except Exception as err:\n if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: # lint-amnesty, pylint: disable=unsupported-membership-test\n msg = \"Illegal math expression\"\n else:\n msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n else:\n xml = etree.fromstring(self.expr)\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n return self.the_sympy\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def op_plus(*args):\n return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]\n\n def op_times(*args):\n return reduce(operator.mul, args)\n\n def op_minus(*args):\n if len(args) == 1:\n return -args[0]\n if not len(args) == 2: # lint-amnesty, pylint: disable=unneeded-not\n raise Exception('minus given wrong number of arguments!')\n #return sympy.Add(args[0],-args[1])\n return args[0] - args[1]\n\n opdict = {\n 'plus': op_plus,\n 'divide': operator.div, # lint-amnesty, pylint: disable=no-member\n 'times': op_times,\n 'minus': op_minus,\n 'root': sympy.sqrt,\n 'power': sympy.Pow,\n 'sin': sympy.sin,\n 'cos': sympy.cos,\n 'tan': sympy.tan,\n 'cot': sympy.cot,\n 'sinh': sympy.sinh,\n 'cosh': sympy.cosh,\n 'coth': sympy.coth,\n 'tanh': sympy.tanh,\n 'asin': sympy.asin,\n 'acos': sympy.acos,\n 'atan': sympy.atan,\n 'atan2': sympy.atan2,\n 'acot': sympy.acot,\n 'asinh': 
sympy.asinh,\n 'acosh': sympy.acosh,\n 'atanh': sympy.atanh,\n 'acoth': sympy.acoth,\n 'exp': sympy.exp,\n 'log': sympy.log,\n 'ln': sympy.ln,\n }\n\n def parse_presentation_symbol(xml):\n \"\"\"\n Parse <msub>, <msup>, <mi>, and <mn>\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'msub':\n return '_'.join([parse_presentation_symbol(y) for y in xml])\n elif tag == 'msup':\n return '^'.join([parse_presentation_symbol(y) for y in xml])\n raise Exception('[parse_presentation_symbol] unknown tag %s' % tag)\n\n # parser tree for Content MathML\n tag = gettag(xml)\n\n # first do compound objects\n\n if tag == 'apply':\t\t# apply operator\n opstr = gettag(xml[0])\n if opstr in opdict:\n op = opdict[opstr] # pylint: disable=invalid-name\n args = [self.make_sympy(expr) for expr in xml[1:]]\n try:\n res = op(*args)\n except Exception as err:\n self.args = args # pylint: disable=attribute-defined-outside-init\n self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name\n raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) # lint-amnesty, pylint: disable=raise-missing-from\n return res\n else:\n raise Exception('[formula]: unknown operator tag %s' % (opstr))\n\n elif tag == 'list':\t\t# square bracket list\n if gettag(xml[0]) == 'matrix':\n return self.make_sympy(xml[0])\n else:\n return [self.make_sympy(expr) for expr in xml]\n\n elif tag == 'matrix':\n return sympy.Matrix([self.make_sympy(expr) for expr in xml])\n\n elif tag == 'vector':\n return [self.make_sympy(expr) for expr in xml]\n\n # atoms are below\n\n elif tag == 'cn':\t\t\t# number\n return sympy.sympify(xml.text)\n\n elif tag == 'ci':\t\t\t# variable (symbol)\n if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):\t # subscript or superscript\n usym = parse_presentation_symbol(xml[0])\n sym = sympy.Symbol(str(usym))\n else:\n usym = six.text_type(xml.text)\n if 'hat' in usym:\n sym = my_sympify(usym)\n else:\n if usym == 'i' and self.options is not None and 'imaginary' in self.options:\t # i = sqrt(-1)\n sym = sympy.I\n else:\n sym = sympy.Symbol(str(usym))\n return sym\n\n else:\t\t\t\t# unknown tag\n raise Exception('[formula] unknown tag %s' % tag)", "def parse_arithmetic(reference_string):\n try:\n return _ast_eval(ast.parse(reference_string, mode=\"eval\").body)\n except (TypeError, SyntaxError, KeyError):\n return reference_string", "def math_for_markdown(pelicanobj):\n\n try:\n pelicanobj.settings[\"MARKDOWN\"].setdefault(\"extensions\", []).append(\n MathExtension()\n )\n except Exception:\n sys.excepthook(*sys.exc_info())\n sys.stderr.write(\n \"\\nError - the pelican mathjax markdown extension failed to configure. 
MathJax is non-functional.\\n\"\n )\n sys.stderr.flush()", "def make_new_mathelement(TeX):\n mathmlelement = etree.Element('math')\n semantics = etree.Element('semantics')\n junk = etree.Element('mtext')\n junk.text = 'CLICKME'\n semantics.append(junk)\n annotation = etree.Element('annotation')\n annotation.attrib['encoding'] = 'TeX'\n annotation.text = TeX\n\n semantics.append(annotation)\n mathmlelement.append(semantics)\n return etree.tostring(mathmlelement)", "def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):\n\n # Create the configuration for the markdown template\n config = {}\n config['mathjax_script'] = mathjax_script\n config['math_tag_class'] = 'math'\n config['auto_insert'] = mathjax_settings['auto_insert']\n\n # Instantiate markdown extension and append it to the current extensions\n try:\n if isinstance(pelicanobj.settings.get('MD_EXTENSIONS'), list): # pelican 3.6.3 and earlier\n pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))\n else:\n pelicanobj.settings['MARKDOWN'].setdefault('extensions', []).append(PelicanMathJaxExtension(config))\n except:\n sys.excepthook(*sys.exc_info())\n sys.stderr.write(\"\\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\\n\")\n sys.stderr.flush()", "def parse(cls, xml_string, **parser_kwargs):\n\n xml_string = OOXMLtoLatexParser.change_xml_double_open_tag_to_left_arrow(xml_string)\n xml_string = OOXMLtoLatexParser._remove_self_closing_tags(xml_string)\n xml_to_latex_parser = cls(**parser_kwargs)\n\n if isinstance(xml_string, basestring):\n element = etree.fromstring(xml_string)\n sax.saxify(element, xml_to_latex_parser)\n return xml_to_latex_parser\n else:\n raise TypeError(\"xml string parameter must be str or unicode\")", "def test_advanced_math(self):\n exp = \"m{(10+10)+10+10}\"\n self.assertEqual(self.engine.Process(exp), \"40\", \"adds complex nested math\")", "def preprocess_pmathml(self, xml): # lint-amnesty, pylint: disable=too-many-statements\n\n if isinstance(xml, (str, six.text_type)):\n xml = etree.fromstring(xml)\t\t# TODO: wrap in try\n\n xml = self.fix_greek_in_mathml(xml)\t # convert greek utf letters to greek spelled out in ascii\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def fix_pmathml(xml):\n \"\"\"\n f and g are processed as functions by asciimathml, eg \"f-2\" turns\n into \"<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>\" this is\n really terrible for turning into cmathml. 
undo this here.\n \"\"\"\n for k in xml:\n tag = gettag(k)\n if tag == 'mrow':\n if len(k) == 2:\n if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':\n idx = xml.index(k)\n xml.insert(idx, deepcopy(k[0]))\t # drop the <mrow> container\n xml.insert(idx + 1, deepcopy(k[1]))\n xml.remove(k)\n fix_pmathml(k)\n\n fix_pmathml(xml)\n\n def fix_hat(xml):\n \"\"\"\n hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle\n this into <mi>hat(f)</mi> hat i also somtimes turned into\n <mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>\n \"\"\"\n for k in xml:\n tag = gettag(k)\n if tag == 'mover':\n if len(k) == 2:\n if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':\n newk = etree.Element('mi')\n newk.text = 'hat(%s)' % k[0].text\n xml.replace(k, newk)\n if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and \\\n gettag(k[1]) == 'mo' and str(k[1].text) == '^':\n newk = etree.Element('mi')\n newk.text = 'hat(%s)' % k[0][0].text\n xml.replace(k, newk)\n fix_hat(k)\n fix_hat(xml)\n\n def flatten_pmathml(xml):\n \"\"\"\n Give the text version of certain PMathML elements\n\n Sometimes MathML will be given with each letter separated (it\n doesn't know if its implicit multiplication or what). From an xml\n node, find the (text only) variable name it represents. So it takes\n <mrow>\n <mi>m</mi>\n <mi>a</mi>\n <mi>x</mi>\n </mrow>\n and returns 'max', for easier use later on.\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'mrow':\n return ''.join([flatten_pmathml(y) for y in xml])\n raise Exception('[flatten_pmathml] unknown tag %s' % tag)\n\n def fix_mathvariant(parent):\n \"\"\"\n Fix certain kinds of math variants\n\n Literally replace <mstyle mathvariant=\"script\"><mi>N</mi></mstyle>\n with 'scriptN'. There have been problems using script_N or script(N)\n \"\"\"\n for child in parent:\n if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script':\n newchild = etree.Element('mi')\n newchild.text = 'script%s' % flatten_pmathml(child[0])\n parent.replace(child, newchild)\n fix_mathvariant(child)\n fix_mathvariant(xml)\n\n # find \"tagged\" superscripts\n # they have the character \\u200b in the superscript\n # replace them with a__b so snuggle doesn't get confused\n def fix_superscripts(xml):\n \"\"\" Look for and replace sup elements with 'X__Y' or 'X_Y__Z'\n\n In the javascript, variables with '__X' in them had an invisible\n character inserted into the sup (to distinguish from powers)\n E.g. 
normal:\n <msubsup>\n <mi>a</mi>\n <mi>b</mi>\n <mi>c</mi>\n </msubsup>\n to be interpreted '(a_b)^c' (nothing done by this method)\n\n And modified:\n <msubsup>\n <mi>b</mi>\n <mi>x</mi>\n <mrow>\n <mo>&#x200B;</mo>\n <mi>d</mi>\n </mrow>\n </msubsup>\n to be interpreted 'a_b__c'\n\n also:\n <msup>\n <mi>x</mi>\n <mrow>\n <mo>&#x200B;</mo>\n <mi>B</mi>\n </mrow>\n </msup>\n to be 'x__B'\n \"\"\"\n for k in xml:\n tag = gettag(k)\n\n # match things like the last example--\n # the second item in msub is an mrow with the first\n # character equal to \\u200b\n if (\n tag == 'msup' and\n len(k) == 2 and gettag(k[1]) == 'mrow' and\n gettag(k[1][0]) == 'mo' and k[1][0].text == u'\\u200b' # whew\n ):\n\n # replace the msup with 'X__Y'\n k[1].remove(k[1][0])\n newk = etree.Element('mi')\n newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))\n xml.replace(k, newk)\n\n # match things like the middle example-\n # the third item in msubsup is an mrow with the first\n # character equal to \\u200b\n if (\n tag == 'msubsup' and\n len(k) == 3 and gettag(k[2]) == 'mrow' and\n gettag(k[2][0]) == 'mo' and k[2][0].text == u'\\u200b' # whew\n ):\n\n # replace the msubsup with 'X_Y__Z'\n k[2].remove(k[2][0])\n newk = etree.Element('mi')\n newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))\n xml.replace(k, newk)\n\n fix_superscripts(k)\n fix_superscripts(xml)\n\n def fix_msubsup(parent):\n \"\"\"\n Snuggle returns an error when it sees an <msubsup> replace such\n elements with an <msup>, except the first element is of\n the form a_b. I.e. map a_b^c => (a_b)^c\n \"\"\"\n for child in parent:\n # fix msubsup\n if gettag(child) == 'msubsup' and len(child) == 3:\n newchild = etree.Element('msup')\n newbase = etree.Element('mi')\n newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))\n newexp = child[2]\n newchild.append(newbase)\n newchild.append(newexp)\n parent.replace(child, newchild)\n\n fix_msubsup(child)\n fix_msubsup(xml)\n\n self.xml = xml # pylint: disable=attribute-defined-outside-init\n return self.xml", "def md2html():\n if len(sys.argv) < 2:\n _usage_md2html()\n sys.exit(1)\n\n filename = sys.argv[1]\n if not filename.endswith('.md'):\n if os.path.isfile(filename + '.md'):\n filename += '.md'\n else:\n raise IOError('no file %s.md' % filename)\n # First make sure \\eqref survives the pandoc translation\n f = open(filename ,'r'); text = f.read(); f.close()\n text = text.replace('\\\\eqref{', 'EQREF{')\n f = open(filename ,'w'); f.write(text); f.close()\n\n # Translate to HTML and fix the MathJax things\n basename = filename[:-3]\n cmd = 'pandoc -f markdown -t html --mathjax -s -o %s.html %s.md' % \\\n (basename, basename)\n print cmd\n failure = os.system(cmd)\n if failure:\n print 'could not run\\n', cmd\n sys.exit(1)\n f = open('%s.html' % basename, 'r')\n text = f.read()\n f.close()\n # Add extra info\n pattern = r'(<script src=\".+?MathJax\\.js)'\n replacement = r\"\"\"\n<script type=\"text/x-mathjax-config\">\nMathJax.Hub.Config({\n TeX: {\n equationNumbers: { autoNumber: \"AMS\" },\n extensions: [\"AMSmath.js\", \"AMSsymbols.js\", \"autobold.js\"]\n }\n});\n</script>\n\\g<1>\"\"\"\n text = re.sub(pattern, replacement, text)\n text = text.replace('EQREF{', '\\\\eqref{')\n\n f = open('%s.html' % basename, 'w')\n f.write(text)\n f.close()\n print 'output in %s.html' % basename", "def loads(string):\r\n tree = qasm_parser.parse(string)\r\n tree = QASMToIRTransformer().transform(tree)\r\n return tree", "def 
render_md(self, ds):\n ds_no_math, math = remove_math(ds, '$')\n # We have to run `mathjax_editing.replace_math` on the text in code\n # blocks before passing it to Pygments (see `render_block_code`),\n # otherwise `replace_math` will be confused by the added syntax\n # highlighting `<span>`s and won't be able to splice in those blocks.\n self.math = math\n html = self.render(Document(ds_no_math))\n return replace_math(html, self.math)", "def parse(line):\n\n document = Document()\n root = document.createElement('tree')\n current_element = root\n rest = line\n\n while True:\n element, separator, rest = parse_element(rest, document)\n\n if isinstance(current_element.lastChild, Text) and \\\n current_element.lastChild.data == '':\n current_element.removeChild(current_element.lastChild)\n\n current_element.appendChild(element)\n\n if rest is None:\n break\n\n if separator == '<':\n current_element = current_element.parentNode\n elif separator == '+':\n current_element = current_element\n elif separator == '>':\n current_element = element\n\n expand_multipliers(root)\n\n return root", "def MathExtract(self):\n\n Regex = r\"\\\\begin\\{equation\\}.*?\\\\end\\{equation\\}\"\n self.MathRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n MathExtracted = self.MathRegex.findall(self.ParsedText)\n\n for Mathematics in MathExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedMath[ThisUID] = Math(Mathematics, ThisUID)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of Unicode character lists. For each of the `categories`, an item contains a list with all Unicode characters with `cp_min` <= codepoint <= `cp_max` that belong to the category. (The default values check every codepoint supported by Python.)
def unicode_charlists(categories, cp_min=0, cp_max=None): # Determine highest code point with one of the given categories # (may shorten the search time considerably if there are many # categories with not too high characters): if cp_max is None: cp_max = max(x for x in range(sys.maxunicode + 1) if unicodedata.category(chr(x)) in categories) # print cp_max # => 74867 for unicode_punctuation_categories charlists = {} for cat in categories: charlists[cat] = [chr(x) for x in range(cp_min, cp_max+1) if unicodedata.category(chr(x)) == cat] return charlists
[ "def unicode_charlists(categories, cp_min=0, cp_max=None):\r\n # Determine highest code point with one of the given categories\r\n # (may shorten the search time considerably if there are many\r\n # categories with not too high characters):\r\n if cp_max is None:\r\n cp_max = max(x for x in xrange(sys.maxunicode + 1)\r\n if unicodedata.category(unichr(x)) in categories)\r\n # print cp_max # => 74867 for unicode_punctuation_categories\r\n charlists = {}\r\n for cat in categories:\r\n charlists[cat] = [unichr(x) for x in xrange(cp_min, cp_max+1)\r\n if unicodedata.category(unichr(x)) == cat]\r\n return charlists", "def generate_categories():\n # inspired by https://gist.github.com/anonymous/2204527\n code_points_ranges = []\n iso_15924_aliases = []\n categories = []\n\n match = re.compile(r'([0-9A-F]+)(?:\\.\\.([0-9A-F]+))?\\W+(\\w+)\\s*#\\s*(\\w+)',\n re.UNICODE)\n\n url = 'ftp://ftp.unicode.org/Public/UNIDATA/Scripts.txt'\n file = get(url)\n for line in file:\n p = re.findall(match, line)\n if p:\n code_point_range_from, code_point_range_to, alias, category = p[0]\n alias = u(alias.upper())\n category = u(category)\n if alias not in iso_15924_aliases:\n iso_15924_aliases.append(alias)\n if category not in categories:\n categories.append(category)\n code_points_ranges.append((\n int(code_point_range_from, 16),\n int(code_point_range_to or code_point_range_from, 16),\n iso_15924_aliases.index(alias), categories.index(category))\n )\n code_points_ranges.sort()\n\n categories_data = {\n 'iso_15924_aliases': iso_15924_aliases,\n 'categories': categories,\n 'code_points_ranges': code_points_ranges,\n }\n\n dump('categories.json', categories_data)", "def build_categories():\n # Note that we're acting like we're going to grab all categories from\n # the dataset, though in practice we're going to limit it to COMMON and\n # LATIN entries.\n #\n # This is because right now, we only need it for Unicode confusable maps,\n # but as we implement more Unicode checks, we will likely need to fetch\n # and store more information. Given the complexity of this, it's better to\n # have a more future-proof, \"correct\" implementation up-front.\n LINE_RE = re.compile(\n r'^(?P<codepoint_from>[0-9A-F]+)'\n r'(?:\\.\\.(?P<codepoint_through>[0-9A-F]+))?'\n r'\\s*; (?P<alias>\\w+) # (?P<category>[\\w]+)',\n re.UNICODE)\n\n aliases = []\n alias_id_map = {}\n\n categories = []\n category_id_map = {}\n\n codepoint_ranges = {}\n\n for info in _load_data(CATEGORIES_URL, LINE_RE):\n alias = info['alias'].upper()\n\n if alias not in ('COMMON', 'LATIN'):\n continue\n\n if alias not in alias_id_map:\n alias_id_map[alias] = len(aliases)\n aliases.append(alias)\n\n category = info['category']\n\n if category not in category_id_map:\n category_id_map[category] = len(categories)\n categories.append(category)\n\n codepoint_from_s = info['codepoint_from']\n codepoint_through_s = info['codepoint_through'] or codepoint_from_s\n\n codepoint_from = int(codepoint_from_s, 16)\n codepoint_through = int(codepoint_through_s, 16)\n\n # Split into subtables. Key off from some prefix.\n prev_key = None\n cur_range = None\n\n # We need a quick way to look up Unicode codepoints, but it's too\n # expensive to maintain a mapping of every codepoint. 
So, instead\n # we have a 5-level tree.\n #\n # The first 4 levels are increasingly specific masks of starting\n # codepoint ranges, with the 5th level being the codepoints in that\n # range.\n #\n # Codepoint ranges are split up as needed to fit in the correct range.\n #\n # As an example, if we were storing category range 1F400..1F6D7\n # (RAT..ELEVATOR):\n #\n # 10000: {\n # 1F000: {\n # 1F400: {\n # 1F400: [1F400..1F409],\n # ...\n # 1F490: [1F490..1F499],\n # },\n # ..\n # 1F600: {\n # 1F600: [1F600..1F609],\n # ...\n # 1F6D0: [1F6D0..1F6D7],\n # }\n # }\n # }\n #\n # In practice, the leafs often have more than one range, particularly\n # for the lower codepoint ranges.\n #\n # This is easy to build and fast for lookup.\n for codepoint in range(codepoint_from, codepoint_through + 1):\n key = _make_codepoints_key_path(codepoint)\n\n if key != prev_key:\n if cur_range:\n codepoints = (\n codepoint_ranges\n .setdefault(prev_key[0], {})\n .setdefault(prev_key[1], {})\n .setdefault(prev_key[2], {})\n .setdefault(prev_key[3], [])\n )\n codepoints.append(cur_range)\n\n cur_range = (\n codepoint,\n codepoint,\n alias_id_map[alias],\n category_id_map[category],\n )\n else:\n cur_range = (\n cur_range[0],\n codepoint,\n cur_range[2],\n cur_range[3],\n )\n\n prev_key = key\n\n if prev_key:\n codepoints = (\n codepoint_ranges\n .setdefault(prev_key[0], {})\n .setdefault(prev_key[1], {})\n .setdefault(prev_key[2], {})\n .setdefault(prev_key[3], [])\n )\n codepoints.append(cur_range)\n\n categories_data.update({\n 'aliases': aliases,\n 'categories': categories,\n 'codepoints': codepoint_ranges,\n })", "def category(chr: str) -> str:\n idx = ord(chr)\n start_keys = sorted(unicode_data_to_category_start.keys())\n insertion_point = bisect.bisect_left(start_keys, idx)\n if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:\n insertion_point -= 1\n key_start = start_keys[insertion_point]\n result_start = unicode_data_to_category_start[key_start]\n\n end_keys = sorted(unicode_data_to_category_end.keys())\n insertion_point = bisect.bisect_left(end_keys, idx)\n try:\n key_end = end_keys[insertion_point]\n result_end = unicode_data_to_category_end[key_end]\n\n if result_end != key_start:\n result_end = result_start\n key_end = key_start\n else:\n result_end = unicode_data_to_category_start[result_end]\n\n if key_start <= idx <= key_end and result_start == result_end:\n return result_start\n else:\n return \"Zzzz\"\n except IndexError:\n return \"Zzzz\"", "def _build_public_opentype_categories(ufo: Font) -> dict[str, str]:\n from glyphsLib import glyphdata\n\n categories: dict[str, str] = {}\n category_key = GLYPHLIB_PREFIX + \"category\"\n subCategory_key = GLYPHLIB_PREFIX + \"subCategory\"\n\n # NOTE: We can generate the category even for glyphs that are not exported,\n # because entries don't have to exist in the final fonts.\n for glyph in ufo:\n glyph_name = glyph.name\n assert glyph_name is not None\n\n has_attaching_anchor = False\n for anchor in glyph.anchors:\n name = anchor.name\n if name and not name.startswith(\"_\"):\n has_attaching_anchor = True\n\n # First check glyph.lib for category/subCategory overrides. 
Otherwise,\n # use global values from GlyphData.\n glyphinfo = glyphdata.get_glyph(\n glyph_name, unicodes=[f\"{c:04X}\" for c in glyph.unicodes]\n )\n category = glyph.lib.get(category_key) or glyphinfo.category\n subCategory = glyph.lib.get(subCategory_key) or glyphinfo.subCategory\n\n if subCategory == \"Ligature\" and has_attaching_anchor:\n categories[glyph_name] = \"ligature\"\n elif category == \"Mark\" and (\n subCategory == \"Nonspacing\" or subCategory == \"Spacing Combining\"\n ):\n categories[glyph_name] = \"mark\"\n elif has_attaching_anchor:\n categories[glyph_name] = \"base\"\n\n return categories", "def crime_category_breakdown():\n db_request = main_db_call()\n all_crimes = [item[0] for item in db_request]\n sub_offense = offense_counter(all_crimes)\n sub_pie = color_applicator(sub_offense)\n sub_dict = {}\n for i, thing in enumerate(sub_pie):\n for key, category in UPPER_DICT.items():\n if sub_pie[i][0] in category:\n sub_dict.setdefault(key, [])\n sub_dict[key].append(sub_pie[i])\n return sub_dict", "def get_nuclei_in_range(zmin, zmax, amin, amax):\n\n nuc_list = []\n assert zmax >= zmin, \"zmax must be >= zmin\"\n assert amax >= amin, \"amax must be >= amin\"\n\n for z in range(zmin, zmax+1):\n element = PeriodicTable.lookup_Z(z)\n for a in range(amin, amax+1):\n name = f\"{element.abbreviation}{a}\"\n nuc_list.append(Nucleus(name))\n\n return nuc_list", "def _build_unicode_property_table(unicode_range):\n table = {}\n p = None\n for i in range(*unicode_range):\n try:\n c = uchr(i)\n p = unicodedata.category(c)\n except:\n continue\n if p[0] not in table:\n table[p[0]] = {}\n if p[1] not in table[p[0]]:\n table[p[0]][p[1]] = []\n table[p[0]][p[1]].append(c)\n\n # Join as one string\n for k1, v1 in table.items():\n for k2, v2 in v1.items():\n v1[k2] = ''.join(v2)\n\n return table", "def __get_cat_levels(self,data):\n levels = {}\n\n for v in self.categorical:\n ds = data[v].astype('category')\n levels[v] = ds[ds.notnull()].unique().categories.sort_values()\n\n return levels", "def _convert_to_cmap_props(glyphs):\n return {\n ord(_g.char): _name\n for _name, _g in glyphs.items()\n # .notdef should not be mapped in cmap\n if _g.char and _name != '.notdef'\n }", "def world_cups():\n return [(\"Germany\", 2006, \"Italy\"), (\"South-Africa\", 2010, \"Spain\"), (\"Brazil\", 2014, \"Germany\")]", "def test_code_to_category_basic_worklist(self):\n # Basic Worklist Management, PS3.4 Annex K\n c2c = code_to_category\n\n assert c2c(0x0000) == \"Success\"\n for code in [0xA700, 0xA900]:\n assert c2c(code) == \"Failure\"\n for code in range(0xC000, 0xD000):\n assert c2c(code) == \"Failure\"\n assert c2c(0xFE00) == \"Cancel\"\n assert c2c(0xB000) == \"Warning\"\n for code in [0xFF00, 0xFF01]:\n assert c2c(code) == \"Pending\"", "def _categoryMap (self):\n return self.__categoryMap", "def get_categories():\n # feel free to modify this as you like. 
just make sure that\n # the category is a valid Yelp category:\n # https://blog.yelp.com/businesses/yelp_category_list/#section21\n categories = [\n 'mexican', 'chinese', 'pizza', 'italian', 'thai', 'japanese',\n 'vietnamese', 'asianfusion', 'ethiopian', 'korean', 'indpak',\n 'mideastern', 'tapas', 'pakistani', 'brazilian', 'filipino',\n 'african', 'greek', 'coffee', 'dessert'\n ]\n categories.sort()\n return categories", "def characters(whitelist_categories=None, blacklist_categories=None,\n blacklist_characters=None, min_codepoint=None,\n max_codepoint=None):\n if (\n min_codepoint is not None and max_codepoint is not None and\n min_codepoint > max_codepoint\n ):\n raise InvalidArgument(\n 'Cannot have min_codepoint=%d > max_codepoint=%d ' % (\n min_codepoint, max_codepoint\n )\n )\n\n from hypothesis.searchstrategy.strings import OneCharStringStrategy\n return OneCharStringStrategy(whitelist_categories=whitelist_categories,\n blacklist_categories=blacklist_categories,\n blacklist_characters=blacklist_characters,\n min_codepoint=min_codepoint,\n max_codepoint=max_codepoint)", "def utility_characterization_factors(self) -> dict[tuple[str, str], tuple[float, AbsoluteUnitsOfMeasure]]:\n return bst.HeatUtility.characterization_factors", "def test_code_to_category_substance_admin(self):\n # Substance Administration Query, PS3.4 Annex V\n c2c = code_to_category\n\n assert c2c(0x0000) == \"Success\"\n for code in [0xA700, 0xA900]:\n assert c2c(code) == \"Failure\"\n for code in range(0xC000, 0xD000):\n assert c2c(code) == \"Failure\"\n assert c2c(0xFE00) == \"Cancel\"\n assert c2c(0xB000) == \"Warning\"\n for code in [0xFF00, 0xFF01]:\n assert c2c(code) == \"Pending\"", "def _discretize_feature(\n feature: np.ndarray,\n category_map: List[Tuple[float, str]]\n):\n cat_feat = np.zeros(feature.shape[0]).astype(str)\n for lower_bound, category in category_map:\n cat_feat[feature >= lower_bound] = category\n return cat_feat", "def gen_kmers(kmin, kmax, alphabet=\"ACGT\"):\r\n\r\n for n in xrange(kmin, kmax + 1):\r\n kmer_lis = [''.join(mer) for mer in prod(alphabet, repeat=n)]\r\n\r\n return kmer_lis" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Docutils punctuation category sample strings. Return a list of sample strings for the categories "Open", "Close", "Delimiters" and "ClosingDelimiters" used in the `inline markup recognition rules`_.
def punctuation_samples(): # Lists with characters in Unicode punctuation character categories cp_min = 160 # ASCII chars have special rules for backwards compatibility ucharlists = unicode_charlists(unicode_punctuation_categories, cp_min) # match opening/closing characters # -------------------------------- # Rearange the lists to ensure matching characters at the same # index position. # low quotation marks are also used as closers (e.g. in Greek) # move them to category Pi: ucharlists['Ps'].remove('‚') # 201A SINGLE LOW-9 QUOTATION MARK ucharlists['Ps'].remove('„') # 201E DOUBLE LOW-9 QUOTATION MARK ucharlists['Pi'] += ['‚', '„'] ucharlists['Pi'].remove('‛') # 201B SINGLE HIGH-REVERSED-9 QUOTATION MARK ucharlists['Pi'].remove('‟') # 201F DOUBLE HIGH-REVERSED-9 QUOTATION MARK ucharlists['Pf'] += ['‛', '‟'] # 301F LOW DOUBLE PRIME QUOTATION MARK misses the opening pendant: ucharlists['Ps'].insert(ucharlists['Pe'].index('\u301f'), '\u301d') # print u''.join(ucharlists['Ps']).encode('utf8') # print u''.join(ucharlists['Pe']).encode('utf8') # print u''.join(ucharlists['Pi']).encode('utf8') # print u''.join(ucharlists['Pf']).encode('utf8') # The Docutils character categories # --------------------------------- # # The categorization of ASCII chars is non-standard to reduce both # false positives and need for escaping. (see `inline markup recognition # rules`_) # matching, allowed before markup openers = [re.escape('"\'(<[{')] for cat in ('Ps', 'Pi', 'Pf'): openers.extend(ucharlists[cat]) # matching, allowed after markup closers = [re.escape('"\')>]}')] for cat in ('Pe', 'Pf', 'Pi'): closers.extend(ucharlists[cat]) # non-matching, allowed on both sides delimiters = [re.escape('-/:')] for cat in ('Pd', 'Po'): delimiters.extend(ucharlists[cat]) # non-matching, after markup closing_delimiters = [re.escape('.,;!?')] # # Test open/close matching: # for i in range(min(len(openers),len(closers))): # print '%4d %s %s' % (i, openers[i].encode('utf8'), # closers[i].encode('utf8')) return [''.join(chars) for chars in (openers, closers, delimiters, closing_delimiters)]
[ "def punctuation_samples():\r\n\r\n # Lists with characters in Unicode punctuation character categories\r\n cp_min = 160 # ASCII chars have special rules for backwards compatibility\r\n ucharlists = unicode_charlists(unicode_punctuation_categories, cp_min)\r\n\r\n # match opening/closing characters\r\n # --------------------------------\r\n # Rearange the lists to ensure matching characters at the same\r\n # index position.\r\n\r\n # low quotation marks are also used as closers (e.g. in Greek)\r\n # move them to category Pi:\r\n ucharlists['Ps'].remove(u'‚') # 201A SINGLE LOW-9 QUOTATION MARK\r\n ucharlists['Ps'].remove(u'„') # 201E DOUBLE LOW-9 QUOTATION MARK\r\n ucharlists['Pi'] += [u'‚', u'„']\r\n\r\n ucharlists['Pi'].remove(u'‛') # 201B SINGLE HIGH-REVERSED-9 QUOTATION MARK\r\n ucharlists['Pi'].remove(u'‟') # 201F DOUBLE HIGH-REVERSED-9 QUOTATION MARK\r\n ucharlists['Pf'] += [u'‛', u'‟']\r\n\r\n # 301F LOW DOUBLE PRIME QUOTATION MARK misses the opening pendant:\r\n ucharlists['Ps'].insert(ucharlists['Pe'].index(u'\\u301f'), u'\\u301d')\r\n\r\n # print u''.join(ucharlists['Ps']).encode('utf8')\r\n # print u''.join(ucharlists['Pe']).encode('utf8')\r\n # print u''.join(ucharlists['Pi']).encode('utf8')\r\n # print u''.join(ucharlists['Pf']).encode('utf8')\r\n\r\n # The Docutils character categories\r\n # ---------------------------------\r\n #\r\n # The categorization of ASCII chars is non-standard to reduce both\r\n # false positives and need for escaping. (see `inline markup recognition\r\n # rules`_)\r\n\r\n # matching, allowed before markup\r\n openers = [re.escape('\"\\'(<[{')]\r\n for cat in ('Ps', 'Pi', 'Pf'):\r\n openers.extend(ucharlists[cat])\r\n\r\n # matching, allowed after markup\r\n closers = [re.escape('\"\\')>]}')]\r\n for cat in ('Pe', 'Pf', 'Pi'):\r\n closers.extend(ucharlists[cat])\r\n\r\n # non-matching, allowed on both sides\r\n delimiters = [re.escape('-/:')]\r\n for cat in ('Pd', 'Po'):\r\n delimiters.extend(ucharlists[cat])\r\n\r\n # non-matching, after markup\r\n closing_delimiters = [re.escape('.,;!?')]\r\n\r\n # # Test open/close matching:\r\n # for i in range(min(len(openers),len(closers))):\r\n # print '%4d %s %s' % (i, openers[i].encode('utf8'),\r\n # closers[i].encode('utf8'))\r\n\r\n return [u''.join(chars)\r\n for chars in (openers, closers, delimiters, closing_delimiters)]", "def _run_split_on_punctuation(self, text):\n\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if self._is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]", "def test_all_punctuation_effectively_removed(self):\n text1 = 'THe, quick; (br-own): fox! \"jumped\" <> - -- [over] the\\ /lazy dog.'\n text2 = \"He shouted, 'Hello!'\"\n textlist = text_to_list(text1) + text_to_list(text2)\n for char in '-.,\\;\":_*!\\n' or \"'[]()/\":\n if char in textlist:\n return False", "def filter_words_by_punctuation(self):\n list_ = []\n for word in self.words:\n if \"'\" in word:\n list_.append(word)\n return list_", "def create_punctC_representation(lyrics): # based on 'Bleaching text: Abstract features for cross-lingual gender prediction', van der Goot et al. 
2018\n\t\n\tpunctC_repr = \"\"\n\tfor sentence in lyrics.split('\\n'):\n\t\tsentence_repr = ''\n\t\tfor word in sentence.split():\n\t\t\tpunctC = \"\"\n\t\t\tfor char in word:\n\t\t\t\tif char not in string.punctuation:\n\t\t\t\t\tpunctC += 'W'\n\t\t\t\telse:\n\t\t\t\t\tpunctC += char\n\t\t\tpunctC = re.sub(\"W+\", \"W\", punctC) + ' '\n\t\t\tsentence_repr += punctC\n\t\tpunctC_repr += sentence_repr.rstrip() + '\\n'\n\t\t\n\treturn punctC_repr.rstrip()", "def get_punctuation_frequency(self):\r\n \r\n punctuations = list(string.punctuation)\r\n #Dataframe to store the punctuation occurences only\r\n punc_occ = pd.DataFrame(columns=['punctuation', 'occurence'])\r\n #Loop for identifying rows with punctuations in the instance variable\r\n for i, row in self.char_occ.iterrows():\r\n if row['character'] in punctuations:\r\n temp_df = pd.DataFrame([[row['character'],row['occurence']]], \r\n columns=['punctuation', 'occurence'])\r\n punc_occ = punc_occ.append(temp_df, ignore_index=True)\r\n \r\n return punc_occ", "def apply_nlp(category):\n if \" \" in category:\n if \" for \" in category:\n idx = category.find(\" for \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 5 :])\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"(\" in category:\n start = category.find(\"(\")\n end = category.find(\")\")\n outer = strip_article(category[:start] + \" \" + category[end + 1 :])\n inner = strip_article(category[start + 1 : end])\n return [outer, inner, *apply_nlp(outer), *apply_nlp(inner)]\n elif \" with \" in category:\n idx = category.find(\" with \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 6 :])\n return [prefix, suffix, *apply_nlp(prefix), *apply_nlp(suffix)]\n elif \" of \" in category:\n idx = category.find(\" of \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \" from \" in category:\n idx = category.find(\" from \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"&\" in category:\n categories = [strip_article(c) for c in category.split(\"&\")]\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" and \" in category or \",\" in category:\n categories = []\n while \" and \" in category or \",\" in category:\n and_idx = category.find(\" and \")\n comma_idx = category.find(\",\")\n if and_idx >= 0 and comma_idx >= 0:\n idx = min(and_idx, comma_idx)\n elif and_idx >= 0:\n idx = and_idx\n elif comma_idx >= 0:\n idx = comma_idx\n else:\n idx = -1\n if idx >= 0:\n categories.append(strip_article(category[:idx]))\n if category[idx] == \",\":\n category = category[idx + 1 :]\n else:\n category = category[idx + 5 :]\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category.strip().strip(\"()[]\")))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" or \" in category:\n categories = []\n while \" or \" in category:\n idx = category.find(\" or \")\n if idx >= 0:\n 
categories.append(strip_article(category[:idx]))\n category = category[idx + 4 :].strip().strip(\"()[]\")\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n else:\n categories = category.split()\n return [\" \".join(categories[-idx:]) for idx in range(len(categories) - 1, 0, -1)]\n else:\n return []", "def get_punctuation_tokens(tokens):\n punct_tokens = []\n punct_tokens = punct_tokens + [term for term in tokens\n if term in string.punctuation]\n return punct_tokens", "def categories(self, word):\n ...", "def StopW_Punct():\r\n punctList = [\"!\",'\"',\"#\",\"$\",\"%\",\"&\",\"'\",\"(\",\")\",\"*\",\"+\",\",\",\"-\",\".\",\"/\",\":\",\";\",\"<\",\"=\",\">\",\"?\",\"@\",\"[\",\"{\",\"|\",\"}\",\"~\",\"^\",\"_\",\"]\",\"`\"]\r\n return punctList", "def _split_on_punctuation(self, tokens: List[str]) -> List[str]:\n punctuation = []\n punctuation.extend(self.terminators)\n if self.allow_redirection:\n punctuation.extend(constants.REDIRECTION_CHARS)\n\n punctuated_tokens = []\n\n for cur_initial_token in tokens:\n\n # Save tokens up to 1 character in length or quoted tokens. No need to parse these.\n if len(cur_initial_token) <= 1 or cur_initial_token[0] in constants.QUOTES:\n punctuated_tokens.append(cur_initial_token)\n continue\n\n # Iterate over each character in this token\n cur_index = 0\n cur_char = cur_initial_token[cur_index]\n\n # Keep track of the token we are building\n new_token = ''\n\n while True:\n if cur_char not in punctuation:\n\n # Keep appending to new_token until we hit a punctuation char\n while cur_char not in punctuation:\n new_token += cur_char\n cur_index += 1\n if cur_index < len(cur_initial_token):\n cur_char = cur_initial_token[cur_index]\n else:\n break\n\n else:\n cur_punc = cur_char\n\n # Keep appending to new_token until we hit something other than cur_punc\n while cur_char == cur_punc:\n new_token += cur_char\n cur_index += 1\n if cur_index < len(cur_initial_token):\n cur_char = cur_initial_token[cur_index]\n else:\n break\n\n # Save the new token\n punctuated_tokens.append(new_token)\n new_token = ''\n\n # Check if we've viewed all characters\n if cur_index >= len(cur_initial_token):\n break\n\n return punctuated_tokens", "def print_categories():\n category_list = ['60fps', 'amateur', 'anal', 'arab', 'asian', 'bbw(big busty women)', 'babe', 'babysitter',\n 'btscenes(behind the scenes)',\n 'bigass', 'bigdick', 'titslg(big tits)', 'bimale', 'blonde', 'bj(blowjob)', 'bondage', 'brazilian',\n 'british', 'brunette',\n 'bukkake', 'cartoon', 'casting', 'celeb', 'cc', 'college', 'comp(compilation)', 'cosplay',\n 'creampie', 'cuckold',\n 'cumshot', 'czech', 'described', 'dp', 'ebony', 'euro', 'exclusive', 'feet',\n 'femaleorgy(female orgasm)',\n 'fetish', 'fisting', 'french', 'funny', 'gangbang', 'gay', 'german', 'hd', 'handjob', 'hardcore',\n 'hentai',\n 'indian', 'interactive', 'interracial', 'italian', 'japanese', 'korean', 'latina', 'lesbian',\n 'milf', 'massage',\n 'masturbate', 'mature', 'musclemen', 'music', 'oldyoung', 'orgy', 'pov', 'parody', 'party', 'piss',\n 'popww(popular with women)', 'pornstar', 'public', 'pussylick', 'reality', 'redhead',\n 'rp(roleplay)',\n 'romantic', 'rough', 'russian', 'sfw(safe for work)', 'school', 'titssm(small tits)', 'smoking', 'solofemale',\n 'solomale',\n 'squirt', 'step(step fantasy)', 'strip(striptease)', 'tatwomen(tatooed women)', 'teen', '3some',\n 'toys',\n 'tmale(transmale)', 'twgirl(trans 
with girl)', 'twguy(trans with guy)', 'trans(transgender)',\n 'veramateurs(verified amateurs)', 'vercouples(verified couples)', 'vermodels(verified models)',\n 'vintage', 'vr(virtual reality)', 'webcam']\n print(category_list)", "def sentence_punctuation():\n check50.run(\"python3 readability.py\").stdin(\"Congratulations! Today is your day. You're off to Great Places! You're off and away!\").stdout(\"Grade\\D+3\", \"Grade 3\\n\").exit(0)", "def get_sample_categories(self):\n # TODO: cache these results since they change very rarely\n result = self.get(cc_urls['sample_categories'])\n return result['sample_categories']", "def segment_by_punctuation(text: str):\n\treturn nltk.sent_tokenize(text)", "def _split_punctuation(self, word):\n\n opening_puncts = []\n closing_puncts = []\n core_token = word\n\n (off1, off2, tok) = word\n\n while True:\n if not tok:\n break\n found_punc = punctuation_pattern.search(tok[0])\n if found_punc:\n opening_puncts.append((off1, off1 + 1, tok[0]))\n core_token = (off1 + 1, off2, tok[1:])\n off1 += 1\n tok = tok[1:]\n else:\n break\n \n while True:\n if not tok:\n break\n found_punc = punctuation_pattern.search(tok[-1])\n if found_punc:\n closing_puncts.append((off2 - 1, off2, tok[-1]))\n core_token = (off1, off2 - 1, tok[:-1])\n off2 += -1\n tok = tok[:-1]\n else:\n break\n\n # need to reverse because the closing punctuations were added from the\n # end\n closing_puncts.reverse()\n return opening_puncts, core_token, closing_puncts", "def buildCategoryKeywords(categoryname):\n catKeywords = []\n catKeywords2 =[]\n \n catKeywords = re.findall('[A-Z]+[^A-Z ]*', categoryname)\n\n for word in catKeywords:\n noSpaceWord = word.replace(\" \", \"\")\n catKeywords2.append(noSpaceWord)\n \n return catKeywords2", "def test_category_format_Category(self):\n data = [pywikibot.Category(self.site, 'Cat1'),\n pywikibot.Category(self.site, 'Cat2')]\n self.assertEqual(self.catresult,\n textlib.categoryFormat(data, self.site))", "def test_category_format_bare(self):\n self.assertEqual(self.catresult,\n textlib.categoryFormat(['Cat1', 'Cat2'], self.site))", "def test_valid_punctuation():\n assert rw('What did they say? Say what again!') == 'say'\n assert rw('I am... that am!') == 'am'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append the separator for table head.
def append_separator(self, separator): self._rows.append([separator])
[ "def add_divider(self):\n self.page += '<hr style=\"clear:both;\"/>\\n'", "def set_separator(self) -> None:\n self.separator = len(self.lines)", "def __writeSeparator(self, indent):\n self.__dev.write(\" \" * indent)\n self.__dev.write(\"<HR>\\n\")", "def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCols + \"|}\\n\"", "def print_row_separator(columns):\n print(\"--+\", end=\"\")\n for i in range(1, columns - 1):\n print(\"---+\", end=\"\")\n print(\"--\")", "def getSeparator(self) -> str:\n ...", "def test_custom_hor_split(self):\n tab = tabl.Tabl()\n tab.set_hor('~')\n string = tab.to_table([['a']])\n self.assertEqual('+~+\\n' + \\\n '|a|\\n' + \\\n '+~+\\n', string)", "def InsertCellSeparator(self):\n #dlg = wx.TextEntryDialog(self, 'Enter code cell separator label text:',\n # 'Insert code cell separator', '')\n dlg = CellDialog(self, -1)\n\n if dlg.ShowModal() == wx.ID_OK:\n label = dlg.GetValue()\n\n #If not at the start of a line add \\n\n pos = self.GetCurrentPos()\n indent = self.GetColumn(pos)\n if indent!=0:\n self.InsertText(pos,'\\n')\n self.SetCurrentPos(pos+1)\n\n #add the separator\n pos = self.GetCurrentPos()\n line = self.LineFromPosition(pos)\n pos = self.PositionFromLine(line)\n self.InsertText(pos,label)\n\n #move to end of separator\n self.SetCurrentPos(pos+len(label))\n self.SetAnchor(pos+len(label))\n\n dlg.Destroy()", "def append_header(self):\r\n # NOTE before everything\r\n # .TH title_upper section date source manual\r\n if self.header_written:\r\n return\r\n self.head.append(self.header())\r\n self.head.append(MACRO_DEF)\r\n self.header_written = 1", "def _render_column_separator(self, type: int = BORDER_OUTSIDE) -> str:\n borders = self._style.border_chars\n\n return self._style.border_format.format(\n borders[1] if type == self.BORDER_OUTSIDE else borders[3]\n )", "def _generate_top_border(self):\n self._generate_separator(left_char='╭', right_char='╮')", "def add_menu_separator(self):\n if self._menu is None:\n self._create_menu()\n self._menu.addSeparator()", "def _generate_header(self):\n margin_str = ' ' * self.column_margin\n top = '┌'\n headings = '│'\n heading_sep = '╞'\n row_sep = '├'\n self._bottom = '└'\n for i, col in enumerate(self.columns, start=1):\n top += ('─' * (col.width + 2 * self.column_margin)\n + ('┐' if i == len(self.columns) else '┬'))\n headings += margin_str + col.get_header_cell() + margin_str + '│'\n heading_sep += ('═' * (col.width + 2 * self.column_margin)\n + ('╡' if i == len(self.columns) else '╪'))\n row_sep += ('─' * (col.width + 2 * self.column_margin)\n + ('┤' if i == len(self.columns) else '┼'))\n self._bottom += ('─' * (col.width + 2 * self.column_margin)\n + ('┘' if i == len(self.columns) else '┴'))\n if self.title:\n self._text_lines.append(self.title)\n self._text_lines.append(top)\n if self.include_headings:\n self._text_lines.append(headings)\n self._text_lines.append(heading_sep)\n self._row_separator = row_sep if self.use_row_separators else None", "def addSeparator(self, *args) -> \"adsk::core::Ptr< adsk::core::ListItem >\" :\n return _core.ListItems_addSeparator(self, *args)", "def getSeparator(self):\r\n return '/'", "def separator(self, menu):\n return menu.AppendSeparator()", "def getSeparator(self):\n return '/'", "def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header", "def print_headers():\n print(\"symbol\\t 
count\\t price\\t\\t total\")\n print(\"-\" * 71)", "def get_divider(source, tbl_filt_label, tbl_filt, *, page_break_before=False):\r\n debug = False\r\n filt_msg = lib.FiltLib.get_filt_msg(tbl_filt_label, tbl_filt)\r\n pagebreak = ' page-break-before: always ' if page_break_before else ''\r\n div = (f'\\n<br><br>\\n<hr style=\"clear: both; {pagebreak}\">\\n{source}'\r\n f'\\n<p>{filt_msg}</p>\\n{mg.VISUAL_DIVIDER_BEFORE_THIS}')\r\n if debug: print(div)\r\n return div" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a commented version of the passed text.
def comment(self, text): return self.comment_begin(text)+'.\n'
[ "def get_comment_text():\n first = comment_start + len(lang.comment_start)\n return line[first:]", "def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin metusd metusdo quamnunc utcras facilis nequen.\",\n \"Adipisci ent neque eger vehicula dis. Miquis auctorpr quamphas purusp phasel duifusce parturi. Ris liberoa ligula lacini risus nean. Arcualiq cubilia aenean nuncnunc ulum fringi uisque abitur rerit setiam. Nean miproin aliquet risusvi tempusp aliquete. Integer nequenu bulum ibulum laoree accumsan ellus mus odio uis. Amet curae ivamus congue aliquama liberofu que.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In justov volutpat mus habitas dapibusc nequenu volutp justo. Quam blandi tur maurisd egesta erossed morbi turpis risus tate. Lacusp facilis class vehicula varius iaculis setiam montes pharetra. Usce ecenas quispr naeos nec nibhphas lacinia roin. Abitur maurisma metusqui justop uscras llam enas. Magnaqu faucibus sduis arcualiq imperd teger egetlor teger.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Conseq tristiq enas duis sociosqu eduis enimsed tudin vel. Lus semnunc risusm nulla parturi atein at placerat. Tiam laut nibhnul turpisn vitaenul eleifen commodo euismo quat posuered. Egestas nullain justop maurisin purusp donec nas liberofu aptent. Nec aliquam tiam puruscra turpisp luctus proin. Lectusin turpisn usce orcivest nullam eget arcuduis tdonec min. Esent cursus vulput aenean bulum lacini congued pretiu. Portamor bulum tate isse llam cidunt estmae.\\n\\nSque leocras fusce nullap fusce convall laoreet nibhnull estsusp. Roin aliquet esent ctetur blandit etiam nequesed viverr. Nislqu sse orciduis lacusp in tasse gravida lla ullam. Itnunc id mauris rerit entum disse lacinia. Oin luctus velit musetiam onec potenti ipsump volutp. Tortor musetiam bibendum onec esent libero esque sim. Enas ras eclass placerat sedin risusut vulput enimdon montes. Rhoncus dolorma estsusp facilis etsed llaut esque cursus. Nisl ullamcor tincid llus nulla iaculis.\",\n ]\n return random.choice(comment_samples)", "def comment(s):\n return '\\n'.join('// ' + line if line else '' for line in s.split('\\n'))", "def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()", "def comment(self, text):\n \n self.stream.write(\"# {}\\n\".format(text))", "def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)", "def get_comment(self):\n return str(self.gui.txt_comment.text())", "def commentify(lang):\n plaintext = pyperclip.paste().split('\\n')\n\n if lang == 'python':\n comment = ['###\\n']\n char = ' # '\n end = '###\\n'\n\n else:\n comment = ['/*\\n']\n char = ' * '\n end = '*/\\n'\n\n for line in plaintext:\n comment.append(char + line + '\\n')\n\n comment.append(end)\n return ''.join(comment)", "def _comment_format(self, path):\n _, extension = os.path.splitext(path)\n return '# {}\\n' if extension == '.py' else '<!-- {} -->'", "def extract_comments(self, sid, text):\n pass", "def remove_comment_lines_in_str(text_data):\n try:\n from StringIO import StringIO # python 2\n except ImportError:\n from io import StringIO # python 3\n\n newData = ''\n\n for line in StringIO(text_data).readlines():\n # rstrip() will keep the _indent but remove all white spaces including '\\n'\n stripped_line = line.strip()\n line = line.rstrip()\n # The Shebang line should survive. 
shouldn't she?\n if stripped_line.startswith(('#!', '# -*-')):\n newData += line + '\\n'\n # user wants to leave a comment\n elif stripped_line.startswith(('##', '!!')):\n newData += line.replace(stripped_line[0:2], stripped_line[:1], 1) + '\\n'\n # Also keep existing empty lines\n elif not stripped_line:\n newData += line + '\\n'\n # But remove lines that only contains comments\n elif stripped_line.startswith(('#', '!', 'REM')):\n pass\n else:\n # the comments after the code will remain.\n newData += line + '\\n'\n\n return newData", "def add_escapement_back_for_not_comments(text):\n return text.replace(COMMENT_MARKER, ESCAPE_SYM+COMMENT_MARKER)", "def ps1_comment(line: str) -> str:\n return f\"# {line}\"", "def to_comment(comment):\n return '#' + re.sub(r'[^\\x00-\\xFF]', _esc,\n re.sub(r'\\n(?![#!])', '\\n#',\n re.sub(r'\\r\\n?', '\\n', comment)))", "def format_comment(listed_game):\n\n comment = listed_game.comment\n\n if comment:\n return comment\n else:\n return \"\"", "def remove_comments(tex):\n return re.sub(r'%(.+)\\n', r'', tex)", "def _commentWrap(self, message, text, length):\n\n comment = message + \" \" + text\n\n return self._wrapText(comment, 0, \"* \", 70)", "def commentLine(self, line):\n # if the line is non-empty\n if line:\n # mark it\n return self.leader + self.startBlock + ' ' + line + ' ' + self.endBlock\n # otherwise, just return the comment characters\n return line", "def cmd_comment(line: str) -> str:\n return f\"@REM {line}\"", "def rem_comment(line):\n return line.split(\"#\", 1)[0].rstrip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the last line in the body is terminated by a newline.
def ensure_eol(self): if len(self.body) > 0 and self.body[-1][-1] != '\n': self.body.append('\n')
[ "def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'", "def ensure_newline(self, n):\n assert n >= 0\n text = self._output.getvalue().rstrip('\\n')\n if not text:\n return\n self._output = StringIO()\n self._output.write(text)\n self._output.write('\\n' * n)\n text = self._output.getvalue()\n assert text[-n-1] != '\\n'\n assert text[-n:] == '\\n' * n", "def end(self):\n line = '%s End' % self.tstamp\n if line not in self.lines:\n self.blank()\n self.lines.append(line)", "def rfc6376_simple_body(body: bytes) -> bytes:\n # In DKIM simple body, an empty body becomes CRLF\n body = body or b\"\\r\\n\"\n while body.endswith(b\"\\r\\n\\r\\n\"):\n body = body[:-2]\n return body", "def rfc5322_endings(data: bytes) -> bytes:\n # v\n # [^\\r]\\n -> [^\\r]\\r\\n\n # v\n # \\r[^\\n] -> \\r\\n[^\\n]\n # v\n # \\r$ -> \\r\\n$\n CR: int = 0x0D\n LF: int = 0x0A\n this: int\n prev: Optional[int] = None\n output: bytearray = bytearray()\n for this in data:\n if (this == LF) and (prev != CR):\n output.extend(b\"\\r\\n\")\n elif (prev == CR) and (this != LF):\n output.extend(b\"\\n\")\n output.append(this)\n else:\n output.append(this)\n prev = this\n if prev == CR:\n output.append(LF)\n return bytes(output)", "def test_ends_newline(self):\r\n text = 'A line\\nAnother line\\nAnd a final one.\\n'\r\n expected_res = text.split('\\n')\r\n for res, expected in zip(split_by_newline(text), expected_res):\r\n self.assertEqual(res[1], expected)", "def add_newline(self):\n if len(self.gem) == 0 or self.gem[-1] == '\\n':\n return\n self.gem += \"\\n\"", "def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line", "def handle_newline(self, token_type: int) -> None:\n assert self.processor is not None\n if token_type == tokenize.NEWLINE:\n self.run_logical_checks()\n self.processor.reset_blank_before()\n elif len(self.processor.tokens) == 1:\n # The physical line contains only this token.\n self.processor.visited_new_blank_line()\n self.processor.delete_first_token()\n else:\n self.run_logical_checks()", "def append_newline(string):\n if len(string) > 0 and string[-1] != '\\n':\n string += \"\\n\"\n return string", "def test_alternate_eol(self):\n if hasattr(self.s, 'xreadlines'): # test if it is our FileLike base class\n self.s.write(serial.to_bytes(\"no\\rno\\nyes\\r\\n\"))\n self.assertEqual(\n self.s.readline(eol=serial.to_bytes(\"\\r\\n\")),\n serial.to_bytes(\"no\\rno\\nyes\\r\\n\"))", "def line_feed(self):\n self._stream.write(self._line_separator)\n self._is_new_line = True\n return self", "def __get_line_ending(self, file_content):\r\n\r\n ending = LINE_ENDINGS.search(file_content)\r\n return \"\\r\" if ending is not None and ending.group(2) else \"\\n\"", "def end(self):\n while self.position < len(self.document.characters) and self.document.characters[self.position] != '\\n':\n self.position += 1", "def _fixupEOL(self, doc):\n eolPref = self._globalPrefsvc.prefs.getStringPref(\"endOfLine\")\n try:\n eol = eollib.eolPref2eol[eolPref]\n except KeyError:\n # Be paranoid: stay with system default if pref value is bogus.\n log.exception(\"unexpected 'endOfLine' pref value: %r\", eolPref)\n eol = eollib.EOL_PLATFORM\n doc.existing_line_endings = eol\n doc.new_line_endings = eol\n doc.isDirty = 0", "def _end_of(self, output, lines):\n return '\\n'.join(output.split('\\n')[-lines:])", "def testReadlinesWithFileWithoutNewLineAtEnd(self):\n test_file_path = self._GetTestFilePath(['bodyfile'])\n 
self._SkipIfPathNotExists(test_file_path)\n\n test_path_spec = os_path_spec.OSPathSpec(location=test_file_path)\n file_object = path_spec_resolver.Resolver.OpenFileObject(\n test_path_spec, resolver_context=self._resolver_context)\n\n line_reader = line_reader_file.BinaryLineReader(file_object)\n\n lines = line_reader.readlines()\n\n self.assertEqual(len(lines), 25)", "def idempotent_append_newline(string):\n if type(string) is not str:\n raise TypeError\n if string.endswith('\\n'):\n return string\n else:\n return string + '\\n'", "def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append the header with .TH and .SH NAME.
def append_header(self): # NOTE before everything # .TH title_upper section date source manual if self.header_written: return self.head.append(self.header()) self.head.append(MACRO_DEF) self.header_written = 1
[ "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n file_sam = open(os.path.join(args.output_dir,'watsonAligned.out.sam'))\n print(file_sam)\n for line in file_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def _create_header(self):\r\n t = time.localtime() # get current time\r\n time_string = \"%d/%d/%d %d:%d:%d\" % (t.tm_mday,\r\n t.tm_mon,\r\n t.tm_year,\r\n t.tm_hour,\r\n t.tm_min,\r\n t.tm_sec)\r\n self.header_lines = \"\"\"#Filename = \"\"\" + self.filename + \"\"\" \r\n#Date Saved = \"\"\" + time_string + \"\"\"\r\nFileType = 111\r\nVersion = 7.11\r\n\r\n\"\"\"", "def add_header(self, *args, **kwargs):\r\n self.header = True\r\n self.add_row(ypos=0, *args, **kwargs)", "def add_header(self, drawing, header_type, value):\n drawing.header[header_type] = value", "def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def real_header(self):\n\n header = \"n,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_usze_G,th_usze_G,l2_ufsze_G,th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def createHeading(self):\n\t\tfieldNames = ['Year','Month','State','District']\n\t\tfor i in range(1,43):\n\t\t\tfieldNames.append('col '+str(i))\n\t\twith open(self.filepath, 'w') as PMGSYFile:\n\t\t\tcsvWriter = csv.writer(PMGSYFile)\n\t\t\tcsvWriter.writerow(fieldNames)\n\t\tPMGSYFile.close()", "def add_headers(sheet):\n row = ['URL', 'Title Text', 'Title Length', 'Bad Anchor Text', 'No Alt Text', 'H1 Tags', \"Reading Score\"]\n title_list = sheet.range('A1:G1')\n for n in range(len(row)):\n title_list[n].value = row[n]\n\n sheet.update_cells(title_list)", "def tvp_writeheader( self ):\n dsw = self.dsw\n\n if self.verbosity and self.verbosity is not None:\n print (\"tvp_writeheader:header_name=%s, fieldnames='%s'\"\n % (dsw.header_name, repr(self.fieldnames)))\n # fieldnames is authoritative ordered list of colnames\n # because in future od_column_type will not be required to have\n # all columns\n with open(dsw.header_name, 'wb') as fh:\n for colname in self.fieldnames:\n # add logic to allow missing entry and use a default spec/type\n coltype = dsw.od_column_type[colname]\n line = \"%s\\t%s\\n\" % (colname, coltype)\n fh.write(line)", "def writeRowHeaders(self):\n titles = [\"Rule Name\", \"NAT Type\", \"Src Zone\", \"Dst Zone\", \"Dst Interface\", \"Orig Src Address\", \"Orig Dst Address\", \"Service\", \"Src Translation\", \"Dst Translation\", \"Description\", \"Disabled\"]\n i = 0\n for title in titles:\n worksheet.write(0, i, title, bold)\n i += 1", "def draw_header(self, stream, header):\n stream.writeln(header)\n 
stream.writeln('~' * len(header))\n stream.writeln()", "def hvp_writeheader( self ):\n dsw = self.dsw\n columns_line = dsw.delimiter.join(self.fieldnames)\n if self.verbosity and self.verbosity is not None:\n print (\"hvp_writeheader:header_name=%s, Columns='%s'\"\n % (dsw.header_name,repr(columns_line)))\n\n with open(self.dsw.header_name, 'wb') as fh:\n fh.write(columns_line)", "def append_header_row(self):\n if not self._exists:\n raise AssertionError(\"The data file has not been created. Use the 'Create Data File' keyword to create it.\")\n else:\n headers = self.row_definition.create_header_row()\n self._append(headers)", "def add_to_header(self, header_f, outfilename, line_ann):\n\n of = open(outfilename, 'w')\n\n # getting the type of line that is being passed\n p = re.compile(\"^##(\\w+)=\")\n\n m1 = p.match(line_ann)\n type_ann = m1.group(1)\n\n line_seen = False\n with open(header_f) as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n m2 = p.match(line)\n if m2 is None:\n of.write(line+\"\\n\")\n continue\n type1_ann = m2.group(1)\n if type_ann == type1_ann and line_seen is False:\n line_seen = True\n of.write(line+\"\\n\"+line_ann+\"\\n\")\n continue\n else:\n of.write(line+\"\\n\")\n of.close()\n\n return outfilename", "def createNewHeader(fileName, new_period):\n\n # Changes the new_period format to the one used in the files\n new_period = changeFormatTime(new_period)\n\n header = getHeader(fileName)\n\n header[INDEXPeriod] = new_period\n\n # Turns header into string, each line separated by commas. To understand the\n # use of commas, see outputStatus.writeServicesFile\n header = ','.join(header)\n\n # Deletes newlines\n header = header.replace('\\n', '')\n\n return header", "def add(self, t, header, ignore_existing=False):\n t = sanitize_t(t)\n header_file = Path(\n self.header_folder / (t.strftime(\"%Y_%m_%d_%H_%M_%S%z\") + \".csv\")\n )\n\n # print(header_file)\n header = \"\\n\".join(header)\n if not header_file.exists() or ignore_existing:\n with open(header_file, \"w\") as fp:\n fp.write(header)\n self._logger.info(f\"Added {file} header file.\")\n else:\n raise FileExistsError(\n f\"File {file} already exists, pass \" \"ignore_existing=True to replace.\"\n )", "def updateheader(self,data):\n # Update PRODuct TYPE keyword with step name, add history keyword\n data.setheadval('PRODTYPE',self.name,'Product Type')\n histmsg = 'Reduced: ' + self.name + ' v' + self.stepver + ' '\n histmsg += time.strftime('%Y-%m-%d_%H:%M:%S')\n data.setheadval('HISTORY',histmsg)\n # Add input parameters to history\n for p in [par[0] for par in self.paramlist]:\n histmsg = ' %s: %s=%s' % (self.name, p, self.getarg(p))\n data.setheadval('HISTORY',histmsg)\n # Update file name with .PipeStepName.fits\n data.filename = data.filenamebegin + self.procname.upper() + data.filenameend\n # Add config file name if available and not already present\n # in HISTORY\n try:\n # This may fail if config has no filename - in that case,\n # don't add the message.\n conffilename = '' + self.config.filename\n # New history message\n histmsg = 'CONFIG: %s' % conffilename\n # Check history for presence of the full message or possibly\n # a truncated version (eg. 
for long filenames in FITS headers)\n full_history = data.getheadval('HISTORY')\n if len(histmsg) > 72:\n shortmsg = histmsg[0:72]\n else:\n shortmsg = histmsg\n if histmsg not in full_history and shortmsg not in full_history:\n self.log.debug('Recording config file name %s' % conffilename)\n data.setheadval('HISTORY',histmsg)\n except TypeError:\n pass\n\n # Send log messages", "def mkHeaders(phdr, events_header, extver=1):\n\n headers = [phdr]\n # This is a reference, not a copy. Keywords will be updated (in other\n # functions) in headers[1], and the output corrtag header as well as the\n # flt and counts headers will contain the updated values.\n headers.append(events_header)\n\n err_hdr = fits.Header()\n dq_hdr = fits.Header()\n err_hdr[\"extname\"] = (\"ERR\", \"extension name\")\n dq_hdr[\"extname\"] = (\"DQ\", \"extension name\")\n err_hdr[\"extver\"] = (extver, \"extension version number\")\n dq_hdr[\"extver\"] = (extver, \"extension version number\")\n if \"rootname\" in events_header:\n rootname = events_header[\"rootname\"]\n err_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n dq_hdr[\"rootname\"] = (rootname, \"rootname of the observation set\")\n if \"expname\" in events_header:\n expname = events_header[\"expname\"]\n err_hdr[\"expname\"] = (expname, \"exposure identifier\")\n dq_hdr[\"expname\"] = (expname, \"exposure identifier\")\n if \"ra_aper\" in events_header:\n err_hdr[\"ra_aper\"] = (events_header[\"ra_aper\"],\n \"RA of reference aperture center\")\n if \"dec_aper\" in events_header:\n err_hdr[\"dec_aper\"] = (events_header[\"dec_aper\"],\n \"Declination of reference aperture center\")\n if \"pa_aper\" in events_header:\n err_hdr[\"pa_aper\"] = (events_header[\"pa_aper\"],\n \"Position Angle of reference aperture center (de\")\n if \"dispaxis\" in events_header:\n err_hdr[\"dispaxis\"] = (events_header[\"dispaxis\"],\n \"dispersion axis; 1 = axis 1, 2 = axis 2, none\")\n if \"ngoodpix\" in events_header:\n err_hdr[\"ngoodpix\"] = (-999, \"number of good pixels\")\n if \"goodmean\" in events_header:\n err_hdr[\"goodmean\"] = (-999., \"mean value of good pixels\")\n if \"goodmax\" in events_header:\n err_hdr[\"goodmax\"] = (-999., \"maximum value of good pixels\")\n\n headers.append(err_hdr)\n headers.append(dq_hdr)\n\n return headers", "def add_header(self, _name, _value, **_params):\r\n parts = []\r\n for k, v in _params.items():\r\n if v is None:\r\n parts.append(k.replace('_', '-'))\r\n else:\r\n parts.append(_formatparam(k.replace('_', '-'), v))\r\n if _value is not None:\r\n parts.insert(0, _value)\r\n self._headers.append((_name, SEMISPACE.join(parts)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new factory that is bound to the specified namespace.
def __getitem__(self, namespace): return ElementFactory(namespace)
[ "def __getitem__(self, namespace):\n return ElementFactory(namespace)", "def convert_namespace_to_factory(class_input):\r\n return decorate_class_methods(class_input, to_factory)", "def get_factory(package):\r\n return functools.partial(get, package)", "def __new__ (cls, *args, **kw):\n (uri,) = args\n if isinstance(uri, tuple):\n # Special handling to reconstruct absent namespaces.\n (variant, uid) = uri\n if cls.__SerializedVariantAbsent == variant:\n ns = cls.__AbsentNamespaceRegistry.get(uid)\n if ns is None:\n raise pyxb.UsageError('Unable to reconstruct instance of absent namespace')\n return ns\n raise pyxb.LogicError('Unrecognized serialized namespace variant %s uid %s' % (variant, uid))\n elif not (uri in cls.__Registry):\n instance = object.__new__(cls)\n # Do this one step of __init__ so we can do checks during unpickling\n instance.__uri = uri\n instance._reset()\n # Absent namespaces are not stored in the registry.\n if uri is None:\n cls.__AbsentNamespaces.add(instance)\n return instance\n cls.__Registry[uri] = instance\n return cls.__Registry[uri]", "def test_init_namespace(store_session):\n\n _, session = store_session\n\n Person = session.get_class(surf.ns.FOAF.Person)\n surf.ns.register(nstest=\"http://example.com/ns#\")\n\n # namespace is an instance of Namespace\n p = Person(namespace=surf.ns.NSTEST)\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"\n\n # namespace is an instance of URIRef\n p = Person(namespace=URIRef(\"http://example.com/ns#\"))\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"\n\n # namespace is string\n p = Person(namespace=\"http://example.com/ns#\")\n ns, _ = uri_split(p.subject)\n assert ns == \"NSTEST\"", "def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory, pvc_factory):\n return namespacestore_factory_implementation(\n request, cld_mgr, mcg_obj, cloud_uls_factory, pvc_factory\n )", "def factory(cls, *args, **kwargs):\n return Factory(cls).bind(*args, **kwargs)", "def bind_namespace(g, prefix, namespace):\n ns = Namespace(namespace)\n g.namespace_manager.bind(prefix, ns, override=False)\n return ns", "def convert_namespace_to_callable_factory(class_input):\r\n return decorate_class_methods(class_input, to_callable_factory)", "def factory( self ):\n return self._factory", "def getFactory(self):\n factory = ServerFactory()\n def protocol():\n proto = CredReceiver()\n proto.portal = Portal(\n self.loginSystem,\n [self.loginSystem,\n OneTimePadChecker(self._oneTimePads)])\n return proto\n factory.protocol = protocol\n return factory", "def factory(cls, sitename: str):\n return cls.subclasses[sitename]", "def getFactory(self):\n return self.factory", "def getNamespaceFromName(*args, **kwargs):\n \n pass", "def init_service(parent_device, service_root, service_type, namespace):\n try:\n service = service_map[service_type]\n except KeyError:\n service = Service\n\n return service(parent_device, service_root, service_type, namespace)", "def create_namespaced_binding(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_binding\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling 
`create_namespaced_binding`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_binding`\")\n\n resource_path = '/api/v1/namespaces/{namespace}/bindings'.replace('{format}', 'json')\n method = 'POST'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1Binding',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def ns(self, name):\n if not isinstance(name, str):\n raise TypeError('`name` must be a string.')\n\n if not name:\n raise ValueError('`name` must not be blank.')\n\n return self._get_namespace_class()(\n self._client,\n f'{self._path}/{name}',\n )", "def factoryAddress() -> address(Factory):\n return self.factory", "def __new__(cls, *args, **kw):\n factory = object.__new__(cls)\n factory.__init__(*args, **kw)\n return cls.source_class(factory)", "def returnFactory(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return Factory(func).bind(*args, **kwargs)\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
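The document in the record above indexes a factory by namespace URI and returns a fresh ElementFactory. A minimal usage sketch, assuming this is the element factory from Genshi's builder module (the genshi.builder import path and the printed serialization are assumptions):

    # Assumption: `tag` is genshi.builder's ElementFactory instance.
    from genshi.builder import tag

    # Indexing the factory with a namespace URI returns a new factory bound
    # to that namespace, as in the __getitem__ shown above.
    xhtml = tag['http://www.w3.org/1999/xhtml']
    elem = xhtml.div('Hello')   # builds a <div> element in that namespace
    print(elem)                 # serializes the element, namespace included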
Ensure that every item on the stream is actually a markup event.
def _ensure(stream):
    stream = iter(stream)
    event = next(stream)

    # Check whether the iterable is a real markup event stream by examining the
    # first item it yields; if it's not we'll need to do some conversion
    if type(event) is not tuple or len(event) != 3:
        for event in chain([event], stream):
            if hasattr(event, 'totuple'):
                event = event.totuple()
            else:
                event = TEXT, str(event), (None, -1, -1)
            yield event
        return

    # This looks like a markup event stream, so we'll just pass it through
    # unchanged
    yield event
    for event in stream:
        yield event
[ "def test_accept_feed_advice(self):\n pass", "def assert_no_events_published(self, event_type):\n for event in self.events:\n assert event['event_type'] != event_type", "def test_item_timestamp_missing(testapp):\n\n stream = todatetime.process(\n testapp,\n [holocron.Item({\"content\": \"the Force is strong with this one\"})],\n todatetime=\"timestamp\",\n )\n\n assert isinstance(stream, collections.abc.Iterable)\n assert list(stream) == [\n holocron.Item({\"content\": \"the Force is strong with this one\"})\n ]", "def is_normal_news_item(self):\n if self.text:\n return False\n else:\n return True", "def test_parse_semantics(self):\n\n items = pulldom.parseString(SMALL_SAMPLE)\n evt, node = next(items)\n # Just check the node is a Document:\n self.assertTrue(hasattr(node, \"createElement\"))\n self.assertEqual(pulldom.START_DOCUMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"html\", node.tagName)\n self.assertEqual(2, len(node.attributes))\n self.assertEqual(node.attributes.getNamedItem(\"xmlns:xdc\").value,\n \"http://www.xml.com/books\")\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt) # Line break\n evt, node = next(items)\n # XXX - A comment should be reported here!\n # self.assertEqual(pulldom.COMMENT, evt)\n # Line break after swallowed comment:\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(\"title\", node.tagName)\n title_node = node\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n self.assertEqual(\"Introduction to XSL\", node.data)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"title\", node.tagName)\n self.assertTrue(title_node is node)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"p\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n # XXX No END_DOCUMENT item is ever obtained:\n #evt, node = next(items)\n #self.assertEqual(pulldom.END_DOCUMENT, evt)", "def testInfiniteSendItem(self):\n self.item_generator = self.handler.readStream(self.bin_file)\n i = 0\n while True:\n self.item = self.handler.sendItem()\n if self.item == 'stream ended':\n self.assertTrue(self.item)\n break", "def test_encode_and_parse_all(self):\n p = mido.Parser()\n for spec in mido.messages.get_message_specs():\n if spec.type == 'sysex_end':\n # This is considered a part of 'sysex_start'.\n continue\n\n msg = Message(spec.type)\n p.feed(msg.bytes())\n outmsg = p.get_message()\n self.assertTrue(outmsg is not True)\n self.assertTrue(outmsg.type == spec.type)", "def check_for_event(self):\r\n a=self.read_chat(0)\r\n event=False\r\n 
finmes=\"\"\r\n next=False\r\n for m in a:\r\n if next==True:\r\n finmes=m\r\n break\r\n\r\n elif \"event\" in m:\r\n event=True\r\n next=True\r\n\r\n\r\n if event==True:\r\n finmes+=\" \"\r\n t1=finmes[finmes.find(\"Type\")+5:-1]\r\n\r\n self.write_to_chat(t1)\r\n\r\n t2=finmes[finmes.find(\"type\")+5:-1]\r\n self.write_to_chat(t2)\r\n\r\n for i in range(5):\r\n self.write_to_chat(t2)\r\n sleep(0.8)\r\n self.write_to_chat(t1)\r\n sleep(0.8)\r\n\r\n return True\r\n\r\n else:\r\n return False", "def test_template_tag(self):\n entries = Entry.objects.all()\n entries = get_published_entries(entries, 'en')\n self.assertEqual(len(entries), 1, msg=(\n 'Should return the entries that are published.'))", "def test_get_inbox_replier_events(self):\n pass", "def check_events(self, event:Event):\n pass", "def test_issue_104__ignore_exceptions(self):\n ical_str = \"\"\"\nBEGIN:VEVENT\nDTSTART:20140401T000000Z\nDTEND:20140401T010000Z\nDTSTAMP:20140401T000000Z\nSUMMARY:Broken Eevnt\nCLASS:PUBLIC\nSTATUS:CONFIRMED\nTRANSP:OPAQUE\nX\nEND:VEVENT\"\"\"\n event = icalendar.Calendar.from_ical(ical_str)\n self.assertTrue(isinstance(event, icalendar.Event))\n self.assertTrue(event.is_broken) # REMOVE FOR NEXT MAJOR RELEASE\n self.assertEqual(\n event.errors,\n [(None, \"Content line could not be parsed into parts: 'X': Invalid content line\")] # noqa\n )", "def _process_event(self, operation, event):\n\n event_type, data, pos = event\n if event_type == START:\n tag, attrs = data\n\n # check how these tag should be diffed\n diff_type = Html5Definition.get_diff_type(tag)\n if diff_type == DiffBehaviour.skip:\n # diffing of this tag and its contents should be skipped\n # passthrough whole tag to the output\n self._passthrough(event)\n return True\n elif diff_type == DiffBehaviour.as_block:\n # diff this tag as one element, to do that go through all\n self._process_block(event)\n return True\n\n self.append(event)\n self._enter(data[0])\n elif event_type == END:\n self._leave(data)\n self.append(event)\n else:\n self.append(event)\n\n return True", "def run_stream(self, stream, handler):\n for item in stream:\n if item is None:\n break\n handler(item)", "def strip_eof_messages(messages):\n if not messages:\n return messages, False\n stripped_messages = [message for message in messages\n if not isinstance(message, EOFNotification)]\n found_eof = len(stripped_messages) != len(messages)\n return stripped_messages, found_eof", "def process_events(self):\n pass", "def mightRender(self, text):\r\n return True", "def inject_meta_tags(self, stream, taglist):\n done = False\n meta_tag = None\n for ev, item in stream:\n if not done:\n if ev in (START, END):\n tag = self.tagname(item.tag)\n if meta_tag:\n if item.tag == meta_tag:\n if ev == START:\n for attributes in taglist:\n for attrib, value in item.items():\n attrib = attrib.lower()\n if attrib == 'content':\n continue\n if attrib not in attributes:\n break\n value = value.lower()\n if attributes[attrib] != value:\n break\n else:\n # that meta tag exists already\n attributes['content'] = None\n break\n else:\n for attributes in taglist:\n if attributes['content'] is None:\n continue\n meta_item = Element(meta_tag, **attributes)\n yield START, meta_item\n yield END, meta_item\n yield TEXT, '\\n'\n done = True\n elif tag == 'head' and ev == START:\n meta_tag = item.tag[:-4] + 'meta'\n yield ev, item", "def testWriteEventBody(self):\n formatters_manager.FormattersManager.RegisterFormatter(\n L2TTestEventFormatter)\n\n event, event_data = 
containers_test_lib.CreateEventFromValues(\n self._TEST_EVENTS[0])\n\n event_tag = events.EventTag()\n event_tag.AddLabels(['Malware', 'Printed'])\n\n self._formatter.WriteEventBody(event, event_data, event_tag)\n\n expected_event_body = (\n '06/27/2012,18:17:01,UTC,M...,LOG,Syslog,Content Modification Time,-,'\n 'ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '\n 'closed for user root),Reporter <CRON> PID: 8442 '\n '(pam_unix(cron:session): session closed for user root),'\n '2,log/syslog.1,-,Malware Printed,test_parser,'\n 'a_binary_field: binary; my_number: 123; some_additional_foo: True\\n')\n\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n\n # Ensure that the only commas returned are the 16 delimiters.\n self.assertEqual(event_body.count(','), 16)\n\n formatters_manager.FormattersManager.DeregisterFormatter(\n L2TTestEventFormatter)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
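A short sketch of what the _ensure helper above does: it normalizes an arbitrary iterable into (kind, data, pos) markup events, wrapping plain values as TEXT events. The genshi.core import path for this private helper is an assumption; the input list is made up for illustration:

    from genshi.core import _ensure   # private helper; import path is an assumption

    # Plain strings are not (kind, data, pos) tuples, so they come back as TEXT events.
    for kind, data, pos in _ensure(['hello', 'world']):
        print(kind, repr(data), pos)   # e.g. TEXT 'hello' (None, -1, -1)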
Return an item or slice of the attributes list.

>>> attrs = Attrs([('href', ''), ('title', 'Foo')])
>>> attrs[1]
('title', 'Foo')
def __getitem__(self, i):
    items = tuple.__getitem__(self, i)
    if type(i) is slice:
        return Attrs(items)
    return items
[ "def __getitem__(self, index):\n return self.attribute_values[index]", "def __getslice__(self, i, j):\r\n return Attrs(tuple.__getslice__(self, i, j))", "def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attributes_struct\n if x['Name'] == attr]\n if len(attribute_struct) > 1:\n raise tdapi.TDException(\"Too many attributes with name {}\".format(attr))\n elif len(attribute_struct) == 0:\n return\n else:\n return attribute_struct[0]['Value']", "def getAttrs(self, i):\n try:\n val = self._table[i]\n except IndexError, e:\n # raise e\n return None\n else:\n return val\n pass", "def _get_attributes(measure, position=-1):\n if len(measure) and measure[position].tag == 'attributes':\n return measure[position]\n else:\n attributes = etree.Element('attributes')\n measure.insert(position, attributes)\n return attributes", "def firstAttr(self, *attrs):\n for attr in attrs:\n value = getattr(self, attr, None)\n if value is not None:\n return value", "def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm", "def itemsByGroup(self, *args) -> \"std::vector< adsk::core::Ptr< adsk::core::Attribute >,std::allocator< adsk::core::Ptr< adsk::core::Attribute > > >\" :\n return _core.Attributes_itemsByGroup(self, *args)", "def __getitem__(self, key):\n if isinteger(key):\n return self._list[key]\n\n if isinstance(key, slice):\n return type(self)(self._list[key])\n\n return tuple(xml[key] for xml in self)", "def attributes(self, string, pos):\n\t\tfor (expr, attr) in self.attrs:\n\t\t\tmatch = re.search(expr, string)\n\t\t\tif match and match.start() == pos:\n\t\t\t\treturn attr\n\t\treturn 0", "def _getAttrMultiple(self, data, **kwargs):\n returnData = []\n if not data:\n return returnData\n\n for i in data:\n result = self._getAttr(i, **kwargs)\n if result:\n returnData.append(result)\n return returnData", "def get_next_attribute(self, attribute_list, records_df):\n if attribute_list:\n return attribute_list[0]\n else:\n return None", "def get_attribute(self,attr):\n\t\tif (attr is None):\n\t\t\traise ValueError(\"You must specify an attribute\")\n\t\tif (attr not in self._Attributes):\n\t\t\traise ValueError(\"Attribute \" + attr + \" unrecognized\")\n\t\treturn self._Attributes[attr]", "def get(iterable: Iterable[_T], **attrs: Any) -> _T | None:\n\n # global -> local\n _all = all\n attrget = attrgetter\n\n # Special case the single element call\n if len(attrs) == 1:\n k, v = attrs.popitem()\n pred = attrget(k.replace(\"__\", \".\"))\n for elem in iterable:\n if pred(elem) == v:\n return elem\n return None\n\n converted = [(attrget(attr.replace(\"__\", \".\")), value) for attr, value in attrs.items()]\n\n for elem in iterable:\n if _all(pred(elem) == value for pred, value in converted):\n return elem\n return None", "def getAttr(self,attr):\n try: return self.__getattribute__(attr)\n\texcept: return None", "def get_attr(self,n,attr):\n\t\tself.realopen(self.rohint)\t# make sure the database is open\n\t\ttry :\n\t\t\tret={}\n\t\t\tfor i in n:\n\t\t\t\td=loads(self.bdb.get(dumps(i,-1),txn=self.txn))\n\t\t\t\tif 
getattr(attr, '__iter__', False):\n\t\t\t\t\tret[i]={}\n\t\t\t\t\tfor a in attr:\n\t\t\t\t\t\tif a in d : ret[i][a]=d[a]\n\t\t\t\telse:\n\t\t\t\t\ttry: ret[i]=d[attr]\n\t\t\t\t\texcept: pass\n\t\t\treturn ret\n\t\texcept:\n\t\t\tif getattr(attr, '__iter__', False):\n\t\t\t\td=loads(self.bdb.get(dumps(n,-1),txn=self.txn))\n\t\t\t\tret={}\n\t\t\t\tfor a in attr:\n\t\t\t\t\tif a in d : ret[a]=d[a]\n\t\t\t\treturn ret\n\t\t\treturn loads(self.bdb.get(dumps(n,-1),txn=self.txn))[attr]", "def get_attrs(expr):\n if isinstance(expr, Call):\n return expr.attrs\n if isinstance(expr, TupleGetItem):\n return get_attrs(expr.tuple_value)\n return {}", "def get_attributes(start, *path):\n\n cursor = _get_cursor(start)\n\n (intermediateNode, _) = _traverse_path(cursor, path)\n if intermediateNode:\n # we encountered an array with variable (-1) indices.\n # this is only allowed when calling coda.fetch().\n raise ValueError(\"variable (-1) array indices are only allowed when calling coda.fetch()\")\n\n cursor_goto_attributes(cursor)\n\n result = _fetch_subtree(cursor)\n\n del cursor\n return result", "def getAttrs(self):\n\t\treturn self._attributes", "def get_attribute(self):\n data = self.data\n # Step 1 (skip chars)\n c = data.skip(skip1)\n assert c is None or len(c) == 1\n # Step 2\n if c in (b\">\", None):\n return None\n # Step 3\n attr_name = []\n attr_value = []\n # Step 4 attribute name\n while True:\n if c == b\"=\" and attr_name:\n break\n elif c in space_chars_bytes:\n # Step 6!\n c = data.skip()\n break\n elif c in (b\"/\", b\">\"):\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_name.append(c)\n # Step 5\n c = next(data)\n # Step 7\n if c != b\"=\":\n data.previous()\n return b\"\".join(attr_name), b\"\"\n # Step 8\n next(data)\n # Step 9\n c = data.skip()\n # Step 10\n if c in (b\"'\", b'\"'):\n # 10.1\n quote_char = c\n while True:\n # 10.2\n c = next(data)\n # 10.3\n if c == quote_char:\n next(data)\n return b\"\".join(attr_name), b\"\".join(attr_value)\n # 10.4\n else:\n attr_value.append(c)\n elif c == b\">\":\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_value.append(c)\n # Step 11\n while True:\n c = next(data)\n if c in spaces_angle_brackets:\n return b\"\".join(attr_name), b\"\".join(attr_value)\n elif c is None:\n return None\n else:\n attr_value.append(c)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
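A quick usage sketch of the item access shown above, assuming the class is Genshi's Attrs (genshi.core.Attrs); the attribute values are made up:

    from genshi.core import Attrs

    attrs = Attrs([('href', '#'), ('title', 'Foo')])
    print(attrs[1])             # a single index returns the plain (name, value) tuple
    print(attrs.get('title'))   # Attrs also supports dict-style lookup by name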
Return a slice of the attributes list.

>>> attrs = Attrs([('href', ''), ('title', 'Foo')])
def __getslice__(self, i, j):
    return Attrs(tuple.__getslice__(self, i, j))
[ "def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]", "def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm", "def getAttrs(self):\n\t\treturn self._attributes", "def itemsByGroup(self, *args) -> \"std::vector< adsk::core::Ptr< adsk::core::Attribute >,std::allocator< adsk::core::Ptr< adsk::core::Attribute > > >\" :\n return _core.Attributes_itemsByGroup(self, *args)", "def _get_attributes(measure, position=-1):\n if len(measure) and measure[position].tag == 'attributes':\n return measure[position]\n else:\n attributes = etree.Element('attributes')\n measure.insert(position, attributes)\n return attributes", "def _get_attrs_items(obj):\n attrs = getattr(obj.__class__, \"__attrs_attrs__\")\n attr_names = [a.name for a in attrs]\n return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]", "def listAttributes(self):\n return list(self._attributes.keys())", "def get_stripped_attributes(self):\n stripped = []\n for (tags, attrs) in self.getHtmlExclusions():\n if not tags:\n stripped.extend(attrs)\n return stripped", "def iterAttrs(self):\n return iter(self.requested_attributes.values())", "def get_attributes(html):\n\n for i, c in enumerate(html):\n if c == '>':\n if USE_BUFFER:\n html = buffer(html, 0, i)\n else:\n html = html[:i]\n break\n return dict((name.lower().strip(), value.strip('\\'\" ')) for (name, value) in attributes_regex.findall(html))", "def getPredAttrs (self, predicates): \n counter = 0\n PredAttrList = []\n for item in predicates: \n if (counter) % 2 == 0: \n PredAttrList.append(item[0]) \n counter += 1 \n return PredAttrList", "def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])", "def getAttributes(self):\n title = self.getAttributeNode('title')\n if title is not None:\n return NamedNodeMap({'title':title})\n return NamedNodeMap()", "def get_attrs(self):\n return self.ms.get_attrs()", "def zip_with_attr(iterable, *attrs):\n\n return zip(\n iterable, *(tuple(xattr(item, attr) for item in iterable) for attr in attrs)\n )", "def all_attributes (self):\n attrs = []\n for sup in self.super:\n sup_attrs = sup.all_attributes ()\n if len (sup_attrs) > 0:\n attrs.extend (sup_attrs)\n attrs.extend (self.attributes)\n return attrs", "def parse_attrs(buf):\n attrs = []\n while buf:\n t = ord(buf[0])\n l = ord(buf[1])\n if l < 2:\n break\n d, buf = buf[2:l], buf[l:]\n attrs.append((t, d))\n return attrs", "def Attributes(self) -> _n_5_t_17:", "def _get_attributes_list(self, xpath, xml=None):\n if xml is None:\n xml = self.pom_data\n attrs = xml.findall(xpath)\n attrs = [attr.text for attr in attrs]\n return [attr.strip() for attr in attrs if attr and attr.strip()]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
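And the slice behaviour from this record: slicing keeps the Attrs type instead of decaying to a plain tuple (on Python 3 the same path goes through __getitem__ with a slice object, since tuple.__getslice__ no longer exists):

    from genshi.core import Attrs

    attrs = Attrs([('href', '#'), ('title', 'Foo')])
    print(attrs[1:])   # Attrs([('title', 'Foo')]) -- still an Attrs, not a bare tuple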
Return a new instance that contains the attributes in `attrs` in addition to any already existing attributes. Any attributes in the new set that have a value of `None` are removed.
def __or__(self, attrs):
    remove = set([an for an, av in attrs if av is None])
    replace = dict([(an, av) for an, av in attrs
                    if an in self and av is not None])
    return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self
                  if sn not in remove] +
                 [(an, av) for an, av in attrs
                  if an not in self and an not in remove])
[ "def copy(self):\n\t\tnewAttr = AtomAttributes(None, None, None, None, None, None)\n\t\tnewAttr.__dict__ = self.__dict__.copy()\n\t\treturn newAttr", "def copy(self,**kwds):\n new_ds = copy.copy(self)\n def _find_set(kwd):\n val = kwds.get(kwd)\n if val is not None:\n setattr(new_ds,kwd,val) \n for attr in self.__attrs__: _find_set(attr) \n return(new_ds)", "def _remove_attributes(attrs, remove_list):\n new_attrs = {}\n for attr in attrs.keys():\n if attr not in remove_list:\n new_attrs[attr] = attrs[attr]\n return new_attrs", "def strip_attributes(self):\r\n original_attributes = set(self.inventory)\r\n keys = list(self.__dict__.keys())\r\n for att in keys:\r\n if att not in original_attributes:\r\n del(self.__dict__[att])", "def possibly_init_attrs(self, attrs):\n for key, value in attrs.items():\n if not self.__dict__.has_key(key):\n setattr(self, key, value)", "def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])", "def strip_attrs(self):\n for tag in self.root.findAll(True):\n tag.attrs = [(attr, val) for attr, val in tag.attrs\n if attr in self.settings['valid_attrs']]", "def clone_attributes():\n _clone_attributes(utils.get_sentiwordnet_groups(SENTIWORDNET_FILE))\n _clone_attributes(utils.get_e_lemma_groups(E_LEMMA_FILE))", "def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml", "def reset(self):\n for attribute in self._trained_attributes:\n setattr(self, attribute, None)\n return None", "def set_attributes_all_required(instance, attrs, res):\r\n for attr in attrs:\r\n attr_val = res.get(attr)\r\n # all attributes are required\r\n if not attr_val:\r\n print(attr)\r\n abort(400)\r\n setattr(instance, attr, attr_val)\r\n return instance", "def set_stripped_attributes(self, stripped):\n exclusions = [(tags, attrs) for (tags, attrs) in self.html_exclusions if tags]\n exclusions.append(((), tuple(stripped)))\n self.set_html_exclusions(exclusions)", "def setattrs(self, attrs):\n for k, v in attrs:\n self.setattr(k, v)", "def newFixedAtomSet(self, **attrlinks):\n return FixedAtomSet(self, **attrlinks)", "def clean(self):\n attr = self.getAttributes()\n for a in attr:\n value = getattr(self, a)\n if value is None:\n # remove from header file? 
(easy)\n if a in self.header:\n del self.header[a]\n else:\n # remove from disk...\n # solve path from attribute name\n path = None\n for f in os.listdir(self._getDirectory()):\n if os.path.splitext(f)[0] == a: # we have found the right file\n path = os.path.join(self._getDirectory(), f)\n break\n if path is not None and os.path.exists(path):\n hdr, dat = hylite.io.matchHeader( path )\n if hdr is not None and os.path.exists(hdr):\n os.remove(hdr)\n if dat is not None and os.path.exists(dat) and os.path.isdir(dat): # nested HyCollection\n shutil.rmtree(dat)\n if os.path.exists(dat) and os.path.isfile(dat): # other data type\n os.remove(dat)\n # remove attribute\n delattr(self, a)", "def copyAttributes(source, dest, skip_refs=True):\n for attr in source.attrs.keys():\n atval = source.attrs[attr]\n \"\"\"\n Don't copy references unless asked\n \"\"\"\n if isinstance(atval, h5py.Reference):\n if isinstance(atval, h5py.RegionReference) or skip_refs:\n continue\n elif isinstance(atval, h5py.RegionReference):\n \"\"\"\n Dereference old reference, get the appropriate data\n slice and create new reference.\n \"\"\"\n try:\n region = h5py.h5r.get_region(atval, source.id)\n\n start, end = region.get_select_bounds()\n ref_slice = []\n for i in range(len(start)):\n if start[i] == end[i]:\n ref_slice.append(start[i])\n else:\n ref_slice.append(slice(start[i], end[i]))\n except:\n warn('Could not create new region reference for {} in {}.'.format(attr, source.name))\n continue\n\n dest.attrs[attr] = dest.regionref[tuple(ref_slice)]\n continue\n else:\n dest.attrs[attr] = atval\n continue\n dest.attrs[attr] = atval\n if not skip_refs:\n try:\n copyRegionRefs(source, dest)\n except:\n print('Could not create new region reference for {} in {}.'.format(attr, source.name))\n\n return dest", "def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr", "def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)", "def sanitize(self):\n if self._has_private_attribute():\n self.attributes = {k:v for (k,v) in self.attributes.items() if not k.startswith('__')}\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
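A sketch of the | operator defined above (again assuming genshi.core.Attrs): existing names are replaced, new names are appended, and a None value removes an attribute:

    from genshi.core import Attrs

    attrs = Attrs([('href', '#'), ('title', 'Foo')])
    print(attrs | [('title', 'Bar')])               # 'title' is replaced
    print(attrs | [('title', None), ('id', 'x')])   # 'title' is dropped, 'id' is added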
Return a new instance with all attributes with a name in `names` removed.
def __sub__(self, names):
    if isinstance(names, str):
        names = (names,)
    return Attrs([(name, val) for name, val in self if name not in names])
[ "def remove_attr(self, name):\n del self.attributes_dict[name]", "def strip_attributes(self):\r\n original_attributes = set(self.inventory)\r\n keys = list(self.__dict__.keys())\r\n for att in keys:\r\n if att not in original_attributes:\r\n del(self.__dict__[att])", "def deleteAttr(attribute=\"string\", name=\"string\"):\n pass", "def _remove_attributes(attrs, remove_list):\n new_attrs = {}\n for attr in attrs.keys():\n if attr not in remove_list:\n new_attrs[attr] = attrs[attr]\n return new_attrs", "def removeMembers(members):", "def get_with_excluded_names(self, *names, **kwargs):\n order_by = kwargs.get('order_by')\n limit = kwargs.get('limit')\n\n cursor = db.cursor()\n cursor.execute(\n f\"SELECT * FROM {self.table} \"\n f\"WHERE name NOT IN ({', '.join('%s' for name in names)}) \"\n f\"{self._format_order_by(order_by)}\"\n f\"{self._format_limit(limit)}\",\n tuple(names),\n )\n results = [self.model(*row) for row in cursor]\n cursor.close()\n return results", "def unselect(self, *colnames):\n for colname in self.colnames:\n if colname not in colnames:\n yield colname, self[colname].copy()", "def del_component_instances(names):\n for name in names:\n del_component_instance(name)", "def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)", "def remove_members(self, members):\n members = [member.dn for member in pyadutils.generate_list(members)]\n return self.remove_from_attribute('member', members)", "def remove_attribute(self, name):\n try:\n del self._attributes[name]\n except KeyError:\n pass", "def without(self, *names):\n only_vars = {}\n\n for name in self.request_variables:\n if name not in names:\n only_vars[name] = self.request_variables.get(name)\n\n return only_vars", "def _replace_names(self, names):\n el_namen = self.get_root().xpath('./person/persName')\n for el_naam in el_namen:\n el_naam.getparent().remove(el_naam)\n for name in names:\n self._add_a_name(name)", "def __delattr__(self, name):\n # First check if is a valid DICOM name and if we have that data element\n tag = tag_for_name(name)\n if tag and tag in self:\n del self[tag]\n # If not a DICOM name (or we don't have it), check for regular instance name\n # can't do delete directly, that will call __delattr__ again!\n elif name in self.__dict__:\n del self.__dict__[name]\n # Not found, raise an error in same style as python does\n else:\n raise AttributeError, name", "def clearing_cases(self, *names):\n\n return self._updating(lambda builder: builder.clear_cases(*names))", "def remove_columns(self, column_names):\n if not hasattr(column_names, '__iter__'):\n raise TypeError('Column_names must be an iterable.')\n for name in column_names:\n if name not in self.column_names():\n raise KeyError('Cannot find column {}.'.format(name))\n return XStream(impl=self._impl.remove_columns(column_names))", "def remove_private_attrs(mapping: Mapping) -> Mapping:\n cls = type(mapping)\n public_keys = [key for key in mapping if not key.startswith('_')]\n dict_ = {key: mapping[key] for key in public_keys}\n return cls(dict_)", "def del_attribs(self, alist):\r\n if isinstance(alist, str):\r\n alist = (alist, )\r\n d = self.dict\r\n for a in alist:\r\n if a in d:\r\n del d[a]", "def trimUnnamed(df):\n df_new = df.copy()\n if UNNAMED in df_new.columns:\n del df_new[UNNAMED]\n return df_new" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
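Correspondingly, the - operator above removes attributes by name; a single string or a sequence of names both work:

    from genshi.core import Attrs

    attrs = Attrs([('href', '#'), ('title', 'Foo')])
    print(attrs - 'title')             # Attrs([('href', '#')])
    print(attrs - ('href', 'title'))   # Attrs()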
Create a Markup instance from a string and escape special characters it may contain (<, >, & and ").

>>> escape('"1 < 2"')
<Markup u'&#34;1 &lt; 2&#34;'>

If the `quotes` parameter is set to `False`, the " character is left as is. Escaping quotes is generally only required for strings that are to be used in attribute values.

>>> escape('"1 < 2"', quotes=False)
<Markup u'"1 &lt; 2"'>
def escape(cls, text, quotes=True):
    if not text:
        return cls()
    if type(text) is cls:
        return text
    if hasattr(text, '__html__'):
        return cls(text.__html__())

    text = text.replace('&', '&amp;') \
               .replace('<', '&lt;') \
               .replace('>', '&gt;')
    if quotes:
        text = text.replace('"', '&#34;')
    return cls(text)
[ "def escape_quotes(self, str): \n return str.replace(\"\\\"\", \"\\\\\\\"\")", "def escape(t):\n return (t\n .replace(\"&quot;\", '@quot;')\n .replace(\"&amp;\", \"@amp;\").replace(\"&lt;\", \"@lt;\").replace(\"&gt;\", \"@gt;\")\n\n .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n .replace(\"'\", \"&#39;\").replace('\"', \"&quot;\")\n .replace(\"\\\\\", \"&#92;\")\n\n .replace(\"@quot;\", '&quot;')\n .replace(\"@amp;\", \"&amp;\").replace(\"@lt;\", \"&lt;\").replace(\"@gt;\", \"&gt;\")\n\n )", "def html_escape(s):\n if s is None:\n return ''\n if hasattr(s, '__html__'):\n return s.__html__()\n if not isinstance(s, basestring):\n if hasattr(s, '__unicode__'):\n s = unicode(s)\n else:\n s = str(s)\n s = cgi.escape(s, True)\n if isinstance(s, unicode):\n s = s.encode('ascii', 'xmlcharrefreplace')\n return s", "def _escape(self, s):\r\n return s", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def escape_xml_string(s):\n return xml.sax.saxutils.escape(s)", "def escape_html(s):\n return cgi.escape(s, quote = True)", "def test_attr_escape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n <elem class=\"$myvar\"/>\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n <elem class=\"&#34;foo&#34;\"/>\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))", "def escape_string(self, s): # real signature unknown; restored from __doc__\n pass", "def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'&quot;'))\n return _write", "def _quote_escape(item):\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)", "def format_html(format_string, *args, **kwargs):\n args_safe = map(html.conditional_escape, args)\n kwargs_safe = dict([(k, html.conditional_escape(v)) for (k, v) in\n six.iteritems(kwargs)])\n return html.mark_safe(format_string.format(*args_safe, **kwargs_safe))", "def escape( *args ):\n cmd = ''\n for s in args:\n if cmd: cmd += ' '\n if not s:\n cmd += '\"\"'\n else:\n cmd += pipes.quote(s)\n return cmd", "def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&amp;\")\r\n data = data.replace(\"<\", \"&lt;\")\r\n data = data.replace(\">\", \"&gt;\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data", "def test_escape_tags(self):\n simple_replace = ('#', '$', '%', '_', '{', '}', '&')\n for character in simple_replace:\n escaped_char = u'\\\\{}'.format(character)\n self.assertEqual(tags.latex_safe(character), escaped_char)\n\n self.assertEqual(tags.latex_safe('\\\\'), '\\\\textbackslash{}')\n self.assertEqual(tags.latex_safe('~'), '\\\\textasciitidle{}')\n self.assertEqual(tags.latex_safe('^'), '\\\\^{}')", "def _QuoteString(s):\n single_quote_count = s.count('\\'')\n double_quote_count = s.count('\"')\n quote_delim = '\\'' if single_quote_count <= double_quote_count else '\"'\n # Apply escaping to the chosen quote character and the backslash.\n encoded = re.sub(r'([%s\\\\])' % quote_delim, r'\\\\\\1', s)\n return quote_delim + encoded + quote_delim", "def _quote(s):\n return b\"'%s'\" % stringutil.escapestr(pycompat.bytestr(s))", "def test_escape(fb, fb_secure):\n\n assert fb.escape('This has \"quotes\"') == 'This has \\\\\"quotes\\\\\"'\n assert fb.escape('This has a backslash \\\\') == 'This has a backslash \\\\\\\\'\n assert fb.escape('This has \\\\\"both\\\\\"') == 'This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"'", "def UndoSafeForHTML(escaped_string):\n 
raw_string = escaped_string.replace('&lt;', '<')\n raw_string = raw_string.replace('&gt;', '>')\n raw_string = raw_string.replace('&quot;', '\"')\n raw_string = raw_string.replace('&amp;', '&')\n return raw_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
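A usage sketch for the escape classmethod above, assuming it is genshi.core.Markup.escape; the input strings are made up:

    from genshi.core import Markup

    print(Markup.escape('<b>"AT&T"</b>'))
    # -> &lt;b&gt;&#34;AT&amp;T&#34;&lt;/b&gt;
    print(Markup.escape('"quoted"', quotes=False))
    # -> "quoted"   (double quotes are left alone when quotes=False)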
Create the `QName` instance.
def __new__(cls, qname):
    if type(qname) is cls:
        return qname

    qname = qname.lstrip('{')
    parts = qname.split('}', 1)
    if len(parts) > 1:
        self = str.__new__(cls, '{%s' % qname)
        self.namespace, self.localname = list(map(str, parts))
    else:
        self = str.__new__(cls, qname)
        self.namespace, self.localname = None, str(qname)
    return self
[ "def __new__(cls, qname):\r\n if type(qname) is cls:\r\n return qname\r\n\r\n qname = qname.lstrip('{')\r\n parts = qname.split('}', 1)\r\n if len(parts) > 1:\r\n self = unicode.__new__(cls, '{%s' % qname)\r\n self.namespace, self.localname = map(unicode, parts)\r\n else:\r\n self = unicode.__new__(cls, qname)\r\n self.namespace, self.localname = None, unicode(qname)\r\n return self", "def createElementNS(namespaceURI, qualifiedName, options=None):\n from domonic.html import tag, tag_init\n el = type(qualifiedName, (tag, Element), {'name': qualifiedName, '__init__': tag_init})\n el.namespaceURI = namespaceURI\n return el()", "def fromClarkName(self, p_str, QXmlNamePool): # real signature unknown; restored from __doc__\n return QXmlName", "def createDocumentType(self, qualifiedName, publicId, systemId):\n return DocumentType(qualifiedName, publicId, systemId)\n # d = DocumentType()\n # d.name = qualifiedName\n # d.publicId = publicId\n # d.systemId = systemId\n # return d\n # pass", "def qstrvec_t_create(*args) -> \"PyObject *\":\n return _ida_pro.qstrvec_t_create(*args)", "def create(cls) -> str:\n return f'create_{cls.__name__.lower()}'", "def create_namespace(self):\n ns = self.project.create_namespace()\n ns['task'] = self\n return ns", "def create_namespace(self):\n name = 'namespace-{random_string}'.format(random_string=random_str(5))\n\n namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=name))\n\n self.core_api.create_namespace(namespace)\n\n logger.info(\"Creating namespace: %s\", name)\n\n # save all namespaces created with this backend\n self.managed_namespaces.append(name)\n\n # wait for namespace to be ready\n Probe(timeout=30, pause=5, expected_retval=True,\n fnc=self._namespace_ready, namespace=name).run()\n\n return name", "def _makeelement(self, name):\n return self.__class__(name)", "def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None):\n pass", "def namespace_create(self, name, size=None, password=None, public=True):\n self.state.check('status', 'running', 'ok')\n if self._namespace_exists_update_delete(name):\n raise ValueError('Namespace {} already exists'.format(name))\n self.data['namespaces'].append({'name': name, 'size': size, 'password': password, 'public': public})\n self._zerodb_sal.deploy()", "def prefix(self, QXmlNamePool): # real signature unknown; restored from __doc__\n return \"\"", "def __createXMLElement (name, descr = None, attrs = {}, nsmap = {}):\n\n element = etree.Element(name, attrs, nsmap=nsmap)\n \n if descr != None:\n for match in regex.finditer(descr):\n descr = descr[:match.start()] + \"?\" + descr[match.end():]\n element.text= descr\n\n return (element)", "def _expand_qname(self, qname):\n if type(qname) is not rt.URIRef:\n raise TypeError(\"Cannot expand qname of type {}, must be URIRef\"\n .format(type(qname)))\n for ns in self.graph.namespaces():\n if ns[0] == qname.split(':')[0]:\n return rt.URIRef(\"%s%s\" % (ns[1], qname.split(':')[-1]))\n return qname", "def instance(origin, copy, identifier):\n newInstance = ObName()\n newInstance.origin = origin\n newInstance.copy = copy\n newInstance.identifier = identifier\n return newInstance", "def attributeNodeNS(self, QString, QString_1): # real signature unknown; restored from __doc__\r\n return QDomAttr", "def create_queue(self, queue: Queue, address: Address, durable: bool = True):", "def create_queue(self, 
queue: Queue, address: Address, durable: bool=True):\n pass", "def declare_exchange_name(self, exchange_name=None, exchange_space_id=''):\n # get xntype and translate @TODO should we just consolidate these to be the same?\n typemap = { 'XN_SERVICE':'service', 'XN_PROCESS':'process', 'XN_QUEUE':'queue' }\n if not exchange_name.xn_type in typemap:\n raise BadRequest(\"Unknown exchange name type: %s\" % exchange_name.xn_type)\n\n xntype = typemap[exchange_name.xn_type]\n\n exchange_space = self.read_exchange_space(exchange_space_id)\n exchange_name_id,rev = self.clients.resource_registry.create(exchange_name)\n\n aid = self.clients.resource_registry.create_association(exchange_space_id, PRED.hasExchangeName, exchange_name_id)\n\n # call container API\n xs = exchange.ExchangeSpace(self.container.ex_manager, exchange_space.name)\n self.container.ex_manager._create_xn(xntype, exchange_name.name, xs, use_ems=False)\n\n return exchange_name_id #QUestion - is this the correct canonical name?" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
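The constructor above splits Clark-notation names into namespace and localname; a short sketch, assuming genshi.core.QName:

    from genshi.core import QName

    q = QName('{http://www.w3.org/1999/xhtml}body')
    print(q.namespace, q.localname)   # http://www.w3.org/1999/xhtml body
    print(QName('body').namespace)    # None -- no namespace part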
Determine whether the given css property declaration is to be considered safe for inclusion in the output.
def is_safe_css(self, propname, value):
    if propname not in self.safe_css:
        return False
    if propname.startswith('margin') and '-' in value:
        # Negative margins can be used for phishing
        return False
    return True
[ "def is_property_allowed(prop):\n return self.allowed_styles is None or \\\n prop.lower() in self.allowed_styles", "def _isprop(self, attr: str) -> bool:\n\n return isinstance(attr, property)", "def _is_property(self,key):\n return bool(re.match(database.RE_KIMID, key))", "def _HasProperty(step, prop):\n try:\n step.getProperty(prop)\n return True\n # pylint: disable=W0702\n except:\n return False", "def explicit_no_additional_properties(self) -> bool:\n return bool(\n (self.properties or self.pattern_properties)\n and self.no_additional_properties\n and not self.additional_properties\n )", "def _has_prop_on_mol_block(block, prop_key):\n\n if prop_key not in expected_props:\n raise ValueError('%s is not a supported property type.', prop_key)\n has_prop = False\n for line in block:\n if line.strip() == ('> <%s>' % prop_key):\n has_prop = True\n return has_prop", "def _check_style(style):\n\t\treturn style in plt.style.available", "def check_minimal_propset():\n\n unset_properties = []\n for node, val in REQUIRED_PROPS.iteritems():\n for param in val:\n if CONFIG[node][param] is None:\n unset_properties.append(\"%s.%s\" % (node, param))\n\n if len(unset_properties) > 0:\n print(\"[ERROR] There is not enough information to proceed.\"\n \"Please define these properties: %s\" % unset_properties)\n return False\n else:\n return True", "def check_properties(self):\n matches = const.regex['properties'].findall(self.data)\n if matches:\n for _, x in enumerate(matches):\n self.properties[x[0].lower()] = x[1]", "def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]:\n src_line = source_line.strip()\n cstyle_start = '/*' in src_line\n cstyle_end = '*/' in src_line\n return cstyle_start, cstyle_end", "def decorated_with_property(node: astroid.FunctionDef) -> bool:\n if not node.decorators:\n return False\n for decorator in node.decorators.nodes:\n try:\n if _is_property_decorator(decorator):\n return True\n except astroid.InferenceError:\n pass\n return False", "def isProperty(self,uid):\n return( self.id2node[uid].group==\"Property\" )", "def __validateKeys(self, style_dict):\n\n invalidKeys = [key for key in style_dict.keys()\n if key not in STYLE_PROPERTIES]\n if len(invalidKeys) > 0:\n raise InvalidPropertyError(str(invalidKeys))", "def is_required_property(self) -> bool:\n return self.parent and self.property_name in self.parent.required_properties", "def _is_valid_element(self, element):\n # pylint: disable=no-self-use\n\n return element.get_tag_name() in AccessibleCSSImplementation.VALID_TAGS", "def sanitize_css(self, text):\r\n decls = []\r\n text = self._strip_css_comments(self._replace_unicode_escapes(text))\r\n for decl in text.split(';'):\r\n decl = decl.strip()\r\n if not decl:\r\n continue\r\n try:\r\n propname, value = decl.split(':', 1)\r\n except ValueError:\r\n continue\r\n if not self.is_safe_css(propname.strip().lower(), value.strip()):\r\n continue\r\n is_evil = False\r\n if self._EXPRESSION_SEARCH(value):\r\n is_evil = True\r\n for match in self._URL_FINDITER(value):\r\n if not self.is_safe_uri(match.group(1)):\r\n is_evil = True\r\n break\r\n if not is_evil:\r\n decls.append(decl.strip())\r\n return decls", "def is_code_prop(prop):\n return prop.startswith('co_')", "def check_css(css):\r\n # Using 'encoding' adds a CSSCharsetRule\r\n rule = css.stylesheet.rules[-1]\r\n assert rule.selector.as_css() == 'h1::before'\r\n content, background = rule.declarations\r\n\r\n assert content.name == 'content'\r\n string, = content.value\r\n assert string.value == 'I løvë 
Unicode'\r\n\r\n assert background.name == 'background-image'\r\n url_value, = background.value\r\n assert url_value.type == 'URI'\r\n url = urljoin(css.base_url, url_value.value)\r\n assert url.startswith('file:')\r\n assert url.endswith('weasyprint/tests/resources/pattern.png')", "def has(self, prop: P, quiet: bool = False) -> bool:\n prop_name = self._prop_name(prop, quiet=quiet)\n return prop_name in self._properties" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
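A small sketch of the whitelist check above. It assumes the class is Genshi's HTMLSanitizer (genshi.filters.html); safe_css is overridden explicitly so the expected results follow only from the code shown and not from the library's default whitelist:

    from genshi.filters.html import HTMLSanitizer

    sanitizer = HTMLSanitizer()
    sanitizer.safe_css = frozenset(['color', 'margin-left'])   # demo whitelist (assumption)

    print(sanitizer.is_safe_css('color', 'red'))             # True
    print(sanitizer.is_safe_css('position', 'fixed'))        # False -- not whitelisted
    print(sanitizer.is_safe_css('margin-left', '-9999px'))   # False -- negative margin rejected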
Determine whether the given URI is to be considered safe for inclusion in the output.

The default implementation checks whether the scheme of the URI is in the set of allowed URIs (`safe_schemes`).

>>> sanitizer = HTMLSanitizer()
def is_safe_uri(self, uri):
    if '#' in uri:
        uri = uri.split('#', 1)[0]  # Strip out the fragment identifier
    if ':' not in uri:
        return True  # This is a relative URI
    chars = [char for char in uri.split(':', 1)[0] if char.isalnum()]
    return ''.join(chars).lower() in self.safe_schemes
[ "def safe_uri(uri):\n path, query, frag = split_path(uri)\n safe = True\n for part in (path, query, frag):\n safe = safe and safe_chars_regex.search(part)\n return safe", "def _ManifestUrlHasSecureScheme(self):\n secure_schemes = (\n \"file\",\n \"https\",\n \"ssh\",\n \"persistent-https\",\n \"sso\",\n \"rpc\",\n )\n parse_results = urllib.parse.urlparse(self._manifest_url)\n return parse_results.scheme in secure_schemes", "def is_safe_url(url, host):\r\n if not url:\r\n return False\r\n\r\n parsed = urllib.parse.urlparse(url)\r\n\r\n return ((not parsed.netloc or parsed.netloc == host) and\r\n (not parsed.scheme or parsed.scheme in [\"http\", \"https\"]))", "def checkUri(uri):\n # Must replace spaces\n space = ' '\n newUri = uri.replace(' ', '%20')\n return newUri", "def MakeUrllibSafe(uriRef):\r\n # IDN support requires decoding any percent-encoded octets in the\r\n # host part (if it's a reg-name) of the authority component, and when\r\n # doing DNS lookups, applying IDNA encoding to that string first.\r\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\r\n # modules accept Unicode strings and apply IDNA encoding automatically\r\n # where necessary. However, urllib.urlopen() has not yet been updated\r\n # to do the same; it raises an exception if you give it a Unicode\r\n # string, and does no conversion on non-Unicode strings, meaning you\r\n # have to give it an IDNA string yourself. We will only support it on\r\n # Python 2.3 and up.\r\n #\r\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\r\n if isinstance(uriRef, unicode):\r\n try:\r\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\r\n except UnicodeError:\r\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\r\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\r\n if auth and auth.find('@') > -1:\r\n userinfo, hostport = auth.split('@')\r\n else:\r\n userinfo = None\r\n hostport = auth\r\n if hostport and hostport.find(':') > -1:\r\n host, port = hostport.split(':')\r\n else:\r\n host = hostport\r\n port = None\r\n if host and REG_NAME_HOST_PATTERN.match(host):\r\n # percent-encoded hostnames will always fail DNS lookups\r\n host = urllib.unquote(host) #PercentDecode(host)\r\n # IDNA-encode if possible.\r\n # We shouldn't do this for schemes that don't need DNS lookup,\r\n # but are there any (that you'd be calling urlopen for)?\r\n if sys.version_info[0:2] >= (2, 3):\r\n if isinstance(host, str):\r\n host = host.decode('utf-8')\r\n host = host.encode('idna')\r\n # reassemble the authority with the new hostname\r\n # (percent-decoded, and possibly IDNA-encoded)\r\n auth = ''\r\n if userinfo:\r\n auth += userinfo + '@'\r\n auth += host\r\n if port:\r\n auth += ':' + port\r\n\r\n # On Windows, ensure that '|', not ':', is used in a drivespec.\r\n if os.name == 'nt' and scheme == 'file':\r\n path = path.replace(':', '|', 1)\r\n\r\n # Note that we drop fragment, if any. See RFC 3986 sec. 3.5.\r\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\r\n\r\n return uri", "def MakeUrllibSafe(uriRef):\n # IDN support requires decoding any percent-encoded octets in the\n # host part (if it's a reg-name) of the authority component, and when\n # doing DNS lookups, applying IDNA encoding to that string first.\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\n # modules accept Unicode strings and apply IDNA encoding automatically\n # where necessary. 
However, urllib.urlopen() has not yet been updated\n # to do the same; it raises an exception if you give it a Unicode\n # string, and does no conversion on non-Unicode strings, meaning you\n # have to give it an IDNA string yourself. We will only support it on\n # Python 2.3 and up.\n #\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\n if isinstance(uriRef, unicode):\n try:\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\n except UnicodeError:\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\n if auth and auth.find('@') > -1:\n userinfo, hostport = auth.split('@')\n else:\n userinfo = None\n hostport = auth\n if hostport and hostport.find(':') > -1:\n host, port = hostport.split(':')\n else:\n host = hostport\n port = None\n if host and REG_NAME_HOST_PATTERN.match(host):\n # percent-encoded hostnames will always fail DNS lookups\n host = urllib.unquote(host) #PercentDecode(host)\n # IDNA-encode if possible.\n # We shouldn't do this for schemes that don't need DNS lookup,\n # but are there any (that you'd be calling urlopen for)?\n if sys.version_info[0:2] >= (2, 3):\n if isinstance(host, str):\n host = host.decode('utf-8')\n host = host.encode('idna')\n # reassemble the authority with the new hostname\n # (percent-decoded, and possibly IDNA-encoded)\n auth = ''\n if userinfo:\n auth += userinfo + '@'\n auth += host\n if port:\n auth += ':' + port\n\n # On Windows, ensure that '|', not ':', is used in a drivespec.\n if os.name == 'nt' and scheme == 'file':\n path = path.replace(':', '|', 1)\n\n # Note that we drop fragment, if any. See RFC 3986 sec. 3.5.\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\n\n return uri", "def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc", "def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc", "def is_legacy_signed_url_valid(user, url):\n parsed = urlsplit(url)\n params = MultiDict(parse_qs(parsed.query))\n try:\n signature = params.pop('token')\n except KeyError:\n return False\n\n url = urlunsplit((\n '',\n '',\n parsed.path,\n urlencode(list(params.lists()), doseq=True),\n parsed.fragment\n ))\n signer = Signer(user.signing_secret, salt='url-signing')\n return signer.verify_signature(url.encode(), signature)", "def Sanitize(Content): # for your protection\n \n ### strip any illegal HTML\n Content = re.sub(r\"(?is)<.+?>\", HTMLChecker, Content)\n\n ### validate any links\n Content = re.sub(r'(?is)(<A .*?HREF=\")(.+?)(\".*?>)', LinkChecker, Content)\n \n ### then escape any funky characters\n ### TODO: is this really neccesary for the database?\n \n # Content = re.escape(Content)\n\n return Content", "def is_uri(val: str = None) -> bool:\n is_valid = False\n validator = validators.Validator().allow_schemes(\n \"http\", \"https\", \"ftp\"\n ).require_presence_of(\n \"scheme\", \"host\"\n ).check_validity_of(\n \"scheme\", \"host\", \"path\"\n )\n uri = uri_reference(val)\n try:\n validator.validate(uri)\n is_valid = True\n except (InvalidComponentsError, MissingComponentError, UnpermittedComponentError) as ex:\n logger.debug(ex)\n return is_valid", "def 
default_validation(url):\n return bool(urlparse(url).scheme)", "def validate_uri(self, uri):\n logging.debug(\"Validating URL %s\" % uri)\n\n # Return None in error case. This is 'null' in final output.\n try:\n if not validators.url(uri):\n uri = None\n except validators.utils.ValidationFailure:\n logging.error(\"Invalid URL %s\" % uri)\n uri = None\n return uri", "def _sanitizeURL(self, couchURL):\n return couchURL", "def es_url_valida(url_):\n url_parseado = urlparse.urlparse(url_)\n return all([url_parseado.scheme, url_parseado.netloc])", "def has_valid_scheme(uri: ParseResult) -> bool:\n scheme = uri.scheme\n return scheme == 'ws' or scheme == 'warp'", "def test_check_uri(self):\n # OK\n self.assertTrue(SiteService.check_uri(\"localhost:12345\"))\n self.assertTrue(SiteService.check_uri(\"www.google.com:12345\"))\n self.assertTrue(SiteService.check_uri(\"127.0.0.1:12345\"))\n # Missing Port\n self.assertFalse(SiteService.check_uri(\"localhost:\"))\n # Missing seperator\n self.assertFalse(SiteService.check_uri(\"localhost\"))\n self.assertFalse(SiteService.check_uri(\"localhost12345\"))\n self.assertFalse(SiteService.check_uri(\"localhost@12345\"))\n # Starts with invalid char\n self.assertFalse(SiteService.check_uri(\"_localhost:12345\"))\n self.assertFalse(SiteService.check_uri(\".localhost:12345\"))\n # Non-numeric port\n self.assertFalse(SiteService.check_uri(\"localhost:bah\"))", "def URL_CANONICALIZER(self):\n return util.UrlCanonicalizer(\n domain=self.gr_source.DOMAIN,\n headers=util.REQUEST_HEADERS)", "def validateURL(url):", "def is_uri(uri):\n scheme, netloc, path, params, query, fragment = urlparse(uri)\n if scheme and netloc and path:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
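A usage sketch for the scheme check above, assuming genshi.filters.html.HTMLSanitizer, whose default safe_schemes include http but not javascript:

    from genshi.filters.html import HTMLSanitizer

    sanitizer = HTMLSanitizer()
    print(sanitizer.is_safe_uri('http://example.org/'))                 # True
    print(sanitizer.is_safe_uri('#anchor'))                             # True -- relative URI
    print(sanitizer.is_safe_uri('javascript:alert(document.cookie)'))   # False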
Remove potentially dangerous property declarations from CSS code.

In particular, properties using the CSS ``url()`` function with a scheme that is not considered safe are removed.
def sanitize_css(self, text):
    decls = []
    text = self._strip_css_comments(self._replace_unicode_escapes(text))
    for decl in text.split(';'):
        decl = decl.strip()
        if not decl:
            continue
        try:
            propname, value = decl.split(':', 1)
        except ValueError:
            continue
        if not self.is_safe_css(propname.strip().lower(), value.strip()):
            continue
        is_evil = False
        if self._EXPRESSION_SEARCH(value):
            is_evil = True
        for match in self._URL_FINDITER(value):
            if not self.is_safe_uri(match.group(1)):
                is_evil = True
                break
        if not is_evil:
            decls.append(decl.strip())
    return decls
[ "def minify_properties(src):\n min_re = re.compile(r\"(^|[^\\\\](?:\\\\\\\\)*)#.*$\", re.M)\n src = min_re.sub(r\"\\1\", src)\n src = re.sub(r\"\\n+\", r\"\\n\", src)\n return src", "def remove_urls(document):\n return re.sub(r'https?://(www\\.)?[-\\w@:%.\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-\\w@:%_\\+.~#?&/=;]*)', '', document)", "def ClearExternalCssStyle(matchobj):\n\treturn ''", "def repair_broken_urls(line):\n def _chop_spaces_in_url_match(m):\n \"\"\"Suppresses spaces in a matched URL.\"\"\"\n return m.group(1).replace(\" \", \"\")\n for ptn in re_list_url_repair_patterns:\n line = ptn.sub(_chop_spaces_in_url_match, line)\n return line", "def clean_code_prop(prop):\n if is_code_prop(prop) is False:\n return prop\n return prop.replace('co_', '')", "def clean(tweet):\n clean = re.sub(r'https?:\\/\\/\\w+(\\.\\w+)*(:\\w+)?(/[A-Za-z0-9-_\\.]*)* ?', '', tweet)\n clean = re.sub(r'#', '', clean)\n clean = re.sub(r'!', '', clean)\n clean = re.sub(r'\\.\\.\\.', '', clean)\n clean = re.sub(r',', '', clean)\n return clean", "def ClearExternalCss(matchobj):\n\treturn ''", "def preprocess_declarations(base_url, declarations):\r\n def validation_error(level, reason):\r\n getattr(LOGGER, level)(\r\n 'Ignored `%s: %s` at %i:%i, %s.',\r\n declaration.name, declaration.value.as_css(),\r\n declaration.line, declaration.column, reason)\r\n\r\n for declaration in declarations:\r\n name = declaration.name\r\n\r\n if name in PREFIXED and not name.startswith(PREFIX):\r\n validation_error(\r\n 'warning',\r\n 'the property is experimental or non-standard, use '\r\n + PREFIX + name)\r\n continue\r\n\r\n if name in NOT_PRINT_MEDIA:\r\n validation_error(\r\n 'info', 'the property does not apply for the print media')\r\n continue\r\n\r\n if name.startswith(PREFIX):\r\n unprefixed_name = name[len(PREFIX):]\r\n if unprefixed_name in UNPREFIXED:\r\n validation_error(\r\n 'warning',\r\n 'the property was unprefixed, use ' + unprefixed_name)\r\n continue\r\n if unprefixed_name in PREFIXED:\r\n name = unprefixed_name\r\n\r\n expander_ = EXPANDERS.get(name, validate_non_shorthand)\r\n tokens = remove_whitespace(declaration.value)\r\n try:\r\n # Use list() to consume generators now and catch any error.\r\n result = list(expander_(base_url, name, tokens))\r\n except InvalidValues as exc:\r\n validation_error(\r\n 'warning',\r\n exc.args[0] if exc.args and exc.args[0] else 'invalid value')\r\n continue\r\n\r\n priority = declaration.priority\r\n for long_name, value in result:\r\n yield long_name.replace('-', '_'), value, priority", "def remove_http(self):\n\n if re.search(\"https\",self.get_url()):\n self.set_url(self.get_url().replace('https://',''))\n elif re.search(\"http\",self.get_url()):\n self.set_url(self.get_url().replace('http://',''))\n elif re.search(\"ftp\",self.get_url()):\n self.set_url(self.get_url().replace('ftp://',''))\n else:\n pass", "def archive_css(self, css_string, base_url):\n # It would be nice to do this with a proper CSS parser, but all the\n # ones I've tried are missing modern CSS features, e.g. 
ignore URIs in\n # a @font-face rule.\n for match in re.finditer(r'url\\((?P<url>[^\\)]+)\\)', css_string):\n resource_url = match.group('url')\n resource_url = resource_url.strip('\"').strip(\"'\")\n\n # Something to do with SVG resources that are identified elsewhere\n # in the stylesheet\n resource_url = unquote_plus(resource_url)\n if resource_url.startswith('#'):\n continue\n\n # Any existing data: URIs are already self-contained and don't\n # need changing.\n if resource_url.startswith('data:'):\n continue\n\n # Determine the media type for the data: URI\n resource_url = urljoin(base_url, resource_url)\n data = self._get_base64_encode(resource_url)\n if data is not None:\n css_string = css_string.replace(match.group('url'), data)\n\n return css_string", "def sanitize_source(src):\n src_lines = src.splitlines(True)\n for i, line in enumerate(src_lines[:2]):\n if _CODING_PATTERN.match(line):\n src_lines[i] = re.sub('#.*$', '# (removed coding)', line)\n return ''.join(src_lines)", "def fix_uris(self, host_url):\n ret_val = copy.deepcopy(self)\n ret_val.uri = uris.ATTRIBUTE_DEF_URI_STR % (host_url, ret_val.uri)\n return ret_val", "def _remove_file_scheme(path):\n path = _file_utils.remove_prefix(path, \"file://\")\n path = _file_utils.remove_prefix(path, \"file:\")\n\n return path", "def clean_url(url) -> str:\n if 'http' not in url:\n return f'http://{url}'\n return url", "def strip_protocol(path: str) -> str:\n return re.sub(r\"https?:\\/\\/\", \"\", path)", "def remove_URLs(section_content):\n\n # remove URL with regexp\n section_content = re.sub(r'http\\S+', '', section_content)\n section_content = re.sub(r'www\\S+', '', section_content)\n section_content = re.sub(r'mailto\\S+', '', section_content)\n\n # remove multiple consecutive spaces\n section_content = re.sub(' +', ' ', section_content)\n\n return section_content", "def restore_boxmodelhack(css):\n return re.sub('___PSEUDOCLASSBMH___', '\"\\\\\"}\\\\\"\"', css)", "def __cleanUrl(self, url):\n cleanurl = QUrl(url)\n if cleanurl.password():\n # don't save the password in the history\n cleanurl.setPassword(\"\")\n if cleanurl.host():\n # convert host to lower case\n cleanurl.setHost(url.host().lower())\n \n return cleanurl", "def sanitize_property(self, property):\n\n if property and not property.startswith('.'):\n property = '.%s' % property\n return property" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate any localizable strings in the given stream. This function shouldn't be called directly. Instead, an instance of the `Translator` class should be registered as a filter with the `Template` or the `TemplateLoader`, or applied as a regular stream filter. If used as a template filter, it should be inserted in front of all the default filters.
def __call__(self, stream, ctxt=None, translate_text=True, translate_attrs=True): ignore_tags = self.ignore_tags include_attrs = self.include_attrs skip = 0 xml_lang = XML_NAMESPACE['lang'] if not self.extract_text: translate_text = False translate_attrs = False if type(self.translate) is FunctionType: gettext = self.translate if ctxt: ctxt['_i18n.gettext'] = gettext else: if IS_PYTHON2: gettext = self.translate.ugettext ngettext = self.translate.ungettext else: gettext = self.translate.gettext ngettext = self.translate.ngettext try: if IS_PYTHON2: dgettext = self.translate.dugettext dngettext = self.translate.dungettext else: dgettext = self.translate.dgettext dngettext = self.translate.dngettext except AttributeError: dgettext = lambda _, y: gettext(y) dngettext = lambda _, s, p, n: ngettext(s, p, n) if ctxt: ctxt['_i18n.gettext'] = gettext ctxt['_i18n.ngettext'] = ngettext ctxt['_i18n.dgettext'] = dgettext ctxt['_i18n.dngettext'] = dngettext if ctxt and ctxt.get('_i18n.domain'): # TODO: This can cause infinite recursion if dgettext is defined # via the AttributeError case above! gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg) for kind, data, pos in stream: # skip chunks that should not be localized if skip: if kind is START: skip += 1 elif kind is END: skip -= 1 yield kind, data, pos continue # handle different events that can be localized if kind is START: tag, attrs = data if tag in self.ignore_tags or \ isinstance(attrs.get(xml_lang), str): skip += 1 yield kind, data, pos continue new_attrs = [] changed = False for name, value in attrs: newval = value if isinstance(value, str): if translate_attrs and name in include_attrs: newval = gettext(value) else: newval = list( self(_ensure(value), ctxt, translate_text=False) ) if newval != value: value = newval changed = True new_attrs.append((name, value)) if changed: attrs = Attrs(new_attrs) yield kind, (tag, attrs), pos elif translate_text and kind is TEXT: text = data.strip() if text: data = data.replace(text, str(gettext(text))) yield kind, data, pos elif kind is SUB: directives, substream = data current_domain = None for idx, directive in enumerate(directives): # Organize directives to make everything work # FIXME: There's got to be a better way to do this! if isinstance(directive, DomainDirective): # Grab current domain and update context current_domain = directive.domain ctxt.push({'_i18n.domain': current_domain}) # Put domain directive as the first one in order to # update context before any other directives evaluation directives.insert(0, directives.pop(idx)) # If this is an i18n directive, no need to translate text # nodes here is_i18n_directive = any([ isinstance(d, ExtractableI18NDirective) for d in directives ]) substream = list(self(substream, ctxt, translate_text=not is_i18n_directive, translate_attrs=translate_attrs)) yield kind, (directives, substream), pos if current_domain: ctxt.pop() else: yield kind, data, pos
[ "def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate_text = False\r\n translate_attrs = False\r\n\r\n if type(self.translate) is FunctionType:\r\n gettext = self.translate\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n else:\r\n if IS_PYTHON2:\r\n gettext = self.translate.ugettext\r\n ngettext = self.translate.ungettext\r\n else:\r\n gettext = self.translate.gettext\r\n ngettext = self.translate.ngettext\r\n try:\r\n if IS_PYTHON2:\r\n dgettext = self.translate.dugettext\r\n dngettext = self.translate.dungettext\r\n else:\r\n dgettext = self.translate.dgettext\r\n dngettext = self.translate.dngettext\r\n except AttributeError:\r\n dgettext = lambda _, y: gettext(y)\r\n dngettext = lambda _, s, p, n: ngettext(s, p, n)\r\n if ctxt:\r\n ctxt['_i18n.gettext'] = gettext\r\n ctxt['_i18n.ngettext'] = ngettext\r\n ctxt['_i18n.dgettext'] = dgettext\r\n ctxt['_i18n.dngettext'] = dngettext\r\n\r\n if ctxt and ctxt.get('_i18n.domain'):\r\n # TODO: This can cause infinite recursion if dgettext is defined\r\n # via the AttributeError case above!\r\n gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)\r\n\r\n for kind, data, pos in stream:\r\n\r\n # skip chunks that should not be localized\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n elif kind is END:\r\n skip -= 1\r\n yield kind, data, pos\r\n continue\r\n\r\n # handle different events that can be localized\r\n if kind is START:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n yield kind, data, pos\r\n continue\r\n\r\n new_attrs = []\r\n changed = False\r\n\r\n for name, value in attrs:\r\n newval = value\r\n if isinstance(value, basestring):\r\n if translate_attrs and name in include_attrs:\r\n newval = gettext(value)\r\n else:\r\n newval = list(\r\n self(_ensure(value), ctxt, translate_text=False)\r\n )\r\n if newval != value:\r\n value = newval\r\n changed = True\r\n new_attrs.append((name, value))\r\n if changed:\r\n attrs = Attrs(new_attrs)\r\n\r\n yield kind, (tag, attrs), pos\r\n\r\n elif translate_text and kind is TEXT:\r\n text = data.strip()\r\n if text:\r\n data = data.replace(text, unicode(gettext(text)))\r\n yield kind, data, pos\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n current_domain = None\r\n for idx, directive in enumerate(directives):\r\n # Organize directives to make everything work\r\n # FIXME: There's got to be a better way to do this!\r\n if isinstance(directive, DomainDirective):\r\n # Grab current domain and update context\r\n current_domain = directive.domain\r\n ctxt.push({'_i18n.domain': current_domain})\r\n # Put domain directive as the first one in order to\r\n # update context before any other directives evaluation\r\n directives.insert(0, directives.pop(idx))\r\n\r\n # If this is an i18n directive, no need to translate text\r\n # nodes here\r\n is_i18n_directive = any([\r\n isinstance(d, ExtractableI18NDirective)\r\n for d in directives\r\n ])\r\n substream = list(self(substream, ctxt,\r\n translate_text=not is_i18n_directive,\r\n translate_attrs=translate_attrs))\r\n yield kind, (directives, substream), pos\r\n\r\n if current_domain:\r\n ctxt.pop()\r\n else:\r\n yield kind, data, pos", "def translator(string, list=bool, defaultOptions=\"string\", filter=bool, optionsScript=bool, extension=bool, 
fileCompression=\"string\", objectType=bool, readSupport=bool, loaded=bool, defaultFileRule=bool, writeSupport=bool):\n pass", "def translate(input_str, lang_source, lang_target):\n pass", "def _visit_translation(self, s):\r\n return s", "def translate(self, texts):\n inputs = self._preprocess(texts)\n\n outputs = self._translate_fn(**inputs)\n return self._postprocess(outputs)", "def _get_user_filtered_source_strings(resources, users, language, *args, **kwargs):\r\n return Translation.objects.user_translated_strings(resources, language, users)", "def _apply_translations(self, translations, text):\r\n regex = hash_regex()\r\n return regex.sub(\r\n lambda m: translations.get(m.group(0), m.group(0)), text\r\n )", "def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)", "def test_apply_translations(self):\r\n hash_normal = '1' * 32 + '_tr'\r\n hash_plural = '2' * 32 + '_pl_0'\r\n text = '%s %s' % (hash_normal, hash_plural)\r\n translations = {\r\n hash_normal: 'normal',\r\n hash_plural: 'plural',\r\n }\r\n compiler = PluralCompiler(resource=None)\r\n res = compiler._apply_translations(translations, text)\r\n self.assertEquals(res, 'normal plural')", "def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'", "def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,\r\n search_text=True, comment_stack=None):\r\n if not self.extract_text:\r\n search_text = False\r\n if comment_stack is None:\r\n comment_stack = []\r\n skip = 0\r\n\r\n xml_lang = XML_NAMESPACE['lang']\r\n\r\n for kind, data, pos in stream:\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n if kind is END:\r\n skip -= 1\r\n\r\n if kind is START and not skip:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n 
isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n continue\r\n\r\n for message in self._extract_attrs((kind, data, pos),\r\n gettext_functions,\r\n search_text=search_text):\r\n yield message\r\n\r\n elif not skip and search_text and kind is TEXT:\r\n text = data.strip()\r\n if text and [ch for ch in text if ch.isalpha()]:\r\n yield pos[1], None, text, comment_stack[-1:]\r\n\r\n elif kind is EXPR or kind is EXEC:\r\n for funcname, strings in extract_from_code(data,\r\n gettext_functions):\r\n # XXX: Do we need to grab i18n:comment from comment_stack ???\r\n yield pos[1], funcname, strings, []\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n in_comment = False\r\n\r\n for idx, directive in enumerate(directives):\r\n # Do a first loop to see if there's a comment directive\r\n # If there is update context and pop it from directives\r\n if isinstance(directive, CommentDirective):\r\n in_comment = True\r\n comment_stack.append(directive.comment)\r\n if len(directives) == 1:\r\n # in case we're in the presence of something like:\r\n # <p i18n:comment=\"foo\">Foo</p>\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n directives.pop(idx)\r\n elif not isinstance(directive, I18NDirective):\r\n # Remove all other non i18n directives from the process\r\n directives.pop(idx)\r\n\r\n if not directives and not in_comment:\r\n # Extract content if there's no directives because\r\n # strip was pop'ed and not because comment was pop'ed.\r\n # Extraction in this case has been taken care of.\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip):\r\n yield message\r\n\r\n for directive in directives:\r\n if isinstance(directive, ExtractableI18NDirective):\r\n for message in directive.extract(self,\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n else:\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n\r\n if in_comment:\r\n comment_stack.pop()", "def translate_file(self, fname):\n po = polib.pofile(fname)\n\n # FIXME - This might be a bit goofy\n po.metadata['Language'] = \",\".join(self.pipeline_spec)\n po.metadata['Plural-Forms'] = 'nplurals=2; plural= n != 1'\n po.metadata['Content-Type'] = 'text/plain; charset=UTF-8'\n count = 0\n for entry in po:\n if entry.msgid_plural:\n entry.msgstr_plural[0] = self.translate_string(\n entry.msgid)\n entry.msgstr_plural[1] = self.translate_string(\n entry.msgid_plural)\n else:\n entry.msgstr = self.translate_string(entry.msgid)\n\n if 'fuzzy' in entry.flags:\n entry.flags.remove('fuzzy') # clear the fuzzy flag\n count += 1\n\n po.save()\n return '{0}: Translated {1} messages.'.format(fname, count)", "def user_translated_strings(self, resources, language, users):\r\n source_language = get_source_language(resources)\r\n user_translated_se_ids = frozenset(self.filter(\r\n language=language, rule=5,\r\n user__id__in=users,\r\n resource__in=resources\r\n ).values_list('source_entity_id', flat=True))\r\n # Add resource_id as well to reduce the search space\r\n # by taking advantage of the indexes in resource and language\r\n return self.filter(\r\n resource__in=resources,\r\n source_entity__id__in=user_translated_se_ids,\r\n language=source_language, rule=5,\r\n )", "def 
hook_StreamString(state, level, format_ea, str_ea):\n DeepManticore(state).api_stream_string(level, format_ea, str_ea)", "def translated_source_strings(self, resources, language):\r\n source_language = get_source_language(resources)\r\n translated_se_ids = frozenset(self.filter(\r\n resource__in=resources, language=language, rule=5\r\n ).values_list('source_entity_id', flat=True))\r\n # Add resource_id as well to reduce the search space\r\n # by taking advantage of the indexes in resource and language\r\n return self.filter(\r\n resource__in=resources,\r\n source_entity__id__in=translated_se_ids,\r\n language=source_language, rule=5\r\n )", "def getStringsStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamStrings:\n ...", "def translate(self, string, regex=re.compile(r'%\\((\\w+)\\)s')):\r\n substream = None\r\n\r\n def yield_parts(string):\r\n for idx, part in enumerate(regex.split(string)):\r\n if idx % 2:\r\n yield self.values[part]\r\n elif part:\r\n yield (TEXT,\r\n part.replace('\\[', '[').replace('\\]', ']'),\r\n (None, -1, -1)\r\n )\r\n\r\n parts = parse_msg(string)\r\n parts_counter = {}\r\n for order, string in parts:\r\n parts_counter.setdefault(order, []).append(None)\r\n\r\n while parts:\r\n order, string = parts.pop(0)\r\n events = self.events[order].pop(0)\r\n parts_counter[order].pop()\r\n\r\n for event in events:\r\n if event[0] is SUB_START:\r\n substream = []\r\n elif event[0] is SUB_END:\r\n # Yield a substream which might have directives to be\r\n # applied to it (after translation events)\r\n yield SUB, (self.subdirectives[order], substream), event[2]\r\n substream = None\r\n elif event[0] is TEXT:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is START:\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n elif event[0] is END:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event\r\n elif event[0] is EXPR:\r\n # These are handled on the strings itself\r\n continue\r\n else:\r\n if string:\r\n for part in yield_parts(string):\r\n if substream is not None:\r\n substream.append(part)\r\n else:\r\n yield part\r\n # String handled, reset it\r\n string = None\r\n if substream is not None:\r\n substream.append(event)\r\n else:\r\n yield event", "async def translate(self, ctx, *, message: commands.clean_content):\n\n loop = self.bot.loop\n\n try:\n ret = await loop.run_in_executor(None, self.trans.translate, message)\n except Exception as e:\n return await ctx.send(f'An error occurred: {e.__class__.__name__}: {e}')\n\n embed = discord.Embed(title='Translated', colour=0x4284F3)\n src = googletrans.LANGUAGES.get(ret.src, '(auto-detected)').title()\n dest = googletrans.LANGUAGES.get(ret.dest, 'Unknown').title()\n embed.add_field(name=f'From {src}', value=ret.origin, inline=False)\n embed.add_field(name=f'To {dest}', value=ret.text, inline=False)\n await ctx.send(embed=embed)", "def translate_or_register( cls, source, language ):\n if source:\n source = unicode( source )\n translation = 
cls.translate( source, language )\n if not translation:\n session = Session()\n query = session.query( cls )\n translation = query.filter_by( source = source, \n language = language ).first()\n if not translation:\n if ( source, language ) not in cls._cache:\n registered_translation = Translation( source = source, \n language = language )\n cls._cache[( source, language )] = source\n session.flush( [registered_translation] )\n logger.debug( 'registed %s with id %s' % ( source, registered_translation.id ) )\n return source\n return translation\n return ''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to register the `Translator` filter and the related directives with the given template.
def setup(self, template): template.filters.insert(0, self) if hasattr(template, 'add_directives'): template.add_directives(Translator.NAMESPACE, self)
[ "def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)", "def templateFilter(func):\n jinja2_env.filters[func.__name__] = func", "def register_pre_resources_template(self, template):\n pass", "def test_can_use_imported_templatetags(self):\n template = (\"{% load cachet i18n %}{% cachet %}\"\n \"{% get_current_language as lang %}{{ lang }}\"\n \"{% endcachet %}\")\n translation.activate('en')\n rendered = self.render_template(template)\n self.assertEqual(rendered, 'en')", "def add_plim_renderer(config, extension, mako_settings_prefix='mako.', preprocessor='plim.preprocessor'):\r\n renderer_factory = MakoRendererFactory()\r\n config.add_renderer(extension, renderer_factory)\r\n\r\n def register():\r\n settings = copy.copy(config.registry.settings)\r\n settings['{prefix}preprocessor'.format(prefix=mako_settings_prefix)] = preprocessor\r\n\r\n opts = parse_options_from_settings(settings, mako_settings_prefix, config.maybe_dotted)\r\n lookup = PkgResourceTemplateLookup(**opts)\r\n\r\n renderer_factory.lookup = lookup\r\n\r\n # read about config.action() at\r\n # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/extconfig.html#using-config-action-in-a-directive\r\n config.action(('plim-renderer', extension), register)", "def register(f):\n if f.__name__ in template_functions:\n raise KeyError('Template function %s already registered' % (f.__name__,))\n template_functions[f.__name__] = f\n return f", "def register_type_pre_resources_template(cls, project, template):\n pass", "def configure_template_filters(app):\r\n app.jinja_env.filters['format_date'] = format_date\r\n app.jinja_env.filters['time_since'] = time_since\r\n app.jinja_env.filters['older_than_one_month'] = older_than_one_month\r\n app.jinja_env.filters['time_left_to'] = time_left_to\r\n app.jinja_env.filters['is_online'] = is_online\r\n app.jinja_env.filters['crop_title'] = crop_title\r\n app.jinja_env.filters['quote'] = quote", "def __init__(self):\n self.template_files = {\n 'CCDA': CCDA_TPL_FILENAME,\n 'FHIR-XML': FHIR_TPL_FILENAME,\n 'FHIR-JSON': FHIR_TPL_FILENAME\n }\n self.environment = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATES_DIR))\n\n # load filters defined in custom_filters\n for a in dir(custom_filters):\n if isinstance(custom_filters.__dict__.get(a), types.FunctionType):\n self.environment.filters[a] = custom_filters.__dict__.get(a)\n\n self.templates = {}\n for key in self.template_files:\n self.templates[key] = self.environment.get_template(self.template_files[key])", "def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)", "def addSyntheticTemplate(self, templates, class_id) -> retval:\n ...", "def register_filter(self, filter, function):\n if filter in self.filters:\n self.filters[filter].append(function)\n else:\n self.filters[filter] = [ function ]", "def add_template(self, template, label, units='counts'):\n\n if units == 'flux':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a flux template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template *= self.exposure_map\n\n if units == 'PS':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a PS template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same 
shape as the exposure map\"\n template /= self.exposure_map/np.mean(self.exposure_map)\n self.templates_dict.update({label: template})\n self.templates.append(template)", "def includeme(config): # pragma: no cover\n config.add_renderer('.pt', zpt.renderer_factory)\n config.add_renderer('.txt', text.renderer_factory)\n config.include('.localization')", "def register(mgr):\n mgr.set_lang_info(\"Less\",\n silvercity_lexer=LessLexer(),\n buf_class=LessBuffer,\n langintel_class=LessLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"SCSS\",\n silvercity_lexer=SCSSLexer(),\n buf_class=SCSSBuffer,\n langintel_class=SCSSLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"Sass\",\n silvercity_lexer=SassLexer(),\n buf_class=SassBuffer,\n langintel_class=SassLangIntel,\n is_cpln_lang=True)", "def register_template_extensions(\n cls,\n exts_fn: Callable[[CompileCtx], Dict[str, Any]]\n ) -> None:\n assert not cls._template_extensions_frozen\n CompileCtx._template_extensions_fns.append(exts_fn)", "def create_translator(self, *args):\r\n translator_class = self.translator_class\r\n return translator_class(*args)", "def render_template(text, **context_args):\r\n template = Template(\"{% load bootstrap3 %}\" + text)\r\n if not 'form' in context_args:\r\n context_args['form'] = ExpenseFilterForm()\r\n return template.render(Context(context_args))", "def sub_template(template,template_tag,substitution):\n\n template = template.replace(template_tag,substitution)\n return template", "def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract strings from Python bytecode. >>> from genshi.template.eval import Expression >>> expr = Expression('_("Hello")') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('_', 'Hello')] >>> expr = Expression('ngettext("You have %(num)s item", ' ... '"You have %(num)s items", num)') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('ngettext', ('You have %(num)s item', 'You have %(num)s items', None))]
def extract_from_code(code, gettext_functions): def _walk(node): if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \ and node.func.id in gettext_functions: strings = [] def _add(arg): if isinstance(arg, _ast.Str) and isinstance(arg.s, str): strings.append(arg.s) elif isinstance(arg, _ast.Str): strings.append(str(arg.s, 'utf-8')) elif arg: strings.append(None) [_add(arg) for arg in node.args] _add(node.starargs) _add(node.kwargs) if len(strings) == 1: strings = strings[0] else: strings = tuple(strings) yield node.func.id, strings elif node._fields: children = [] for field in node._fields: child = getattr(node, field, None) if isinstance(child, list): for elem in child: children.append(elem) elif isinstance(child, _ast.AST): children.append(child) for child in children: for funcname, strings in _walk(child): yield funcname, strings return _walk(code.ast)
[ "def extract_from_code(code, gettext_functions):\r\n def _walk(node):\r\n if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \\\r\n and node.func.id in gettext_functions:\r\n strings = []\r\n def _add(arg):\r\n if isinstance(arg, _ast.Str) and isinstance(arg.s, unicode):\r\n strings.append(arg.s)\r\n elif isinstance(arg, _ast.Str):\r\n strings.append(unicode(arg.s, 'utf-8'))\r\n elif arg:\r\n strings.append(None)\r\n [_add(arg) for arg in node.args]\r\n _add(node.starargs)\r\n _add(node.kwargs)\r\n if len(strings) == 1:\r\n strings = strings[0]\r\n else:\r\n strings = tuple(strings)\r\n yield node.func.id, strings\r\n elif node._fields:\r\n children = []\r\n for field in node._fields:\r\n child = getattr(node, field, None)\r\n if isinstance(child, list):\r\n for elem in child:\r\n children.append(elem)\r\n elif isinstance(child, _ast.AST):\r\n children.append(child)\r\n for child in children:\r\n for funcname, strings in _walk(child):\r\n yield funcname, strings\r\n return _walk(code.ast)", "def get_functions(text, startswith='def '):\n return get_definition(text, startswith)", "def extract_function_code(code_chunk):\n # Remove the function definition line\n #print(code_chunk)\n function_code = re.sub(r'^\\s*def .+\\n', '', code_chunk)\n # Split the function code by triple \"s into a function chunks variable\n function_chunks = re.split(r'\\\"\\\"\\\"', function_code)\n # If the first chunk contains anything besides newlines and whitespace, return the function_code unchanged\n if not re.match(r'^\\s*$', function_chunks[0]):\n print(function_chunks[0])\n return function_code\n #print(function_code)\n # Remove the first docstring\n function_code = re.sub(r'\"\"\".*?\"\"\"', '', function_code, 1, flags=re.DOTALL)\n #function_code = re.sub(r'\\):\\n*\\s*\"\"\".*?\"\"\"', '\\):\\n', function_code, flags=re.DOTALL)\n\n #print(function_code)\n return function_code", "def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,\r\n search_text=True, comment_stack=None):\r\n if not self.extract_text:\r\n search_text = False\r\n if comment_stack is None:\r\n comment_stack = []\r\n skip = 0\r\n\r\n xml_lang = XML_NAMESPACE['lang']\r\n\r\n for kind, data, pos in stream:\r\n if skip:\r\n if kind is START:\r\n skip += 1\r\n if kind is END:\r\n skip -= 1\r\n\r\n if kind is START and not skip:\r\n tag, attrs = data\r\n if tag in self.ignore_tags or \\\r\n isinstance(attrs.get(xml_lang), basestring):\r\n skip += 1\r\n continue\r\n\r\n for message in self._extract_attrs((kind, data, pos),\r\n gettext_functions,\r\n search_text=search_text):\r\n yield message\r\n\r\n elif not skip and search_text and kind is TEXT:\r\n text = data.strip()\r\n if text and [ch for ch in text if ch.isalpha()]:\r\n yield pos[1], None, text, comment_stack[-1:]\r\n\r\n elif kind is EXPR or kind is EXEC:\r\n for funcname, strings in extract_from_code(data,\r\n gettext_functions):\r\n # XXX: Do we need to grab i18n:comment from comment_stack ???\r\n yield pos[1], funcname, strings, []\r\n\r\n elif kind is SUB:\r\n directives, substream = data\r\n in_comment = False\r\n\r\n for idx, directive in enumerate(directives):\r\n # Do a first loop to see if there's a comment directive\r\n # If there is update context and pop it from directives\r\n if isinstance(directive, CommentDirective):\r\n in_comment = True\r\n comment_stack.append(directive.comment)\r\n if len(directives) == 1:\r\n # in case we're in the presence of something like:\r\n # <p i18n:comment=\"foo\">Foo</p>\r\n for message in self.extract(\r\n substream, 
gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n directives.pop(idx)\r\n elif not isinstance(directive, I18NDirective):\r\n # Remove all other non i18n directives from the process\r\n directives.pop(idx)\r\n\r\n if not directives and not in_comment:\r\n # Extract content if there's no directives because\r\n # strip was pop'ed and not because comment was pop'ed.\r\n # Extraction in this case has been taken care of.\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip):\r\n yield message\r\n\r\n for directive in directives:\r\n if isinstance(directive, ExtractableI18NDirective):\r\n for message in directive.extract(self,\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n else:\r\n for message in self.extract(\r\n substream, gettext_functions,\r\n search_text=search_text and not skip,\r\n comment_stack=comment_stack):\r\n yield message\r\n\r\n if in_comment:\r\n comment_stack.pop()", "def extract(self, *args) -> \"simpleline_t *\":\n return _ida_pro.strvec_t_extract(self, *args)", "def initFunctionsFromText(self, text):", "def _code_str_to_source_list(self, code):\n source_list = [\"{}\\n\".format(s) for s in code.split(\"\\n\")]\n source_list[-1] = source_list[-1].rstrip(\"\\n\")\n return source_list", "def strings_in_code_files(code_files: List[str]) -> List[LocalizedString]:\n\n strings: List[LocalizedString] = []\n\n for file_path in code_files:\n strings += strings_in_code_file(file_path)\n\n return strings", "def process_source_text(self, source_text):\n return source_text", "def extract(path):\n# --------------------------------------------------------------------\n body = []\n func = \"\"\n brief = \"\"\n seenfunction = False\n seenpercent = False\n\n for l in open(path):\n\n # Remove whitespace and newline\n line = l.strip().lstrip()\n\n if line.startswith('%'): seenpercent = True\n if line.startswith('function'):\n seenfunction = True\n continue\n if not line.startswith('%'):\n if (seenfunction and seenpercent) or not seenfunction:\n break\n else:\n continue\n\n # remove leading `%' character\n line = line[1:] #\n body.append('%s\\n' % line)\n\n # Extract header from body\n if len(body) > 0:\n head = body[0]\n body = body[1:]\n match = re.match(r\"^\\s*(\\w+)\\s*(\\S.*)\\n$\", head)\n func = match.group(1)\n brief = match.group(2)\n\n return (body, func, brief)", "def translateStrings(language_code):\n from translate_admin import translateAdminStrings\n from translate_frontend import translateFrontendStrings\n from translate_help import translateHelpStrings\n from translate_login import translateLoginStrings\n\n translateAdminStrings(language_code)\n translateFrontendStrings(language_code)\n translateHelpStrings(language_code)\n translateLoginStrings(language_code)", "def get_function_instructions(self, _ea):\n\t\tinstr = []\n\t\tif (_ea != BADADDR):\n\t\t\tinstr_matrix = self.get_function_disasm(_ea)\n\t\t\tfor line in instr_matrix:\n\t\t\t\tinstr.append(line[0])\n\t\treturn instr", "def extract(fileobj, keywords, comment_tags, options):\n encoding = options.get('encoding', 'utf-8')\n\n original_position = fileobj.tell()\n\n text = fileobj.read().decode(encoding)\n\n if django.VERSION[:2] >= (1, 9):\n tokens = Lexer(text).tokenize()\n else:\n tokens = Lexer(text, None).tokenize()\n\n vars = [token.token_type != TOKEN_TEXT for token in tokens]\n\n could_be_django = 
any(list(vars))\n\n if could_be_django:\n fileobj.seek(original_position)\n iterator = extract_django(fileobj, keywords, comment_tags, options)\n for lineno, funcname, message, comments in iterator:\n yield lineno, funcname, message, comments\n else:\n # Underscore template extraction\n comments = []\n\n fileobj.seek(original_position)\n\n for lineno, line in enumerate(fileobj, 1):\n funcname = None\n\n stream = TokenStream.from_tuple_iter(tokenize(line, underscore.rules))\n while not stream.eof:\n if stream.current.type == 'gettext_begin':\n stream.expect('gettext_begin')\n funcname = stream.expect('func_name').value\n args, kwargs = parse_arguments(stream, 'gettext_end')\n\n strings = []\n\n for arg, argtype in args:\n if argtype == 'func_string_arg':\n strings.append(force_text(arg))\n else:\n strings.append(None)\n\n for arg in kwargs:\n strings.append(None)\n\n if len(strings) == 1:\n strings = strings[0]\n else:\n strings = tuple(strings)\n\n yield lineno, funcname, strings, []\n\n stream.next()", "def extract(self, source: str, **kwargs):\n raise NotImplementedError", "def get_functions_dictionary():\n return {\n 'tfidf': extract_tf_idf,\n 'post_length': extract_post_length,\n 'topics': extract_topics,\n 'screamer': extract_screamer,\n 'words': extract_meaningful_words_existence,\n 'off_dis': extract_distance_from_offensive,\n 'not_off_dis': extract_distance_from_not_offensive,\n 'wmd_off': extract_wmd_offensive,\n 'wmd_not_off': extract_wmd_not_offensive,\n 'dis_avg_vec': extract_distance_from_avg_vector\n }", "def parse_code(code: List[str]) -> List[Tuple[str, int]]:\n return [parse_line(line) for line in code]", "def get_inverted_code(self, code_list):\n name_code, name_arg = code_list[-2]\n if name_code == LOAD_NAME and len(code_list) == 3:\n handler = self.get_name_handler()\n new_code = code_list[:-2]\n new_code.extend([\n (LOAD_CONST, handler),\n (LOAD_NAME, '_[expr]'),\n (LOAD_NAME, '_[obj]'),\n (LOAD_NAME, '_[name]'),\n (LOAD_CONST, name_arg),\n (LOAD_NAME, '_[new]'),\n (CALL_FUNCTION, 0x0005),\n (RETURN_VALUE, None),\n ])\n return new_code", "def _get_unreviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.unreviewed_source_strings(resources, language)", "def apply_pycode_macros(code, env_dict=None, **kwargs):\n d = env_dict\n if not d:\n d = kwargs\n elif kwargs:\n d = {**kwargs, **env_dict}\n\n def macro(match: Match[str]):\n match_string = match['k']\n if match_string:\n return macro_dict[match_string]\n match_string = match['k_b']\n if match_string:\n return str(eval(match_string, d))\n match_string = match['other']\n if match_string:\n if match_string == '$':\n return '$'\n raise ValueError('cannot match template code: $' + match_string)\n raise Exception('unrecognized match: ' + match.group())\n\n return MACRO_PATTERN.sub(macro, code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simplify a marked stream.
def _simplify(stream, with_attrs=False): def _generate(): for mark, (kind, data, pos) in stream: if kind is START: if with_attrs: data = (str(data[0]), dict((str(k), v) for k, v in data[1])) else: data = str(data[0]) elif kind is END: data = str(data) elif kind is ATTR: kind = ATTR data = dict((str(k), v) for k, v in data[1]) yield mark, kind, data return list(_generate())
[ "def _simplify(stream, with_attrs=False):\r\n def _generate():\r\n for mark, (kind, data, pos) in stream:\r\n if kind is START:\r\n if with_attrs:\r\n data = (unicode(data[0]), dict((unicode(k), v)\r\n for k, v in data[1]))\r\n else:\r\n data = unicode(data[0])\r\n elif kind is END:\r\n data = unicode(data)\r\n elif kind is ATTR:\r\n kind = ATTR\r\n data = dict((unicode(k), v) for k, v in data[1])\r\n yield mark, kind, data\r\n return list(_generate())", "def strip_changes(self, source, stream):\r\n raise NotImplementedError()", "def gentle_simplify(self):\n return self.apply_to_content(operator.methodcaller('gentle_simplify'))", "def _substitute_stream_ ( klass ) :\n index = klass.find('>>')\n while -1 != index :\n klass = klass.replace('>>','> >')\n index = klass.find( '>>' )\n index = klass.find(' ')\n while -1 != index :\n klass = klass.replace(' ',' ')\n index = klass.find( ' ' )\n return klass", "def filter_stream(fn, s):\n if s.empty:\n return s\n\n def compute_rest():\n return filter_stream(fn, s.rest)\n if fn(s.first):\n return Stream(s.first, compute_rest)\n return compute_rest()", "def format_stream(self, stream, format):\n filter_text = format.filter\n indent, wrap = format.indent, format.wrap\n if indent is not None:\n indent_lines = format.indent_lines\n lstrip_blanks = format.lstrip_blanks\n rstrip_blanks = format.rstrip_blanks\n lstrip_lines = format.lstrip_lines\n min_level, max_level = format.min_level, format.max_level\n indent_level = []\n new_line = False\n if wrap is not None:\n wrap_lines = format.wrap_lines\n indent_width, new_offset = format.indent_width, format.new_offset\n offset = 0\n formatted = 0\n text = last_char = ''\n for ev, item in stream:\n if ev == TEXT:\n text += item\n else:\n if ev in (START, END):\n tag = item.tag\n if not formatted:\n text = filter_text(text, last_char)\n if indent is None:\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n else:\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n if wrap is not None:\n text = wrap_lines(text, wrap, offset,\n indent_width(level*indent))\n if '\\n' in text:\n indent_level[-1] = True\n if new_line:\n if lstrip_blanks(text)[:1] != '\\n':\n text = '\\n' + lstrip_blanks(text)\n offset = 0\n new_line = False\n if tag == Comment or not self.is_inline(tag):\n if ev == START:\n if indent_level:\n if rstrip_blanks(text)[-1:] != '\\n':\n text = rstrip_blanks(text) + '\\n'\n text = indent_lines(text, level*indent)\n indent_level[-1] = True\n elif text:\n text = lstrip_lines(text)\n if tag != Comment \\\n and not self.is_formatted(tag):\n indent_level.append(False)\n else:\n if indent_level:\n if indent_level.pop():\n if rstrip_blanks(text)[-1:] == '\\n':\n text = rstrip_blanks(text)[:-1]\n text = indent_lines(text,\n level*indent)\n text = rstrip_blanks(text) + '\\n'\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n text += level*indent\n elif text:\n text = lstrip_lines(text)\n new_line = True\n elif text:\n if level > 0:\n text = indent_lines(text, level*indent)\n else:\n text = lstrip_lines(text)\n if tag == Comment or self.is_formatted(tag):\n if ev == START:\n formatted += 1\n elif formatted:\n formatted -= 1\n new_line = True\n yield TEXT, text\n if wrap is not None:\n offset = new_offset(text, offset)\n last_char = text[-1:]\n text = ''\n yield ev, item\n if text:\n if not formatted:\n 
text = filter_text(text, last_char)\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n if indent is None:\n if wrap is not None:\n text = wrap_lines(text, wrap, offset)\n else:\n level = len(indent_level)\n if max_level and level > max_level:\n level = max_level\n if min_level:\n level -= min_level\n if level < 0:\n level = 0\n if wrap is not None:\n text = wrap_lines(text, wrap, offset,\n indent_width(level*indent))\n if rstrip_blanks(text)[-1:] == '\\n':\n text = text[:-1]\n text = indent_lines(text, level*indent)\n yield TEXT, text", "def filter_stream(fn, s):\r\n def compute_rest():\r\n return filter_stream(fn, s.rest)\r\n if s is Stream.empty:\r\n return 'There is not matched value in this stream.'\r\n elif fn(s.first):\r\n return Stream(s.first, compute_rest) # compute the rest stream whenever .rest method is called.\r\n else:\r\n return compute_rest() # This will compute the rest stream immediately.\r", "def toggle_marked(self) -> None:\n self.show_marked = not self.show_marked\n self._refilter()", "def flatland_filter(stream, context):\n return Stream(FlatlandFilter()(stream, context))", "def simplify(self):\n #c = 0\n simp_sentences = []\n for s in self.sentences:\n\n #print \"Original: \" + s\n \n simp_sentences.append(self.transformation(s, ''))\n\n ## for demonstration purposes only. remove the prints later\n #print \"Simplified: \",\n #print simp_sentences[c]\n #c+=1\n\n #print \n return simp_sentences", "def encode_stream(cls, stream):\n stream.seek(0)\n return cls.encode_string(stream.read())", "def minify_html_from_file (stream: IOBase):\n\n with HTMLMinifier() as minifier:\n minifier.feed(stream.read())\n return minifier.get_result()", "def map_stream(fn, s):\r\n def compute_rest():\r\n return map_stream(fn, s.rest)\r\n if s is Stream.empty:\r\n return s\r\n return Stream(fn(s.first), compute_rest)", "def writable(stream):", "def _ensure(stream):\r\n stream = iter(stream)\r\n event = next(stream)\r\n\r\n # Check whether the iterable is a real markup event stream by examining the\r\n # first item it yields; if it's not we'll need to do some conversion\r\n if type(event) is not tuple or len(event) != 3:\r\n for event in chain([event], stream):\r\n if hasattr(event, 'totuple'):\r\n event = event.totuple()\r\n else:\r\n event = TEXT, str(event), (None, -1, -1)\r\n yield event\r\n return\r\n\r\n # This looks like a markup event stream, so we'll just pass it through\r\n # unchanged\r\n yield event\r\n for event in stream:\r\n yield event", "def transform_incoming(self, son, collection):\n if self.will_copy():\n return SON(son)\n return son", "def b4kencode_stream(stream, style='s', width=None):\n tw = SimpleTextWrap(width)\n data = stream.read(_BUFFER_4k_e)\n while data:\n yield ''.join(tw.write(b4kencode(data, style)))\n data = stream.read(_BUFFER_4k_e)", "def test_expand_fragments():\n template = \"\"\"<div xmlns:py=\"http://purl.org/kid/ns#\"\n py:replace=\"stream\" />\"\"\"\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\">\n <div py:for=\"i in range(3)\">\n <p>Hello World #$i</p>\n </div>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <div>\n <p>Hello World #0</p>\n </div><div>\n <p>Hello World #1</p>\n </div><div>\n <p>Hello World #2</p>\n </div>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\" py:for=\"i in 
range(3)\">\n <p>Hello World #$i</p>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <p>Hello World #0</p>\n </div><div>\n <p>Hello World #1</p>\n </div><div>\n <p>Hello World #2</p>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\">\n <div py:strip=\"True\">\n <p>Hello World</p>\n </div>\n </div>\"\"\")\n s = t.serialize(fragment=True)\n expected = \"\"\"<div>\n <p>Hello World</p>\n </div>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True)\n assert s2 == s\n t = Template(\"\"\"\\\n <div xmlns:py=\"http://purl.org/kid/ns#\" py:strip=\"True\">\n <p>Hello World</p>\n </div>\"\"\")\n s = t.serialize(fragment=True).strip()\n expected = \"\"\"<p>Hello World</p>\"\"\"\n assert s == expected\n stream = ElementStream(t.transform()).expand()\n t2 = Template(source=template, stream=stream)\n s2 = t2.serialize(fragment=True).strip()\n assert s2 == s", "def parse_stream_raw(self, stream, debug=False):\n tokens = tokenize.generate_tokens(stream.readline)\n return self.parse_tokens(tokens, debug)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert selection so that marked events become unmarked, and vice versa. Specifically, all marks are converted to null marks, and all null marks are converted to OUTSIDE marks. >>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf8') >>> print((html | Transformer('//em').invert().trace())) ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0))) ('OUTSIDE', ('TEXT', 'Some ', (None, 1, 6))) (None, ('START', (QName('em'), Attrs()), (None, 1, 11))) (None, ('TEXT', 'test', (None, 1, 15))) (None, ('END', QName('em'), (None, 1, 19))) ('OUTSIDE', ('TEXT', ' text', (None, 1, 24))) ('OUTSIDE', ('END', QName('body'), (None, 1, 29))) <body>Some <em>test</em> text</body>
def invert(self): return self.apply(InvertTransformation())
[ "def inverse(transformer, inverse='identity', inverse_dropped='nan'):\n if isinstance(transformer, TransformerExtensions):\n transformer.inverse = inverse\n return transformer\n\n return TransformerExtensions(\n transformer,\n inverse=inverse,\n inverse_dropped=inverse_dropped\n )", "def inverse(self):\n return Transform(self.m_inv, self.m)", "def invert(self):\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = self.ref(col).invert()\n return self.copy(op=TransformNode(self, exprs))", "def invert(self):\n d = det(self.a, self.b, self.d, self.e)\n return affine(self.e/d, -self.b/d,\n det(self.b, self.c, self.e, self.f)/d,\n -self.d/d, self.a/d,\n -det(self.a, self.c, self.d, self.f)/d)", "def inverse(self):\n return _almathswig.Transform_inverse(self)", "def invert_transform(trans):\n return Transform(trans['to'], trans['from'], linalg.inv(trans['trans']))", "def invert(self) -> 'BaseFlow':\n return InverseFlow(self)", "def invert(self) -> \"SbDPRotation &\":\n return _coin.SbDPRotation_invert(self)", "def invert(self):\n self.image = ImageOps.invert(self.image).convert(o.device_mode)\n self.display_if_interactive()", "def invert(self):\n inverse = self.copy()\n inverse.pixels = ~self.pixels\n return inverse", "def invert_in_place(self) -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_invert_in_place(self)", "def invert(self):\n new_tree = binary_tree()\n new_tree.val = self.val\n if self.left:\n new_tree.right = self.left.invert()\n if self.right:\n new_tree.left = self.right.invert()\n\n return new_tree", "def set_invert(self, flag):\n if self._invert != flag:\n self._invert = flag\n self.Modified()", "def invert_coverage(self):\n if self.inst is None: \n warnings.warn(\"experiment.invert_coverage(): called with experiment.inst == None.\")\n return\n \n invert = self.params[PARAM_INVERT]\n if invert is None:\n #Don't invert if no setting is saved.\n do_invert = False\n else:\n do_invert = invert.invert\n \n self._lock_qspace_displayed.acquire()\n if do_invert:\n #Ok, we invert, and account for the sphere that fits in the box\n self.qspace_displayed = 1.0*(self.qspace == 0) * (self.inst.qspace_radius < self.inst.qlim)\n else:\n #Or we don't\n self.qspace_displayed = self.qspace.copy()\n self._lock_qspace_displayed.release()\n \n #Continue processing\n self.slice_coverage()", "def invertQTransform(tr):\n try:\n det = tr.determinant()\n detr = 1.0 / det # let singular matrices raise ZeroDivisionError\n inv = tr.adjoint()\n inv *= detr\n return inv\n except ZeroDivisionError:\n return _pinv_fallback(tr)", "def setInverted(self, state):\n collapsed = self.isCollapsed()\n self._inverted = state\n if self.isCollapsible():\n self.setCollapsed(collapsed)", "def inverse(self):\n return self._inverse", "def inverse(self): \n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n self._inverse = AutomorphismField(self._vmodule, name=inv_name, \n latex_name=inv_latex_name)\n for dom, rst in self._restrictions.iteritems():\n self._inverse._restrictions[dom] = rst.inverse()\n return self._inverse", "def get_inverse(self):\n return MotionDiffeo(topology=self.topology_s,\n lvel=(-self.lvel),\n avel=(-self.avel),\n interval=self.interval)", "def invert_in_place(self) -> \"vnl_diag_matrixD &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixD_invert_in_place(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap selection in an element. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print((html | Transformer('.//em').wrap('strong'))) <html><head><title>Some Title</title></head><body>Some <strong><em>body</em></strong> text.</body></html>
def wrap(self, element): return self.apply(WrapTransformation(element))
[ "def wrap(text, open_tag, close_tag):\n return ''.join((open_tag, text, close_tag, ))", "def __enclose_in_html_tag(self, elem, tag):\n\n return tag + elem.strip() + self.__create_closing_html_tag(tag)", "def render(elt: Element) -> HTML:\n raise NotImplementedError", "def wrap(self, text: str) -> str:\n return \"\\n\".join(textwrap.wrap(\n text, self.width,\n initial_indent=self.prefix + self.initial_indent,\n subsequent_indent=self.prefix + self.subsequent_indent\n ))", "def __convert_first_level_tags(self, chunk, tag):\n\n html_tag = self.first_level_tags[tag]\n if html_tag == '<blockquote>':\n for index, line in enumerate(chunk):\n line = line + '<br>'\n chunk[index] = line\n\n chunk = list(map(lambda elem: elem[len(tag):], chunk))\n if html_tag in ('<ul>', '<ol>'):\n chunk = [\n self.__enclose_in_html_tag(elem, '<li>') for elem in chunk\n ]\n chunk[0] = html_tag + chunk[0]\n chunk[-1] = chunk[-1] + self.__create_closing_html_tag(html_tag)\n return chunk", "def __handle_start_emphasis_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [output_html, \"<em>\" if next_token.emphasis_length == 1 else \"<strong>\"]\n )", "def optwrap(text):\n\t\t\t#if not BODY_WIDTH:\n\t\t\tif 1:\n\t\t\t\treturn text\n\n\t\t\tassert wrap, \"Requires Python 2.3.\"\n\t\t\tresult = ''\n\t\t\tnewlines = 0\n\t\t\tfor para in text.split(\"\\n\"):\n\t\t\t\tif len(para) > 0:\n\t\t\t\t\tif para[0] != ' ' and para[0] != '-' and para[0] != '*':\n\t\t\t\t\t\tfor line in wrap(para, BODY_WIDTH):\n\t\t\t\t\t\t\tresult += line + \"\\n\"\n\t\t\t\t\t\tresult += \"\\n\"\n\t\t\t\t\t\tnewlines = 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not onlywhite(para):\n\t\t\t\t\t\t\tresult += para + \"\\n\"\n\t\t\t\t\t\t\tnewlines = 1\n\t\t\t\telse:\n\t\t\t\t\tif newlines < 2:\n\t\t\t\t\t\tresult += \"\\n\"\n\t\t\t\t\t\tnewlines += 1\n\t\t\treturn result", "def wrap(text, maxlen=76, wrapstr=\" \"):\n\n assert \"\\n\" not in text\n return wrapstr + wrapstr.join([text[0 + i:maxlen + i]\n for i in range(0, len(text), maxlen)])", "def Wrap( self, fn, wrapFn ):\n def Wrapped( *args ):\n return wrapFn( *fn( *args ) )\n return Wrapped", "def __apply_leading_text(cls, output_html, transform_state):\n\n output_html = (\n f\"{output_html}{ParserHelper.newline_character}{transform_state.add_leading_text}\"\n if output_html and output_html[-1] != ParserHelper.newline_character\n else f\"{output_html}{transform_state.add_leading_text}\"\n )\n transform_state.transform_stack.append(output_html)\n return \"\"", "def find_wrapper(element):\n raise NotImplementedError()", "def insert_before_element_by_text(soup: BeautifulSoup, text_element: str, insert_html: str) -> None:\n for target in soup.find_all(text=text_element):\n target: Tag\n target.string.insert_before(BeautifulSoup(insert_html, 'html.parser'))", "def _transform_tag(self, tag):\n\n def _(e):\n if isinstance(e, bs4.element.Comment): # do not modify comments\n return\n if e.name in ['script']: # do not modify contents of 'script' tag\n return\n if isinstance(e, bs4.element.NavigableString): # has no children\n e.replaceWith(self._transform_element_text(e))\n return\n for i in e.children:\n _(i)\n\n for el in self.soup.find(tag):\n _(el)", "def add_children(soup: BeautifulSoup, css_selector: str, child_html: str, wrap_tag: str,\n wrap_attrs: Dict[str, str]) -> None:\n for target in soup.select(css_selector):\n wrap_tag = soup.new_tag(wrap_tag)\n # child_tag.string = child_text\n for key, value in wrap_attrs.items():\n setattr(wrap_tag, key, value)\n 
target: Tag\n wrap_tag.append(BeautifulSoup(child_html, 'html.parser'))\n target.append(wrap_tag)", "def includechain(parser, tocken):\r\n class WrapperIncludeNode(Node):\r\n def __init__(self, objInclude):\r\n self.objInclude = objInclude\r\n\r\n def template(self, context):\r\n return self.objInclude.template.resolve(context)\r\n\r\n def settemplate(self, value):\r\n self.objInclude.template.filters = []\r\n self.objInclude.template.tocken = value\r\n self.objInclude.template.var = SafeText(value)\r\n\r\n def render(self, context):\r\n # trabajamos con objInclude.template que es el nombre\r\n xtemplate, modif1 = templateCTX.separatemplateModif(self.template(context))\r\n xtemplate, prefix1 = templateCTX.separatemplatePREFIJO(xtemplate)\r\n aplic, prefix2, modif2 = templateCTX.dameAPLICPREFIXMODIF(context)\r\n modif = modif1 if modif1 else modif2\r\n prefix = prefix1 if prefix1 else prefix2\r\n milist = templateCTX.damechainTemplate(aplic, prefix, xtemplate, modif)\r\n\r\n for mitemp in milist:\r\n try:\r\n self.settemplate(mitemp)\r\n return self.objInclude.render(context)\r\n except template.base.TemplateDoesNotExist:\r\n pass\r\n raise template.base.TemplateDoesNotExist(\"Template %s no encontrada en chain\" % self.template(context))\r\n\r\n # HACEMOS COMO SI LLAMARAMOS A BASE Y WRAPEAMOS\r\n return WrapperIncludeNode(template.loader_tags.do_include(parser, tocken))", "def chunk_type_wrap(chunk_type, chunk):\n return '<%s>%s</%s>' % (chunk_type, '<br>'.join(chunk), chunk_type)", "def __call__(self, elem, group_name=None):\n if self.transform_groups[group_name] is None:\n return elem\n else:\n return super().__call__(elem, group_name=group_name)", "def _build_html(items, wrapping):\r\n return jinja2.Markup('\\n'.join((wrapping % item for item in items)))", "def wrap_text(code_edit, key):\n if not key:\n return\n try:\n key_in, key_out = key_lookup[key]\n except KeyError:\n return\n\n textCursor = code_edit.textCursor()\n\n text = textCursor.selectedText()\n if text:\n text = key_in + text + key_out\n else:\n text = key\n textCursor.insertText(text)", "def ent2html(entity, roots_only=True, compact=False):\n return _ent2html(entity, roots_only=roots_only, compact=compact)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace selection with content. >>> html = HTML('<html><head><title>Some Title</title></head>' ... '<body>Some <em>body</em> text.</body></html>', ... encoding='utf8') >>> print((html | Transformer('.//title/text()').replace('New Title'))) <html><head><title>New Title</title></head><body>Some <em>body</em> text.</body></html>
def replace(self, content): return self.apply(ReplaceTransformation(content))
[ "def _replaceElementText(self, name, text, escapeAmpLtGt=False):\n patternString = r\"(?s)(<\" + name + r\"(?:\\s*|\\s+.*?)>)(.*?)(</\" + name + r\"\\s*>)\"\n if text is not None:\n if escapeAmpLtGt:\n text = escape(text)\n #replacementString = r\"\\g<1>\" + text + r\"\\g<3>\"\n replacementFunction = lambda match: match.group(1) + text + match.group(3)\n else:\n #replacementString = r\"\"\n replacementFunction = lambda match: r\"\"\n self._string = re.sub(patternString, replacementFunction, self._string)\n return self", "def replace_mustache_tag(self, html_source, tag, replacement_text, encode=False):\n if encode:\n return html_source.replace(tag, html.escape(replacement_text, quote=True))\n else:\n return html_source.replace(tag, replacement_text)", "def _transform_tag(self, tag):\n\n def _(e):\n if isinstance(e, bs4.element.Comment): # do not modify comments\n return\n if e.name in ['script']: # do not modify contents of 'script' tag\n return\n if isinstance(e, bs4.element.NavigableString): # has no children\n e.replaceWith(self._transform_element_text(e))\n return\n for i in e.children:\n _(i)\n\n for el in self.soup.find(tag):\n _(el)", "def replace_with_element(soup: BeautifulSoup, css_selector: str, replace_html: str) -> None:\n for target in soup.select(css_selector):\n target: Tag\n target.replace_with(BeautifulSoup(replace_html, 'html.parser'))", "def replace_one(self, query, doc):\n raise NotImplementedError()", "def smartreplace():\n \n st = app(u'MarsEdit').documents[1].selected_text()\n\n st = u\"“\" + st + u\"”\"\n app(u'MarsEdit').documents[1].selected_text.set(st)\n return", "def generic_visit(self, node):\n if (\n not self.replaced\n and hasattr(node, \"_location\")\n and node._location == self.search\n ):\n self.replaced = True\n return self.replacement_node\n else:\n return NodeTransformer.generic_visit(self, node)", "def substitute(self, pattern, repl, flags=0, summary=None):\n\n if not repl:\n edit_summary = \"Automated edit: Removed text\"\n else:\n edit_summary = \"Automated edit: Replaced text\"\n\n if summary is not None:\n edit_summary = summary\n\n content = self.read()\n content = re.sub(pattern, repl, content, flags=flags)\n self.edit(content, edit_summary)", "def replace(text,what,with_what,start=0,stop=None,\n\n SearchObject=TextSearch,join=join,joinlist=joinlist,tag=tag,\n string_replace=string.replace,type=type,\n StringType=types.StringType):\n if type(what) is not TextSearchType:\n so = SearchObject(what)\n else:\n so = what\n what = so.match\n if stop is None:\n if start == 0 and len(what) < 2:\n return string_replace(text,what,with_what)\n stop = len(text)\n t = ((text,sWordStart,so,+2),\n # Found something, replace and continue searching\n (with_what,Skip+AppendTagobj,len(what),-1,-1),\n # Rest of text\n (text,Move,ToEOF)\n )\n found,taglist,last = tag(text,t,start,stop)\n if not found:\n return text\n return join(taglist)", "def _set_text(self, path, text):\n\n element = self._get_one_xpath(path)\n element.text = text", "def setContent(self, content):\n # later will add content modified check here\n self.text.ChangeValue(content)", "def _insert_html_fetching_plain_text(self, cursor, html):\n cursor.beginEditBlock()\n cursor.removeSelectedText()\n\n start = cursor.position()\n self._insert_html(cursor, html)\n end = cursor.position()\n cursor.setPosition(start, QtGui.QTextCursor.KeepAnchor)\n text = cursor.selection().toPlainText()\n\n cursor.setPosition(end)\n cursor.endEditBlock()\n return text", "def 
sub_template(template,template_tag,substitution):\n\n template = template.replace(template_tag,substitution)\n return template", "def replace_links(text: str, replace, site: 'pywikibot.site.BaseSite') -> str:\n def to_link(source):\n \"\"\"Return the link from source when it's a Page otherwise itself.\"\"\"\n if isinstance(source, pywikibot.Page):\n return source._link\n if isinstance(source, str):\n return pywikibot.Link(source, site)\n return source\n\n def replace_callable(link, text, groups, rng):\n if replace_list[0] == link:\n return replace_list[1]\n return None\n\n def check_classes(replacement):\n \"\"\"Normalize the replacement into a list.\"\"\"\n if not isinstance(replacement, (pywikibot.Page, pywikibot.Link)):\n raise ValueError('The replacement must be None, False, '\n 'a sequence, a Link or a str but '\n 'is \"{}\"'.format(type(replacement)))\n\n def title_section(link) -> str:\n title = link.title\n if link.section:\n title += '#' + link.section\n return title\n\n if not isinstance(site, pywikibot.site.BaseSite):\n raise ValueError('The \"site\" argument must be a BaseSite not {}.'\n .format(type(site).__name__))\n\n if isinstance(replace, Sequence):\n if len(replace) != 2:\n raise ValueError('When used as a sequence, the \"replace\" '\n 'argument must contain exactly 2 items.')\n replace_list = [to_link(replace[0]), replace[1]]\n if not isinstance(replace_list[0], pywikibot.Link):\n raise ValueError(\n 'The original value must be either str, Link or Page '\n 'but is \"{}\"'.format(type(replace_list[0])))\n if replace_list[1] is not False and replace_list[1] is not None:\n if isinstance(replace_list[1], str):\n replace_list[1] = pywikibot.Page(site, replace_list[1])\n check_classes(replace_list[0])\n replace = replace_callable\n\n linktrail = site.linktrail()\n link_pattern = re.compile(\n r'\\[\\[(?P<title>.*?)(#(?P<section>.*?))?(\\|(?P<label>.*?))?\\]\\]'\n r'(?P<linktrail>{})'.format(linktrail))\n extended_label_pattern = re.compile(fr'(.*?\\]\\])({linktrail})')\n linktrail = re.compile(linktrail)\n curpos = 0\n # This loop will run until we have finished the current page\n while True:\n m = link_pattern.search(text, pos=curpos)\n if not m:\n break\n\n m_title = m['title'].strip()\n\n # Ignore links to sections of the same page\n if not m_title:\n curpos = m.end()\n continue\n\n # Ignore interwiki links\n if site.isInterwikiLink(m_title) and not m_title.startswith(':'):\n curpos = m.end()\n continue\n\n groups = m.groupdict()\n if groups['label'] and '[[' in groups['label']:\n # TODO: Work on the link within the label too\n # A link within a link, extend the label to the ]] after it\n extended_match = extended_label_pattern.search(text, pos=m.end())\n if not extended_match:\n # TODO: Unclosed link label, what happens there?\n curpos = m.end()\n continue\n groups['label'] += groups['linktrail'] + extended_match[1]\n groups['linktrail'] = extended_match[2]\n end = extended_match.end()\n else:\n end = m.end()\n\n start = m.start()\n # Since this point the m variable shouldn't be used as it may not\n # contain all contents\n del m\n\n try:\n link = pywikibot.Link.create_separated(\n groups['title'], site, section=groups['section'],\n label=groups['label'])\n except (SiteDefinitionError, InvalidTitleError):\n # unrecognized iw prefix or invalid title\n curpos = end\n continue\n\n # Check whether the link found should be replaced.\n # Either None, False or tuple(Link, bool)\n new_link = replace(link, text, groups.copy(), (start, end))\n if new_link is None:\n curpos = end\n 
continue\n\n # The link looks like this:\n # [[page_title|new_label]]new_linktrail\n page_title = groups['title']\n new_label = groups['label']\n\n if not new_label:\n # or like this: [[page_title]]new_linktrail\n new_label = page_title\n # remove preleading \":\" from the link text\n if new_label[0] == ':':\n new_label = new_label[1:]\n\n new_linktrail = groups['linktrail']\n if new_linktrail:\n new_label += new_linktrail\n\n if new_link is False:\n # unlink - we remove the section if there's any\n assert isinstance(new_label, str), 'link text must be str.'\n new_link = new_label\n\n if isinstance(new_link, str):\n text = text[:start] + new_link + text[end:]\n # Make sure that next time around we will not find this same hit.\n curpos = start + len(new_link)\n continue\n\n if isinstance(new_link, bytes):\n raise ValueError('The result must be str and not bytes.')\n\n # Verify that it's either Link, Page or str\n check_classes(new_link)\n # Use section and label if it's a Link and not otherwise\n if isinstance(new_link, pywikibot.Link):\n is_link = True\n else:\n new_link = new_link._link\n is_link = False\n\n new_title = new_link.canonical_title()\n # Make correct langlink if needed\n if new_link.site != site:\n new_title = ':' + new_link.site.code + ':' + new_title\n\n if is_link:\n # Use link's label\n new_label = new_link.anchor\n must_piped = new_label is not None\n new_section = new_link.section\n else:\n must_piped = True\n new_section = groups['section']\n\n if new_section:\n new_title += '#' + new_section\n\n if new_label is None:\n new_label = new_title\n\n # Parse the link text and check if it points to the same page\n parsed_new_label = pywikibot.Link(new_label, new_link.site)\n try:\n parsed_new_label.parse()\n except InvalidTitleError:\n pass\n else:\n parsed_link_title = title_section(parsed_new_label)\n new_link_title = title_section(new_link)\n # compare title, but only with parts if linktrail works\n if not linktrail.sub('',\n parsed_link_title[len(new_link_title):]):\n # TODO: This must also compare everything that was used as a\n # prefix (in case insensitive)\n must_piped = (\n not parsed_link_title.startswith(new_link_title)\n or parsed_new_label.namespace != new_link.namespace)\n\n if must_piped:\n new_text = f'[[{new_title}|{new_label}]]'\n else:\n new_text = (f'[[{new_label[:len(new_title)]}]]'\n f'{new_label[len(new_title):]}')\n\n text = text[:start] + new_text + text[end:]\n # Make sure that next time around we will not find this same hit.\n curpos = start + len(new_text)\n return text", "def replace_texts(docs, subdocs):\n for i, doc in enumerate(docs):\n doc.text = '\\n' + subdocs[i] #newline after opening tag", "def test__replace_wiki_text(self):\n new_markdown = mirrorwiki._replace_wiki_text(\n markdown=self.markdown,\n wiki_mapping=self.wiki_mapping,\n entity=self.entity,\n destination=self.destination,\n )\n assert new_markdown == self.expected_markdown", "def change_text(self):\r\n self.webView.update_by_content(self.editor.toPlainText())", "def changeTitle(self, item):\n if not self.model.setData(item.node.index(), item.text()):\n self.blockSignals(True)\n item.setText(item.node.title())\n self.blockSignals(False)", "def replace(self, replaceWord): #$NON-NLS-1$\r", "def replace(settings, pattern, pattern_type=None, with_value=None):\n\tfilter = settings.format(settings.content)\n\tfilter.replace(pattern, with_value, pattern_type)\n\tsettings.content = filter.content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add, replace or delete an attribute on selected elements. If `value` evaluates to `None` the attribute will be deleted from the element.
def attr(self, name, value): return self.apply(AttrTransformation(name, value))
[ "def setOptionalAttribute(self, name, value):\n if value is not None:\n self.setAttribute(name, value)", "def replace_attributes(soup: BeautifulSoup, attribute: str, value: str, new_value: str) -> None:\n for target in soup.find_all(attrs={attribute: value}):\n target: Tag\n target.attrs[attribute] = new_value", "def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v", "def replace_attr(self, attr, value, force = True):\r\n # One or the other\r\n if force or self.get(attr) is None:\r\n self[attr] = value", "def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)", "def chattr(self, name, value=None):\n attr = lookup_attr(name)\n if attr == -1:\n raise KeyError('invalid attribute')\n\n if value is not None:\n self.setattr(name, value)\n else:\n self.delattr(name)", "def touch(attribute, attr_value, is_wnva):\n # ====================================================================#\n # Detect Empty Attribute Values\n if attr_value is None or len(str(attr_value)) < 1:\n return None\n # ====================================================================#\n # STR? Search or Create Attribute by Code\n if isinstance(attribute, str):\n attribute = AttributesHelper.touch(attribute, is_wnva)\n if attribute is None:\n Framework.log().error(\"An Error Occurred while Loading Attribute\")\n return None\n # ====================================================================#\n # Search for Value in Attribute\n values = attribute.value_ids.filtered(lambda r: r.name.lower() == attr_value.lower())\n if len(values) > 0:\n return values[0]\n # ====================================================================#\n # Crate New Value for Attribute\n return ValuesHelper.create(attribute, attr_value)", "def xml_set_attrib_value(xmltree: XMLLike,\n schema_dict: fleur_schema.SchemaDict,\n xpath: XPathLike,\n base_xpath: str,\n name: str,\n value: Any,\n occurrences: int | Iterable[int] | None = None,\n create: bool = False) -> XMLLike:\n\n from masci_tools.util.xml.xml_setters_basic import xml_set_attrib_value_no_create\n from masci_tools.util.xml.converters import convert_to_xml\n from masci_tools.util.xml.common_functions import check_complex_xpath, split_off_tag\n from masci_tools.io.common_functions import is_sequence\n\n check_complex_xpath(xmltree, base_xpath, xpath)\n _, tag_name = split_off_tag(base_xpath)\n\n attribs = schema_dict['tag_info'][base_xpath]['attribs']\n if name not in attribs:\n raise ValueError(f\"The key '{name}' is not expected for this version of the input for the '{tag_name}' tag. 
\"\n f'Allowed attributes are: {sorted(attribs.original_case.values())}')\n name = attribs.original_case[name]\n\n converted_value, _ = convert_to_xml(value, schema_dict, name, text=False)\n n_nodes = len(converted_value) if is_sequence(converted_value) else 1\n\n if create:\n nodes = eval_xpath_create(xmltree,\n schema_dict,\n xpath,\n base_xpath,\n create_parents=True,\n occurrences=occurrences,\n number_nodes=n_nodes)\n else:\n nodes = eval_xpath_all(xmltree, xpath, etree._Element)\n\n if len(nodes) == 0:\n raise ValueError(f\"Could not set attribute '{name}' on path '{xpath!r}' \"\n 'because at least one subtag is missing. '\n 'Use create=True to create the subtags')\n\n return xml_set_attrib_value_no_create(xmltree, xpath, name, converted_value, occurrences=occurrences)", "def decr_attr(self, name, value = 1):\n try:\n self.attributes_dict[name] -= value\n except ValueError:\n print 'Expected a numerical value'", "def update_attr(field, attr, value):\n\n field.widget.attrs.update({\n attr: value\n })", "def set_attribute(self,att,val):\r\n self.attributes[att] = val", "def deleteAttribute (\n\n self,\n attribute = None,\n attributes = None,\n values = None\n ) :\n\n\n if utilities.isEmpty( attribute ) : return None\n\n if utilities.isEmpty( value ) : return None\n\n if attributes is None : attributes = self.attributeList\n\n if values is None : values = self.valueList\n\n\n attribute = utilities.string( attribute , format = \"identifier\" )\n\n index = utilities.index( attributes, attribute )\n\n if index < 0 : return False\n\n attributes.pop( index )\n\n values.pop( index )\n\n return True", "def _apply_value(self, value):\n\n setattr(self._obj, self._attr, value)", "def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]", "def change_attrib(elem, name, value):\r\n\r\n log = logging.getLogger()\r\n\r\n value_type = type(value)\r\n if value_type(elem.attrib[name]) == value:\r\n log.warning('{0} in {1} already equal to {2}'.format(name, str(elem), value))\r\n return False\r\n else:\r\n log.info('Changed {0} in {1} from {2} to {3}'.format(name, str(elem), elem.attrib[name], value))\r\n elem.attrib[name] = str(value)\r\n return True\r\n # end of change_attrib\r", "def set_attr_2(self, value):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.attr2\", self._object._eco_id, value)\r\n p2e._app.Exec(arg_str)", "def SetAttribute(self, name, value):\n aMap = self._AMap()\n if name in aMap:\n attrName, decode, vType = aMap[name]\n if vType is ListType:\n if value is None:\n value = []\n else:\n value = value.split()\n setattr(self, attrName, map(decode, value))\n elif vType is DictType:\n if value is None:\n value = []\n else:\n value = value.split()\n dValue = {}\n for iv in map(decode, value):\n dValue[iv] = dValue.get(iv, 0) + 1\n setattr(self, attrName, dValue)\n else:\n x = getattr(self, attrName, None)\n if type(x) in (ListType, DictType):\n print \"Problem setting %s in %s: single value will overwrite List or Dict\" % (repr(name), repr(self.__class__.__name__))\n # print self.GetDocument()\n if value is None:\n setattr(self, attrName, None)\n else:\n setattr(self, attrName, decode(value))\n elif hasattr(self.__class__, 'ID') and name == self.__class__.ID:\n self.SetID(value)\n else:\n if value is None:\n if name in self._attrs:\n del self._attrs[name]\n else:\n self._attrs[name] = value", "def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append(lambda elem: elem.attrib[name] != value)", "def set(self, 
attr_name, attr_value, overwrite=False):\n aname = 'lgt.' + attr_name\n if attr_name in self.lgtattrs:\n if not overwrite:\n log.warn('LGT attribute \"%s\" exists but overwrite is False.' % aname)\n return \n self.lgtattrs.union(set(attr_name))\n self._obj.attrs[aname] = attr_value", "def delete(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value, do nothing\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n _set(environment, attr, current_value.replace(value, \"\"))\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(\n environment,\n attr,\n CompleteExpression(delete_sublist(list(current_value), list(value))),\n )\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, delete_sublist(current_value, value))\n else:\n raise TypeError(\n \"current value is of unsupported type\"\n f\"'{type(current_value)}' for the 'append' action\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy selection into buffer. The buffer is replaced by each contiguous selection before being passed to the next transformation. If accumulate=True, further selections will be appended to the buffer rather than replacing it. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print((html | Transformer('head/title/text()').copy(buffer) ... .end().select('body').prepend(tag.h1(buffer)))) Some TitleSome TitleSome body text. This example illustrates that only a single contiguous selection will be buffered.
def copy(self, buffer, accumulate=False): return self.apply(CopyTransformation(buffer, accumulate))
[ "def copy(self):\n return TransformNode(self.input[0], self.exprs, self.keep_index)", "def _copy_iterator(self):\n self.__udiff, iterator_copy = tee(self.__udiff)\n return iterator_copy", "def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n obj.ViewObject.Visibility = False\n # try to add it at same tree location\n obj_parent = find_Parent(obj)\n if obj_parent:\n obj_parent.addObject(obj_new)\n\n #\n\n App.ActiveDocument.recompute()", "def copy(self):\n self.focus()\n self.dispatch('Copy')\n return self", "def copy(self, select, position):\n if select == []:\n self.status.update('No row selected to copy !')\n return\n select_iid = self.select_to_iid(select)\n self.clipboard = []\n for iid in select_iid:\n data_dict = self.Widget.set(iid) # get row data\n self.clipboard.append([data_dict[column] for column in self.header])", "def editCopy(self):\n splitter = self.activeWindow.rightTabs.currentWidget()\n if splitter == self.activeWindow.outputSplitter:\n for view in splitter.children():\n try:\n if view.hasSelectedText():\n view.copy()\n return\n except AttributeError:\n pass\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.copy()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()", "def copy(self):\n return UnionNode(self.input)", "def copy(self):\n self.lazycopy = True\n p = page(self.begin, self.end)\n p.lazycopy = True\n p.tree = self.tree\n p.lookup = self.lookup\n return p", "def select(self): \n if self.l_buffer:\n selectList = []\n # Need to dig down through the items\n for item in self.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n tmpFactory = cgmBuffer(item)\n selectList.extend(tmpFactory.l_buffer)\n \n for item in tmpFactory.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n subTmpFactory = cgmBuffer(item) \n selectList.extend(subTmpFactory.l_buffer)\n \n else:\n selectList.append(item)\n \n mc.select(selectList)\n return\n \n log.warning(\"'%s' has no data\"%(self.mNode)) \n return False", "def gen_memcpy(self, dst, src, count):\n # Destination pointer:\n yield instructions.Lea(rdi, dst)\n\n # Source pointer:\n yield instructions.Lea(rsi, src)\n\n yield instructions.MovImm(rcx, count) # Byte count\n yield instructions.Rep()\n yield RegisterUseDef(uses=(rcx,))\n yield instructions.Movsb()\n yield RegisterUseDef(uses=(rdi, rsi))\n\n # for x in\n # Memcopy action!\n # yield mov(rdi, arg)\n # yield mov(rsi, arg_loc)\n # yield mov(rcx, arg_loc.size)\n # yield rep()\n # yield movsb()\n # raise NotImplementedError()", "def concat_from_buffer(self, clear=True, ignore_index=False):\n\t\t_buffer = self._buffer\n\t\tif _buffer:\n\t\t\tif self.empty:\n\t\t\t\tself.append_bar(_buffer[0])\n\t\t\t\t_new_frame = pd.concat([self, pd.DataFrame(\n\t\t\t\t\t\t\t\t\t\tdata = _buffer[1:], \n\t\t\t\t\t\t\t\t\t\tindex = range(1,len(_buffer)))],\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\n\t\t\telse:\n\t\t\t\tdf = pd.DataFrame(_buffer)\n\t\t\t\tnew_frame = pd.concat([self, pd.DataFrame(_buffer)],\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\n\t\t\tself.__init__(data = new_frame)\n\t\t\tif clear:\n\t\t\t\tself._buffer = []\n\t\telse:\n\t\t\tpass", "def concat_from_buffer(self, clear=True, ignore_index=False):\r\n\t\t_buffer = 
self._buffer\r\n\t\tif _buffer:\r\n\t\t\tif self.empty:\r\n\t\t\t\tself.append_bar(_buffer[0])\r\n\t\t\t\t_new_frame = pd.concat([self, pd.DataFrame(\r\n\t\t\t\t\t\t\t\t\t\tdata = _buffer[1:], \r\n\t\t\t\t\t\t\t\t\t\tindex = range(1,len(_buffer)))],\r\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\r\n\t\t\telse:\r\n\t\t\t\tdf = pd.DataFrame(_buffer)\r\n\t\t\t\tnew_frame = pd.concat([self, pd.DataFrame(_buffer)],\r\n\t\t\t\t\t\t\t\t\t\tignore_index = ignore_index)\r\n\t\t\tself.__init__(data = new_frame)\r\n\t\t\tif clear:\r\n\t\t\t\tself._buffer = []\r\n\t\telse:\r\n\t\t\tpass", "def maintained_selection():\n\n previous_selection = cmds.ls(selection=True)\n try:\n yield\n finally:\n if previous_selection:\n cmds.select(previous_selection,\n replace=True,\n noExpand=True)\n else:\n cmds.select(deselect=True,\n noExpand=True)", "def NETRCopy(self):\n for buf, nodes in self._picked_nodes.items():\n buf.copy(nodes)\n self._copied_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.redraw_if_highlight_outdated()", "def replace(self):\n if not self.buffer.get_has_selection():\n self.next()\n else:\n if self.matches != None:\n self.buffer.begin_user_action()\n \n start_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][0])\n end_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][1])\n \n self.buffer.delete(start_iter, end_iter)\n \n self.buffer.insert(start_iter, self.entry_replace.get_text())\n self.matches_num -= 1\n \n self.__get_environment()\n #begin = self.buffer.get_start_iter()\n #end = self.buffer.get_end_iter()\n \n #begin, end = self.view.get_line_iters()\n #self.__get_matches(self.entry.get_text(), begin, end)\n self.next()\n #if self.matches != None:\n if self.matches_num != 0:\n if self.index > (self.matches_num - 1):\n self.index = 0\n \n try:\n start_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][0])\n end_iter = \\\n self.buffer.get_iter_at_mark(self.matches[self.index][1])\n \n self.buffer.select_range(start_iter, end_iter)\n except IndexError:\n cursor_iter = self.buffer.get_iter_at_mark(self.cursor_mark)\n self.buffer.place_cursor(cursor_iter)\n else:\n self.view.scroll_to_iter(start_iter, 0, True)\n self.buffer.end_user_action()\n else:\n cursor_iter = self.buffer.get_iter_at_mark(self.cursor_mark)\n self.buffer.place_cursor(cursor_iter)", "def apply_buffer(self):\n print(\"Processing Sentence buffers...\")\n self.df['buffered_story_sentence_index'] = self.df.progress_apply(self.__series_wrapper_apply_buffer, axis=1)", "def NETRCopy(self):\n for buf, nodes in self._picked_nodes.items():\n buf.copy(nodes)\n self._copied_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.refresh_outdated_highlight()", "def copy(self):\n # seq length will be provided when copying, no need to pass\n return CyclerParams(sequence=self.sequence, mutation_probability=self.mutation_probability)", "def copy(self, bfrom):\n _ldns.ldns_buffer_copy(self, bfrom)\n #parameters: ldns_buffer *, ldns_buffer *,\n #retvals: ", "def copy(self):\n\n # Get the bounds using the top left and bottom right selected cells\n indexes = self.selectionModel().selection().indexes()\n rows = [ix.row() for ix in indexes]\n cols = [ix.column() for ix in indexes]\n\n df = self.pgdf.dataframe.iloc[min(rows): max(rows) + 1, min(cols): max(cols) + 1]\n\n # Special case for single-cell copy since df.to_clipboard appends extra newline\n if df.shape == (1, 1):\n clipboard = QtWidgets.QApplication.instance().clipboard()\n value 
= str(df.iloc[0, 0])\n clipboard.setText(value)\n else:\n # If I try to use Pyperclip without starting new thread large selections give access denied error\n threading.Thread(target=lambda df: df.to_clipboard(index=False, header=False), args=(df,)).start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy selection into buffer and remove the selection from the stream. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print((html | Transformer('.//em/text()').cut(buffer) ... .end().select('.//em').after(tag.h1(buffer)))) Some TitleSome body text. Specifying accumulate=True appends all selected intervals onto the buffer. Combining this with the .buffer() operation allows us to operate on all copied events rather than per-segment. See the documentation on buffer() for more information.
def cut(self, buffer, accumulate=False): return self.apply(CutTransformation(buffer, accumulate))
[ "def cut(self):\n self.focus()\n self.dispatch('Cut')\n return self", "def editCut(self):\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.cut()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()\n self.nodeDelete()", "def cut(self):\n # get the current signal selection interval\n self._edition_action(self.editionSignalProcessor.cut, CutAction)\n self.graph()", "def _clear_temporary_buffer(self):\n # Select and remove all text below the input buffer.\n cursor = self._get_prompt_cursor()\n prompt = self._continuation_prompt.lstrip()\n if(self._temp_buffer_filled):\n self._temp_buffer_filled = False\n while cursor.movePosition(QtGui.QTextCursor.NextBlock):\n temp_cursor = QtGui.QTextCursor(cursor)\n temp_cursor.select(QtGui.QTextCursor.BlockUnderCursor)\n text = temp_cursor.selection().toPlainText().lstrip()\n if not text.startswith(prompt):\n break\n else:\n # We've reached the end of the input buffer and no text follows.\n return\n cursor.movePosition(QtGui.QTextCursor.Left) # Grab the newline.\n cursor.movePosition(QtGui.QTextCursor.End,\n QtGui.QTextCursor.KeepAnchor)\n cursor.removeSelectedText()\n\n # After doing this, we have no choice but to clear the undo/redo\n # history. Otherwise, the text is not \"temporary\" at all, because it\n # can be recalled with undo/redo. Unfortunately, Qt does not expose\n # fine-grained control to the undo/redo system.\n if self._control.isUndoRedoEnabled():\n self._control.setUndoRedoEnabled(False)\n self._control.setUndoRedoEnabled(True)", "def empty(doc, selection, selectmode=None):\n beg = selection[0][0]\n return Selection(Interval(beg, beg))", "def NETRCut(self):\n for buf, nodes in self._picked_nodes.items():\n buf.cut(nodes)\n self._cut_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.refresh_outdated_highlight()", "def NETRCut(self):\n for buf, nodes in self._picked_nodes.items():\n buf.cut(nodes)\n self._cut_nodes[buf].update(nodes)\n self._picked_nodes = defaultdict(set)\n self.cur_buf.redraw_if_highlight_outdated()", "def cut(self, loc):\n \n val = self.body.pop()\n while val != loc:\n val = self.body.pop()", "def clear(self):\r\n self.prepare()\r\n self.buffer[:] = [self.dtext]\r\n self.firstwrite = 1", "def cut_copy_paste_del_sel_event(self, event):\n\n if event.GetId() == wx.ID_CUT or wx.ID_COPY or wx.ID_PASTE or wx.ID_DELETE or wx.ID_SELECTALL:\n self.cut_copy_paste_del_sel_action(event)\n else:\n event.Skip()", "def texCutContext(*args, **kwargs):\n\n pass", "def __cutAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)\n self.chatEdit.clear()", "def cut(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n clip.setText(content.text())\n self.tableWidget.setItem(row, col, QTableWidgetItem(str()))\n self.isChanged = True\n else:\n pass", "def select(self): \n if self.l_buffer:\n selectList = []\n # Need to dig down through the items\n for item in self.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n tmpFactory = cgmBuffer(item)\n selectList.extend(tmpFactory.l_buffer)\n \n for item in tmpFactory.l_buffer:\n if search.returnTagInfo(item,'cgmType') == 'objectBuffer':\n subTmpFactory = cgmBuffer(item) \n selectList.extend(subTmpFactory.l_buffer)\n \n else:\n 
selectList.append(item)\n \n mc.select(selectList)\n return\n \n log.warning(\"'%s' has no data\"%(self.mNode)) \n return False", "def cut_to_clipboard(self, widget, data=None):\n\t\t#print \"Copying text\"\n\t\tbuff = self._get_buffer()\n\t\tbuff.cut_clipboard(self.clipboard, True)", "def cut(self, matches):\n\t\t# TODO move these lines out of here\n\t\tread = matches[0].read\n\n\t\tif __debug__:\n\t\t\told_length = len(read.sequence)\n\t\tassert matches\n\t\tif self.trim:\n\t\t\t# The last match contains a copy of the read it was matched to.\n\t\t\t# No iteration is necessary.\n\t\t\tread = matches[-1].adapter.trimmed(matches[-1])\n\n\t\t\t# if an adapter was found, then the read should now be shorter\n\t\t\tassert len(read.sequence) < old_length\n\n\t\t\tif self.mask_adapter:\n\t\t\t\t# add N from last modification\n\t\t\t\tmasked_sequence = matches[-1].adapter.trimmed(matches[-1]).sequence\n\t\t\t\tfor match in sorted(matches, reverse=True):\n\t\t\t\t\tns = 'N' * (len(match.read.sequence) -\n\t\t\t\t\t\t\t\tlen(match.adapter.trimmed(match).sequence))\n\t\t\t\t\t# add N depending on match position\n\t\t\t\t\tif match.front:\n\t\t\t\t\t\tmasked_sequence = ns + masked_sequence\n\t\t\t\t\telse:\n\t\t\t\t\t\tmasked_sequence += ns\n\t\t\t\t# set masked sequence as sequence with original quality\n\t\t\t\tread.sequence = masked_sequence\n\t\t\t\tread.qualities = matches[0].read.qualities\n\t\t\t\tread.trimmed = True\n\n\t\t\t\tassert len(read.sequence) == old_length\n\n\t\tself.reads_matched += 1 # TODO move to filter class\n\n\t\tif self.rest_writer:\n\t\t\tself.rest_writer.write(matches[-1])\n\n\t\treturn read", "def to_cut(self):\n from lhotse.cut import MonoCut, MultiCut\n\n cls = MonoCut if self.num_channels == 1 else MultiCut\n return cls(\n id=self.id,\n start=0.0,\n duration=self.duration,\n channel=self.channel_ids[0] if self.num_channels == 1 else self.channel_ids,\n recording=self,\n )", "def cut(self, nodes):\n with self.history.command_context(\"cut\"):\n clipboard = self.copy(nodes)\n\n # Delete nodes\n self.delete_nodes(nodes)\n\n return clipboard", "def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n obj.ViewObject.Visibility = False\n # try to add it at same tree location\n obj_parent = find_Parent(obj)\n if obj_parent:\n obj_parent.addObject(obj_new)\n\n #\n\n App.ActiveDocument.recompute()", "def editCopy(self):\n splitter = self.activeWindow.rightTabs.currentWidget()\n if splitter == self.activeWindow.outputSplitter:\n for view in splitter.children():\n try:\n if view.hasSelectedText():\n view.copy()\n return\n except AttributeError:\n pass\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.copy()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a function to the ``data`` element of events of ``kind`` in the selection. >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print((html | Transformer('head/title').map(str.upper, TEXT))) SOME TITLESome body text.
def map(self, function, kind): return self.apply(MapTransformation(function, kind))
[ "def transform(self, data):\n\t\t\n\t\tfor t in self.transformer_list:\n\t\t\tdata = t.transform(data)\n\t\t\t\n\t\treturn data", "def pipe(data, *funcs):\n for func in funcs:\n data = func(data)\n return data", "def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.kind,\n ' '.join(\n t.spelling for t in node.get_tokens())\n ),\n file=sys.stderr\n )", "def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset", "def transform(func):\n WalkoffTag.transform.tag(func)\n return func", "def map(f,data):\n for item in data:\n yield f(item)", "def _apply_chapter(self,\n chapter: 'Chapter',\n data: Union['Dataset', 'Book']) -> 'Chapter':\n for technique in chapter.techniques:\n data = technique.apply(data = data)\n setattr(chapter, 'data', data)\n return chapter", "def __process_data(self, data):\n return self.mlda.transform(data)", "def transform(self, dataset, params={}):\n raise NotImplementedError()", "def apply_to_collection(\n data: Any,\n dtype: Union[type, tuple],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, tuple]] = None,\n **kwargs: Any,\n) -> Any:\n elem_type = type(data)\n\n # Breaking condition\n if isinstance(data, dtype) and (\n wrong_dtype is None or not isinstance(data, wrong_dtype)\n ):\n return function(data, *args, **kwargs)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n return elem_type(\n {\n k: apply_to_collection(v, dtype, function, *args, **kwargs)\n for k, v in data.items()\n }\n )\n\n if isinstance(data, tuple) and hasattr(data, \"_fields\"): # named tuple\n return elem_type(\n *(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data)\n )\n\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type(\n [apply_to_collection(d, dtype, function, *args, **kwargs) for d in data]\n )\n\n # data is neither of dtype, nor a collection\n return data", "def translate(data, taxfiledirect, mode='species'):\n t = makeTrans(mode, taxfiledirect)\n for i in range(len(data['category'])):\n data['category'][i] = changeCategory(t, data['hierarchy'][i])\n return data", "def process(self, instance: _Traversable):\n try:\n return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)\n except AttributeError as e:\n raise RuntimeError(\n 'This visitor does not support {}'.format(type(instance))\n ) from e", "def pipeline_each(data, fns):\n\tfrom functools import reduce\n\treturn reduce(lambda a, x: list(map(x, a)), fns, data)", "def execute_sequence(data, rule):\n for func in rule:\n for index, value in enumerate(data):\n func_name = \"_\".join([\"fun\", func])\n # run function by name and redefine value of data element\n data[index] = getattr(functions, func_name)(value)\n return data", "def map(self, col_name: str, func):\n self._validate_col_name(col_name)\n self.data_table[col_name] = [func(x) for x in self.data_table[col_name]]", "def apply_transform_to_type(self, typedef):\n for iconv in self.transform:\n if not iconv.original_datatype:\n iconv.set_original_datatype(typedef)\n typedef = iconv.transformed_datatype\n return typedef", "def transform(self, func, func_description=None):\n\n if not callable(func):\n raise TypeError('Given function {} is not a callable'.format(func))\n\n xfm_ds = self.__class__()\n for samplet, data in 
self._data.items():\n try:\n xfm_data = func(data)\n except:\n print('Unable to transform features for {}. '\n 'Quitting.'.format(samplet))\n raise\n\n xfm_ds.add_samplet(samplet, xfm_data,\n target=self._targets[samplet])\n\n xfm_ds.description = \"{}\\n{}\".format(func_description, self._description)\n\n return xfm_ds", "def apply(self, fn, dtype):\n if not callable(fn):\n raise TypeError('Input must be a function.')\n if not type(dtype) is type:\n raise TypeError('Dtype must be a type')\n\n return XStream(impl=self._impl.apply(fn, dtype))", "def map(self, func, pds):\n \n raise NotImplemented", "def _map_write_functions(self, data: pd.DataFrame) -> accepted_methods:\n function_map = {\n \"parquet\": data.to_parquet,\n \"csv\": data.to_csv,\n \"xls\": data.to_excel,\n \"xlsx\": data.to_excel,\n \"dat\": data.to_csv,\n \"data\": data.to_csv\n }\n return function_map.get(self.path.file_type)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the given XML source and return a markup stream. Unlike with `XMLParser`, the returned stream is reusable, meaning it can be serialized or rendered multiple times.
def XML(text): return Stream(list(XMLParser(StringIO(text))))
[ "def parse(self, src=None, xml=None, checkunicode=False, catchexception=True):\n if src is None:\n if checkunicode:\n assert isinstance(xml, unicode), '[XmlParser.parse] XML must be unicode: \"%s\"' % xml\n\n if not catchexception:\n return etree.fromstring(xml)\n else:\n try:\n doc = etree.fromstring(xml)\n return doc\n except etree.XMLSyntaxError:\n try:\n xml = '' + xml + ''\n doc = etree.fromstring(xml)\n return doc\n except etree.XMLSyntaxError:\n msg = 'Error while parsing docstring for xml'\n print 'source:', src, 'xml:', xml.encode('utf-8')\n return etree.fromstring(msg)\n\n if src.startswith('/_root'):\n src = src[len('/_root'):]\n else:\n src = self.e.path2fspath(src)\n return etree.parse(src, self.parser)", "def xmlstream(self, xml):\n\n # Parse the tree\n context = etree.iterparse(xml, events=(\"start\", \"end\"))\n\n # turn it into an iterator\n context = iter(context)\n\n # get the root element\n _, root = next(context)\n\n return context, root", "def parse(self):\r\n def _generate():\r\n if self.encoding:\r\n reader = codecs.getreader(self.encoding)\r\n source = reader(self.source)\r\n else:\r\n source = self.source\r\n try:\r\n bufsize = 4 * 1024 # 4K\r\n done = False\r\n while 1:\r\n while not done and len(self._queue) == 0:\r\n data = source.read(bufsize)\r\n if not data: # end of data\r\n self.close()\r\n done = True\r\n else:\r\n if not isinstance(data, str):\r\n raise UnicodeError(\"source returned bytes, but no encoding specified\")\r\n self.feed(data)\r\n for kind, data, pos in self._queue:\r\n yield kind, data, pos\r\n self._queue = []\r\n if done:\r\n open_tags = self._open_tags\r\n open_tags.reverse()\r\n for tag in open_tags:\r\n yield END, QName(tag), pos\r\n break\r\n except html.HTMLParseError as e:\r\n msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)\r\n raise ParseError(msg, self.filename, e.lineno, e.offset)\r\n return Stream(_generate()).filter(_coalesce)", "def parse(self):\r\n def _generate():\r\n if self.encoding:\r\n reader = codecs.getreader(self.encoding)\r\n source = reader(self.source)\r\n else:\r\n source = self.source\r\n try:\r\n bufsize = 4 * 1024 # 4K\r\n done = False\r\n while 1:\r\n while not done and len(self._queue) == 0:\r\n data = source.read(bufsize)\r\n if not data: # end of data\r\n self.close()\r\n done = True\r\n else:\r\n if not isinstance(data, unicode):\r\n raise UnicodeError(\"source returned bytes, but no encoding specified\")\r\n self.feed(data)\r\n for kind, data, pos in self._queue:\r\n yield kind, data, pos\r\n self._queue = []\r\n if done:\r\n open_tags = self._open_tags\r\n open_tags.reverse()\r\n for tag in open_tags:\r\n yield END, QName(tag), pos\r\n break\r\n except html.HTMLParseError, e:\r\n msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)\r\n raise ParseError(msg, self.filename, e.lineno, e.offset)\r\n return Stream(_generate()).filter(_coalesce)", "def parse(self, source: Any) -> Any:\n if isinstance(source, etree.ElementTree):\n source = source.getroot()\n\n if isinstance(source, etree.Element):\n ctx = iterwalk(source, {})\n elif self.parser.config.process_xinclude:\n root = etree.parse(source).getroot() # nosec\n base_url = get_base_url(self.parser.config.base_url, source)\n loader = functools.partial(xinclude_loader, base_url=base_url)\n\n xinclude.include(root, loader=loader)\n ctx = iterwalk(root, {})\n else:\n ctx = etree.iterparse(source, EVENTS) # nosec\n\n return self.process_context(ctx)", "def parse(cls, xml_string, **parser_kwargs):\n\n xml_string = 
OOXMLtoLatexParser.change_xml_double_open_tag_to_left_arrow(xml_string)\n xml_string = OOXMLtoLatexParser._remove_self_closing_tags(xml_string)\n xml_to_latex_parser = cls(**parser_kwargs)\n\n if isinstance(xml_string, basestring):\n element = etree.fromstring(xml_string)\n sax.saxify(element, xml_to_latex_parser)\n return xml_to_latex_parser\n else:\n raise TypeError(\"xml string parameter must be str or unicode\")", "def parseXML(readable):\n return parse(readable, case_insensitive=True)", "def open_rss_link(source: str, verbose: bool or None):\n\n logger = set_logger(verbose)\n\n if not source:\n raise ValueError\n\n content = feedparser.parse(source)\n logger.info(f\"Starting reading link {source}\")\n\n return content", "def test_parse(self):\n\n # This just tests that parsing from a stream works. Actual parser\n # semantics are tested using parseString with a more focused XML\n # fragment.\n\n # Test with a filename:\n handler = pulldom.parse(tstfile)\n self.addCleanup(handler.stream.close)\n list(handler)\n\n # Test with a file object:\n with open(tstfile, \"rb\") as fin:\n list(pulldom.parse(fin))", "def from_XML(self, in_filename, entrypoint_name=None):\n\n with open(in_filename, \"r\") as infile: \n s = infile.read()\n return self.from_XML_string(s, entrypoint_name)", "def make_soup(self):\n self.soup = BeautifulSoup(self.xml_fp, 'lxml-xml')\n self.xml_fp.close()", "def fromStream(self, moStream):\n return fromXMLStream(moStream)", "def xml_to_soup(self, xml_loc):\n x = open('/tmp/todd.xml', 'r').read()\n return BeautifulSoup(x, 'xml')", "def source_from_xml(xsource, cachepath):\n stype = xsource.get(\"type\")\n if stype is None:\n logger.error(\"No type specified for source, skipping\")\n return None\n\n try:\n cls = globals()[\"%sSource\" % stype.upper()]\n except KeyError:\n logger.error(\"Unknown source type %s\")\n return None\n \n return cls(cachepath, xsource)", "def xml_readlines(source):\n encoding = get_xml_encoding(source)\n\n with data.get_readable_fileobj(source, encoding=encoding) as input:\n input.seek(0)\n xml_lines = input.readlines()\n\n return xml_lines", "def _parse_html_source( self, htmlsource ):\n try:\n parser = Parser()\n parser.feed( htmlsource )\n parser.close()\n return parser.tags, parser.url\n except: return None, None", "def _sax_xmlMarkupToDocument(text, styleRegistry, tagAliases=None):\n\n # Convert all occurrences of multiple contiguous whitespace\n # characters to a single space character.\n text = RE_REDUCESPACE.sub(\" \", text)\n\n # Convert all occurrences of the non-breaking space character\n # entity reference into its unicode equivalent (the SAX XML parser\n # doesn't recognize this one on its own, sadly).\n text = text.replace(\"&nbsp;\", NON_BREAKING_SPACE)\n\n text = text.encode(\"ascii\", \"xmlcharrefreplace\")\n\n xmlMarkupHandler = _XmlMarkupHandler(styleRegistry, tagAliases)\n try:\n xml.sax.parseString(text, xmlMarkupHandler)\n except SAXParseException as e:\n logging.error(\"Error parsing XML: '%s'; %s\", text, e)\n raise\n\n return xmlMarkupHandler.document", "def parsexml(itemelement):\n title = itemelement.findtext('title')\n pubdate = itemelement.findtext('pubDate')\n link = itemelement.findtext('link')\n thumbnail = NewsItem.parsethumbfromdescription(itemelement.find(\"description\"))\n return NewsItem(title=title,pubdate=pubdate,link=link,thumbnail=thumbnail)", "def loading_xml(self):\n\n dom = minidom.parse(self.filepath)\n return dom", "def generate_document(self):\n\n resp = requests.get(self.link)\n return 
BeautifulSoup(resp.text, 'xml')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator that parses the HTML source, yielding markup events.
def parse(self):
    def _generate():
        if self.encoding:
            reader = codecs.getreader(self.encoding)
            source = reader(self.source)
        else:
            source = self.source
        try:
            bufsize = 4 * 1024 # 4K
            done = False
            while 1:
                while not done and len(self._queue) == 0:
                    data = source.read(bufsize)
                    if not data: # end of data
                        self.close()
                        done = True
                    else:
                        if not isinstance(data, str):
                            raise UnicodeError("source returned bytes, but no encoding specified")
                        self.feed(data)
                for kind, data, pos in self._queue:
                    yield kind, data, pos
                self._queue = []
                if done:
                    open_tags = self._open_tags
                    open_tags.reverse()
                    for tag in open_tags:
                        yield END, QName(tag), pos
                    break
        except html.HTMLParseError as e:
            msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)
            raise ParseError(msg, self.filename, e.lineno, e.offset)
    return Stream(_generate()).filter(_coalesce)
[ "def parse(self):\r\n def _generate():\r\n if self.encoding:\r\n reader = codecs.getreader(self.encoding)\r\n source = reader(self.source)\r\n else:\r\n source = self.source\r\n try:\r\n bufsize = 4 * 1024 # 4K\r\n done = False\r\n while 1:\r\n while not done and len(self._queue) == 0:\r\n data = source.read(bufsize)\r\n if not data: # end of data\r\n self.close()\r\n done = True\r\n else:\r\n if not isinstance(data, unicode):\r\n raise UnicodeError(\"source returned bytes, but no encoding specified\")\r\n self.feed(data)\r\n for kind, data, pos in self._queue:\r\n yield kind, data, pos\r\n self._queue = []\r\n if done:\r\n open_tags = self._open_tags\r\n open_tags.reverse()\r\n for tag in open_tags:\r\n yield END, QName(tag), pos\r\n break\r\n except html.HTMLParseError, e:\r\n msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)\r\n raise ParseError(msg, self.filename, e.lineno, e.offset)\r\n return Stream(_generate()).filter(_coalesce)", "def _parse_html_source( self, htmlsource ):\n try:\n parser = Parser()\n parser.feed( htmlsource )\n parser.close()\n return parser.tags, parser.url\n except: return None, None", "def event_generator(self):\n with open(self.event_file, \"r\") as events:\n _ = events.readline() # skip header\n for event in events:\n yield self.process_event(event)\n # generator needs to be indefinite\n while True:\n yield [0],[0]", "def parse(self, data):\n self.reset()\n try:\n self.feed(data)\n except HTMLParseError:\n pass\n self.finish()\n return self.items()", "def events():\n for el in _list_events():\n yield Event(el)", "def test_parse_semantics(self):\n\n items = pulldom.parseString(SMALL_SAMPLE)\n evt, node = next(items)\n # Just check the node is a Document:\n self.assertTrue(hasattr(node, \"createElement\"))\n self.assertEqual(pulldom.START_DOCUMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"html\", node.tagName)\n self.assertEqual(2, len(node.attributes))\n self.assertEqual(node.attributes.getNamedItem(\"xmlns:xdc\").value,\n \"http://www.xml.com/books\")\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt) # Line break\n evt, node = next(items)\n # XXX - A comment should be reported here!\n # self.assertEqual(pulldom.COMMENT, evt)\n # Line break after swallowed comment:\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(\"title\", node.tagName)\n title_node = node\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n self.assertEqual(\"Introduction to XSL\", node.data)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"title\", node.tagName)\n self.assertTrue(title_node is node)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"hr\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"p\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.START_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n self.assertEqual(\"xdc:author\", node.tagName)\n evt, node = 
next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.CHARACTERS, evt)\n evt, node = next(items)\n self.assertEqual(pulldom.END_ELEMENT, evt)\n # XXX No END_DOCUMENT item is ever obtained:\n #evt, node = next(items)\n #self.assertEqual(pulldom.END_DOCUMENT, evt)", "def __parse_all(self):\n try:\n self.__log.debug(_('Seeking file until header is found'))\n self.__find_file_header()\n self.__log.debug(_('Parsing all entries'))\n while True:\n yield self.__parse_one()\n except StartTokenNotFoundError:\n self.__log.debug(_('Finished parsing entries'))\n return", "def blog_entry(html):\n blog_entry_pattern = r'<span class=\"date\">(.*)</span>\\s*<a href=\"(.*)\" target=\"_blank\" class=\"list-title\">(.*)</a>'\n for m_obj in re.finditer(blog_entry_pattern, html):\n log.debug('(Master) Producing blog entries {} {} {}'\n .format(m_obj.group(1), m_obj.group(2), m_obj.group(3)))\n yield m_obj.group(1), m_obj.group(2), m_obj.group(3)", "def parse(self, source: Any) -> Any:\n if isinstance(source, etree.ElementTree):\n source = source.getroot()\n\n if isinstance(source, etree.Element):\n ctx = iterwalk(source, {})\n elif self.parser.config.process_xinclude:\n root = etree.parse(source).getroot() # nosec\n base_url = get_base_url(self.parser.config.base_url, source)\n loader = functools.partial(xinclude_loader, base_url=base_url)\n\n xinclude.include(root, loader=loader)\n ctx = iterwalk(root, {})\n else:\n ctx = etree.iterparse(source, EVENTS) # nosec\n\n return self.process_context(ctx)", "def parse(self, beautiful_html):\n return beautiful_html", "def processHTML(html, url=\"\"):\n # Decide here what you want to do with the content\n return", "def parse(self):\n reader_args = (self.filename,\n self.fs,\n self.header,\n self.max_lines,\n self.field_pre_filter,\n self.record_pre_filter)\n\n with Reader(*reader_args) as reader:\n for nr, record in enumerate(reader, 1): # line numbers start from 1\n record = self.record_func(nr, self._parse_fields(record))\n if self.record_post_filter(nr, record):\n yield record", "def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)", "def __iter__(self):\n\n # case of the first page\n start_time = time.time()\n page = HTMLPage(self.seed_url)\n self.last_crawl_duration = time.time() - start_time\n self.pages_crawled.add(seed_url)\n self.domains_crawled.add(extract_domains_from_url(seed_url)[1])\n self.update_pages_to_be_crawled(page)\n yield page \n \n # all the other pages\n while (self.pages_to_be_crawled and\n len(self.pages_crawled) < self.max_crawled_pages):\n url = self.pages_to_be_crawled.pop()\n start_time = time.time()\n page = HTMLPage(url)\n self.last_crawl_duration = time.time() - start_time\n self.pages_crawled.add(url)\n self.domains_crawled.add(extract_domains_from_url(url)[1])\n self.update_pages_to_be_crawled(page)\n yield page\n raise StopIteration", "def parse(self, response):\n\t\tlogging.info('started scraping {}'.format(response.url))\n\t\tpage = json.loads(response.text)['pagecontent']\n\t\tlinks = 
Selector(text=page).css(\"div.col-xs-12>a::attr(href)\").getall()\n\t\tlogging.info('finished scraping'.format(response.url))\n\t\tif len(links) == self.per_page:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telif response.meta['num'] == self.num_of_pages:\n\t\t\tfor i in range(len(links)):\n\t\t\t\tyield {'links': links[i]}\n\t\telse:\n\t\t\tlogging.warning('the chosen selector did not find all the links \\\nwhich are on the page {}'.format(response.url))\n\t\t\traise CloseSpider(\"not all the links were found on the page {}. The\\\n selector has to be changed\".format(response.url))", "def get_items(self):\n # Use `iterparse`, it's more efficient, specially for big files\n context = ElementTree.iterparse(self.source, events=(\"start\", \"end\"))\n context = iter(context)\n event, root = context.next()\n for event, item in context:\n if item.tag == self.item_tag_name and event == \"end\":\n yield item\n # Releases the item from memory\n item.clear()\n root.clear()", "def _html_splitlines(lines):\n open_tag_re = re.compile(r'<(\\w+)\\s.*?[^/]?>')\n close_tag_re = re.compile(r'</(\\w+)>')\n open_tags = []\n for line in lines:\n # Reopen tags still open from the previous line\n for tag in open_tags:\n line = tag.group(0) + line\n open_tags = []\n\n # Find all tags opened on this line\n for tag in open_tag_re.finditer(line):\n open_tags.append(tag)\n\n # Find all tags closed on this line\n for ctag in close_tag_re.finditer(line):\n for otag in open_tags:\n if otag.group(1) == ctag.group(1):\n open_tags.remove(otag)\n break\n\n # Close all tags still open at the end of line, they'll get reopened at\n # the beginning of the next line\n for tag in open_tags:\n line += '</%s>' % tag.group(1)\n\n yield line", "def parse(self, source):\n\n rt, title, title_pic, markdown = libparser.parse(source)\n\n if rt == -1:\n raise SeparatorNotFound\n elif rt == -2:\n raise PostTitleNotFound\n\n # change to unicode\n title, title_pic, markdown = map(to_unicode, (title, title_pic,\n markdown))\n\n # render to html\n html = self.markdown.render(markdown)\n summary = self.markdown.render(markdown[:200])\n\n return {\n 'title': title,\n 'markdown': markdown,\n 'html': html,\n 'summary': summary,\n 'title_pic': title_pic\n }", "def iter_contents(self):\n return\n yield" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a serializer object for the given method.
def get_serializer(method='xml', **kwargs):
    if isinstance(method, str):
        method = {'xml': XMLSerializer,
                  'xhtml': XHTMLSerializer,
                  'html': HTMLSerializer,
                  'text': TextSerializer}[method.lower()]
    return method(**kwargs)
[ "def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()", "def get_serializer(self, format):\n creator = self.serializer_format_dict.get(format.upper())\n if not creator:\n raise ValueError(format)\n\n return creator()", "def get_serializer_class(self):\n \n if self.action == 'list':\n return FooSerializer\n elif self.action == 'retrieve':\n return FooSerializer\n elif self.action == 'create':\n return FooSerializer\n return FooSerializer", "def get_serializer_class(self):\n if self.action in {\"create\", \"update\"}:\n return TransactionSerializer\n return TransactionSummarySerializer", "def _get_serializer(self, model, serializer):\n app_lbl = getattr(model, \"_meta\").app_label\n package = apps.get_app_config(app_lbl).module\n\n if \".\" in serializer: # pragma: no cover\n module, serializer = serializer.split(\".\", 1)\n\n else:\n module = \"serializers\"\n\n module = import_module(\".\".join((package.__name__, module)))\n return getattr(module, serializer)", "def get_pagination_serializer(self, page):\n class SerializerClass(self.pagination_serializer_class):\n class Meta:\n object_serializer_class = self.get_serializer_class()\n\n pagination_serializer_class = SerializerClass\n context = self.get_serializer_context()\n return pagination_serializer_class(instance=page, context=context)", "def get_pagination_serializer(self, page=None):\n class SerializerClass(self.pagination_serializer_class):\n class Meta:\n object_serializer_class = self.get_serializer_class()\n\n pagination_serializer_class = SerializerClass\n context = self.get_serializer_context()\n return pagination_serializer_class(instance=page, context=context)", "def get_serializer_class(self):\n if self.action == \"retrieve\":\n return QuizAnalyticsSerializer\n return super().get_serializer_class()", "def get_model_serializer(model_class):\n serializer = {\n DiscoveredPackage: DiscoveredPackageSerializer,\n CodebaseResource: CodebaseResourceSerializer,\n }.get(model_class, None)\n\n if not serializer:\n raise LookupError(f\"No Serializer found for {model_class}\")\n\n return serializer", "def get_serializer_class(self):\n if self.action == \"list\":\n return BalanceSummarySerializer\n elif self.action in {\"create\", \"update\"}:\n return BalanceSerializer\n return BalanceDetailSerializer", "def get_serializer_class(self):\n if self.action in {'list', 'create'}:\n return BalanceSheetSummarySerializer\n return BalanceSheetDetailSerializer", "def get_serializer_class(self):\n if self.action == \"retrieve\":\n return VideoAnalyticsSerializer\n return super().get_serializer_class()", "def new(cls, method=\"json\", **kwargs):\n assert method in (\"json\", \"yaml\"), \"Unknown method.\"\n new_ = cls.__new__(cls)\n if method == \"json\":\n new_.from_JSON(**kwargs)\n elif method == \"yaml\":\n new_.from_YAML(**kwargs)\n return new_", "def serializer_factory(model, serializer_class=serializers.ModelSerializer, attrs=None, meta=None):\n attrs = attrs or {}\n meta = meta or {}\n meta.setdefault(\"model\", model)\n attrs.setdefault(\"Meta\", type(str(\"Meta\"), (object,), meta))\n return type(str(\"%sSerializer\" % model.__name__), (serializer_class,), attrs)", "def get_serializer_class(self):\n if self.action == \"retrieve\":\n return CourseAnalyticsSerializer\n return super().get_serializer_class()", "def get_serializer_class(self):\n if self.request.auth and self.request.user.is_active:\n serializer = self.serializer_class\n else:\n 
serializer = UserPartialSerializer\n\n return serializer", "def get_serializer_class(self):\n if self.request.method == \"POST\":\n return UserCreationSerializer\n else: \n return UserPublicOnlySerializer", "def get_serializer_class(self):\n if self.action == \"retrieve\":\n return AssignmentAnalyticsSerializer\n return super().get_serializer_class()", "def _method(self, verb):\n base_method = super(CustomApiClient, self)._method(verb)\n\n def method(*args, **kwargs):\n kwargs.update(\n {\n \"group\": self.group,\n \"version\": self.version,\n \"plural\": self.plural,\n }\n )\n\n # Convert body to_dict if it's a CustomObject as\n # `python-kubernetes` want a dict or a specific objects with\n # some attributes like `openapi_types`, `attributes_map`, ...\n if isinstance(kwargs.get(\"body\"), CustomObject):\n kwargs[\"body\"] = kwargs[\"body\"].to_dict()\n\n result = base_method(*args, **kwargs)\n\n # TODO: do we have a result for `delete` methods?\n return CustomObject(result)\n\n method.__doc__ = \"{verb} a {kind} {scope} object.\".format(\n verb=verb.capitalize(),\n kind=\"{s.group}/{s.version}/{s.kind}\".format(s=self),\n scope=self.scope.value,\n )\n\n return method" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the HTML serializer.
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
    super(HTMLSerializer, self).__init__(doctype, False)
    self.filters = [EmptyTagFilter()]
    if strip_whitespace:
        self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
                                             self._NOESCAPE_ELEMS))
    self.filters.append(NamespaceFlattener(prefixes={
        'http://www.w3.org/1999/xhtml': ''
    }, cache=cache))
    if doctype:
        self.filters.append(DocTypeInserter(doctype))
    self.cache = True
[ "def __init__(self, encoding= 'latin-1'):\n html.parser.HTMLParser.__init__(self)\n self._reset()\n self.encoding = encoding", "def init_renderers(cls):", "def __init__(self, template):\n self.template = template\n \n with open(template) as f:\n logging.info(\"HTMLExport has opened the file {}\".format(template))\n self.text = f.read()", "def __init__(self, temboo_session):\n super(HTMLEscape, self).__init__(temboo_session, '/Library/Utilities/Encoding/HTMLEscape')", "def __init__(self, page_content):\n self.soup = BeautifulSoup(page_content, \"html.parser\")", "def __init__(self):\n\n self.nodes = {}\n self.rendered = False", "def __init__(self, arch):\n HTMLParser.__init__(self)\n self.arch_link = None\n self.links = []\n self._match = '_%s-' % arch", "def __init__(self):\n try:\n HTMLParser.__init__(self)\n\n self.good_data = True\n\n self.title = None\n self.in_title = False\n self.is_same_month = False\n\n self.in_tbody = False\n self.in_abbr = False\n self.in_td = False\n\n self.year = None\n self.month = None\n\n self.tr_column_count = 3\n\n self.weather = {}\n self.weather_current_key = None\n\n self.temps_list = []\n\n except Exception as error:\n print(f\"WeatherScrapper::__init__::{error}\")", "def test_initialization(self):\n translator = rest.CustomHTMLTranslator(self.doc)\n self.assertEqual(translator.initial_header_level, 2)\n self.assertEqual(translator.head, [])\n self.assertEqual(translator.meta, [])\n self.assertEqual(translator.head_prefix, ['', '', '', '', ''])\n self.assertEqual(translator.body_prefix, [])\n self.assertEqual(translator.body_suffix, [])\n self.assertEqual(translator.stylesheet, [])\n self.assertEqual(translator.generator, (''))", "def __init__(self, **kwargs: Any) -> None:\n template_loader = jinja2.FileSystemLoader(searchpath=str(pathlib.Path(__file__).parent / \"jinja\" / \"templates\"))\n self.template_parser = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True,\n autoescape=True,\n **kwargs,\n )", "def __init__(\n self,\n html_parser,\n css_parser,\n configure,\n symbol_file_name=None\n ):\n\n helper.require_not_none(html_parser, css_parser, configure)\n helper.require_valid_type(html_parser, HTMLDOMParser)\n helper.require_valid_type(css_parser, StyleSheetParser)\n helper.require_valid_type(configure, Configure)\n helper.require_valid_type(symbol_file_name, str)\n\n self.html_parser = html_parser\n self.css_parser = css_parser\n self.configure = configure\n self._set_symbols(symbol_file_name, configure)", "def html(self):\n if not self._html:\n self._html = parse(self.input_doc, self.options.get('url'))\n\n return self._html", "def __init__(self, engine, indexer):\n template_t.__init__(self, engine, indexer)\n\n self.list_indent_per_level=4\n self.m_contents = ''\n\n # The list of generated cross references to avoid duplication\n self.m_cross_references = {}", "def _init(self):\n self.stylesheet = self._get_stylesheet()\n self._register_fonts()", "def __init__(self, *args, **kwargs):\n\n super(ElementForm, self).__init__(*args, **kwargs)\n\n # Set the form fields based on the model object\n if kwargs.has_key('instance'):\n initial_values = []\n for lang in settings.LANGUAGES:\n html = getattr(kwargs['instance'],'html_%s' % lang[0])\n if html == None:\n html = getattr(kwargs['instance'],'html_%s' % settings.LANGUAGES[0][0])\n soup = BeautifulSoup(html)\n initial_values.append(soup.label.text)\n\n self.initial['question'] = initial_values", "def __init__(self, link):\n # validate args\n if not isinstance(link, Link) or not 
link.is_valid:\n raise ArgumentError('HTMLPage class should be instantiated with ' \\\n 'a valid Link object. Got this: %s' %link)\n self.link = link\n\n # fetch the actual webpage\n response = requests.get(link.url)\n self.status_code = response.status_code\n self.html_content = response.text.encode('utf8')\n self.text_content = HTMLUtils.html_to_text(self.html_content)\n self.encoding = response.encoding\n\n # fetch all child links\n self.child_links = self._get_all_links()", "def __init__(self):\n this = _coin.new_ScXMLAnchorElt()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, widget_id):\n Renderer.__init__(self)\n\n self.widget_id = widget_id\n self.name = None\n self.description = None\n self.creation_date = None\n\n self.load_data(self.widget_id)", "def __init__(self, code):\n code = str(code)\n if os.path.isfile(code):\n with open(code, 'r') as markup:\n data = markup.read()\n code = data\n self._get_issue_info(code)\n # DOCTYPE fix for Ismaili Insight newsletter\n code = re.sub(\n r'<!DOCTYPE HTML PUBLIC “-//W3C//DTD HTML 4\\.01 Transitional//EN” “http://www\\.w3\\.org/TR/html4/loose\\.dtd”>', # noqa\n '<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">',\n code, flags=re.I)\n self._data = bs4.BeautifulSoup(code, 'html5lib')", "def __init__(self):\n this = _coin.new_ScXMLDocument()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests if two node tests are equal
def nodes_equal(node1, node2):
    if type(node1) is not type(node2):
        return False
    if type(node1) == LocalNameTest:
        return node1.name == node2.name
    return True
[ "def test_node_eq(self):\n node1 = ts.Node('a', 2)\n assert node1 == copy.copy(node1)\n assert node1 != ts.Node('b', 2)\n assert node1 != ts.Node('a', 3)\n assert node1 != ts.Node('a', 2, ts.Node('a', 1))\n assert ts.Node('a', 2, ts.Node('a', 1)) != node1\n node1.left = ts.Node('a', 1)\n assert ts.Node('a', 2, ts.Node('a', 1)) == node1\n assert not node1 != node1", "def isEqualToNode(self, other):\n is_lower = self.nodeName.lower() == other.nodeName.lower()\n same_name = self.namespace == other.namespace\n same_attrs = self.attributes == other.attributes\n is_equal = Node.isEqualToNode(self, other)\n return all([is_lower, same_name, same_attrs, is_equal])", "def test_compare(self):\n node = DOMNode(1, 'div', 'value', {}, 1, '')\n self.assertEqual(node, node)\n\n node_1 = DOMNode(1, 'div1', 'value', {}, 1, '')\n self.assertNotEqual(node, node_1)\n\n node_2 = DOMNode(2, 'div', 'value', { \"abbc\": \"bbcc\" }, 1, '')\n self.assertNotEqual(node, node_1)\n\n node_3 = DOMNode(3, 'div', 'value', { \"abbc\": \"bbcc\" }, 1, '')\n self.assertNotEqual(node, node_3)\n self.assertEqual(node_2, node_3)", "def _assert_text_node_equal(expected, actual):\n assert expected.value == actual.value", "def __eq__(self, other):\n if self.nodes == other.nodes:\n return True\n else:\n return False", "def _assert_tag_node_equal(expected, actual):\n assert_wikicode_equal(expected.tag, actual.tag)\n if expected.contents is not None:\n assert_wikicode_equal(expected.contents, actual.contents)\n else:\n assert actual.contents is None\n length = len(expected.attributes)\n assert length == len(actual.attributes)\n for i in range(length):\n exp_attr = expected.attributes[i]\n act_attr = actual.attributes[i]\n assert_wikicode_equal(exp_attr.name, act_attr.name)\n if exp_attr.value is not None:\n assert_wikicode_equal(exp_attr.value, act_attr.value)\n assert exp_attr.quotes == act_attr.quotes\n else:\n assert act_attr.value is None\n assert exp_attr.pad_first == act_attr.pad_first\n assert exp_attr.pad_before_eq == act_attr.pad_before_eq\n assert exp_attr.pad_after_eq == act_attr.pad_after_eq\n assert expected.wiki_markup == actual.wiki_markup\n assert expected.self_closing is actual.self_closing\n assert expected.invalid is actual.invalid\n assert expected.implicit is actual.implicit\n assert expected.padding == actual.padding\n assert_wikicode_equal(expected.closing_tag, actual.closing_tag)", "def are_different(node1, node2):\r\n different_children = True\r\n for child1 in node1.children:\r\n for child2 in node2.children:\r\n if child1.md5 == child2.md5:\r\n different_children = False\r\n\r\n return different_children", "def test_node_methods(self):\n\n metrics = set()\n\n node = Node('uuid-server-hardware', metrics)\n\n self.assertTrue(node.__eq__(node))\n self.assertFalse(node.__eq__(None))\n self.assertFalse(node is Node('other_node', metrics))\n self.assertEquals(node.__hash__(), node.__hash__())\n self.assertEquals(node.__repr__(), node.__repr__())", "def isEqualToNode(self, other):\n if len(self.childNodes) != len(other.childNodes):\n return False\n\n for a, b in zip(self.childNodes, other.childNodes):\n if not a.isEqualToNode(b):\n return False\n\n return True", "def _nodes_are_equivalent(G, node_a, node_b, max_history):\n return G.node[node_a][\"label\"] == G.node[node_b][\"label\"] and (\n _outgoing_edges_are_similar(G, node_a, node_b) or\n _incoming_edges(G, node_a) == _incoming_edges(G, node_b) or\n _fingerprint_node(G, node_a, max_history) == _fingerprint_node(G, node_b, max_history))", "def test_cmp(self):\n 
nodes = self.TreeNode\n self.assertEqual(cmp(nodes['a'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['b'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['a'], nodes['b']), 0)", "def _assert_wikilink_node_equal(expected, actual):\n assert_wikicode_equal(expected.title, actual.title)\n if expected.text is not None:\n assert_wikicode_equal(expected.text, actual.text)\n else:\n assert actual.text is None", "def _assert_html_entity_node_equal(expected, actual):\n assert expected.value == actual.value\n assert expected.named is actual.named\n assert expected.hexadecimal is actual.hexadecimal\n assert expected.hex_char == actual.hex_char", "def _assert_template_node_equal(expected, actual):\n assert_wikicode_equal(expected.name, actual.name)\n length = len(expected.params)\n assert length == len(actual.params)\n for i in range(length):\n exp_param = expected.params[i]\n act_param = actual.params[i]\n assert_wikicode_equal(exp_param.name, act_param.name)\n assert_wikicode_equal(exp_param.value, act_param.value)\n assert exp_param.showkey is act_param.showkey", "def test_edge_match(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"T\")\n self.assertTrue(e1.matches(e2))", "def test_eq_other_peer(self):\n uri = 'netstring://192.168.0.1:9999'\n version = get_version()\n last_seen = 123\n contact1 = PeerNode(PUBLIC_KEY, version, uri, last_seen)\n contact2 = PeerNode(PUBLIC_KEY, version, uri, last_seen)\n self.assertTrue(contact1 == contact2)", "def __eq__(self, other):\n if isinstance(other, Node):\n return self.id == other.id\n else:\n return False", "def _compare_ast(self, left_ast, right_ast):\n self.assertEqual(lmast.dump(left_ast), lmast.dump(right_ast))", "def __ne__(self, node2):\n\t\t#return self._element == node2._element and self._name == node2._name\n\t\treturn not self == node2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert value to a scalar. If a single element Attrs() object is passed the value of the single attribute will be returned.
def as_scalar(value):
    if isinstance(value, Attrs):
        assert len(value) == 1
        return value[0][1]
    else:
        return value
[ "def to_scalar(self, v):\n if v is None:\n return v\n else:\n return v.asnumpy().item()", "def extractValue(self, model, item):\n return getattr(item, self.attribute.attrname)", "def Value(self):\n if self.IsNull:\n return None\n elif self.IsBool:\n return self.AsBool\n elif self.IsInt:\n return self.AsInt\n elif self.IsFloat:\n return self.AsFloat\n elif self.IsString:\n return self.AsString\n elif self.IsKey:\n return self.AsKey\n elif self.IsBlob:\n return self.AsBlob\n elif self.IsMap:\n return self.AsMap.Value\n elif self.IsVector:\n return self.AsVector.Value\n elif self.IsTypedVector:\n return self.AsTypedVector.Value\n elif self.IsFixedTypedVector:\n return self.AsFixedTypedVector.Value\n else:\n raise TypeError('cannot convert %r to value' % self)", "def first_attr_value(self, attrname):\n if not self.members:\n return None\n return getattr(self.members[0], attrname)", "def extract_scalar(obj, dtype, to_numpy=True):\n val = dtype(obj.value)\n return val if to_numpy else (val, dtype)", "def attr_1(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.attr1\", self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)", "def get_scalar(self, name):\n return self._entries[\"Scalar\"][name].get_content() if name in self._entries[\"Scalar\"] else None", "def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attributes_struct\n if x['Name'] == attr]\n if len(attribute_struct) > 1:\n raise tdapi.TDException(\"Too many attributes with name {}\".format(attr))\n elif len(attribute_struct) == 0:\n return\n else:\n return attribute_struct[0]['Value']", "def get_value_for(self, instance):\n value = super(ValueMapFullTextAttr, self).get_value_for(instance)\n # handle error if value_map doesn't have mapping for the given value\n return self.value_map.get(value, None)", "def get_value(self, agent_id: str, scene_ts: int, attribute: str) -> float:\n raise NotImplementedError()", "def GetScalar(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetScalar(self)", "def scalar_value(self, matrix):\r\n return matrix[0, 0]", "def get_value(self):\n return self.node.value()", "def get_attribute_value(self, attribute_name):\n return self.attributes[attribute_name]", "def getBasicAttribute(self, name):\n return getattr(self, \"_\" + name + \"_value_\").getValue()", "def convert_value(self):\n field = self.output_field\n internal_type = field.get_internal_type()\n if internal_type == \"FloatField\":\n return (\n lambda value, expression, connection: None\n if value is None\n else float(value)\n )\n elif internal_type.endswith(\"IntegerField\"):\n return (\n lambda value, expression, connection: None\n if value is None\n else int(value)\n )\n elif internal_type == \"DecimalField\":\n return (\n lambda value, expression, connection: None\n if value is None\n else Decimal(value)\n )\n return self._convert_value_noop", "def get_value(self, d):\n try:\n return d['marginal_carbon']['value']\n except (KeyError, TypeError):\n return None", "def _get_standardized_value(self, value):\n if value is None:\n return None\n\n if isinstance(value, list):\n\n # If the list contains MiniFieldStorage objects then loop\n # through and get the values.\n if any(isinstance(storage_obj, MiniFieldStorage) for storage_obj in value):\n values = [storage_obj.value for storage_obj in value]\n\n # TODO: This needs to be removed in 2.2. 
A breaking change but\n # this code will result in inconsistent values\n # If there is only 1 element in the list then return the only value in the list\n if len(values) == 1:\n return values[0]\n return values\n\n return value\n\n if isinstance(value, (str, int, dict)):\n return value\n\n if not value.filename:\n return value.value\n\n if value.filename:\n return value\n\n return False", "def scalar_data(self):\n return self._scalar_data", "def touch(attribute, attr_value, is_wnva):\n # ====================================================================#\n # Detect Empty Attribute Values\n if attr_value is None or len(str(attr_value)) < 1:\n return None\n # ====================================================================#\n # STR? Search or Create Attribute by Code\n if isinstance(attribute, str):\n attribute = AttributesHelper.touch(attribute, is_wnva)\n if attribute is None:\n Framework.log().error(\"An Error Occurred while Loading Attribute\")\n return None\n # ====================================================================#\n # Search for Value in Attribute\n values = attribute.value_ids.filtered(lambda r: r.name.lower() == attr_value.lower())\n if len(values) > 0:\n return values[0]\n # ====================================================================#\n # Crate New Value for Attribute\n return ValuesHelper.create(attribute, attr_value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the given directives to the stream.
def _apply_directives(stream, directives, ctxt, vars):
    if directives:
        stream = directives[0](iter(stream), directives[1:], ctxt, **vars)
    return stream
[ "def directives(self, directives):\n\n self._directives = directives", "def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp", "def move_directives(directives, op):\n try:\n cupsd_lines = op.readlines(CUPSD_CONF)\n except IOError as error:\n raise IOError(error)\n\n lines_to_move = []\n for line in cupsd_lines:\n for name in directives:\n if line.lstrip().startswith(name):\n lines_to_move.append(line)\n cupsd_lines[cupsd_lines.index(line)] = ''\n\n op.write(CUPSD_CONF, 'w', cupsd_lines)\n\n if lines_to_move:\n op.write(CUPSFILES_CONF, 'a',\n '\\n# added by Leapp\\n{}'.format(''.join(lines_to_move)))", "def get_directives(textlines):\n skiplines = 0\n given_directives = dict()\n\n # Parse textlines for directives\n for line in textlines:\n d = re.search(DIRECTIVE_IDENT_STR + r'(.*)', line)\n if not d:\n # All directives are at the top of the document. Halt when no more\n # directives are encountered.\n break\n\n directive = d.groups()[0]\n arg = None\n if DIRECTIVE_ARG_SEPARATOR in directive:\n directive, arg = directive.split(DIRECTIVE_ARG_SEPARATOR, 1)\n\n if directive not in DIRECTIVES:\n msg = '\"{}\" is not a valid directive'.format(directive)\n raise ParserError(msg)\n\n skiplines += 1\n\n expected_type = DIRECTIVES[directive]\n if expected_type is None:\n # If directive takes no args, treat it as a switch then move on to\n # the next directive\n given_directives[directive] = True\n continue\n \n # If directive takes args, do typechecking\n if arg is None:\n msg = ('The directive \"{}\" requires an argument but none was '\n 'provided (syntax: \"{}{}{}argument\" without the '\n 'surrounding doublequotes)'\n ).format(DIRECTIVE_IDENT_STR, directive,\n DIRECTIVE_ARG_SEPARATOR)\n raise ParserError(msg)\n \n # Cast the given argument (which is a str) to the expected_type\n try:\n given_directives[directive] = expected_type(arg)\n except:\n msg = ('bad argument type to directive {} (expected {}, got {})'\n ).format(directive, expected_type, str(arg))\n raise ParserError(msg)\n\n # \n if not given_directives.get('ROOT_SNIP_ID', None):\n msg = ('You must declare the \"ROOT_SNIP_ID\" directive. 
This will be '\n 'the snip_id assigned to your root snippet.')\n raise ParserError(msg)\n\n # Handle directives dependencies/implications\n if given_directives.get('OVERWRITE_DB_SNIP_IDS', None):\n if not given_directives.get('REF_NUMS_ARE_SNIP_IDS', None):\n msg = ('The \"OVERWRITE_DB_SNIP_IDS\" directive requires the '\n '\"REF_NUMS_ARE_SNIP_IDS\" to be declared as well.')\n raise ParserError(msg)\n\n # Return directives and the textlines with directive lines pruned out\n return textlines[skiplines:], given_directives", "def read_stream(self, stream):\n self.reset()\n for token in stream:\n self._buf.append(token)", "def parse_stream_raw(self, stream, debug=False):\n tokens = tokenize.generate_tokens(stream.readline)\n return self.parse_tokens(tokens, debug)", "def _update_default_directives(self, **dirs):\n self.directives = {}\n self.directives.update(dirs)", "def _parse_directives(\n directives_ast: Optional[List[dict]]\n) -> List[\"DirectiveNode\"]:\n if directives_ast:\n return [_parse_directive(directive) for directive in directives_ast]\n return []", "def process(self, lines):\n for line in lines:\n self._process_line(line)", "def follow_file(self, path, use_stderr=False, period=0.1):\n stream = self.stderr if use_stderr else self.stdout\n if stream is None:\n raise RuntimeError('Cannot follow file outside decoration context')\n with stream.mux.follow_file(path, stream.decorator, period=period):\n yield", "def apply(self, options):\n # Iterate through the token possibilities\n tok_list_count = len(options.allowed)\n for i in range(0, tok_list_count):\n tok_list = options.allowed[i]\n for tok in tok_list:\n # If the possibility is a number and matches the argument, check whether the encountered\n # data was all inside the likely range.\n if tok.kind == DsToken.KIND_NUMBER and tok.text in self.directives:\n if (options.num_ranges[i][0] >= self.likely_range[0] and\n options.num_ranges[i][1] <= self.likely_range[1]):\n # Positive reinforcement\n tok.score += self.pos_score\n else:\n # Negative reinforcement\n tok.score += self.neg_score", "def pos_tag_io(self):\n UTF8Reader = codecs.getreader('utf8')\n input_stream = UTF8Reader(sys.stdin)\n UTF8Writer = codecs.getwriter('utf8')\n output_stream = UTF8Writer(sys.stdout)\n\n for line in input_stream:\n for w in self.tagger.tag(word_tokenize(line.strip())):\n output_stream.write(w[0])\n output_stream.write(\"\\t\")\n output_stream.write(w[1])\n output_stream.write(\"\\n\")\n output_stream.write(\"\\n\")", "def filter_stream(stream, die, filters=None):\n\n if filters is None:\n filters = FILTERS\n\n # I only loosely understand how to manipulate\n # file descriptors. 
Useful resources:\n #\n # - https://linuxmeerkat.wordpress.com/2011/12/02/\\\n # file-descriptors-explained/\n # - https://stackoverflow.com/a/24277852\n # - https://stackoverflow.com/a/17954769\n # - https://stackoverflow.com/a/10759061\n\n # Redirect the stream into a pipe,\n # and filter the pipe output\n fd = stream.fileno()\n oldfd = os.dup(fd)\n piper, pipew = os.pipe()\n os.dup2(pipew, fd)\n os.close(pipew)\n\n fin = os.fdopen(piper, 'r')\n fout = os.fdopen(oldfd, 'w')\n\n # Use a queue to pass lines from\n # the input stream to the output\n # stream.\n q = queue.Queue()\n\n # Use a Barrier to synchronise the\n # read, write, and calling threads\n alive = threading.Barrier(3)\n\n # The read thread runs forever,\n # just putting lines in the queue.\n def read_loop():\n alive.wait()\n while True:\n line = fin.readline()\n if line == '':\n break\n q.put(line)\n\n\n def testline(line):\n for pat in filters:\n\n if isinstance(pat, tuple): pat, skip = pat\n else: skip = 1\n\n if re.search(pat, line):\n return skip\n return 0\n\n # The write thread runs until both\n # of the following are true:\n #\n # - there are no lines in the queue\n # - the die event has been set\n def write_loop():\n skip = 0\n alive.wait()\n while True:\n try:\n line = q.get(timeout=0.25)\n except queue.Empty:\n if die.is_set(): break\n else: continue\n\n if skip > 0:\n skip -= 1\n continue\n\n skip = testline(line) - 1\n\n if skip < 0:\n fout.write(line)\n fout.flush()\n\n # Restore the original stream\n try:\n os.close(fd)\n os.close(piper)\n os.dup2(oldfd, fd)\n os.close(oldfd)\n except Exception:\n pass\n\n rt = threading.Thread(target=read_loop, daemon=True)\n wt = threading.Thread(target=write_loop, daemon=True)\n rt.start()\n wt.start()\n\n return rt, wt, alive", "def _simplify(stream, with_attrs=False):\r\n def _generate():\r\n for mark, (kind, data, pos) in stream:\r\n if kind is START:\r\n if with_attrs:\r\n data = (unicode(data[0]), dict((unicode(k), v)\r\n for k, v in data[1]))\r\n else:\r\n data = unicode(data[0])\r\n elif kind is END:\r\n data = unicode(data)\r\n elif kind is ATTR:\r\n kind = ATTR\r\n data = dict((unicode(k), v) for k, v in data[1])\r\n yield mark, kind, data\r\n return list(_generate())", "def _substitute_stream_ ( klass ) :\n index = klass.find('>>')\n while -1 != index :\n klass = klass.replace('>>','> >')\n index = klass.find( '>>' )\n index = klass.find(' ')\n while -1 != index :\n klass = klass.replace(' ',' ')\n index = klass.find( ' ' )\n return klass", "def processLines(self, lines):\n\n for line in lines:\n if len(line) == 0:\n continue\n\n if line[-1] == \"\\r\":\n line = line[:-1]\n\n # Automatically make P10 protocols have their lines parsed\n # differently\n lineobj = IRCLine(line, self.protocol.p10)\n\n #debug output\n if self.config[\"etc\"][\"debug\"]:\n self.log(line, \"<<<\")\n\n if lineobj.verb == \"ERROR\":\n #If ERROR is sent, it's already fatal.\n raise IOError\n\n #Handle server commands\n try:\n for impl in self.s2scommands[lineobj.verb]:\n try:\n impl(cod, lineobj)\n except KeyError as e:\n continue\n except Exception as e:\n if not self.config[\"etc\"][\"production\"]:\n self.servicesLog(\"%s %s %s\" %(type(e), e.message, lineobj))\n traceback.print_exc(file=sys.stdout)\n continue\n except KeyError:\n pass", "def fast_forward(self,removed_instructions):\n for instruction in removed_instructions: \n for group in instruction[\"groups\"]: \n if group.get(\"transfer\"):\n fromLocs = []\n toLocs = []\n volumes = []\n changeSettings = []\n for transfer in 
group[\"transfer\"]:\n pp.pprint(transfer)\n fromLocs.append(transfer[\"from\"].pop(\"locName\"))\n toLocs.append(transfer[\"to\"].pop(\"locName\"))\n volumes.append(transfer.pop(\"volume\"))\n changeSettings.append(transfer)\n self.protocol.add_transfer_to_stream(fromLocs,toLocs,volumes,changeSettings) \n elif group.get(\"mix\"):\n mixLocs = []\n volumes = []\n changeSettings = []\n for mix in group[\"mix\"]:\n pp.pprint(mix)\n mixLocs.append(mix.pop(\"locName\"))\n volumes.append(mix.pop(\"volume\"))\n changeSettings.append(mix)\n self.protocol.add_mix_to_stream(mixLocs,volumes,changeSettings)\n elif group.get(\"run\"):\n # cycler\n name = group[\"run\"].pop(\"name\")\n changeSettings = group[\"run\"] \n self.protocol.add_cycler_group(name,changeSettings)\n if self.protocol.instruction_stream[\"cmds\"]:\n self.protocol.end_stream()", "def pipe(data, *funcs):\n for func in funcs:\n data = func(data)\n return data", "def emit(self, ctx, modules, fd):\n return", "def _mux(docs: list, process_stdin: IO, q: queue.Queue):\n for i, doc in enumerate(docs):\n count = 0\n sents = doc.strip().split('\\n')\n for line in sents:\n line = line + '\\n'\n process_stdin.write(line.encode('utf-8'))\n count += 1\n q.put((i, count))\n q.put(None) #poison\n process_stdin.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the given `Suite` object.
def _exec_suite(suite, ctxt, vars=None):
    if vars:
        ctxt.push(vars)
        ctxt.push({})
    suite.execute(ctxt)
    if vars:
        top = ctxt.pop()
        ctxt.pop()
        ctxt.frames[0].update(top)
[ "def run_suite(self, suite, **kwargs):\n return PyunitConsumer(\n verbosity=self.verbosity,\n failfast=self.failfast,\n ).run(suite)", "def run_suite(self, suite, **kwargs):\n options = {\n 'verbosity': getattr(settings, 'TEST_OUTPUT_VERBOSE', False),\n 'descriptions': getattr(settings, 'TEST_OUTPUT_DESCRIPTIONS', False),\n 'output': getattr(settings, 'TEST_OUTPUT_DIR', '.'),\n 'outsuffix': 'result', # No date-based file suffix\n }\n return xmlrunner.XMLTestRunner(**options).run(suite)", "def handle_suite(suite):\n suite_name = suite.getAttribute(\"name\")\n cases = suite.getElementsByTagName(\"testcase\")\n for case in cases:\n handle_testcase(case, suite_name)", "def execute_random_suite(project):\n test_name = TestUtils.random_string()\n tests = [test_name]\n for t in tests:\n TestUtils.create_test(project, name=t)\n suite_name = TestUtils.random_string()\n TestUtils.create_suite(project, name=suite_name, tests=tests)\n execution = TestUtils.execute_suite(project, suite_name)\n execution['tests'] = tests\n return execution", "def enterSuite(self, suite, result=None):\n try:\n return self._announceSuiteStart(suite, result=result)\n finally:\n self.level += 1", "def execute(self, tests_by_task: Dict[str, TaskInfo]) -> None:\n raise NotImplementedError()", "def run_simulation(self):\n\n # create appropriate object\n simulation = self.all_sims[self.testcase](self.testcase, self.params.paramfile, self.root,\n self.plots, self.movies)\n\n simulation.run_simulation()\n self.finishedTestcase()", "def summariseSuiteResult(self, suite):", "def run(self, obj, env):\n if not obj.module_begin(self):\n obj.module_skip(self, None)\n return\n with self.context():\n try:\n for test in self.load(env):\n test.run(obj)\n except TestException, ex:\n if not ex.module:\n raise\n if ex.skip:\n obj.module_skip(self, ex.get())\n else:\n obj.module_fail(self, ex.get())\n except KeyboardInterrupt:\n raise\n except:\n reason = traceback.format_exc()\n obj.module_fail(self, reason)\n else:\n obj.module_pass(self)", "def run_tests(self):\n self.load_tests()\n for name in self.runtests:\n self.logger.info(\"running %s\" % name)\n try:\n self.tests[name].module.run(self)\n except Exception, e:\n self.logger.error(\"ERR: %s\" % e)", "def run(self):\n logging.info(\"Running benchmark suite...\")\n for benchmark in self._benchmarks:\n result = self.run_method(benchmark)\n print(result)\n if self._table is None:\n self._table = Table([result])\n else:\n self._table.update([result])\n self.write_results()\n self.host_results()", "def run(self, result=None):\n self.install_fixtures()\n super(TestCase, self).run(result)", "def __ExecuteSummarize(self):\n\n # If no results file is specified, use a default value.\n if len(self.__arguments) == 0:\n results_path = \"results.qmr\"\n else:\n results_path = self.__arguments[0]\n\n # The remaining arguments, if any, are test and suite IDs.\n id_arguments = self.__arguments[1:]\n # Are there any?\n if len(id_arguments) > 0:\n filter = 1\n # Expand arguments into test IDs.\n try:\n test_ids, suite_ids \\\n = self.GetDatabase().ExpandIds(id_arguments)\n except (qm.test.database.NoSuchTestError,\n qm.test.database.NoSuchSuiteError), exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n except ValueError, exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n else:\n # No IDs specified. 
Show all test and resource results.\n # Don't show any results by test suite though.\n filter = 0\n suite_ids = []\n\n # Get an iterator over the results.\n try:\n results = base.load_results(open(results_path, \"rb\"),\n self.GetDatabase())\n except (IOError, xml.sax.SAXException), exception:\n raise QMException, \\\n qm.error(\"invalid results file\",\n path=results_path,\n problem=str(exception))\n\n any_unexpected_outcomes = 0\n\n # Compute the list of result streams to which output should be\n # written. Results path only used for HTML/NexTest\n streams = self.__GetResultStreams(results_path)\n \n # Send the annotations through.\n for s in streams:\n s.WriteAllAnnotations(results.GetAnnotations())\n\n # Get the expected outcomes.\n outcomes = self.__GetExpectedOutcomes()\n\n # Our filtering function. Should use itertools.ifilter, once\n # we can depend on having Python 2.3.\n def good(r):\n return r.GetKind() == Result.TEST \\\n and r.GetId() in test_ids\n\n # Simulate the events that would have occurred during an\n # actual test run.\n for r in results:\n if not filter or good(r):\n for s in streams:\n s.WriteResult(r)\n if (r.GetOutcome()\n != outcomes.get(r.GetId(), Result.PASS)):\n any_unexpected_outcomes = 1\n for s in streams:\n s.Summarize()\n\n if any_unexpected_outcomes:\n return 1\n \n return 0", "def testExecute(self):\n l = TestLayer(\"test2\")\n self.assertFalse(l.executeSet)\n l.execute(1)\n self.assertTrue(l.executeSet)", "def unpack_test_suite(self, ret_val, test_suite, level):\n if level not in (\"case\", \"method\"):\n raise ValueError(\"level must be either case or method\")\n\n for test in test_suite:\n if self.is_not_suite(test):\n # Case should be suite if it has multiple methods\n t_name = '.'.join(test.id().split('.')[:-1]) if level == \"case\" else test.id()\n if isinstance(test, doctest.DocTestCase):\n t_name = f'doctest.{t_name}'\n if level == \"case\":\n if t_name not in ret_val:\n suite = unittest.TestSuite()\n ret_val[t_name] = suite\n ret_val[t_name].addTest(test)\n else:\n ret_val[t_name] = unittest.TestSuite([test])\n else:\n ret_val = self.unpack_test_suite(ret_val, test, level)\n return ret_val", "def run(self, **kwargs):\n logger.info(\"Setup \" + self.__class__.__name__)\n self.setup(**kwargs)\n self._do_outer_iteration_stage(**kwargs)", "def run(self):\n for test in self.mTests:\n self.sendRequest(\"Running\")\n\n # execute the test\n test.runTest()\n\n # flush the output buffer (so that the test information will really\n # be written in the output file)\n sys.stdout.flush()\n\n self.mTestsExecuted = self.mTestsExecuted + 1\n\n return True", "def runTests(logname, getSuite, args):\n sel = \"unit\"\n vrb = 1\n if len(args) > 1:\n sel = args[1]\n if sel == \"xml\":\n # Run with XML test output for use in Jenkins environment\n if not junitxml_present:\n print(\"junitxml module not available for XML test output\")\n raise ValueError(\"junitxml module not available for XML test output\")\n with open('xmlresults.xml', 'w') as report:\n result = junitxml.JUnitXmlResult(report)\n result.startTestRun()\n try:\n getSuite(select=\"unit\").run(result)\n finally:\n result.stopTestRun()\n else:\n if sel[0:3] in [\"uni\",\"com\",\"all\",\"int\",\"pen\"]:\n logging.basicConfig(level=logging.WARNING)\n if sel[0:3] in [\"com\",\"all\"]: vrb = 2\n else:\n # Run single test with elevated logging to file via new handler\n logging.basicConfig(level=logging.DEBUG)\n # Enable debug logging to a file\n fileloghandler = logging.FileHandler(logname,\"w\")\n 
fileloghandler.setLevel(logging.DEBUG)\n # Use this formatter for shorter log records\n ###filelogformatter = logging.Formatter('%(levelname)s %(message)s', \"%H:%M:%S\")\n # Use this formatter to display timing information:\n filelogformatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s', \"%H:%M:%S\")\n fileloghandler.setFormatter(filelogformatter)\n logging.getLogger('').addHandler(fileloghandler)\n vrb = 2\n runner = unittest.TextTestRunner(verbosity=vrb)\n tests = getSuite(select=sel)\n if tests: runner.run(tests)\n return", "def runtest():\n pwd = os.path.abspath(os.path.dirname(__file__))\n response = json.loads(request.body.read())\n testCases = (str(response['testCases'])).split(',')\n testCases.pop()\n _runner = (str(response['Runner']))\n _buildName = (str(response['buildName']))\n _userId = (str(response['userId']))\n _testPlanId = (str(response['testPlanId']))\n totalTestCases = len(testCases)\n if _runner == 'HTMLTestRunner':\n if totalTestCases == 0:\n return \"Select testcases to run..\"\n else:\n shutil.rmtree(pwd+'/Output/')\n os.mkdir(pwd+'/Output/')\n listOfTestSuiteNames = getTestSuiteNames(testCases)\n for testSuite in listOfTestSuiteNames:\n suite = unittest.TestSuite()\n for testCase in testCases:\n testSuiteName = ((str(testCase).split(' '))[0]).split('.')[-1]\n if testSuite == testSuiteName:\n _testSuiteName = ((str(testCase)).split(' ')[0])[1:]\n classObj = my_import(_testSuiteName)\n _testCaseName = ((((str(testCase)).split(' ')[1])[:-1]).split('='))[1]\n suite.addTest(classObj(_testCaseName))\n _testModuleName = testSuiteName#((str(testSuite).split(\".\")[-1])[0:-2]) \n _output = open(pwd+\"/Output/\"+_testModuleName+\".html\",\"w\")\n HTMLRunner = HTMLTestRunner.HTMLTestRunner(stream=_output,title=_testModuleName,description=\"Test case's for the module \"+_testModuleName)\n HTMLRunner.run(suite)\n subprocess.Popen(['python',pwd+\"/ExtLib/Statistics.py\",\"Test Automation\",pwd+\"/Output/\"])\n IndexMaker = HTMLIndexCreator.HTMLIndexCreator(pwd+\"/Output/\")\n IndexMaker.makeHTMLIndexFile() \n return \"Test completed.....\"\n else:\n return \"The specified runner does not exist.\"", "def run(limit=None, verbosity=None, exit_=False):\n\n setup_module(None, verbosity)\n\n try:\n if externals.exists('nose'):\n # Lets just use nose\n run_tests_using_nose(limit=limit,\n verbosity=verbosity,\n exit_=exit_)\n else:\n print(\"T: Warning -- major bulk of tests is skipped since nose \"\n \"is unavailable\")\n # collect all tests\n suites = collect_test_suites(verbosity=verbosity)\n\n if limit is None:\n # make global test suite (use them all)\n ts = unittest.TestSuite(suites.values())\n else:\n ts = unittest.TestSuite([suites[s] for s in limit])\n\n\n class TextTestRunnerPyMVPA(unittest.TextTestRunner):\n \"\"\"Extend TextTestRunner to print out random seed which was\n used in the case of failure\"\"\"\n def run(self, test):\n \"\"\"Run the bloody test and puke the seed value if failed\"\"\"\n result = super(TextTestRunnerPyMVPA, self).run(test)\n if not result.wasSuccessful():\n print \"MVPA_SEED=%s\" % _random_seed\n\n # finally run it\n TextTestRunnerPyMVPA(verbosity=verbosity).run(ts)\n finally:\n teardown_module(None, verbosity)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a key for the given directive class that should be used to sort it among other directives on the same `SUB` event. The default implementation simply returns the index of the directive in the `directives` list.
def get_directive_index(self, dir_cls):
    if dir_cls in self._dir_order:
        return self._dir_order.index(dir_cls)
    return len(self._dir_order)
[ "def key_from_class_name(class_name):\n assert class_name in Transaction._class_names\n return Transaction._class_names[class_name]", "def class_to_idx(self):\n\n return {cat: i for i, cat in enumerate(self.CLASSES)}", "def match_class(self, key):\n self._keystrokes.append(key)\n candidate = ''.join(self._keystrokes)\n matches = [c for c in self._classes if c.startswith(candidate.rstrip())]\n\n # unambiguous match\n if len(matches) == 1:\n self._keystrokes.clear()\n return matches[0]\n # potentially ambiguous match\n elif len(matches) > 1:\n # unsolvable without a space (e.g. 'ba' -> ['bar', 'baz'])\n if not candidate.endswith(' '):\n return ''\n # solvable if ending in space and a full match (e.g. 'bar ' -> ['bar', 'bar1'], choose 'bar')\n elif len(candidate) in [len(m.rstrip()) for m in matches]:\n self._keystrokes.clear()\n return min(matches, key=len)\n # not solvable yet\n else:\n return ''\n # no matches\n else:\n self._keystrokes.clear()\n\n return ''", "def _unit_key(self, unit):\r\n cls = unit.__class__\r\n ident = tuple([getattr(unit, name) for name in self._keyattrs[cls]])\r\n return \"%s:%s:%s\" % (self.name, cls.__name__, self.hash(ident))", "def get_index(usage_key, children):\n children = [str(child) for child in children]\n return children.index(usage_key)", "def java_class_params_to_key(package: str, class_name: str):\n return f'{package}.{class_name}'", "def _index_key_for(self, att, value=None):\r\n if value is None:\r\n value = getattr(self, att)\r\n if callable(value):\r\n value = value()\r\n if value is None:\r\n return None\r\n if att not in self.lists:\r\n return self._get_index_key_for_non_list_attr(att, value)\r\n else:\r\n return self._tuple_for_index_key_attr_list(att, value)", "def lookup_key(self, index: list) -> \"Token\":\n token = self.lookup(index[:-1])\n return token._get_key_token(index[-1])", "def _get_class_name(self, class_idx):\n class_category = self.config['class_idx_to_category'][class_idx] # Get the Class ID based on the index.\n return dict(self.config['category_names'])[class_category] if class_idx is not None else None", "def get_class_data_type_key(class_name: str) -> str:\n data_type_key = ''\n for letter in class_name:\n if 'A' <= letter <= 'Z':\n data_type_key += letter\n\n return data_type_key.lower()", "def getClassDefByClass(self, class_):\n return self._mapped_classes.get(class_, None)", "def keyindex(self, category):\n return self._keyindex[category]", "def sort_key(self) -> \"Attribute\":\n return self._values.get(\"sort_key\")", "def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))", "def _get_idx_to_class(class_to_idx):\n lst = [None] * len(class_to_idx)\n for key, value in class_to_idx.items():\n lst[value] = key\n return lst", "def cmp_to_key(mycmp):\n\n class ComparatorClass:\n \"\"\"A class that implements comparison methods.\"\"\"\n\n def __init__(self, obj, *args):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n return ComparatorClass", "def get_named_comparator(self, key_path: str):\n case_ignored = self._config.matches(DeltaConfig.KeyCaseIgnored)\n for k in self.named_comparators:\n if 
key_matches(k, key_path, case_ignored):\n return self.named_comparators[k]\n return None", "def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name", "def getKey(self, element):\r\n return element._key", "def get_key(self):\n\n # defaults\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n\n peer = self.peers[0]\n try:\n sort_status = self.status_key[peer.status]\n except KeyError:\n sort_status = len(self.status_key) + 1\n sort_name = peer.alias\n\n # return tuple of sort keys\n return sort_notify, sort_type, sort_status, sort_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal stream filter that performs inclusion of external template files.
def _include(self, stream, ctxt, **vars):
    from genshi.template.loader import TemplateNotFound
    for event in stream:
        if event[0] is INCLUDE:
            href, cls, fallback = event[1]
            if not isinstance(href, str):
                parts = []
                for subkind, subdata, subpos in self._flatten(href, ctxt, **vars):
                    if subkind is TEXT:
                        parts.append(subdata)
                href = ''.join([x for x in parts if x is not None])
            try:
                tmpl = self.loader.load(href, relative_to=event[2][0],
                                        cls=cls or self.__class__)
                for event in tmpl.generate(ctxt, **vars):
                    yield event
            except TemplateNotFound:
                if fallback is None:
                    raise
                for filter_ in self.filters:
                    fallback = filter_(iter(fallback), ctxt, **vars)
                for event in fallback:
                    yield event
        else:
            yield event
[ "def _include(self, stream, ctxt, **vars):\r\n from genshi.template.loader import TemplateNotFound\r\n\r\n for event in stream:\r\n if event[0] is INCLUDE:\r\n href, cls, fallback = event[1]\r\n if not isinstance(href, basestring):\r\n parts = []\r\n for subkind, subdata, subpos in self._flatten(href, ctxt,\r\n **vars):\r\n if subkind is TEXT:\r\n parts.append(subdata)\r\n href = ''.join([x for x in parts if x is not None])\r\n try:\r\n tmpl = self.loader.load(href, relative_to=event[2][0],\r\n cls=cls or self.__class__)\r\n for event in tmpl.generate(ctxt, **vars):\r\n yield event\r\n except TemplateNotFound:\r\n if fallback is None:\r\n raise\r\n for filter_ in self.filters:\r\n fallback = filter_(iter(fallback), ctxt, **vars)\r\n for event in fallback:\r\n yield event\r\n else:\r\n yield event", "def includebase(parser, tocken):\r\n bits = tocken.split_contents()\r\n mitemplate = bits[1]\r\n mitemplate2 = _prefijocomillas(mitemplate, templateCTX.directorio_base + \"/\")\r\n tocken.contents = tocken.contents.replace(mitemplate, mitemplate2)\r\n return template.loader_tags.do_include(parser, tocken)", "def tryinclude(parser, tocken):\r\n return WrapperIncludeNodeNoError(template.loader_tags.do_include(parser, tocken))", "def do_include_raw(parser, token):\n bits = token.split_contents()\n if len(bits) != 2:\n raise TemplateSyntaxError, \"%r tag takes one argument: the name of the template to be included\" % bits[0]\n\n template_name = bits[1]\n if template_name[0] in ('\"', \"'\") and template_name[-1] == template_name[0]:\n template_name = template_name[1:-1]\n\n source, path = _loader.load_template_source(template_name)\n\n return template.TextNode(source)", "def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()", "def test_include_in_for_tag_tag(self):\n return self._test_template_tag(\"include/includer.2.html\")", "def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)", "def _load_template(self, template_file):\n pass", "def __preprocess(self, infile, outfile):\n with open(outfile, \"w\") as _outfile:\n _outfile.write(textwrap.dedent(\"\"\"\\\n /*\n * This file is dynamically generated and ignored by Git.\n * DO NOT MAKE CHANGES HERE. 
Instead, go edit its template:\n * %s\n */\n \"\"\" % infile))\n _outfile.write(Template(filename=str(infile)).render(env=self.__context()))", "def templateFilter(func):\n jinja2_env.filters[func.__name__] = func", "def source_file_filter(input_api):\n files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) + [\n r'.+/bootstrap/.*', # third party\n r'.+/jquery/.*', # third party\n r'.+/pb\\.discovery\\.go$',\n r'.+/pb\\.discovery_test\\.go$',\n r'.+\\.pb\\.go$',\n r'.+\\.pb\\.validate\\.go$',\n r'.+\\.pb_test\\.go$',\n r'.+_dec\\.go$',\n r'.+_mux\\.go$',\n r'.+_string\\.go$',\n r'.+gae\\.py$', # symlinks from outside\n r'common/api/internal/gensupport/.*', # third party\n r'common/goroutine/goroutine_id.go',\n r'common/terminal/.*', # third party\n r'server/static/bower_components/.*', # third party\n r'server/static/upload/bower_components/.*', # third party\n ]\n files_to_check = list(input_api.DEFAULT_FILES_TO_CHECK) + [\n r'.+\\.go$',\n ]\n return lambda x: input_api.FilterSourceFile(\n x, files_to_check=files_to_check, files_to_skip=files_to_skip)", "def __init__(self):\n self.template_files = {\n 'CCDA': CCDA_TPL_FILENAME,\n 'FHIR-XML': FHIR_TPL_FILENAME,\n 'FHIR-JSON': FHIR_TPL_FILENAME\n }\n self.environment = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATES_DIR))\n\n # load filters defined in custom_filters\n for a in dir(custom_filters):\n if isinstance(custom_filters.__dict__.get(a), types.FunctionType):\n self.environment.filters[a] = custom_filters.__dict__.get(a)\n\n self.templates = {}\n for key in self.template_files:\n self.templates[key] = self.environment.get_template(self.template_files[key])", "def register_pre_resources_template(self, template):\n pass", "def _interpolate_templates():\r\n if not os.path.exists(env.rcfile):\r\n raise Exception(\"%(rcfile)s does not exist. 
See rcfile.sample and run fab --config=rcfile.name <commands>!\" % env)\r\n\r\n interpolated_files = []\r\n # Get a list of all template files in /etc/ that we need to interpolate\r\n template_paths = []\r\n template_paths.extend(env.template_paths)\r\n template_paths.append(env.local_etc_path)\r\n\r\n for template_path in template_paths: \r\n for root, dirs, files in os.walk(template_path):\r\n for name in files:\r\n infilename = os.path.join(root, name)\r\n if re.search('.tmpl$', infilename):\r\n debug(\"Processing template file %s\" % infilename)\r\n \r\n outfilename = os.path.splitext(infilename)[0]\r\n _interpolate_file(infilename, outfilename)\r\n # infile = open(infilename, 'r')\r\n # outfile = open(outfilename, 'w')\r\n # try:\r\n # outfile.write(infile.read() % env)\r\n # except TypeError, e:\r\n # if re.search(\"not enough arguments for format string\", e[0]):\r\n # # We can safely ignore this since it means that there's nothing to interpolate\r\n # print e[0]\r\n # print \"Continuing by using the template file (%s) as the target (ie no interpolation)\" % infilename\r\n # # Remember that we have to go back to the top due to read() being at eof\r\n # infile.seek(0)\r\n # outfile.write(infile.read())\r\n # else:\r\n # raise\r\n # \r\n # outfile.close()\r\n # infile.close()\r\n interpolated_files.append(outfilename)\r\n \r\n return interpolated_files", "def _handleCheetahInclude(self, srcArg, trans=None,\n includeFrom='file', raw=False):\n _includeID = srcArg\n if _includeID not in self._CHEETAH__cheetahIncludes:\n if not raw:\n if includeFrom == 'file':\n source = None\n if isinstance(srcArg, string_type):\n if hasattr(self, 'serverSidePath'):\n file = path = self.serverSidePath(srcArg)\n else:\n file = path = os.path.normpath(srcArg)\n else:\n file = srcArg # # a file-like object\n else:\n source = srcArg\n file = None\n # @@TR: might want to provide some syntax for specifying the\n # Template class to be used for compilation so compilerSettings\n # can be changed.\n compiler = \\\n self._getTemplateAPIClassForIncludeDirectiveCompilation(\n source, file)\n nestedTemplateClass = compiler.compile(source=source,\n file=file)\n nestedTemplate = nestedTemplateClass(\n _preBuiltSearchList=self.searchList(),\n _globalSetVars=self._CHEETAH__globalSetVars)\n # Set the inner template filters to the initial filter of the\n # outer template:\n # this is the only really safe way to use\n # filter='WebSafe'.\n nestedTemplate._CHEETAH__initialFilter = \\\n self._CHEETAH__initialFilter\n nestedTemplate._CHEETAH__currentFilter = \\\n self._CHEETAH__initialFilter\n self._CHEETAH__cheetahIncludes[_includeID] = nestedTemplate\n else:\n if includeFrom == 'file':\n path = self.serverSidePath(srcArg)\n self._CHEETAH__cheetahIncludes[_includeID] = \\\n self.getFileContents(path)\n else:\n self._CHEETAH__cheetahIncludes[_includeID] = srcArg\n ##\n if not raw:\n self._CHEETAH__cheetahIncludes[_includeID].respond(trans)\n else:\n trans.response().write(self._CHEETAH__cheetahIncludes[_includeID])", "def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)", "def add_external(self):\n if self.external_tpl_in_pairs is not None:\n if not isinstance(self.external_tpl_in_pairs,list):\n external_tpl_in_pairs = [self.external_tpl_in_pairs]\n for tpl_file,in_file in self.external_tpl_in_pairs:\n if not os.path.exists(tpl_file):\n 
self.logger.lraise(\"couldn't find external tpl file:{0}\".\\\n format(tpl_file))\n self.logger.statement(\"external tpl:{0}\".format(tpl_file))\n shutil.copy2(tpl_file,os.path.join(self.m.model_ws,\n os.path.split(tpl_file)[-1]))\n if os.path.exists(in_file):\n shutil.copy2(in_file,os.path.join(self.m.model_ws,\n os.path.split(in_file)[-1]))\n\n if self.external_ins_out_pairs is not None:\n if not isinstance(self.external_ins_out_pairs,list):\n external_ins_out_pairs = [self.external_ins_out_pairs]\n for ins_file,out_file in self.external_ins_out_pairs:\n if not os.path.exists(ins_file):\n self.logger.lraise(\"couldn't find external ins file:{0}\".\\\n format(ins_file))\n self.logger.statement(\"external ins:{0}\".format(ins_file))\n shutil.copy2(ins_file,os.path.join(self.m.model_ws,\n os.path.split(ins_file)[-1]))\n if os.path.exists(out_file):\n shutil.copy2(out_file,os.path.join(self.m.model_ws,\n os.path.split(out_file)[-1]))\n self.logger.warn(\"obs listed in {0} will have values listed in {1}\"\n .format(ins_file,out_file))\n else:\n self.logger.warn(\"obs listed in {0} will have generic values\")", "def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n search_path = self.search_path\r\n\r\n # Make the filename relative to the template file its being loaded\r\n # from, but only if that file is specified as a relative path, or no\r\n # search path has been set up\r\n if relative_to and (not search_path or not os.path.isabs(relative_to)):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n\r\n filename = os.path.normpath(filename)\r\n cachekey = filename\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[cachekey]\r\n if not self.auto_reload:\r\n return tmpl\r\n uptodate = self._uptodate[cachekey]\r\n if uptodate is not None and uptodate():\r\n return tmpl\r\n except (KeyError, OSError):\r\n pass\r\n\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = list(search_path) + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for loadfunc in search_path:\r\n if isinstance(loadfunc, str):\r\n loadfunc = directory(loadfunc)\r\n try:\r\n filepath, filename, fileobj, uptodate = loadfunc(filename)\r\n except IOError:\r\n continue\r\n else:\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested includes work properly without a\r\n # search path\r\n filename = filepath\r\n tmpl = self._instantiate(cls, fileobj, filepath,\r\n filename, encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[cachekey] = tmpl\r\n self._uptodate[cachekey] = uptodate\r\n finally:\r\n if hasattr(fileobj, 'close'):\r\n fileobj.close()\r\n return tmpl\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()", "def load(self, filename, 
relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the given expression, raising a useful error message when a syntax error is encountered.
def _parse_expr(cls, expr, template, lineno=-1, offset=-1):
    try:
        return expr and Expression(expr, template.filepath, lineno,
                                   lookup=template.lookup) or None
    except SyntaxError as err:
        err.msg += ' in expression "%s" of "%s" directive' % (expr,
                                                              cls.tagname)
        raise TemplateSyntaxError(err, template.filepath, lineno,
                                  offset + (err.offset or 0))
[ "def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))", "def test_syntax(self):\n lisp = self.lisp\n for expr in [\n \"(\",\n \"(()\",\n \")\",\n \"())\",\n \".)\"\n ]:\n self.assertRaises(ParseError, lisp.readLisp, expr)", "def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret", "def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = 
self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr", "def validate_expression(self, expression):\n\t\t#return self.evaluate(expression, 0, 2)\n\t\tvars = set(self.get_column_names(True, True)) | set(self.variables.keys())\n\t\tfuncs = set(expression_namespace.keys())\n\t\treturn vaex.expresso.validate_expression(expression, vars, funcs)", "def test_invalid(self):\n\n expression = \"- 1 + 3\" # Invalid syntax\n\n self.assertNotEqual(eval(expression), PrefixOperation(expression).evaluate_expression())", "def test_expression_invalid_ordering(self) -> None:\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"+6\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"+5+\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"6/\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"1 + 2 - + 3\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\")(\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"(((2))\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"(+5\"))\n\n with self.assertRaises(exceptions.InvalidRegexError):\n postfix.validate_tokens(self.arithmetic_lexer.lex(\"6/)\"))", "def parse(self, sourceStr):\n self.completionMessage = \"No errors\"\n self.parseSuccessful = True\n self.scanner = Scanner(sourceStr)\n self.tree = self.expression()\n self.accept(self.scanner.get(), Token.EOE,\n \"symbol after end of expression\")", "def syntax_error(self, msg):\n\n # Despite what the Python documentation claims, the ``line`` attribute of the\n # TokenInfo contains the physical, not logical line, i.e. 
what we need here,\n exc = SyntaxError(msg, (self.filename, self.tokens[self.pos].start[0],\n self.tokens[self.pos].start[1], self.tokens[self.pos].line))\n\n raise exc", "def test_syntaxerror():\n inp = '@article{name}'\n with pytest.raises(pyparsing.ParseException):\n parse_entry(inp)", "def parser(string): \n#1 we tokenize the expression, thanks to the lexer and the Token constructor\n# the names are mapped thanks to the token_map dictionary\n tokens = [Token(token_map.get(x, 'ATOM'), x) for x in lex(string)]\n try:\n (e, i) = parse_iff(tokens)\n if not i:\n return e\n else:\n raise Exception('Unparsed input')\n except:\n raise", "def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])", "def parse_expression(expression: str) -> List[str]:\n stream = InputStream(expression)\n lexer = RLexer(stream)\n tokens = CommonTokenStream(lexer)\n\n tokens.fill()\n\n filter_ = RFilter(tokens)\n filter_.stream()\n tokens.reset()\n\n parser = RParser(tokens)\n tree = parser.prog()\n\n progListener = ProgListener(tokens)\n walker = ParseTreeWalker()\n walker.walk(progListener, tree)\n\n return progListener.exps", "def test_unbalanced_parens(self):\n with self.assertRaises(SyntacticError):\n tokens = TokenStream(StringIO(\"(a (b c (d e (f (g))\"))\n lexer = Lexer()\n lexer.parse(tokens)", "def parse(self, expression_str) -> Expression:\n tree = self._parser.parse(expression_str)\n return self._transformer.transform(tree)", "def parse_eval(self, expr, lineno=1):\n\n if isinstance(expr, unicode):\n expr = renpy.python.escape_unicode(expr)\n\n try:\n rv = ast.parse(expr, 'eval').body[0].value\n except SyntaxError as e:\n raise renpy.parser.ParseError(\n filename,\n lineno + e[1][1] - 1,\n \"Syntax error while parsing python expression.\",\n e[1][3],\n e[1][2])\n\n increment_lineno(rv, lineno-1)\n\n return rv", "def syntaxError(self, filename, msg, lineno, offset, text):\r\n line = text.splitlines()[-1]\r\n if offset is not None:\r\n offset = offset - (len(text) - len(line))\r\n self._stderr.write('%s:%d:%d: %s\\n' %\r\n (filename, lineno, offset + 1, msg))\r\n else:\r\n self._stderr.write('%s:%d: %s\\n' % (filename, lineno, msg))\r\n self._stderr.write(line)\r\n self._stderr.write('\\n')\r\n if offset is not None:\r\n self._stderr.write(re.sub(r'\\S', ' ', line[:offset]) +\r\n \"^\\n\")", "def testParseWithBraces(self):\n parser = expression_parser.EventFilterExpressionParser()\n\n expression = parser.Parse('(a is 3)')\n self.assertIsNotNone(expression)\n\n # Need to close braces.\n with self.assertRaises(errors.ParseError):\n parser.Parse('(a is 3')\n\n # Need to open braces to close them.\n with self.assertRaises(errors.ParseError):\n parser.Parse('a is 3)')", "def test_except_infer_pars(self):\n src = Source([])\n self.assertRaises(RuntimeError, src.set_expression, 'a+a')\n self.assertRaises(RuntimeError, src.set_expression, '2*a')\n self.assertRaises(ValueError, src.set_expression, '2*a', ['a'])\n self.assertRaises(ValueError, src.set_expression, '2*a', grads=['2'])\n self.assertRaises(ValueError, src.set_expression, 'a*b', ['a', 'b'], ['b'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
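The pattern in the `_parse_expr` record above — compile a template expression and, on failure, re-raise with the expression and the template location folded into the message — can be illustrated without the library's own `Expression` and `TemplateSyntaxError` classes. The sketch below is a minimal stand-in using only the standard library; the function name, the `template_path` parameter, and the plain `SyntaxError` re-raise are assumptions for illustration, not the record's actual API.

    def parse_expr(expr, template_path, lineno=-1, offset=-1):
        # Empty expressions yield nothing, mirroring the `expr and ... or None` idiom.
        if not expr:
            return None
        try:
            # 'eval' mode parses a single expression, as a template engine would.
            return compile(expr, template_path, 'eval')
        except SyntaxError as err:
            # Enrich the message with the expression and the template it came from.
            err.msg += ' in expression "%s" of template "%s"' % (expr, template_path)
            raise SyntaxError('%s (line %d, column %d)'
                              % (err.msg, lineno, offset + (err.offset or 0))) from err

Calling `parse_expr('1 +', 'page.html', lineno=3)` then fails with a message that names both the offending expression and the template, which is the behaviour the query describes.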
Takes the AST representation of an assignment, and returns a function that applies the assignment of a given value to a dictionary.
def _assignment(ast):
    def _names(node):
        if isinstance(node, _ast.Tuple):
            return tuple([_names(child) for child in node.elts])
        elif isinstance(node, _ast.Name):
            return node.id

    def _assign(data, value, names=_names(ast)):
        if type(names) is tuple:
            for idx in range(len(names)):
                _assign(data, value[idx], names[idx])
        else:
            data[names] = value

    return _assign
[ "def assignment(self, symbol_table):\n symbol_table[self.key] = self.value.evaluate(self.value, symbol_table)", "def eval_assignment(assignment, caller_parameters, caller_arguments, motif_node_dict, local_dict):\n\tif type(assignment.rvalue).__name__ == 'FuncCall':\n\t\tmotif_node, tree_node = eval_function_call(assignment.rvalue, caller_parameters, caller_arguments, motif_node_dict, local_dict)\n\t\t# it is possible that a function being evaluated returns a non-None MotifNode that need not to be assigned to the LHS variable.\n\t\t# But if the LHS variable is in @local_dict, then the RHS function must return a non-None MotifNode.\n\t\t# consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n\t\tif (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in local_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in local_dict):\n\t\t\tif not motif_node:\n\t\t\t\tprint('\\33[101m' + '[error][eval_assignment/provenance]: ' + assignment.lvalue.name + ' is in the local dictionary. MotifNode should not be None.\\033[0m')\n\t\t\t\texit(1)\n\t\t\telse:\n\t\t\t\tlocal_dict[assignment.lvalue.name].append(motif_node)\n\t\treturn tree_node\n\t# In a case where a provenance node was declared but then assigned or reassigned. For example:\n\t# struct provenance *tprov;\n\t# ...\n\t# tprov = t->provenance;\n\t# tprov must then be in the motif_node_dict.\n\telif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in local_dict:\n\t\t# we can only infer its type from the name of the variable\n\t\tmotif_node = create_motif_node(assignment.lvalue.name)\n\t\tlocal_dict[assignment.lvalue.name].append(motif_node)\n\t\treturn None\n\telif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in local_dict:\n\t\t# similar case as the previous one, except that we have: *tprov = ...\n\t\t# we can only infer its type from the name of the variable\n\t\tmotif_node = create_motif_node(assignment.lvalue.expr.name)\n\t\tlocal_dict[assignment.lvalue.expr.name].append(motif_node)\n\t\treturn None\n\telse:\n\t\t#######################################################\n\t\t# We will consider other conditions if we ever see them\n\t\t# POSSIBLE CODE HERE.\n\t\t#######################################################\n\t\treturn None", "def ast_evaluate_dict_values(edict):\n returndict = {}\n for key, value in edict.items():\n if isinstance(value, dict):\n value = ast_evaluate_dict_values(value)\n if isinstance(value, str): # Only evaluate str values all other must be correct\n try:\n value = eval(value)\n except Exception as err:\n log.debug(\n \"Could not interpret '{}' in key '{}' as a valid object. Stays as is! 
Error: {}\".format(\n value, key, err\n )\n )\n\n returndict[key] = value\n return returndict", "def map_values(function, dictionary):\n return dict((k, function(dictionary[k])) for k in dictionary)", "def eval(self,opseq,valueDict):\n for (dstName,funName,inputNames) in opseq:\n inputValues = map(lambda a:valueDict[a], inputNames)\n fun = EVAL_FUNS[funName] \n result = fun(*inputValues)\n valueDict[dstName] = result\n return valueDict", "def map(func):\n # text is an alias for basestring on Python 2, which cannot be\n # instantiated and therefore can't be used to transform the value,\n # so we force to unicode instead.\n if is_py2 and text == func:\n func = unicode\n\n def expand_kv(kv):\n return func(*kv)\n\n def map_values(value):\n cls = type(value)\n if isinstance(value, dict):\n return cls(_map(expand_kv, value.items()))\n else:\n return cls(_map(func, value))\n\n return transform(map_values)", "def visit_Assignment(self, node):\n # TODO: Arithmetic Assignment\n if isinstance(node.target, asr.Variable):\n target = node.target\n value = node.value\n if isinstance(value, asr.Variable):\n new_node = Assignment(\n Variable(\n target.name\n ),\n Variable(\n value.name\n )\n )\n elif (type(value) == asr.BinOp):\n exp_ast = call_visitor(value)\n for expr in exp_ast:\n new_node = Assignment(\n Variable(target.name),\n expr\n )\n else:\n raise NotImplementedError(\"Numeric assignments not supported\")\n else:\n raise NotImplementedError(\"Arrays not supported\")\n self._py_ast.append(new_node)", "def map_nested_value(func: Callable, value: Any) -> Any:\n value_type = type(value)\n\n if value_type == list:\n return [map_nested_value(func, item) for item in value]\n\n elif value_type == tuple:\n return tuple([map_nested_value(func, item) for item in value])\n\n elif isinstance(value, tuple) and hasattr(value, \"_fields\"):\n # Namedtuple.\n return type(value)(*[map_nested_value(func, item) for item in value])\n\n elif value_type == set:\n return {map_nested_value(func, item) for item in value}\n\n elif value_type == dict:\n return {\n map_nested_value(func, key): map_nested_value(func, val) for key, val in value.items()\n }\n\n else:\n return func(value)", "def convert_functions_in_dict_to_values(dict_to_convert):\n return {key: value() if hasattr(value, '__call__') else value for key, value in dict_to_convert.items()}", "def satisfies(assignment, constraint):\n return constraint(**{var:val for var,val in assignment.items()\n if var in scope(constraint)})", "def satisfying_assignment(formula):\n #print('new_recursion:')\n #print(formula)\n if len(formula)==0: #Base case: empty formula returns empty assignments\n return {}\n\n assignments = {}\n\n ind = 0 #Which literal are we looking at?\n boolVal = True #What value does the variable in our current literal have?\n\n while ind < len(formula[0]): #Look at all variables in first clause until valid assignment is found\n new_formula = simplify_formula(formula,{formula[0][ind][0]:boolVal}) #Try setting first variable to True\n if new_formula[0] != None:\n assignments[formula[0][ind][0]] = boolVal\n assignments.update(new_formula[1])\n #print(assignments)\n try:\n assignments.update(dict(satisfying_assignment(new_formula[0])))\n break\n except TypeError:\n ind += 1\n continue\n else: #If invalid assignment,\n if boolVal: #Try assigning variable to False\n boolVal = False\n else:\n boolVal = True\n ind += 1\n\n if new_formula[0]==None:\n return None\n\n return assignments", "def visit_Assignment(self, node):\n var = node.lvalue.name\n op = node.op\n\n 
if op == '=':\n self.memory[var] = self.visit(node.rvalue)\n elif op == '+=':\n self.memory[var] += self.visit(node.rvalue)\n elif op == '-=':\n self.memory[var] -= self.visit(node.rvalue)\n elif op == '/=':\n self.memory[var] /= self.visit(node.rvalue)\n elif op == '*=':\n self.memory[var] *= self.visit(node.rvalue)\n\n return self.memory[var]", "def assignment_to_plan(assignment: dict[tuple[str, int], list[int]]) -> PlanDict:\n return {\n 'version': 1,\n 'partitions':\n [{'topic': t_p[0],\n 'partition': t_p[1],\n 'replicas': replica\n } for t_p, replica in assignment.items()]\n }", "def _apply_func_to_expressions(sympy_expr, function, args=None):\n if args is None:\n\n def func(expr):\n return function(expr)\n\n else:\n\n def func(expr):\n return function(expr, *args)\n\n if isinstance(sympy_expr, dict):\n new_expr = dict((k, func(expr)) for k, expr in iteritems(sympy_expr))\n elif hasattr(sympy_expr, \"__iter__\"):\n new_expr = list(func(expr) for expr in sympy_expr)\n else:\n new_expr = func(sympy_expr)\n\n return new_expr", "def dict_convert(_dict, keyfn=None, valuefn=None):\n if keyfn is None and valuefn is not None:\n for k in _dict:\n _dict[k] = valuefn(_dict[k])\n return _dict\n\n elif keyfn is not None:\n out_dict = {}\n for k in _dict:\n out_dict[keyfn(k)] = valuefn(_dict[k]) if valuefn else _dict[k]\n return out_dict\n else:\n return _dict", "def process_assign(self, node, state, *_):\n io_source = False\n is_function_call = False\n maybe_d_type_object_assign = False\n d_type_object_name = None\n # Get the GrFN element of the RHS side of the assignment which are\n # the variables involved in the assignment operations.\n sources = self.gen_grfn(node.value, state, \"assign\")\n\n node_name = node.targets[0].__repr__().split()[0][2:]\n if node_name == \"ast.Attribute\":\n node_value = node.targets[0].value\n attrib_ast = node_value.__repr__().split()[0][2:]\n if (\n attrib_ast == \"ast.Name\"\n and node_value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.id\n object_type = self.derived_type_objects[d_type_object_name]\n elif (\n attrib_ast == \"ast.Attribute\"\n and node_value.value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.value.id\n object_type = self.derived_type_objects[d_type_object_name]\n\n array_assignment = False\n is_d_type_obj_declaration = False\n # Detect assigns which are string initializations of the\n # following form: String(10). String initialization of the form\n # String(10, \"abcdef\") are valid assignments where the index of the\n # variables will be incremented but for the former case the index\n # will not be incremented and neither will its variable spec be\n # generated\n is_string_assign = False\n is_string_annotation = False\n if len(sources) > 0 and \"call\" in sources[0]:\n type_name = sources[0][\"call\"][\"function\"]\n if type_name == \"String\":\n is_string_assign = True\n # Check if it just an object initialization or initialization\n # with value assignment\n if len(sources[0][\"call\"][\"inputs\"]) == 1:\n # This is just an object initialization e.g. 
String(10)\n is_string_annotation = True\n elif type_name == \"Array\":\n array_assignment = True\n array_dimensions = []\n inputs = sources[0][\"call\"][\"inputs\"]\n\n # If the array type is string, the structure of inputs will\n # be a bit different than when it is int of float\n if \"call\" in inputs[0][0]:\n if inputs[0][0][\"call\"][\"function\"] == \"String\":\n array_type = \"string\"\n else:\n array_type = inputs[0][0][\"var\"][\"variable\"]\n self._get_array_dimension(sources, array_dimensions, inputs)\n elif type_name in self.derived_types:\n is_d_type_obj_declaration = True\n if isinstance(node.targets[0], ast.Name):\n variable_name = node.targets[0].id\n if variable_name not in self.module_variable_types:\n for program in self.mode_mapper[\"public_objects\"]:\n if (\n variable_name\n in self.mode_mapper[\"public_objects\"][program]\n ):\n self.module_variable_types[variable_name] = [\n program,\n type_name,\n ]\n else:\n pass\n else:\n pass\n\n # This reduce function is useful when a single assignment operation\n # has multiple targets (E.g: a = b = 5). Currently, the translated\n # python code does not appear in this way and only a single target\n # will be present.\n targets = reduce(\n (lambda x, y: x.append(y)),\n [\n self.gen_grfn(target, state, \"assign\")\n for target in node.targets\n ],\n )\n grfn = {\"functions\": [], \"variables\": [], \"containers\": []}\n # Again as above, only a single target appears in current version.\n # The `for` loop seems unnecessary but will be required when multiple\n # targets start appearing.\n target_names = []\n object_attr_num = 1\n for target in targets:\n # Bypass any assigns that have multiple targets.\n # E.g. (i[0], x[0], j[0], y[0],) = ...\n if \"list\" in target:\n return []\n target_names.append(target[\"var\"][\"variable\"])\n # Fill some data structures if this is a string\n # assignment/initialization\n if is_string_assign:\n state.variable_types[target_names[0]] = \"string\"\n state.string_assign_name = target_names[0]\n self.strings[target_names[0]] = {\n \"length\": sources[0][\"call\"][\"inputs\"][0][0][\"value\"]\n }\n if is_string_annotation:\n # If this is just a string initialization,\n # last_definition should not contain this string's index.\n # This happens only during assignments.\n del state.last_definitions[target_names[0]]\n self.strings[target_names[0]][\"annotation\"] = True\n self.strings[target_names[0]][\"annotation_assign\"] = False\n return []\n else:\n self.strings[target_names[0]][\"annotation\"] = False\n self.strings[target_names[0]][\"annotation_assign\"] = True\n\n # Pre-processing and removing certain Assigns which only pertain\n # to the Python code and do not relate to the FORTRAN code in any\n # way.\n io_match = self.check_io_variables(target_names[0])\n if io_match:\n self.exclude_list.append(target_names[0])\n return []\n\n # If the target is a list of variables, the grfn notation for the\n # target will be a list of variable names i.e. \"[a, b, c]\"\n # TODO: This does not seem right. 
Discuss with Clay and Paul\n # about what a proper notation for this would be\n if target.get(\"list\"):\n targets = \",\".join(\n [x[\"var\"][\"variable\"] for x in target[\"list\"]]\n )\n target = {\"var\": {\"variable\": targets, \"index\": 1}}\n\n if array_assignment:\n var_name = target[\"var\"][\"variable\"]\n state.array_assign_name = var_name\n # Just like the same reason as the variables\n # declared with annotation within function (not\n # function arguments) need to have index of zero.\n # Thus, these 3 lines of code fixes the index to\n # correct value from -1 to 0.\n if target[\"var\"][\"index\"] == -1:\n target[\"var\"][\"index\"] = 0\n state.last_definitions[target_names[0]] = 0\n is_mutable = False\n array_info = {\n \"index\": target[\"var\"][\"index\"],\n \"dimensions\": array_dimensions,\n \"elem_type\": array_type,\n \"mutable\": is_mutable,\n }\n self.arrays[var_name] = array_info\n state.array_types[var_name] = array_type\n if array_type == \"string\":\n length = inputs[0][0][\"call\"][\"inputs\"][0][0][\"value\"]\n self.strings[var_name] = {\n \"length\": length,\n \"annotation\": False,\n \"annotated_assign\": True,\n }\n\n if (\n maybe_d_type_object_assign\n and object_type\n and object_type in self.derived_types_attributes\n and target_names[0]\n in self.derived_types_attributes[object_type]\n ):\n self.current_d_object_name = d_type_object_name\n is_d_type_object_assignment = True\n\n # If targets holds more than 1 variable information and\n # it's greater than the object attribute number, then\n # the derived type object is referencing more than\n # 1 attribute (i.e. x.k.v).\n if len(targets) > 1 and len(targets) > object_attr_num:\n object_attr_num += 1\n # Therefore, we do not want to go any further before\n # collecting all the information of the attribute\n # information, so we need to simply return back to the\n # beginning of loop and restart the process\n continue\n else:\n is_d_type_object_assignment = False\n\n variable_spec = self.generate_variable_definition(\n target_names,\n d_type_object_name,\n is_d_type_object_assignment,\n state,\n )\n\n # Do not add the variable spec if this is a string annotation\n # since this can collide with the variable spec of the first\n # string assignment.\n if not is_string_annotation:\n grfn[\"variables\"].append(variable_spec)\n\n # Since a Python class (derived type) object declaration has syntax\n # is __object_name__ = __class_name__, it's considered as an\n # assignment that will create __assign__ function GrFN,\n # which should not. Thus, simply return the [grfn] here to avoid\n # generating __assign__ function.\n if is_d_type_obj_declaration:\n return [grfn]\n\n # TODO Hack to not print lambda function for IO assigns. 
Need a\n # proper method to handle IO moving on\n for src in sources:\n if \"call\" in src:\n if self.check_io_variables(src[\"call\"][\"function\"]):\n io_source = True\n function = src[\"call\"][\"function\"]\n # Check if the source is a function call by comparing its\n # value with the list of functions in our program (\n # obtained from the mode mapper)\n for program_functions in self.mode_mapper[\"subprograms\"]:\n if (\n function\n in self.mode_mapper[\"subprograms\"][\n program_functions\n ]\n ):\n is_function_call = True\n\n if is_function_call:\n container_name = self.generate_container_id_name(\n self.fortran_file, [\"@global\"], function\n )\n function_name = {\"name\": container_name, \"type\": \"container\"}\n else:\n function_name = self.generate_function_name(\n \"__assign__\", variable_spec[\"name\"], None\n )\n # If current assignment process is for a derived type object (i.e\n # x.k), then\n if is_d_type_object_assignment:\n # (1) we need to add derived type object as function input.\n src = [\n {\n \"var\": {\n \"variable\": d_type_object_name,\n \"index\": state.last_definitions[\n d_type_object_name\n ],\n }\n }\n ]\n sources.extend(src)\n\n # (2) Generate the object name + attributes variable name\n new_var_name = d_type_object_name\n for target_name in target_names:\n new_var_name += f\"_{target_name}\"\n self.current_d_object_attributes.append(target_name)\n\n # (3) we need to modify thee target to be \"objectName_attribute\"\n # For example, variable: x_k and index: __index_of_x_y__.\n target[\"var\"] = {\n \"variable\": new_var_name,\n \"index\": state.last_definitions[new_var_name],\n }\n\n fn = self.make_fn_dict(function_name, target, sources, state)\n if len(fn) == 0:\n return []\n\n source_list = self.make_source_list_dict(sources)\n\n if not io_source and not is_function_call:\n lambda_string = self.generate_lambda_function(\n node,\n function_name[\"name\"],\n True,\n array_assignment,\n is_string_assign,\n is_d_type_object_assignment,\n source_list,\n state,\n False,\n )\n state.lambda_strings.append(lambda_string)\n\n grfn[\"functions\"].append(fn)\n # We need to cleanup the object attribute tracking list.\n self.current_d_object_attributes = []\n return [grfn]", "def rhs_as_python_func(self, namespace={}):\n rhs = self.rhs\n\n rhs = rhs.replace('!', ' not ')\n rhs = rhs.replace('&', ' and ')\n rhs = rhs.replace('|', ' or ')\n\n name_map = {\n 'true': 'True',\n 'false': 'False'\n }\n\n for frm, to in name_map.iteritems():\n rhs = MathUtil.str_expr_replacement(frm, to, rhs)\n\n lmda_str = \"lambda %s: %s\" % (','.join(self.rhs_names), rhs)\n return eval(lmda_str, str_to_npfunc_map, namespace)", "def mapdict(itemfunc, dictionary):\r\n return dict(map(itemfunc, dictionary.items()))", "def ast_eval(node):\n if isinstance(node, ast.Num):\n return node.n\n elif isinstance(node, ast.Str):\n return node.s\n elif isinstance(node, ast.Name) and node.id in NAMED_CONSTS:\n return NAMED_CONSTS[node.id]\n elif isinstance(node, ast.Tuple):\n return tuple(ast_eval(n) for n in node.elts)\n elif isinstance(node, ast.List):\n return [ast_eval(n) for n in node.elts]\n elif isinstance(node, ast.Dict):\n return zipdict(ast_eval(node.keys), ast_eval(node.values))\n else:\n raise ValueError(\"Don't know how to eval %s\" % node.__class__.__name__)", "def visitAssignment_statement(\n self, ctx: MPParser.Assignment_statementContext):\n expression = self.visit(ctx.expression())\n assignment_lhs_list = self.visit(ctx.assignment_lhs_list())\n\n rhs_list = assignment_lhs_list[1:] + 
[expression]\n\n # def compose(arg):\n # def h(x):\n # return Assign(x, arg)\n # return h\n # hoo = list(map(lambda x: compose(x), rhs_list))\n return [Assign(lhs, rhs)\n for lhs, rhs in zip(assignment_lhs_list, rhs_list)][::-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
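To make the behaviour of the `_assignment` record above concrete, here is a small hedged sketch plus a usage example. It rebuilds the same idea on top of the public `ast` module rather than the `_ast` alias used in the record, and the `make_assign` wrapper that parses a target string is an added convenience for the demo, not part of the original.

    import ast

    def make_assign(target_src):
        """Build an assigner for a target such as 'a' or '(a, (b, c))'."""
        # Parse a dummy assignment and keep only its target node.
        target = ast.parse('%s = None' % target_src).body[0].targets[0]

        def _names(node):
            if isinstance(node, ast.Tuple):
                return tuple(_names(child) for child in node.elts)
            return node.id  # an ast.Name

        def _assign(data, value, names=_names(target)):
            if isinstance(names, tuple):
                # Unpack nested tuple targets recursively.
                for idx, name in enumerate(names):
                    _assign(data, value[idx], name)
            else:
                data[names] = value

        return _assign

    scope = {}
    make_assign('(a, (b, c))')(scope, (1, (2, 3)))
    assert scope == {'a': 1, 'b': 2, 'c': 3}

The nested tuple target is flattened into plain dictionary keys, exactly as the query describes for the returned closure.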
Load the template with the given name. If the `filename` parameter is relative, this method searches the search path trying to locate a template matching the given name. If the file name is an absolute path, the search path is ignored. If the requested template is not found, a `TemplateNotFound` exception is raised. Otherwise, a `Template` object is returned that represents the parsed template. Template instances are cached to avoid having to parse the same template file more than once. Thus, subsequent calls of this method with the same template file name will return the same `Template` object (unless the ``auto_reload`` option is enabled and the file was changed since the last parse.) If the `relative_to` parameter is provided, the `filename` is interpreted as being relative to that path.
def load(self, filename, relative_to=None, cls=None, encoding=None):
    if cls is None:
        cls = self.default_class
    search_path = self.search_path

    # Make the filename relative to the template file its being loaded
    # from, but only if that file is specified as a relative path, or no
    # search path has been set up
    if relative_to and (not search_path or not os.path.isabs(relative_to)):
        filename = os.path.join(os.path.dirname(relative_to), filename)

    filename = os.path.normpath(filename)
    cachekey = filename

    self._lock.acquire()
    try:
        # First check the cache to avoid reparsing the same file
        try:
            tmpl = self._cache[cachekey]
            if not self.auto_reload:
                return tmpl
            uptodate = self._uptodate[cachekey]
            if uptodate is not None and uptodate():
                return tmpl
        except (KeyError, OSError):
            pass

        isabs = False

        if os.path.isabs(filename):
            # Bypass the search path if the requested filename is absolute
            search_path = [os.path.dirname(filename)]
            isabs = True

        elif relative_to and os.path.isabs(relative_to):
            # Make sure that the directory containing the including
            # template is on the search path
            dirname = os.path.dirname(relative_to)
            if dirname not in search_path:
                search_path = list(search_path) + [dirname]
            isabs = True

        elif not search_path:
            # Uh oh, don't know where to look for the template
            raise TemplateError('Search path for templates not configured')

        for loadfunc in search_path:
            if isinstance(loadfunc, str):
                loadfunc = directory(loadfunc)
            try:
                filepath, filename, fileobj, uptodate = loadfunc(filename)
            except IOError:
                continue
            else:
                try:
                    if isabs:
                        # If the filename of either the included or the
                        # including template is absolute, make sure the
                        # included template gets an absolute path, too,
                        # so that nested includes work properly without a
                        # search path
                        filename = filepath
                    tmpl = self._instantiate(cls, fileobj, filepath,
                                             filename, encoding=encoding)
                    if self.callback:
                        self.callback(tmpl)
                    self._cache[cachekey] = tmpl
                    self._uptodate[cachekey] = uptodate
                finally:
                    if hasattr(fileobj, 'close'):
                        fileobj.close()
                return tmpl

        raise TemplateNotFound(filename, search_path)

    finally:
        self._lock.release()
[ "def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()", "def loadTemplate(self, filename, path):\n # TODO what should \"path\" be relative to? 
I vote the Template file.\n relPath = os.path.join(self._templatePath, path)\n templateFile = os.path.join(os.path.normpath(relPath), filename)\n self._template, _ = xmlUtils.loadToTree(templateFile)", "def load_template(name: str) -> Template:\n if name not in _templates:\n with open(join(dirname(__file__), 'templates', name + '.j2'), 'r') as f:\n return Template(f.read())\n return _templates[name]", "def load_template(name=None):\n if name is None:\n name = \"default\"\n\n logger.info(\"Loading template with name %s\", name)\n try:\n template_file = open(\"%s/%s.yaml\" % (template_path, name))\n except IOError:\n raise TemplateNotFoundError\n\n template = yaml.safe_load(template_file)\n template_file.close()\n if \"extends\" in template:\n logger.debug(\"Merging %s with %s\", name, template[\"extends\"])\n template = _merge(load_template(template[\"extends\"]), template)\n\n return template", "def load_template(self, templatename, template_string=None):\r\n if template_string is not None:\r\n return self.template_class(template_string)\r\n\r\n if self.use_package_naming:\r\n divider = templatename.rfind('.')\r\n if divider >= 0:\r\n from pkg_resources import resource_filename\r\n package = templatename[:divider]\r\n basename = templatename[divider + 1:] + self.extension\r\n templatename = resource_filename(package, basename)\r\n\r\n return self.loader.load(templatename)", "def lookupTemplate(self, request):\n if self.template:\n return microdom.parseString(self.template, caseInsensitive=0, preserveCase=0)\n if not self.templateDirectory:\n mod = sys.modules[self.__module__]\n if hasattr(mod, '__file__'):\n self.templateDirectory = os.path.split(mod.__file__)[0]\n # First see if templateDirectory + templateFile is a file\n templatePath = os.path.join(self.templateDirectory, self.templateFile)\n if not os.path.exists(templatePath):\n raise RuntimeError, \"The template %r was not found.\" % templatePath\n # Check to see if there is an already parsed copy of it\n mtime = os.path.getmtime(templatePath)\n cachedTemplate = templateCache.get(templatePath, None)\n compiledTemplate = None\n\n if cachedTemplate is not None:\n if cachedTemplate[0] == mtime:\n compiledTemplate = templateCache[templatePath][1].cloneNode(deep=1)\n \n if compiledTemplate is None:\n compiledTemplate = microdom.parse(templatePath, caseInsensitive=0, preserveCase=0)\n templateCache[templatePath] = (mtime, compiledTemplate.cloneNode(deep=1))\n return compiledTemplate", "def _find_template(self, filename, start=0):\n\n filename = filename.lstrip(\"/\").replace(\"/\", os.sep)\n cachename = \":@@{0}@@:{1}\".format(start, filename)\n\n if not self._path:\n raise RestrictedError(\n \"Attempt to load template from empty search path: {0}\".format(filename)\n )\n\n if not cachename in self._find_cache:\n for (index, path) in enumerate(self._path[start:], start):\n new_filename = os.path.realpath(os.path.join(path, filename))\n if os.path.isfile(new_filename):\n self._find_cache[cachename] = (index, new_filename)\n break\n else:\n raise RestrictedError(\n \"Template not found along search path: {0}\".format(filename)\n )\n\n return self._find_cache[cachename]", "def load_template(name):\r\n tpl_config_file = None\r\n tpl_config = None\r\n for dirname in TEMPLATE_DIRS:\r\n candidate = os.path.join(dirname, name)\r\n if os.path.exists(candidate):\r\n tpl_config_file = os.path.join(candidate, 'config.json')\r\n if not os.path.exists(tpl_config_file):\r\n raise Exception(\"Found template '%s' at '%s' but it lacks config.json\" % (name, 
candidate))\r\n try:\r\n with open(tpl_config_file, 'r') as fp:\r\n tpl_config = json.load(fp)\r\n except ValueError:\r\n raise Exception(\"Can't load '%s' for template '%s': invalid JSON\" % (tpl_config_file, name))\r\n data_candidate = os.path.join(candidate, 'data')\r\n tpl_config['_data_dir'] = data_candidate if os.path.exists(data_candidate) else None\r\n break\r\n\r\n tpl_config['_sub_templates'] = []\r\n if 'extends' in tpl_config:\r\n for sub_tpl_name, sub_tpl_params in tpl_config['extends'].items():\r\n try:\r\n sub_tpl_config = load_template(sub_tpl_name)\r\n except Exception as e:\r\n raise Exception(\"Can't load dependency '%s' of '%s': %s\", sub_tpl_name, name, str(e))\r\n\r\n if 'include' in sub_tpl_params:\r\n sub_tpl_config['variables'] = [\r\n var\r\n for var\r\n in sub_tpl_config['variables']\r\n if var['name'] in sub_tpl_params['include']\r\n ]\r\n elif 'exclude' in sub_tpl_params:\r\n sub_tpl_config['variables'] = [\r\n var\r\n for var\r\n in sub_tpl_config['variables']\r\n if var['name'] not in sub_tpl_params['include']\r\n ]\r\n tpl_config['_sub_templates'].append(sub_tpl_config)\r\n\r\n tpl_config['internal_name'] = name\r\n return tpl_config", "def _load_template(self, template_file):\n pass", "def load(self,\n template_source,\n template_filename='',\n template_identifier='',\n template_encoding='utf-8',\n template_standard='xhtml',\n parser_parameters={}):\n assert template_standard in ('xml', 'xhtml')\n \n self.template_standard = template_standard\n \n # Determine the default template file name if possible\n if (not template_filename and\n not isinstance(template_source, basestring) and\n hasattr(template_source, 'name')):\n \n # Take the file name from the file object\n template_filename = template_source.name\n \n # Determine the template's identifier if possible\n if template_filename and not template_identifier:\n template_identifier = os.path.basename(template_filename).split('.', 1)[0].replace('-', '_')\n if not util.is_identifier(template_identifier):\n template_identifier = None\n \n # Store template names and encoding\n self.template_filename = template_filename or 'unnamed_template'\n self.template_identifier = template_identifier or 'unnamed_template'\n self.template_encoding = template_encoding\n \n # Load the template from a file object if needed\n if not isinstance(template_source, basestring):\n template_source = template_source.read()\n\n if constants.GENERATE_DEBUG_COMMENTS:\n self.template_lines = template_source.splitlines()\n # Allow indexing template lines from 1, since the element.sourceline\n # values are starting from 1, not zero\n self.template_lines.insert(0, '')\n\n # Create the appropriate parser and configure it\n kws = dict(\n encoding=template_encoding,\n resolve_entities=False,\n ns_clean=True)\n kws.update(parser_parameters)\n parser = etree.XMLParser(**kws)\n\n if self.template_standard == 'xhtml':\n #kws['load_dtd'] = True\n \n # Fail on existing DOCTYPE\n assert not template_source.lstrip().startswith('<!'), (\n \"Please remove the current <!DOCTYPE > definition or \"\n \"set the template_standard to 'xml'!\")\n \n # Prepend the DTD for the entities\n # FIXME: It would be faster to feed it to the parser before the document.\n template_source = constants.DOCTYPE_AND_HTML_ENTITIES + template_source\n \n # Parse and store the template\n self.template = etree.fromstring(template_source, parser)\n \n # Prepare namespace map and reverse map based on the actual\n # namespace declarations of the template loaded\n self.namespace_map = 
dict(\n (url, prefix)\n for prefix, url in self.template.nsmap.iteritems()\n if url not in constants.XML_NAMESPACES_PROCESSED)", "def get_template(self, template_name):\r\n template_name = self.config.join_path(self.template_name,\r\n template_name)\r\n\r\n if template_name in self.template_cache:\r\n return self.template_cache[template_name]\r\n rv = self.config.get_template(template_name)\r\n self.template_cache[template_name] = rv\r\n return rv", "def get_template(template_filename):\n return env.get_template(template_filename)", "def read_template(self, templateFileFull):\n templateDir = os.path.dirname(templateFileFull)\n templateFile = os.path.basename(templateFileFull)\n try:\n env = Environment(loader=FileSystemLoader(templateDir))\n self.template = env.get_template(templateFile)\n except Exception as e:\n print((\"Problem loading template {template} \"\n \"in {templateDir} \"\n ).format(template=templateFile, templateDir=templateDir))\n print(e)\n self.template = None\n return", "def get_template(self, template_name):\r\n return self.info.get_template(template_name)", "def _load_templates(self):\n assert self.dumped_context is not None\n\n say('loading templates...')\n\n context = self.dumped_context\n templates_path = build_embryo_filepath(self.path, 'templates')\n templates = {}\n\n if not os.path.isdir(templates_path):\n return templates\n\n for root, dirs, files in os.walk(templates_path):\n for fname in files:\n if fname.endswith('.swp'):\n continue\n\n # the file path may itself be templatized. here, we render the\n # filepath template using the context dict and read in the\n # template files.\n\n # fpath here is the templatized file path to the template\n fpath = os.path.join(root, fname)\n\n # rel_fpath is the path relative to the root templates dir\n rel_fpath = fpath.replace(templates_path, '').lstrip('/')\n\n # fname_template is the jinja2 Template for the rel_fpath str\n try:\n fname_template = self.jinja_env.from_string(rel_fpath)\n except TemplateSyntaxError:\n shout(\n 'could not render template '\n 'for file path string: {p}', p=fpath\n )\n raise\n\n # finally rendered_rel_fpath is the rendered relative path\n rendered_rel_fpath = fname_template.render(context)\n\n # now actually read the file into the resulting dict.\n try:\n templates[rendered_rel_fpath] = File.read(fpath)\n except Exception:\n raise TemplateLoadFailed(fpath)\n\n return templates", "def get_template(self, template_name, app_label=None, model_name=None):\n template, origin = self.find_template(template_name,\n app_label=app_label,\n model_name=model_name)\n if not hasattr(template, 'render'):\n # template needs to be compiled\n template = Template(template, origin, template_name, engine=self)\n return template", "def read_namespaced_template(self, name, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.read_namespaced_template_with_http_info(name, namespace, **kwargs)\n else:\n (data) = self.read_namespaced_template_with_http_info(name, namespace, **kwargs)\n return data", "def from_template(cls, filename, context, *args, mode='r', encoding=None,\n loader=json, **kwargs):\n\n with open(filename, mode=mode, encoding=encoding) as file:\n s = file.read(encoding=encoding)\n data = loader.loads(s.format(**context))\n\n return cls(data, *args, **kwargs)", "def parse_from_template(template_name):\n thisdir = os.path.split(__file__)[0]\n filename = os.path.join(\n thisdir, '..', 'templates', '%s.xml' % template_name\n )\n with open(filename, 'rb') 
as f:\n xml = f.read()\n return parse_xml(xml)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
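The search-path loop in the `load` record above expects each entry to be either a directory string or a callable that returns a `(filepath, filename, fileobj, uptodate)` tuple and raises `IOError` when it cannot serve the requested name. As a rough sketch of such a callback — the real `directory()` helper that the record wraps plain strings with may differ in detail — a filesystem-backed loader could look like this:

    import os

    def directory_loader(path):
        """Return a load callback that serves template files from *path*."""
        def _load(filename):
            filepath = os.path.join(path, filename)
            # A missing file raises OSError/IOError here, telling the caller's
            # loop to move on to the next search-path entry.
            fileobj = open(filepath, 'rb')
            mtime = os.path.getmtime(filepath)

            def _uptodate():
                # The cached template stays valid while the file is unchanged.
                try:
                    return os.path.getmtime(filepath) == mtime
                except OSError:
                    return False

            return filepath, filename, fileobj, _uptodate
        return _load

With `auto_reload` enabled, the `load` method in the record re-checks this `uptodate` callback on every cache hit and only reparses the file when it reports a change.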
Factory for a load function that delegates to other loaders depending on the prefix of the requested template path. The prefix is stripped from the filename when passing on the load request to the delegate. >>> load = prefixed(
def prefixed(**delegates):
    def _dispatch_by_prefix(filename):
        for prefix, delegate in list(delegates.items()):
            if filename.startswith(prefix):
                if isinstance(delegate, str):
                    delegate = directory(delegate)
                filepath, _, fileobj, uptodate = delegate(
                    filename[len(prefix):].lstrip('/\\')
                )
                return filepath, filename, fileobj, uptodate
        raise TemplateNotFound(filename, list(delegates.keys()))
    return _dispatch_by_prefix
[ "def register(prefix: bytes, loader: Callable):\n _loaders[prefix] = loader", "def create_loader(search_path_string=...):\n ...", "def autodelegate(prefix=''):\n def internal(self, arg):\n if '/' in arg:\n first, rest = arg.split('/', 1)\n func = prefix + first\n args = ['/' + rest]\n else:\n func = prefix + arg\n args = []\n \n if hasattr(self, func):\n try:\n return getattr(self, func)(*args)\n except TypeError:\n return web.notfound()\n else:\n return web.notfound()\n return internal", "def default_prefixer(sender, **kwargs):\n request = http.HttpRequest()\n request.META['SCRIPT_NAME'] = ''\n prefixer = urlresolvers.Prefixer(request)\n urlresolvers.set_url_prefix(prefixer)", "def templateLoader(loadname):\n with open(loadname, 'rb') as loadfile:\n settings = load(loadfile)\n \n return settings", "def loader(self,func):\n return LoaderWrapper(self,func)", "def add_prefix(path, prefix=\"E-\"): \n fname = os.path.basename(path)\n dname = os.path.dirname(path)\n return os.path.join(dname, prefix + fname)", "def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n search_path = self.search_path\r\n\r\n # Make the filename relative to the template file its being loaded\r\n # from, but only if that file is specified as a relative path, or no\r\n # search path has been set up\r\n if relative_to and (not search_path or not os.path.isabs(relative_to)):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n\r\n filename = os.path.normpath(filename)\r\n cachekey = filename\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[cachekey]\r\n if not self.auto_reload:\r\n return tmpl\r\n uptodate = self._uptodate[cachekey]\r\n if uptodate is not None and uptodate():\r\n return tmpl\r\n except (KeyError, OSError):\r\n pass\r\n\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = list(search_path) + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for loadfunc in search_path:\r\n if isinstance(loadfunc, str):\r\n loadfunc = directory(loadfunc)\r\n try:\r\n filepath, filename, fileobj, uptodate = loadfunc(filename)\r\n except IOError:\r\n continue\r\n else:\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested includes work properly without a\r\n # search path\r\n filename = filepath\r\n tmpl = self._instantiate(cls, fileobj, filepath,\r\n filename, encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[cachekey] = tmpl\r\n self._uptodate[cachekey] = uptodate\r\n finally:\r\n if hasattr(fileobj, 'close'):\r\n fileobj.close()\r\n return tmpl\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()", "def _load_file(file_path, base_path, loader_function, ignore_partitions):\n data = loader_function(file_path)\n\n if 
ignore_partitions:\n return data\n else:\n partitions = _take_partitions(file_path, base_path)\n return {**partitions, **data}", "def _get_prefix_parts(full_prefix):\n prefix_parts = full_prefix.split(\"/\")\n file_name, _, file_ext = prefix_parts[-1].partition(\".\")\n return FilePrefix(\n dirs=\"/\".join(prefix_parts[:-1]),\n filename=file_name,\n file_extension=file_ext,\n use_default_filename=(DEFAULT_FILENAME_TOKEN in full_prefix),\n )", "def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()", "def patch_load():\n import piglet.runtime\n\n saved = piglet.runtime.load\n piglet.runtime.load = lambda template, *args, **kwargs: template\n yield\n piglet.runtime.load = saved", "def register_custom_loader(self, format_name, loader_func):\n if not callable(loader_func):\n raise ValueError(\"loader_func must be callable\")\n self._loader_map[format_name] = loader_func", "def get_loader(spacing, patch_shape):\r\n loader = yaml.SafeLoader\r\n spacing_constructor = SpacingConstructor(spacing)\r\n patch_shape_constructor = PatchShapeConstructor(patch_shape)\r\n\r\n loader.add_constructor(\"!spacing\", spacing_constructor)\r\n loader.add_constructor(\"!patch_shape\", patch_shape_constructor)\r\n return loader", "def __init__(\n self,\n source_str: str,\n fname: str,\n templated_str: Optional[str] = None,\n sliced_file: Optional[List[TemplatedFileSlice]] = None,\n raw_sliced: Optional[List[RawFileSlice]] = None,\n 
):\n self.source_str = source_str\n # An empty string is still allowed as the templated string.\n self.templated_str = source_str if templated_str is None else templated_str\n # If no fname, we assume this is from a string or stdin.\n self.fname = fname\n # Assume that no sliced_file, means the file is not templated\n self.sliced_file: List[TemplatedFileSlice]\n if sliced_file is None:\n if self.templated_str != self.source_str: # pragma: no cover\n raise ValueError(\"Cannot instantiate a templated file unsliced!\")\n # If we get here and we don't have sliced files,\n # then it's raw, so create them.\n self.sliced_file = [\n TemplatedFileSlice(\n \"literal\", slice(0, len(source_str)), slice(0, len(source_str))\n )\n ]\n assert (\n raw_sliced is None\n ), \"Templated file was not sliced, but not has raw slices.\"\n self.raw_sliced: List[RawFileSlice] = [\n RawFileSlice(source_str, \"literal\", 0)\n ]\n else:\n self.sliced_file = sliced_file\n assert raw_sliced is not None, \"Templated file was sliced, but not raw.\"\n self.raw_sliced = raw_sliced\n\n # Precalculate newlines, character positions.\n self._source_newlines = list(iter_indices_of_newlines(self.source_str))\n self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))\n\n # Consistency check raw string and slices.\n pos = 0\n rfs: RawFileSlice\n for rfs in self.raw_sliced:\n assert rfs.source_idx == pos, (\n \"TemplatedFile. Consistency fail on running source length\"\n f\": {pos} != {rfs.source_idx}\"\n )\n pos += len(rfs.raw)\n assert pos == len(self.source_str), (\n \"TemplatedFile. Consistency fail on total source length\"\n f\": {pos} != {len(self.source_str)}\"\n )\n\n # Consistency check templated string and slices.\n previous_slice = None\n tfs: Optional[TemplatedFileSlice] = None\n for tfs in self.sliced_file:\n if previous_slice:\n if tfs.templated_slice.start != previous_slice.templated_slice.stop:\n raise SQLFluffSkipFile( # pragma: no cover\n \"Templated slices found to be non-contiguous. 
\"\n f\"{tfs.templated_slice} (starting\"\n f\" {self.templated_str[tfs.templated_slice]!r})\"\n f\" does not follow {previous_slice.templated_slice} \"\n \"(starting \"\n f\"{self.templated_str[previous_slice.templated_slice]!r}\"\n \")\"\n )\n else:\n if tfs.templated_slice.start != 0:\n raise SQLFluffSkipFile( # pragma: no cover\n \"First Templated slice not started at index 0 \"\n f\"(found slice {tfs.templated_slice})\"\n )\n previous_slice = tfs\n if self.sliced_file and templated_str is not None:\n if tfs.templated_slice.stop != len(templated_str):\n raise SQLFluffSkipFile( # pragma: no cover\n \"Length of templated file mismatch with final slice: \"\n f\"{len(templated_str)} != {tfs.templated_slice.stop}.\"\n )", "def load_template(self, templatename, template_string=None):\r\n if template_string is not None:\r\n return self.template_class(template_string)\r\n\r\n if self.use_package_naming:\r\n divider = templatename.rfind('.')\r\n if divider >= 0:\r\n from pkg_resources import resource_filename\r\n package = templatename[:divider]\r\n basename = templatename[divider + 1:] + self.extension\r\n templatename = resource_filename(package, basename)\r\n\r\n return self.loader.load(templatename)", "def _load_template(self, template_file):\n pass", "def load_prefix_includes(config, settings):\n lines = [\n l for l in settings.get('pyramid.route_includes', '').split('\\n') if l\n ]\n for line in lines:\n mod, prefix = [i.strip() for i in line.split(',')]\n get_log().info(\n \"Loading module %r with route prefix %r\" % (mod, prefix)\n )\n # '/' is the same as no prefix at all, but useful to denote the root\n # module in the config file\n if prefix == '/':\n prefix = None\n config.include(mod, route_prefix=prefix)", "def dynamicLoad():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a template specified in python 'dot' notation, or load one from a string.
def load_template(self, templatename, template_string=None):
    if template_string is not None:
        return self.template_class(template_string)

    if self.use_package_naming:
        divider = templatename.rfind('.')
        if divider >= 0:
            from pkg_resources import resource_filename
            package = templatename[:divider]
            basename = templatename[divider + 1:] + self.extension
            templatename = resource_filename(package, basename)

    return self.loader.load(templatename)
[ "def _find_template(self, filename, start=0):\n\n filename = filename.lstrip(\"/\").replace(\"/\", os.sep)\n cachename = \":@@{0}@@:{1}\".format(start, filename)\n\n if not self._path:\n raise RestrictedError(\n \"Attempt to load template from empty search path: {0}\".format(filename)\n )\n\n if not cachename in self._find_cache:\n for (index, path) in enumerate(self._path[start:], start):\n new_filename = os.path.realpath(os.path.join(path, filename))\n if os.path.isfile(new_filename):\n self._find_cache[cachename] = (index, new_filename)\n break\n else:\n raise RestrictedError(\n \"Template not found along search path: {0}\".format(filename)\n )\n\n return self._find_cache[cachename]", "def lookupTemplate(self, request):\n if self.template:\n return microdom.parseString(self.template, caseInsensitive=0, preserveCase=0)\n if not self.templateDirectory:\n mod = sys.modules[self.__module__]\n if hasattr(mod, '__file__'):\n self.templateDirectory = os.path.split(mod.__file__)[0]\n # First see if templateDirectory + templateFile is a file\n templatePath = os.path.join(self.templateDirectory, self.templateFile)\n if not os.path.exists(templatePath):\n raise RuntimeError, \"The template %r was not found.\" % templatePath\n # Check to see if there is an already parsed copy of it\n mtime = os.path.getmtime(templatePath)\n cachedTemplate = templateCache.get(templatePath, None)\n compiledTemplate = None\n\n if cachedTemplate is not None:\n if cachedTemplate[0] == mtime:\n compiledTemplate = templateCache[templatePath][1].cloneNode(deep=1)\n \n if compiledTemplate is None:\n compiledTemplate = microdom.parse(templatePath, caseInsensitive=0, preserveCase=0)\n templateCache[templatePath] = (mtime, compiledTemplate.cloneNode(deep=1))\n return compiledTemplate", "def load_template(mol) :\n filename = os.path.join(PROT_INFO_PATH,\"template_%s.txt\"%mol)\n if os.path.isfile(filename) :\n return ProteinTemplate(filename)\n else :\n raise Exception(\"Invalid mol (%s) or file is missing (%s)\"%(mol,filename))", "def load_template(name: str) -> Template:\n if name not in _templates:\n with open(join(dirname(__file__), 'templates', name + '.j2'), 'r') as f:\n return Template(f.read())\n return _templates[name]", "def find_template(path, curr_path, config_paths):\n # type: (str, Optional[Union[str, Path]], List[Union[str, Path]])\n # -> Optional[Path]\n path = Path(path)\n if path.is_absolute():\n return path\n\n if not curr_path: # pragma: no cover\n curr_path = Path('.')\n else:\n curr_path = Path(curr_path).parent\n\n config_paths = config_paths + [curr_path]\n for cpath in config_paths:\n thepath = Path(cpath) / path\n if thepath.is_file():\n return thepath\n return None", "def uiTemplate(string, exists=bool):\n pass", "def resolve_dotted_name(name: str) -> typing.Any:\n if not isinstance(name, str):\n return name # already an object\n names = name.split(\".\")\n used = names.pop(0)\n found = __import__(used)\n for n in names:\n used += \".\" + n\n try:\n found = getattr(found, n)\n except AttributeError:\n __import__(used)\n found = getattr(found, n)\n\n return found", "def load_dotted_path(dotted_path, raise_=True, reload=False):\n obj, module = None, None\n\n parsed = _validate_dotted_path(dotted_path, raise_=raise_)\n\n if parsed:\n mod, name = parsed\n\n try:\n module = importlib.import_module(mod)\n except ImportError as e:\n if raise_:\n # we want to raise ethe same error type but chaining exceptions\n # produces a long verbose output, so we just modify the\n # original message to add more context, 
it's ok to hide the\n # original traceback since it will just point to lines\n # in the importlib module, which isn't useful for the user\n e.msg = ('An error happened when trying to '\n 'import dotted path \"{}\": {}'.format(\n dotted_path, str(e)))\n raise\n\n if module:\n if reload:\n module = importlib.reload(module)\n\n try:\n obj = getattr(module, name)\n except AttributeError as e:\n if raise_:\n # same as in the comment above\n e.args = (\n 'Could not get \"{}\" from module '\n '\"{}\" (loaded from: {}), make sure it is a valid '\n 'callable defined in such module'.format(\n name, mod, module.__file__), )\n raise\n return obj\n else:\n if raise_:\n raise ValueError(\n 'Invalid dotted path value \"{}\", must be a dot separated '\n 'string, with at least '\n '[module_name].[function_name]'.format(dotted_path))", "def read_template(self, template, space=None):\n pass", "def templateLoader(loadname):\n with open(loadname, 'rb') as loadfile:\n settings = load(loadfile)\n \n return settings", "def load_template(name=None):\n if name is None:\n name = \"default\"\n\n logger.info(\"Loading template with name %s\", name)\n try:\n template_file = open(\"%s/%s.yaml\" % (template_path, name))\n except IOError:\n raise TemplateNotFoundError\n\n template = yaml.safe_load(template_file)\n template_file.close()\n if \"extends\" in template:\n logger.debug(\"Merging %s with %s\", name, template[\"extends\"])\n template = _merge(load_template(template[\"extends\"]), template)\n\n return template", "def get(self, template):\n tenant = tenant_handler.tenant()\n pos = template.rfind('.')\n if pos != -1:\n format = template[pos + 1:]\n template = template[:pos]\n else:\n format = 'pdf'\n return get_document(tenant, template, format)", "def _load_template(self, template_file):\n pass", "def get_template(template_filename):\n return env.get_template(template_filename)", "def _get_template(settings):\r\n puts(\"\\nPick a template\\n\")\r\n template = None\r\n while not template:\r\n _list_templates(settings)\r\n index = raw_input(\"\\nWhich template would you like to use? 
[1] \")\r\n if not index:\r\n index = \"1\"\r\n try:\r\n index = int(index) - 1\r\n return settings.config[\"project_templates\"][index]\r\n except:\r\n puts(\"\\\"{0}\\\" isn't a valid option!\".format(colored.red(\"{0}\".format(index))))\r\n pass", "def find_matching_template(question, templates):\n for t in templates.values():\n for r in t[\"regexes\"]:\n if r.fullmatch(question[\"question\"]) is not None:\n return t\n assert False, \"No template found\"", "def LoadSchemeTemplate(root, name):\n path = os.path.join(root, 'ios', 'build', 'tools', name + '.template')\n with open(path) as file:\n return Template(file.read())", "def replace_template_path(path):\n segments = path.split(\".\")\n module = \".\".join(segments[0:-1])\n name = segments[-1]\n if module == \"ipypublish.html.ipypublish\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"ipy-{0}.html-tplx.json\".format(name),\n }\n elif module == \"ipypublish.html.standard\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"std-{0}.html-tplx.json\".format(name),\n }\n elif module == \"ipypublish.latex.standard\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"std-{0}.latex-tpl.json\".format(name),\n }\n elif module == \"ipypublish.latex.ipypublish\":\n return {\n \"module\": \"ipypublish.templates.segments\",\n \"file\": \"ipy-{0}.latex-tpl.json\".format(name),\n }\n else:\n print(\"Warning: unknown template path: {}\".format(path))\n return {\"module\": module, \"file\": \"{0}.json\".format(name)}", "def app_model_templates_loader(template_name, template_dirs=None):\r\n for path in get_template_sources(template_name, template_dirs):\r\n logging.debug(\"Looking for tempalte: %s\" % path)\r\n try:\r\n return (open(path).read().decode(settings.FILE_CHARSET), path)\r\n except IOError:\r\n pass\r\n raise TemplateDoesNotExist, template_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render the template to a string using the provided info.
def render(self, info, format=None, fragment=False, template=None):
    kwargs = self._get_render_options(format=format, fragment=fragment)
    return self.transform(info, template).render(**kwargs)
[ "def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def render_template(self, *args, **kwargs):\n return self.renderer.render(*args, **kwargs)", "def render(self, template, **kw):\n t = jinja_env.get_template(template) \n self.response.out.write(t.render(kw))", "def _render_template(*args, **kwargs):\n rendered_template = render_template(*args, **kwargs, environment=current_app.config['ENVIRONMENT'], base_url=app.config['SERVER_BASE_URL'], alert_message=current_app.config['ALERT_MESSAGE'], disable_full_ads_link=current_app.config['DISABLE_FULL_ADS_LINK'])\n return rendered_template", "def renderTemplate(self,template_path,context):\n html = render_to_string(template_path,context)\n return html", "def render_to_string(template_name, context):\n return Engine(app_dirs=True).render_to_string(template_name, context=context)", "def render_template(self, template_name, output_name, context):\n raise NotImplementedError()", "def render_template(context=None, template=\"default.jinja2\", cls=True):\n if not context:\n context = {}\n screen_cleaner(cls)\n template = env.get_template(template)\n print(template.render(**context))", "def render_to(self, path, template, **data):\n html = self.render(template, **data)\n return open(path, \"w\").write(html.encode(charset))", "def render_template(name, context=None, type='html'):\n return template.render(get_template_path('%s.%s'% (name, type)), context)", "def render_template(text, context=None):\n template = engines[\"django\"].from_string(text)\n if not context:\n context = {}\n return template.render(context)", "def render(self, *args: Any, **globals_: Any) -> str:\n try:\n return self._mako_def_template.render(*args, **globals_)\n except RenderError as e:\n # _render() can be called by a chain of templates which call each other;\n # passing the original render error to the top so that it could be handled there.\n raise\n except Exception as e:\n # TODO: we could collect mako.exceptions.text_error_template().render() here,\n # because ideally it should point to the line where the error occurred,\n # but for some reason it doesn't. 
So we don't bother for now.\n raise RenderError(e, args, globals_, self.source)", "def render_to_string(self, request, context):\n if not self.template_name or not self.has_content:\n return ''\n\n user = request.user\n last_visited = context.get('last_visited')\n\n new_context = context.flatten()\n\n try:\n new_context.update({\n 'entry': self,\n 'entry_is_new': (\n user.is_authenticated and\n last_visited is not None and\n self.is_entry_new(last_visited=last_visited,\n user=user)),\n 'show_entry_statuses_area': (\n self.entry_pos !=\n BaseReviewRequestPageEntry.ENTRY_POS_INITIAL),\n })\n new_context.update(self.get_extra_context(request, context))\n except Exception as e:\n logger.exception('Error generating template context for %s '\n '(ID=%s): %s',\n self.__class__.__name__, self.entry_id, e,\n extra={'request': request})\n return ''\n\n try:\n return render_to_string(template_name=self.template_name,\n context=new_context,\n request=request)\n except Exception as e:\n logger.exception('Error rendering template for %s (ID=%s): %s',\n self.__class__.__name__, self.entry_id, e,\n extra={'request': request})\n return ''", "def render_template(path, args):\n return read_template(path).render(**args)", "def render_string(self, template_name, **kwargs):\n if 'tornado' == settings['TEMPLATE_ENGINE']:\n return super(BaseHandler, self).render_string(template_name, **kwargs)\n elif 'jinja2' == settings['TEMPLATE_ENGINE']:\n return jinja2_render(template_name, **kwargs)\n else:\n raise errors.SettingsError(\n '%s is not a supported TEMPLATE_ENGINE, should be `tornado` or `jinja2`'\n % settings['TEMPLATE_ENGINE'])", "def render(\n request, template_name, context=None, content_type=None, status=None, using=None\n):\n content = loader.render_to_string(template_name, context, request, using=using)\n return HttpResponse(content, content_type, status)", "def render(self):\n ctx = self.context.copy()\n ctx.content = renderTemplate(self.transform(), ctx)\n layout = self.layouts.get(ctx.layout)\n if layout:\n return renderTemplate(layout.content, ctx)\n else:\n return ctx.content", "def render_template(process, template_string, context):\n from resolwe.flow.managers import manager\n\n # Get the appropriate expression engine. If none is defined, do not evaluate\n # any expressions.\n expression_engine = process.requirements.get('expression-engine', None)\n if not expression_engine:\n return template_string\n\n return manager.get_expression_engine(expression_engine).evaluate_block(template_string, context)", "def render(always, template, dest, **kwargs):\n\n dest = plat.path(dest)\n\n if (not always) and os.path.exists(dest):\n return\n\n template = environment.get_template(template)\n text = template.render(**kwargs)\n\n f = file(dest, \"wb\")\n f.write(text.encode(\"utf-8\"))\n f.close()", "def simple_render(template, context):\n\n def parse_token(token, in_tag):\n if not in_tag:\n return token\n var = token[2:-2].strip()\n return context.get(var, '')\n\n result = []\n in_tag = False\n\n for token in tag_re.split(template):\n if token:\n result.append(parse_token(token, in_tag))\n in_tag = not in_tag\n\n return ''.join(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify more complex nesting using otherwise.
def test_complex_nesting_otherwise(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:choose="1">
        <div py:when="1" py:choose="2">
          <span py:when="1">FAIL</span>
          <span py:otherwise="">OK</span>
        </div>
      </div>
    </doc>""")
    self.assertEqual("""<doc>
      <div>
        <div>
          <span>OK</span>
        </div>
      </div>
    </doc>""", tmpl.generate().render(encoding=None))
[ "def is_nested(self, ):\n\t\tpass", "def IsNestedFamORAssem(self) -> bool:", "def IsNestedFamANDAssem(self) -> bool:", "def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertFalse(res['finally'])", "def test33(self):\n self.check('aDict.nestedDict.one')", "def is_nested(input):\n return is_sequence(input) or isinstance(input, dict)", "def test31(self):\n self.check('aDict.nestedDict')", "def test_nested(dataset):\n failure = \"flat_legacy\" in dataset or \"directory_default\" in dataset or \"fs_default\" in dataset\n verify(Array(store=NestedDirectoryStore(dataset)), failure)", "def test_unbalanced_overlapping_bracket():\n assert multi_bracket_validation('([)]') is False", "def is_nested(self, raise_exception=False):\n\n ###### Check if initial_data is a MultiValueDIct ############\n ###### Convert it to a dict object ##########################\n\n if hasattr(self._initial_data, 'getlist'):\n raw_data = {}\n\n for key, value in dict(self._initial_data).items():\n if len(value) > 1:\n raw_data[key] = value\n else:\n raw_data[key] = value[0]\n\n self._initial_data = raw_data\n\n #############################################################\n\n is_mapping = isinstance(self._initial_data, Mapping)\n conditions = [is_mapping]\n\n #############################################################\n\n if not is_mapping and raise_exception:\n raise ValueError('`data` is not a map type')\n\n #############################################################\n\n matched_keys = []\n\n for key in self._initial_data.keys():\n if self.str_is_nested(key):\n matched_keys.append(True)\n break\n else:\n matched_keys.append(False)\n\n conditions += [any(matched_keys)]\n\n #############################################################\n\n if not any(matched_keys) and raise_exception:\n raise ValueError('`data` is not a nested type')\n\n #############################################################\n\n if all(conditions):\n self._validated_data = self._initial_data\n self.__run__()\n\n return all(conditions)", "def test_optional_group_mixed_children_all_missing(self):\n\n @environ.config(prefix=\"PARENT\")\n class WithOptionalChild:\n @environ.config(prefix=\"CHILD\")\n class Child:\n grandchild_a = environ.var()\n grandchild_b = environ.var(\"FOO\")\n\n child = environ.group(Child, optional=True)\n\n cfg = environ.to_config(WithOptionalChild, {})\n assert cfg.child is None", "def IsNestedFamily(self) -> bool:", "def testTryExceptElseFinallyTrailing(self):\n token = self.parser.parse(\n filename='evo/TryExceptElseFinallyTrailing.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])", "def test_unbalanced_bracket_2():\n assert multi_bracket_validation('(](') is False", "def test55(self):\n self.check('aDict.nestedDict.aClass')", "def test_requirements_single_nested_item(self):\n # TODO pending revision\n pass", "def test_recursion_depth(self):\n def recurse(arr):\n with recursion_depth('test_recursion_depth') as recursion_level:\n if recursion_level > 10:\n raise Exception('error')\n if arr and arr.pop():\n recurse(arr)\n\n recurse(range(0,10))\n self.assertRaises(Exception, recurse, range(0,11))\n recurse(range(0,10))\n self.assertRaises(Exception, recurse, range(0,11))", "def is_nested(collection: Iterable) -> bool:\n return 
all(map(not_(is_atom), collection))", "def testTryExceptElseFinally(self):\n token = self.parser.parse(filename='evo/TryExceptElseFinally.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a when directive with a strip directive actually strips off the outer element.
def test_when_with_strip(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:choose="" py:strip="">
        <span py:otherwise="">foo</span>
      </div>
    </doc>""")
    self.assertEqual("""<doc>
      <span>foo</span>
    </doc>""", tmpl.generate().render(encoding=None))
[ "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_function_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:def=\"echo(what)\" py:strip=\"\">\r\n <b>${what}</b>\r\n </div>\r\n ${echo('foo')}\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <b>foo</b>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_namespace_on_removed_elem(self):\r\n tmpl = MarkupTemplate(\"\"\"<?xml version=\"1.0\"?>\r\n <Test xmlns:py=\"http://genshi.edgewall.org/\">\r\n <Size py:if=\"0\" xmlns:t=\"test\">Size</Size>\r\n <Item/>\r\n </Test>\"\"\")\r\n self.assertEqual(\"\"\"<?xml version=\"1.0\"?>\\n<Test>\r\n \r\n <Item/>\r\n </Test>\"\"\", str(tmpl.generate()))", "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def testNonWidgetChild(self):\n try:\n class T(wd.RepeatingWidget):\n child = \"\"\n self.assert_(False)\n except pm.ParameterError:\n self.assert_(True)", "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def test_clean_uses_noop_sanitizer(self):\n mixin = SanitizerMixin()\n self.assertEqual(noop, mixin.get_sanitizer())", "def is_strip_closed(self, skey):\n\n\t\treturn not self.is_edge_on_boundary(*self.strip[skey][0])", "def test_single_strip_with_exif_checksum(self):\n self._evaluate_checksums(\"t_one_strip_with_exif\")", "def test_remove_disabled_parts_include(self):\n text = 'text <nowiki>tag</nowiki> text'\n self.assertEqual(\n textlib.removeDisabledParts(text, include=['nowiki']), text)", "def test_list_of_non_modulatory_phrases_is_empty_for_pieces_with_heavy_polymodal_frame():\n assert piece3.non_modulatory_phrases == []\n assert piece4.non_modulatory_phrases == []", "def dissert_select(strip):\n global _dissert\n _dissert = strip", "def instr_stripped_gen(self):\n yield from [x.strip() for x in self.instructions.splitlines() if len(x.strip()) > 0]", "def testEmpty(self):\n r = twilio.Response()\n r.append(twilio.Sms(\"\"))\n r = self.strip(r)\n self.assertEquals(r, '<Response><Sms/></Response>')", "def build_stripped(self, obj):\n stripped_path = build_common.get_stripped_path(obj)\n assert stripped_path, 'build_stripped takes path under out/target/<target>'\n self.build(stripped_path, 'strip', inputs=obj)", "def test_include_preventing_itself(self):\n t = parse(\"\"\"\n foo: 1\n include \"example\" if foo else \"\"\n \"\"\",\n example=\"\"\"\n foo: 0\n \"\"\")\n self.assertRaises(errors.ParadoxError, t.resolve)", "def edge_strip(self, edge):\n\n\t\tfor strip, edges in 
self.strip.items():\n\t\t\tif edge in edges or tuple(reversed(edge)) in edges:\n\t\t\t\treturn strip" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a `when` directive outside of a `choose` directive is reported as an error.
def test_when_outside_choose(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:when="xy" />
    </doc>""")
    self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_value_if_not_asked__value_if_not_asked_is_not_option__raises_exception():\n with pytest.raises(\n ValueError,\n match=\"The value_if_not_asked is not one of the options.\",\n ):\n Choice(\n SOME_NAME,\n SOME_STRING,\n SOME_OPTIONS,\n SOME_DEFAULT,\n should_ask=lambda answers: True,\n value_if_not_asked=\"a\",\n )", "def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"", "def test_models_edx_ui_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.name == \"problem_check\"", "def test_models_edx_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.page == \"x_module\"", "def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )", "def test_models_edx_ui_problem_show_with_valid_statement(statement):\n assert statement.event_type == \"problem_show\"\n assert statement.name == \"problem_show\"", "def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})", "def test_invalid_dropdown_xml(self):\n problem_xml = textwrap.dedent(\"\"\"\n <problem>\n <optionresponse>\n <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown\n problems. Edit this component to replace this template with your own assessment.</p>\n <label>Add the question text, or prompt, here. This text is required.</label>\n <description>You can add an optional tip or note related to the prompt like this. 
</description>\n <optioninput>\n <option correct=\"False\">an incorrect answer</option>\n <option correct=\"True\">the correct answer</option>\n <option correct=\"True\">an incorrect answer</option>\n </optioninput>\n </optionresponse>\n </problem>\n \"\"\")\n with pytest.raises(Exception):\n CapaFactory.create(xml=problem_xml)", "def test_check_options_exception(self, hp, opts):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_is_in_options(hp, opts, msg=\"XXX\")", "def test_models_edx_problem_rescore_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_rescore_fail\"\n assert statement.page == \"x_module\"", "def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def check_choice_validity(tokens_choice_inside):\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. \" +\n \"Did you forget to escape the choice separator?\")", "def test_models_edx_ui_problem_reset_with_valid_statement(statement):\n assert statement.event_type == \"problem_reset\"\n assert statement.name == \"problem_reset\"", "def expect(condition, error_msg, exc_type=SystemExit, error_prefix=\"ERROR:\"):\n###############################################################################\n if not condition:\n msg = error_prefix + \" \" + error_msg\n raise exc_type(msg)", "def test_models_edx_ui_problem_graded_with_valid_statement(statement):\n assert statement.event_type == \"problem_graded\"\n assert statement.name == \"problem_graded\"", "def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that an `otherwise` directive outside of a `choose` directive is reported as an error.
def test_otherwise_outside_choose(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:otherwise="" />
    </doc>""")
    self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def testSyntaxErrorDoubleElse(self):\n template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)", "def test_value_if_not_asked__value_if_not_asked_is_not_option__raises_exception():\n with pytest.raises(\n ValueError,\n match=\"The value_if_not_asked is not one of the options.\",\n ):\n Choice(\n SOME_NAME,\n SOME_STRING,\n SOME_OPTIONS,\n SOME_DEFAULT,\n should_ask=lambda answers: True,\n value_if_not_asked=\"a\",\n )", "def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)", "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )", "def test_incorrect_else_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Else_Stmt(line)\n assert \"Cpp_Else_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"", "def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertFalse(res['finally'])", "def test_invalid_dropdown_xml(self):\n problem_xml = textwrap.dedent(\"\"\"\n <problem>\n <optionresponse>\n <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown\n problems. Edit this component to replace this template with your own assessment.</p>\n <label>Add the question text, or prompt, here. This text is required.</label>\n <description>You can add an optional tip or note related to the prompt like this. 
</description>\n <optioninput>\n <option correct=\"False\">an incorrect answer</option>\n <option correct=\"True\">the correct answer</option>\n <option correct=\"True\">an incorrect answer</option>\n </optioninput>\n </optionresponse>\n </problem>\n \"\"\")\n with pytest.raises(Exception):\n CapaFactory.create(xml=problem_xml)", "def verify_template_is_not_available(self,template):\n field=npsp_lex_locators[\"adv_mappings\"][\"field_mapping\"].format(\"Template\")\n self.selenium.click_element(field)\n element=self.selenium.get_webelement(field)\n status=element.get_attribute(\"aria-activedescendant\")\n if status is not None:\n self.selenium.page_should_not_contain(template)\n else:\n self.selenium.wait_until_page_contains(\"Default Gift Entry Template\")\n self.selenium.page_should_not_contain(template) \n self.selenium.click_button(\"Cancel\")", "def check_choice_validity(tokens_choice_inside):\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. \" +\n \"Did you forget to escape the choice separator?\")", "def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_preference_invalid_fail(lfric_sst):\n bbox = panel(\"africa\")\n emsg = \"Expected a preference of 'cell' or 'center' or 'point'\"\n with pytest.raises(ValueError, match=emsg):\n _ = bbox.enclosed(lfric_sst, preference=\"invalid\")", "def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})", "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def test_incorrect_endif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Endif_Stmt(line)\n assert \"Cpp_Endif_Stmt: '{0}'\".format(line) in str(excinfo.value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a `when` directive that doesn't have a `test` attribute is reported as an error.
def test_when_without_test(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:choose="" py:strip="">
        <py:when>foo</py:when>
      </div>
    </doc>""")
    self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_models_edx_problem_check_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_check_fail\"\n assert statement.page == \"x_module\"", "def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )", "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_models_edx_ui_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.name == \"problem_check\"", "def test_models_edx_problem_check_with_valid_statement(statement):\n assert statement.event_type == \"problem_check\"\n assert statement.page == \"x_module\"", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n MeasurementRun(\"name\", spec=Condition(\"value of pi\", value=NominalReal(3.14159, '')))\n with pytest.raises(TypeError):\n MeasurementRun(\"name\", material=FileLink(\"filename\", \"url\"))", "def test_invalid(sourcextractor):\n run = sourcextractor('--this-is-not-a-valid-flag')\n assert run.exit_code > 0\n assert 'unrecognised' in run.stderr", "def test_models_edx_problem_rescore_fail_with_valid_statement(statement):\n assert statement.event_type == \"problem_rescore_fail\"\n assert statement.page == \"x_module\"", "def test_incorrect_if_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_If_Stmt(line)\n assert \"Cpp_If_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_signal_describe_fail():\n signal = Signal(name=\"the_none_signal\", value=None)\n with pytest.raises(ValueError) as excinfo:\n signal.describe()\n assert \"failed to describe 'the_none_signal' with value 'None'\" in str(\n excinfo.value\n )", "def test_extract_device_name_invalid():\n with pytest.raises(AttributeError):\n assert grml2usb.extract_device_name(\"/dev\")\n with pytest.raises(AttributeError):\n assert grml2usb.extract_device_name(\"foobar\")", "def raises_assertion():\n return pytest.raises(AssertionError)", "def test_dont_raise(self):\n with self.assert_doesnt_raise():\n pass", "def test_models_edx_reset_problem_fail_with_valid_statement(statement):\n assert statement.event_type == \"reset_problem_fail\"\n assert statement.page == \"x_module\"", "def test_invalid_spec(self):\n self.reject(\"test\")\n self.reject(13)\n self.reject([])\n self.reject({\"IN\": {}})\n self.reject({\"IN\": [{\"bad-name\": True}]})", "def test_incorrect_elif_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Elif_Stmt(line)\n assert \"Cpp_Elif_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_invalid_spec(self):\n invalid_spec = {\n \"target\": \"abc\",\n \"modes\": 2,\n \"compiler\": [\"Xcov\"],\n }\n with pytest.raises(\n ValueError, match=r\"missing the following keys: \\['gate_parameters', 'layout'\\]\"\n ):\n Device(spec=invalid_spec)", "def test_models_edx_ui_problem_show_with_valid_statement(statement):\n assert statement.event_type == \"problem_show\"\n assert statement.name == 
\"problem_show\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that an `otherwise` directive can be used without a `test` attribute.
def test_otherwise_without_test(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:choose="" py:strip="">
        <py:otherwise>foo</py:otherwise>
      </div>
    </doc>""")
    self.assertEqual("""<doc>
      foo
    </doc>""", tmpl.generate().render(encoding=None))
[ "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_complex_nesting_otherwise(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"1\">\r\n <div py:when=\"1\" py:choose=\"2\">\r\n <span py:when=\"1\">FAIL</span>\r\n <span py:otherwise=\"\">OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <div>\r\n <div>\r\n <span>OK</span>\r\n </div>\r\n </div>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def testSyntaxErrorDoubleElse(self):\n template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)", "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def testTagPresenceElse(self):\n template = '{{ ifpresent [tag] }} yes {{ else }} no {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' yes')\n self.assertEqual(self.parse(template), ' no')", "def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)", "def testLoopAbsentIndex(self):\n template = '{{ for item in [tag:absent] }} x {{ endfor }}'\n self.assertFalse(self.parse(template, tag='absent'))", "def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )", "def test_incorrect_else_stmt(line):\n with pytest.raises(NoMatchError) as excinfo:\n _ = Cpp_Else_Stmt(line)\n assert \"Cpp_Else_Stmt: '{0}'\".format(line) in str(excinfo.value)", "def test_standard_failure(self):\n class Resource(object):\n @guard.guard(make_checker(False))\n def denied(self, request):\n pass\n request = http.Request.blank('/')\n try:\n Resource().denied(request)\n except http.UnauthorizedError, e:\n response = e.make_response()\n assert response.headers['Content-Type'] == 'text/plain'\n assert response.body == \"\"\"401 Unauthorized\\n\\nchecker #1 failed\\n\"\"\"\n else:\n self.fail()", "def test_stop_not_defined(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = False\n self._hyp.login()\n self.assertRaisesRegex(RuntimeError, \"is not defined\",\n self._hyp.stop, guest_name, parameters_stop)", "def skip_validate(request):\n if request.node.get_closest_marker('validate'):\n if not request.config.getoption(\"validate\"):\n pytest.skip('Validation tests not requested.')", "def test_eat_unhealthy(self):\n self.assertEqual(\n eat(\"pizza\", is_healthy=False),\n \"I'm eating pizza, and I don't care.\",\n )", 
"def test_eat_unhealthy(self):\n self.assertEqual(\n eat(\"pizza\", is_healthy=False),\n \"I'm eating pizza, because YOLO\"\n )", "def test_html_with_no_visitors_planned(self):\n message = 'Você não possui entradas autorizadas.'\n self.assertContains(self.resp, message)", "def test_saml_disabled(self):\n self.enable_saml(enabled=False)\n response = self.client.get(self.METADATA_URL)\n assert response.status_code == 404", "def assert_not_unitary(self, variable):\n self.assertNotEqual(\n variable.by_treatment('As is'), variable.by_treatment('To be'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a named template function with a strip directive actually strips off the outer element.
def test_function_with_strip(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:def="echo(what)" py:strip="">
        <b>${what}</b>
      </div>
      ${echo('foo')}
    </doc>""")
    self.assertEqual("""<doc>
      <b>foo</b>
    </doc>""", tmpl.generate().render(encoding=None))
[ "def test_when_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <span py:otherwise=\"\">foo</span>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <span>foo</span>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))", "def test_namespace_on_removed_elem(self):\r\n tmpl = MarkupTemplate(\"\"\"<?xml version=\"1.0\"?>\r\n <Test xmlns:py=\"http://genshi.edgewall.org/\">\r\n <Size py:if=\"0\" xmlns:t=\"test\">Size</Size>\r\n <Item/>\r\n </Test>\"\"\")\r\n self.assertEqual(\"\"\"<?xml version=\"1.0\"?>\\n<Test>\r\n \r\n <Item/>\r\n </Test>\"\"\", str(tmpl.generate()))", "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def testTemplateConditional(self):\n template = '{{ if [a] == \"foo\" }} foo [b] {{ else }} bar [b] {{ endif }}'\n self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))", "def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))", "def isShadowed(self, name, element):\n\t\tvalue=self.resolve(name)[1]\n\t\treturn (value and (value != element))", "def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')", "def testNonexistantFuntion(self):\n template = 'This tag function is missing [num|zoink].'\n self.assertEqual(self.parse(template), template)\n # Error is only thrown if we actually pass an argument for the tag:\n self.assertRaises(templateparser.TemplateNameError,\n self.parse, template, num=1)", "def testInlineExisting(self):\n self.parser['template'] = self.tmpl('This is a subtemplate by [name].')\n template = '{{ inline template }}'\n expected = 'This is a subtemplate by Elmer.'\n self.assertEqual(self.parse(template, name='Elmer'), expected)", "def is_valid_in_template(var, attr):\n # Remove private variables or methods\n if attr.startswith('_'):\n return False\n # Remove any attributes that raise an acception when read\n try:\n value = getattr(var, attr)\n except:\n return False\n if isroutine(value):\n # Remove any routines that are flagged with 'alters_data'\n if getattr(value, 'alters_data', False):\n return False\n else:\n # Remove any routines that require arguments\n try:\n argspec = getargspec(value)\n num_args = len(argspec.args) if argspec.args else 0\n num_defaults = len(argspec.defaults) if argspec.defaults else 0\n if num_args - num_defaults > 1:\n return False\n except TypeError:\n # C extension callables are routines, but getargspec fails with\n # a TypeError when these are passed.\n pass\n return True", "def test_clean_uses_noop_sanitizer(self):\n mixin = SanitizerMixin()\n self.assertEqual(noop, mixin.get_sanitizer())", "def test_strip_grid_from_name_basic(self):\n result = _strip_grid_from_name(\"atm_grid\")\n self.assertEqual(result, \"atm\")", "def testSimpleClosureWithoutArguments(self):\n template = '[tag|limit()]'\n result = self.parse(template, tag=self.tag)\n self.assertEqual(result, self.tag[:80])", "def test_decompose_definition(definition, static_part, expected):\n assert (\n templates_utils.decompose_definition(definition, static_part)\n == expected\n )", "def test_tokenize_strip(self):\r\n input = \"((' <this> \\\"\\\" 'text' has (lots) of (special chars} 
>>]\"\r\n output = [ (\"<this>\",4),(\"text\",15),(\"has\",21),(\"lots\",26),(\"of\",32),\r\n (\"special\",36),(\"chars}\",44),(\">>\",51)]\r\n self.assertEqual(output,[i for i in basic_tokenize(input)])\r\n for (itmO,itmV) in zip(output,basic_tokenize(input)):\r\n self.assertEqual(itmO,itmV)", "def test_barname_stripper(self):\n assert bu.stripper(\"base-nto+armle-v7+signed.bar\") == \"base\"", "def testComplexClosureWithoutArguments(self):\n template = '[tag|strlimit()]'\n result = self.parse(template, tag=self.tag)\n self.assertEqual(len(result), 83)\n self.assertEqual(result[:80], self.tag[:80])\n self.assertEqual(result[-3:], '...')", "def test_unpatch(self):\n unpatch()\n self.assert_is_not_wrapped(flask.render_template)\n self.assert_is_not_wrapped(flask.render_template_string)\n self.assert_is_not_wrapped(flask.templating._render)", "def _mwpfh_passes(self, func):\n failing = has_module('wikitextparser')\n patterns = [\n '{{subst:a|b=c}}',\n '{{safesubst:a|b=c}}',\n '{{msgnw:a|b=c}}',\n '{{subst::a|b=c}}'\n ]\n context = self.assertRaises(AssertionError) \\\n if failing else nullcontext()\n\n for template in patterns:\n with self.subTest(template=template, failing=failing):\n name = template.strip('{}').split('|')[0]\n with context:\n self.assertEqual(func(template),\n [(name, OrderedDict((('b', 'c'), )))])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the directive works as expected in a text template.
def test_in_text_template(self):
    tmpl = TextTemplate("""
      #def echo(greeting, name='world')
        ${greeting}, ${name}!
      #end
      ${echo('Hi', name='you')}
    """)
    self.assertEqual("""
      Hi, you!
    """, tmpl.generate().render(encoding=None))
[ "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))", "def testInlineExisting(self):\n self.parser['template'] = self.tmpl('This is a subtemplate by [name].')\n template = '{{ inline template }}'\n expected = 'This is a subtemplate by Elmer.'\n self.assertEqual(self.parse(template, name='Elmer'), expected)", "def test_unexpandedLiteral(self):\n self.assertEqual(\n u\"hello world\",\n self.expandToText(ConceptTemplate(u\"hello world\"), {}))", "def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')", "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_templates(self):\n\t\tpass", "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def testCompareTag(self):\n template = '{{ if [variable] == 5 }} foo {{ endif }}'\n self.assertFalse(self.parse(template, variable=0))\n self.assertFalse(self.parse(template, variable=12))\n self.assertTrue(self.parse(template, variable=5))", "def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/centered_text.html', 'The templates were not the same')", "def testTemplateConditional(self):\n template = '{{ if [a] == \"foo\" }} foo [b] {{ else }} bar [b] {{ endif }}'\n self.assertEqual(self.strip(str(self.tmpl(template))), self.strip(template))", "def test_form_template_i18n():", "def testAddTemplate(self):\n parser = templateparser.Parser()\n self.assertEqual(len(parser), 0)\n parser.AddTemplate(self.name)\n self.assertEqual(len(parser), 1)\n self.assertEqual(parser[self.name], self.template)", "def testTagPresenceElse(self):\n template = '{{ ifpresent [tag] }} yes {{ else }} no {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' yes')\n self.assertEqual(self.parse(template), ' no')", "def test_is_placeholder(placeholder, expected):\n assert templates_utils.is_placeholder(placeholder=placeholder) == expected", "def _test_template(self, template_path):\n full_path = os.path.join(self.template_dir, template_path)\n doc = lxml.html.parse(full_path)\n expecting_vuln = _get_expecting_vuln(doc)\n templ = loader.get_template(template_path)\n context = parse_template.get_default_context()\n templ.render(context)\n methods = [\n parse_template.get_non_js_escaped_results_for_template,\n parse_template.get_non_quoted_attr_vars_for_template\n ]\n for method in methods:\n for result in method(templ):\n self.csw.handle_callback(result)\n self.assertEqual(len(self.csw.results), len(expecting_vuln))\n for result, expected in zip(self.csw.results, 
expecting_vuln):\n line_no = result.get_line_number()\n part = result.get_vulnerability_text()\n filename = result.get_filename()\n var = str(result._var_node.filter_expression.var)\n self.assertEqual(line_no, expected['line_number'])\n self.assertEqual(var, expected['name'])\n self.assertEqual(filename, full_path)\n self.assertTrue(var in part)", "def test_render_template_doc(mock_rts, mock_markup):\n test_string = 'some string'\n test_doc = Mock(spec=yattag.Doc)\n test_doc.getvalue.return_value = test_string\n template.render_template(test_doc)\n test_doc.getvalue.assert_called_once_with()\n mock_rts.assert_called_once_with(test_string)\n mock_markup.assert_called_once_with(mock_rts(test_string))", "def testSafeString(self):\n template = 'Template without any tags'\n parsed_template = self.tmpl(template).Parse()\n self.assertTrue(isinstance(parsed_template, templateparser.SafeString))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a named template function using "star arguments" works as expected.
def test_function_with_star_args(self):
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
      <div py:def="f(*args, **kwargs)">
        ${repr(args)}
        ${repr(sorted(kwargs.items()))}
      </div>
      ${f(1, 2, a=3, b=4)}
    </doc>""")
    self.assertEqual("""<doc>
      <div>
        [1, 2]
        [('a', 3), ('b', 4)]
      </div>
    </doc>""", tmpl.generate().render(encoding=None))
[ "def test_variable_arguments(self):\n def foo(*args):\n return tuple(args)\n provider = FunctionProvider(foo)\n wrapped_function = provider()\n self.assertSequenceEqual(wrapped_function(1, 2), (1, 2))\n self.assertSequenceEqual(wrapped_function(1), (1,))", "def foo4(_, *, _): # [duplicate-argument-name, duplicate-argument-name]", "def test_passed_simplePositional(self):\n\n def func(a, b):\n pass\n\n self.assertEqual(self.checkPassed(func, 1, 2), dict(a=1, b=2))", "def testNonexistantFuntion(self):\n template = 'This tag function is missing [num|zoink].'\n self.assertEqual(self.parse(template), template)\n # Error is only thrown if we actually pass an argument for the tag:\n self.assertRaises(templateparser.TemplateNameError,\n self.parse, template, num=1)", "def test_Tucker_args():\n testing_function_with_args('tucker')", "def test_is_placeholder(placeholder, expected):\n assert templates_utils.is_placeholder(placeholder=placeholder) == expected", "def test_Complex_args():\n testing_function_with_args('complex')", "def add_call_starargs(node, name):\n value = name\n if isinstance(name, str):\n value = ast.Name(id=name, ctx=ast.Load())\n\n starred = ast.Starred(\n value=value,\n ctx=ast.Load()\n )\n node.args.append(starred)", "def test_python_callable_arguments_are_templatized(self):\n recorded_calls = []\n\n # Create a named tuple and ensure it is still preserved\n # after the rendering is done\n Named = namedtuple('Named', ['var1', 'var2'])\n named_tuple = Named('{{ ds }}', 'unchanged')\n\n task = PythonSensor(\n task_id='python_sensor',\n timeout=0.01,\n poke_interval=0.3,\n # a Mock instance cannot be used as a callable function or test fails with a\n # TypeError: Object of type Mock is not JSON serializable\n python_callable=build_recording_function(recorded_calls),\n op_args=[4, date(2019, 1, 1), \"dag {{dag.dag_id}} ran on {{ds}}.\", named_tuple],\n dag=self.dag,\n )\n\n self.dag.create_dagrun(\n run_type=DagRunType.MANUAL,\n execution_date=DEFAULT_DATE,\n start_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n with self.assertRaises(AirflowSensorTimeout):\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n ds_templated = DEFAULT_DATE.date().isoformat()\n # 2 calls: first: at start, second: before timeout\n self.assertEqual(2, len(recorded_calls))\n self._assert_calls_equal(\n recorded_calls[0],\n Call(\n 4,\n date(2019, 1, 1),\n f\"dag {self.dag.dag_id} ran on {ds_templated}.\",\n Named(ds_templated, 'unchanged'),\n ),\n )", "def test_special_names():", "def foo1(_, _): # [duplicate-argument-name, duplicate-argument-name]", "def test_HoLE_args():\n testing_function_with_args('hole')", "def test_extract_parameter_name(variable, name):\n assert tp.get_name(tp.VARIABLE(variable)) == name", "def test_DistMult_args():\n testing_function_with_args('distmult')", "def test_match_with_params(self):\n string = 'The {{quick|brown}} fox'\n builder = MultiTemplateMatchBuilder(self.site)\n self.assertIsNotNone(re.search(builder.pattern('quick'), string))\n self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),\n self._template_not_case_sensitive)", "def check_partial(sig, args, kwargs):\n num_pos_only, func, keyword_exclude, sigspec = sig\n if len(args) < num_pos_only:\n pad = (None,) * (num_pos_only - len(args))\n args = args + pad\n if keyword_exclude:\n kwargs = dict(kwargs)\n for item in keyword_exclude:\n kwargs.pop(item, None)\n return is_partial_args(func, args, kwargs, sigspec)", "def test_TransD_args():\n testing_function_with_args('transd')", "def 
test_python_callable_keyword_arguments_are_templatized(self):\n recorded_calls = []\n\n task = PythonSensor(\n task_id='python_sensor',\n timeout=0.01,\n poke_interval=0.3,\n # a Mock instance cannot be used as a callable function or test fails with a\n # TypeError: Object of type Mock is not JSON serializable\n python_callable=build_recording_function(recorded_calls),\n op_kwargs={\n 'an_int': 4,\n 'a_date': date(2019, 1, 1),\n 'a_templated_string': \"dag {{dag.dag_id}} ran on {{ds}}.\",\n },\n dag=self.dag,\n )\n\n self.dag.create_dagrun(\n run_type=DagRunType.MANUAL,\n execution_date=DEFAULT_DATE,\n start_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n with self.assertRaises(AirflowSensorTimeout):\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n # 2 calls: first: at start, second: before timeout\n self.assertEqual(2, len(recorded_calls))\n self._assert_calls_equal(\n recorded_calls[0],\n Call(\n an_int=4,\n a_date=date(2019, 1, 1),\n a_templated_string=\"dag {} ran on {}.\".format(\n self.dag.dag_id, DEFAULT_DATE.date().isoformat()\n ),\n ),\n )", "def has_arg(func, argname):\n return argname in getargspec(func)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify an empty 'for' value is an error
def test_for_with_empty_value(self):
    try:
        MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <py:for each=""> empty </py:for> </doc>""", filename='test.html').generate()
        self.fail('ExpectedTemplateSyntaxError')
    except TemplateSyntaxError as e:
        self.assertEqual('test.html', e.filename)
        if sys.version_info[:2] > (2,4):
            self.assertEqual(2, e.lineno)
[ "def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError, e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)", "def testInvalidValueWhileIterating(self):\n self.assertRaises(ValueError,\n list,\n self.manager.snimpyInvalidDescr.iteritems())", "def ensure_empty(gen):\n try:\n next(gen)\n return False\n except StopIteration:\n return True", "def test_submit_message_missing_data(self):\n default_args = [self.valid_msg, 1, 'day']\n for i in range(0, 3):\n args = default_args\n args[i] = None\n r = self._submit_message(*args)\n self.assertIn(err_messages['incomplete_form'], r.data)", "def test_fail_tails_empty(self):\n self.assertFilterErrors(\n {\n 'tails': [],\n },\n\n {\n 'tails': [f.Required.CODE_EMPTY],\n },\n )", "def testEmpty(self):\n assert Iter.empty(Iter.filter(self.alwayserror,\n self.empty())), \\\n \"Filtering an empty iterator should result in empty iterator\"", "def testLoopAbsentIndex(self):\n template = '{{ for item in [tag:absent] }} x {{ endfor }}'\n self.assertFalse(self.parse(template, tag='absent'))", "def has_error(self):\n return self._has_error", "def test_param_invalid_output_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.data, self.dataempty, 100.0)", "def iter_is_empty(i):\n try:\n i.next()\n except StopIteration:\n return True\n return False", "def _check_empty(key, value, empty):\n if not empty and not value:\n raise Exception(\"{} is empty, expecting a value\".format(key))\n elif empty and value:\n raise Exception(\n \"{} is suppose to be empty. value: {} exists\".format(key, value)\n )", "def test_iter_error():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tspec = SkySurvey(tuner_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False", "def _all_data_fields_are_empty(value):\n if not value:\n return True\n for v in value.values():\n if v:\n return False\n\n return True", "def testEmpty(self):\n assert Iter.equal(self.empty(), iter([]))", "def has_error(self) -> bool:\n return len(self.errors) > 0", "def test_index_error_with_data():\n test_list = OffByOneList([])\n for k in (0, 4):\n with pytest.raises(IndexError):\n test_list[k]", "def test_bug_form_no_data(self):\n\n form = BugForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEquals(len(form.errors), 3)", "def test_incorrect_value_passed(self) -> None:\n for incorrect_value in self.INCORRECT_VALUES:\n with pytest.raises(\n AEAValidationError,\n match=\"The following errors occurred during validation:\",\n ):\n self._make_configuration(incorrect_value)", "def test_param_invalid_input_array_param_length(self):\n\t\twith self.assertRaises(IndexError):\n\t\t\tresult = arrayfunc.takewhile('==', self.dataempty, self.dataout, 100.0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that outputting context data in text nodes doesn't escape quotes.
def test_text_noescape_quotes(self):
    tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/"> $myvar </div>""")
    self.assertEqual("""<div> "foo" </div>""", str(tmpl.generate(myvar='"foo"')))
[ "def test_strip_quotes(self):\n self.assertEqual(strip_quotes(\"'text'\"), \"text\")\n self.assertEqual(strip_quotes('\"text\"'), \"text\")", "def test_html_entities():\r\n for quote in ['\"', '&quot;', '&#x22;', '&#34;']:\r\n assert_tree(parse('<p>{0}abc{1}'.format(quote, quote)), [\r\n ('p', 'Block', [\r\n ('p', 'Text', '\"abc\"')])])", "def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quotes)\n node1.quotes = \"'\"\n node2.quotes = None\n self.assertEqual(\"'\", node1.quotes)\n self.assertIs(None, node2.quotes)\n self.assertRaises(ValueError, setattr, node1, \"quotes\", \"foobar\")\n self.assertRaises(ValueError, setattr, node3, \"quotes\", None)\n self.assertRaises(ValueError, Attribute, wraptext(\"id\"),\n wraptext(\"foo bar baz\"), None)", "def test_normalize_quotes_1(self):\n text = 'This is a test that shoudln\\'t change anything.'\n clean_text = normalize_quotes(text, default_quote='\"', quotes=None)\n self.assertEquals(clean_text, text)", "def test_section__context_values(self):\n template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'\n context = {'test': {'foo': '<'}}\n\n self._assert_render(u'unescaped: < escaped: &lt;', template, context)", "def test_tag_with_double_quote(self):\n code, out, err = self.t(\"start 1h ago 'this is a \\\"test\\\"'\")\n self.assertIn(\"Note: '\\\"this is a \\\\\\\"test\\\\\\\"\\\"' is a new tag\", out)\n self.t(\"stop\")\n self.t(\"delete @1\")", "def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n escape=lambda s: \"**\" + s)\n\n template = 'literal: {{{foo}}} escaped: {{foo}}'\n context = {'foo': 'bar'}\n\n self._assert_render(u'literal: BAR escaped: **bar', template, context, engine=engine)", "def test_escape(fb, fb_secure):\n\n assert fb.escape('This has \"quotes\"') == 'This has \\\\\"quotes\\\\\"'\n assert fb.escape('This has a backslash \\\\') == 'This has a backslash \\\\\\\\'\n assert fb.escape('This has \\\\\"both\\\\\"') == 'This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"'", "def test_html_escape(user_input, expected_result):\n assert expected_result == escape_user_input(user_input)", "def test_text(self):\n self.assertTrue(type(x.text) == str)", "def testAddHTMLTags_WebsiteInQuotes(self):\n self.doTestAddHTMLTags(\n 'test \"http://www.example.com\".',\n ('test &quot;<a href=\"http://www.example.com\">'\n 'http://www.example.com</a>&quot;.'))", "def test_in_text_template(self):\r\n tmpl = TextTemplate(\"\"\"\r\n #def echo(greeting, name='world')\r\n ${greeting}, ${name}!\r\n #end\r\n ${echo('Hi', name='you')}\r\n \"\"\")\r\n self.assertEqual(\"\"\"\r\n Hi, you!\r\n\r\n \"\"\", tmpl.generate().render(encoding=None))", "def test_quotes1(self) -> None:\n self.assertEqual(\n detokenize(\n [\n '\"',\n \"I\",\n \"don't\",\n \"know\",\n \"what\",\n \"NLP\",\n \"is\",\n \",\",\n '\"',\n \"he\",\n \"said.\",\n ]\n ),\n '\"I don\\'t know what NLP is,\" he said.',\n )\n self.assertEqual(\n detokenize(\n ['\"', \"Too\", \"much\", \"punctuation\", \"!\", '\"', \"they\", \"exclaimed\", \".\"]\n ),\n '\"Too much punctuation!\" they exclaimed.',\n )", "def test_contains_quoted_with_escaped_newline(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\"Test\\\\\\ning\")',\n ['1'],\n flags=util.HTML\n )", "def test_tree_text(self):\n expected_text = None\n 
self.assertEqual(expected_text, self.tree.text)", "def test_contains_escapes(self):\n\n markup = \"\"\"\n <body>\n <div id=\"1\">Testing<span id=\"2\">\n that</span>contains works.</div>\n </body>\n \"\"\"\n\n self.assert_selector(\n markup,\n r'body span:-soup-contains(\"\\0a that\")',\n ['2'],\n flags=util.HTML\n )", "def test_format_text(self):\n entry = PasswordEntry(name=\"some/random/password\", store=object())\n set_property(entry, \"text\", random_string())\n self.assertEquals(\n # We enable ANSI escape sequences but strip them before we\n # compare the generated string. This may seem rather pointless\n # but it ensures that the relevant code paths are covered :-).\n dedent(ansi_strip(entry.format_text(include_password=True, use_colors=True))),\n dedent(\n \"\"\"\n some / random / password\n\n Password: {value}\n \"\"\",\n value=entry.text,\n ),\n )", "def test_render_valid_context(self):\n self.assertTrue('context' in ''.join(self.c.render()))", "def test_local_variable_escaping(self):\n try:\n local = mark_safe(\"<p>Local variable</p>\")\n raise ValueError(local)\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()\n self.assertIn(\n '<td class=\"code\"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre>'\n \"</td>\",\n html,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that outputting context data in attributes escapes quotes.
def test_attr_escape_quotes(self):
    tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/"> <elem class="$myvar"/> </div>""")
    self.assertEqual("""<div> <elem class="&#34;foo&#34;"/> </div>""", str(tmpl.generate(myvar='"foo"')))
[ "def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quotes)\n node1.quotes = \"'\"\n node2.quotes = None\n self.assertEqual(\"'\", node1.quotes)\n self.assertIs(None, node2.quotes)\n self.assertRaises(ValueError, setattr, node1, \"quotes\", \"foobar\")\n self.assertRaises(ValueError, setattr, node3, \"quotes\", None)\n self.assertRaises(ValueError, Attribute, wraptext(\"id\"),\n wraptext(\"foo bar baz\"), None)", "def attributeEscapingDoneOutside(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n return data", "def quoteattr(data, entities={}):\r\n data = escape(data, entities)\r\n if '\"' in data:\r\n if \"'\" in data:\r\n data = '\"%s\"' % data.replace('\"', \"&quot;\")\r\n else:\r\n data = \"'%s'\" % data\r\n else:\r\n data = '\"%s\"' % data\r\n return data", "def test_text_noescape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n $myvar\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n \"foo\"\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))", "def test_escape(fb, fb_secure):\n\n assert fb.escape('This has \"quotes\"') == 'This has \\\\\"quotes\\\\\"'\n assert fb.escape('This has a backslash \\\\') == 'This has a backslash \\\\\\\\'\n assert fb.escape('This has \\\\\"both\\\\\"') == 'This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"'", "def test_section__context_values(self):\n template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'\n context = {'test': {'foo': '<'}}\n\n self._assert_render(u'unescaped: < escaped: &lt;', template, context)", "def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'&quot;'))\n return _write", "def test_strip_quotes(self):\n self.assertEqual(strip_quotes(\"'text'\"), \"text\")\n self.assertEqual(strip_quotes('\"text\"'), \"text\")", "def test_tag_with_double_quote(self):\n code, out, err = self.t(\"start 1h ago 'this is a \\\"test\\\"'\")\n self.assertIn(\"Note: '\\\"this is a \\\\\\\"test\\\\\\\"\\\"' is a new tag\", out)\n self.t(\"stop\")\n self.t(\"delete @1\")", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def test__escape(self):\n engine = Renderer(escape=lambda s: \"**\" + s)\n\n self._assert_render(u'**bar', '{{foo}}', {'foo': 'bar'}, engine=engine)", "def test_normalize_quotes_1(self):\n text = 'This is a test that shoudln\\'t change anything.'\n clean_text = normalize_quotes(text, default_quote='\"', quotes=None)\n self.assertEquals(clean_text, text)", "def test_html_escape(user_input, expected_result):\n assert expected_result == escape_user_input(user_input)", "def test__escape_does_not_call_literal(self):\n engine = Renderer(literal=lambda s: s.upper(),\n escape=lambda s: \"**\" + s)\n\n template = 'literal: {{{foo}}} escaped: {{foo}}'\n context = {'foo': 'bar'}\n\n self._assert_render(u'literal: BAR escaped: **bar', template, context, engine=engine)", "def test_simple_pi_with_double_quotes(self):\n pi_data = u\"\"\" \\t att=\"value\"\\n \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"att\": u\"value\"})", "def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]", "def 
test_simple_pi_with_simple_quotes(self):\n pi_data = u\"\"\" \\t att='value'\\n \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"att\": u\"value\"})", "def testAddHTMLTags_WebsiteInQuotes(self):\n self.doTestAddHTMLTags(\n 'test \"http://www.example.com\".',\n ('test &quot;<a href=\"http://www.example.com\">'\n 'http://www.example.com</a>&quot;.'))", "def quoted_attribute_value(self, value):\n quote_with = '\"'\n if '\"' in value:\n if \"'\" in value:\n replace_with = '&quot;'\n value = value.replace('\"', replace_with)\n else:\n quote_with = \"'\"\n return quote_with + value + quote_with" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a namespace declaration on an element that is removed from the generated stream does not get pushed up to the next nonstripped element (see ticket 107).
def test_namespace_on_removed_elem(self):
    tmpl = MarkupTemplate("""<?xml version="1.0"?> <Test xmlns:py="http://genshi.edgewall.org/"> <Size py:if="0" xmlns:t="test">Size</Size> <Item/> </Test>""")
    self.assertEqual("""<?xml version="1.0"?>\n<Test> <Item/> </Test>""", str(tmpl.generate()))
[ "def check_namespace(self):\n if not self.tree:\n self.xml_validate()\n \n root = self.tree.getroot()\n self.namespace = root.get(\"targetNamespace\")\n if self.namespace is None:\n self.namespace = \"\"", "def remove_namespace(doc, namespace):\n ns = u'{%s}' % namespace\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]", "def CheckEnd(self, filename, clean_lines, linenum, error):\n line = clean_lines.raw_lines[linenum]\n\n # Check how many lines is enclosed in this namespace. Don't issue\n # warning for missing namespace comments if there aren't enough\n # lines. However, do apply checks if there is already an end of\n # namespace comment and it's incorrect.\n #\n # TODO(unknown): We always want to check end of namespace comments\n # if a namespace is large, but sometimes we also want to apply the\n # check if a short namespace contained nontrivial things (something\n # other than forward declarations). There is currently no logic on\n # deciding what these nontrivial things are, so this check is\n # triggered by namespace size only, which works most of the time.\n if (linenum - self.starting_linenum < 10\n and not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\b', line)):\n return\n\n # Look for matching comment at end of namespace.\n #\n # Note that we accept C style \"/* */\" comments for terminating\n # namespaces, so that code that terminate namespaces inside\n # preprocessor macros can be cpplint clean.\n #\n # We also accept stuff like \"// end of namespace <name>.\" with the\n # period at the end.\n #\n # Besides these, we don't accept anything else, otherwise we might\n # get false negatives when existing comment is a substring of the\n # expected namespace.\n if self.name:\n # Named namespace\n if not Match((r'^\\s*};*\\s*(//|/\\*).*\\bnamespace\\s+' +\n regex.escape(self.name) + r'[\\*/\\.\\\\\\s]*$'),\n line):\n error(filename, linenum, 'readability/namespace', 5,\n 'Namespace should be terminated with \"// namespace %s\"' %\n self.name)\n else:\n # Anonymous namespace\n if not Match(r'^\\s*};*\\s*(//|/\\*).*\\bnamespace[\\*/\\.\\\\\\s]*$', line):\n # If \"// namespace anonymous\" or \"// anonymous namespace (more text)\",\n # mention \"// anonymous namespace\" as an acceptable form\n if Match(r'^\\s*}.*\\b(namespace anonymous|anonymous namespace)\\b', line):\n error(filename, linenum, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"'\n ' or \"// anonymous namespace\"')\n else:\n error(filename, linenum, 'readability/namespace', 5,\n 'Anonymous namespace should be terminated with \"// namespace\"')", "def namespaceExit(self):\n self.doc.namespaceExit(self.p.ident)\n self.doc.namespaceExit(\"stromx\")\n self.doc.blank()", "def namespaceMismatch(self, linkingPage, linkedPage, counter) -> bool:\n if linkedPage in self.found_in:\n # We have seen this page before, don't ask again.\n return False\n\n if self.origin and self.origin.namespace() != linkedPage.namespace():\n # Allow for a mapping between different namespaces\n crossFrom = self.origin.site.family.crossnamespace.get(\n self.origin.namespace(), {})\n crossTo = crossFrom.get(self.origin.site.lang,\n crossFrom.get('_default', {}))\n nsmatch = crossTo.get(linkedPage.site.lang,\n crossTo.get('_default', []))\n if linkedPage.namespace() in nsmatch:\n return False\n\n if self.conf.autonomous:\n pywikibot.info(\n 'NOTE: Ignoring link from page {} in namespace'\n ' {} to page {} in namespace {}.'\n .format(linkingPage, 
linkingPage.namespace(), linkedPage,\n linkedPage.namespace()))\n # Fill up found_in, so that we will not write this notice\n self.found_in[linkedPage] = [linkingPage]\n return True\n\n preferredPage = self.getFoundInCorrectNamespace(linkedPage.site)\n if preferredPage:\n pywikibot.info(\n 'NOTE: Ignoring link from page {} in namespace {} to '\n 'page {} in namespace {} because page {} in the '\n 'correct namespace has already been found.'\n .format(linkingPage, linkingPage.namespace(),\n linkedPage, linkedPage.namespace(),\n preferredPage))\n return True\n\n choice = pywikibot.input_choice(\n 'WARNING: {} is in namespace \"{}\", but {} is in '\n 'namespace \"{}\". Follow it anyway?'\n .format(self.origin, self.origin.namespace(),\n linkedPage, linkedPage.namespace()),\n [('Yes', 'y'), ('No', 'n'),\n ('Add an alternative', 'a'), ('give up', 'g')],\n automatic_quit=False)\n\n if choice != 'y':\n # Fill up found_in, so that we will not ask again\n self.found_in[linkedPage] = [linkingPage]\n if choice == 'g':\n self.makeForcedStop(counter)\n elif choice == 'a':\n newHint = pywikibot.input(\n 'Give the alternative for language {}, not '\n 'using a language code:'\n .format(linkedPage.site.lang))\n if newHint:\n alternativePage = pywikibot.Page(\n linkedPage.site, newHint)\n # add the page that was entered by the user\n self.addIfNew(alternativePage, counter, None)\n else:\n pywikibot.info(\n f'NOTE: ignoring {linkedPage} and its interwiki links')\n return True\n\n # same namespaces, no problem\n # or no origin page yet, also no problem\n return False", "def __strip_ns__(self, tree):\n\t\tfor node in tree.iter():\n\t\t\ttry:\n\t\t\t\thas_namespace = node.tag.startswith('{')\n\t\t\texcept AttributeError:\n\t\t\t\tcontinue\n\t\t\tif has_namespace:\n\t\t\t\tnode.tag = node.tag.split('}', 1)[1]", "def test_validate_namespace_fail_without_prefix(self):\n namespace = 'telemetry.switches.1.interfaces.232.bytes_in'\n\n with self.assertRaises(NamespaceError):\n influx._validate_namespace(namespace)", "def testXMLWithUknownData(self):\n self.XMLSchemaService.loadSchema('http://queue.amazonaws.com/doc/2008-01-01/QueueService.xsd', self)\n self.runLoop.run()\n assert(self.schema)\n parser = self.schema.newParser()\n parser.feed(message_response_with_uknown_elements)\n result = parser.finish()\n self.assertEqual('8f2770293f9b94ad705d5fd742f5f885', result.ReceiveMessageResult.Message[0].MD5OfBody)", "def remove_namespace(xml):\n xmlepured = re.sub(pattern=' xmlns=\"[^\"]+\"', repl=\"\", string=xml, flags=0)\n xmlepured = xmlepured.encode(\"utf-8\")\n return xmlepured", "def has_html_ns(el: bs4.Tag) -> bool:\n\n ns = getattr(el, 'namespace') if el else None\n return bool(ns and ns == NS_XHTML)", "def test_ns_tag():\r\n namespaces = ['http://purl.org/dc/elements/1.1/',\r\n 'urn:schemas-upnp-org:metadata-1-0/upnp/',\r\n 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/']\r\n for ns_in, namespace in zip(['dc', 'upnp', ''], namespaces):\r\n res = data_structures.ns_tag(ns_in, 'testtag')\r\n correct = '{{{}}}{}'.format(namespace, 'testtag')\r\n assert res == correct", "def is_namespace(self, ):\n\t\tpass", "def soap_element_ns(self):\n if not self.acs_session.soap_namespaces:\n # unable to get soap namespaces for this acs session, return unchanged\n return self.soap_element\n\n # loop over namespaces and find the right one\n for namespace, uri in self.acs_session.soap_namespaces.items():\n if uri in self.soap_element:\n # found the matching namespace\n return self.soap_element.replace(\"{%s}\" % uri, \"%s:\" % 
namespace)\n\n # this is either an unknown uri or a non-namespaced soap element\n return self.soap_element", "def in_namespace(namespace):\n if not namespace:\n yield\n return\n\n org_netns_fd = os.open(PROCESS_NETNS, os.O_RDONLY)\n pynetns.setns(namespace)\n try:\n yield\n finally:\n try:\n # NOTE(cby): this code is not executed only if we fail to\n # move in target namespace\n pynetns.setns(org_netns_fd)\n except Exception as e:\n msg = _('Failed to move back in original netns: %s') % e\n LOG.critical(msg)\n raise BackInNamespaceExit(msg)", "def testExpectationRemoval(self):\n contents = validate_tag_consistency.TAG_HEADER + \"\"\"\n\n# This is a test comment\ncrbug.com/1234 [ win ] foo/test [ Failure ]\ncrbug.com/2345 [ win ] foo/test [ RetryOnFailure ]\n\n# Another comment\n[ linux ] bar/test [ RetryOnFailure ]\n[ win ] bar/test [ RetryOnFailure ]\n\"\"\"\n\n stale_expectations = [\n data_types.Expectation('foo/test', ['win'], ['Failure']),\n data_types.Expectation('bar/test', ['linux'], ['RetryOnFailure'])\n ]\n\n expected_contents = validate_tag_consistency.TAG_HEADER + \"\"\"\n\n# This is a test comment\ncrbug.com/2345 [ win ] foo/test [ RetryOnFailure ]\n\n# Another comment\n[ win ] bar/test [ RetryOnFailure ]\n\"\"\"\n\n with open(self.filename, 'w') as f:\n f.write(contents)\n\n removed_urls = expectations.RemoveExpectationsFromFile(\n stale_expectations, self.filename)\n self.assertEqual(removed_urls, set(['crbug.com/1234']))\n with open(self.filename) as f:\n self.assertEqual(f.read(), expected_contents)", "def isAbsentNamespace (self):\n return self.__uri is None", "def test_validate_namespace_success(self):\n namespace = 'kytos.kronos.telemetry.switches.1.interfaces.232.bytes_in'\n\n result = influx._validate_namespace(namespace)\n\n self.assertEqual(result, True)", "def test_unregister_sequence_decl3(collector, sequence_decl):\n collector.contributions['ecpy_pulses.BaseSequence'] = SequenceInfos()\n sequence_decl.sequence = 'ecpy_pulses.BaseSequence'\n sequence_decl.metadata = {'test': True}\n sequence_decl.register(collector, {})\n sequence_decl.unregister(collector)\n assert not collector.contributions['ecpy_pulses.BaseSequence'].metadata", "def finish_parsing(self):\n if self.current_element:\n self.diagnostics.append(Diagnostic(\n Severity.ERROR,\n self.current_element.code_range,\n f\"Unclosed element <{self.current_element.tagname}>\\n\"\n f\"Did you mean <{self.current_element.tagname} \"\n f\"...attributes... /> ?\"\n ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the text with all entities and tags removed.

>>> plaintext('<b>1 &lt; 2</b>')
'1 < 2'

The `keeplinebreaks` parameter can be set to ``False`` to replace any line breaks by simple spaces.
def plaintext(text, keeplinebreaks=True):
    text = stripentities(striptags(text))
    if not keeplinebreaks:
        text = text.replace('\n', ' ')
    return text
[ "def stripentities(text, keepxmlentities=False):\r\n def _replace_entity(match):\r\n if match.group(1): # numeric entity\r\n ref = match.group(1)\r\n if ref.startswith('x'):\r\n ref = int(ref[1:], 16)\r\n else:\r\n ref = int(ref, 10)\r\n return chr(ref)\r\n else: # character entity\r\n ref = match.group(2)\r\n if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):\r\n return '&%s;' % ref\r\n try:\r\n return chr(entities.name2codepoint[ref])\r\n except KeyError:\r\n if keepxmlentities:\r\n return '&amp;%s;' % ref\r\n else:\r\n return ref\r\n return _STRIPENTITIES_RE.sub(_replace_entity, text)", "def remove_tags(text):\n pattern = re.compile('<.*?>')\n return pattern.sub(r'', text)", "def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text", "def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text", "def PlainText_to_HTML(Text):\n \n Text = string.replace(Text, \"<\", \"&lt;\") \n Text = string.replace(Text, \"\\n\\n\", \"<P>\")\n Text = string.replace(Text, \"\\n\", \"<BR>\")\n return Text", "def removeHTMLParts(text: str, keeptags: Optional[List[str]] = None) -> str:\n # try to merge with 'removeDisabledParts()' above into one generic function\n # thanks to:\n # https://www.hellboundhackers.org/articles/read-article.php?article_id=841\n parser = _GetDataHTML()\n if keeptags is None:\n keeptags = ['tt', 'nowiki', 'small', 'sup']\n with parser:\n parser.keeptags = keeptags\n parser.feed(text)\n return parser.textdata", "def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)", "def remove_html(e):\n\n p = re.compile(r'<.*?>')\n return p.sub('', str(e))", "def clean(article):\n soup = BeautifulSoup(article, \"html.parser\")\n text = soup.get_text()\n text = text.strip()\n return text", "def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain", "def remove_newlines(text):\n # First normalize the newlines using Django's nifty utility\n normalized_text = normalize_newlines(text)\n # Then simply remove the newlines like so.\n return normalized_text.replace('\\n', '<\\br>').replace('\\r','')", "def plaintext(self, **kwargs):\n return self.doctree().astext()", "def strip_tags(value):\r\n return txcommon.rst.strip_tags(value)", "def tidy_article_allow_punct( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. 
Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()", "def remove_non_alpha_chars_and_html(text) -> str:\n text_output_trimmed = ProjectCommon.trimmer(text)\n\n text_output_no_html = ProjectCommon.remove_html(text_output_trimmed)\n\n text_output_no_html_no_non_alpha_chars = \\\n ProjectCommon.remove_non_alpha_chars(text_output_no_html)\n\n return text_output_no_html_no_non_alpha_chars", "def _strip_xml(txts):\n txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess\n txts = txts.split('\\n')\n\n for i in range(len(txts)):\n for pattern in patterns:\n txts[i] = pattern[0].sub(pattern[1], txts[i])\n\n txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']\n return '\\n'.join(txts)", "def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)", "def html_to_text(html_text):\n soup = BeautifulSoup(html_text, \"html.parser\")\n clean_html = ' '.join(soup.find_all(text=True))\n return clean_html", "def clean_text(text):\r\n clean_text = text.replace('\\n', ' ').replace('\\r', '').strip()\r\n return clean_text", "def remove_defined_articles(self, text: str) -> str:\n cleaned_text = re.sub(self.quote_pattern, \"\", text)\n return cleaned_text.strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of the given text with any character or numeric entities replaced by the equivalent UTF-8 characters.

>>> stripentities('1 &lt; 2')
'1 < 2'
>>> stripentities('more &hellip;')
'more \u2026'
>>> stripentities('&#8230;')
'\u2026'
>>> stripentities('&#x2026;')
'\u2026'

If the `keepxmlentities` parameter is provided and is a truth value, the core XML entities (&amp;, &apos;, &gt;, &lt; and &quot;) are left intact.

>>> stripentities('1 &lt; 2 &hellip;', keepxmlentities=True)
'1 &lt; 2 \u2026'
def stripentities(text, keepxmlentities=False):
    def _replace_entity(match):
        if match.group(1): # numeric entity
            ref = match.group(1)
            if ref.startswith('x'):
                ref = int(ref[1:], 16)
            else:
                ref = int(ref, 10)
            return chr(ref)
        else: # character entity
            ref = match.group(2)
            if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
                return '&%s;' % ref
            try:
                return chr(entities.name2codepoint[ref])
            except KeyError:
                if keepxmlentities:
                    return '&amp;%s;' % ref
                else:
                    return ref
    return _STRIPENTITIES_RE.sub(_replace_entity, text)
[ "def decode_html_entities(text):\n h = HTMLParser()\n return h.unescape(text)", "def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)", "def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)", "def unescape(text):\r\n\r\n if not text:\r\n return text\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == \"&#\":\r\n # character reference\r\n try:\r\n if text[:3] == \"&#x\":\r\n return unichr(int(text[3:-1], 16))\r\n else:\r\n return unichr(int(text[2:-1]))\r\n except ValueError:\r\n pass\r\n else:\r\n # named entity\r\n try:\r\n text = unichr(htmlentitydefs.name2codepoint[text[1: -1]])\r\n except KeyError:\r\n pass\r\n return text # leave as is\r\n return re.sub(\"&#?\\w+;\", fixup, text)", "def remove_entities(self, title):\n\t\tentities, ent_types = self.get_entities(title)\n\t\tif entities == []:\n\t\t\treturn title\n\t\telse:\n\t\t\tsubstitutions = {}\n\t\t\tfor X in entities:\n\t\t\t\tsubstitutions[X] = ''\n\t\t\toutput = self.replace(title, substitutions)\n\t\t\treturn output", "def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text", "def strip_tweet(text, remove_url=True):\n if remove_url:\n text = url_pattern.sub('', text)\n else:\n text = expand_url(text)\n text = mention_pattern.sub('', text)\n text = html_parser.unescape(text)\n text = text.strip()\n return text", "def get_texts_from_entities(entities):\n texts = []\n for e in entities:\n texts.append(e.text)\n return texts", "def html_unquote(s, encoding=None):\r\n if isinstance(s, str):\r\n if s == '':\r\n # workaround re.sub('', '', u'') returning '' < 2.5.2\r\n # instead of u'' >= 2.5.2\r\n return u''\r\n s = s.decode(encoding or default_encoding)\r\n return _unquote_re.sub(_entity_subber, s)", "def strip(text):\n\n return text.strip()", "def remove_defined_articles(self, text: str) -> str:\n cleaned_text = re.sub(self.quote_pattern, \"\", text)\n return cleaned_text.strip()", "def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&amp;\")\r\n data = data.replace(\"<\", \"&lt;\")\r\n data = data.replace(\">\", \"&gt;\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data", "def clean(article):\n soup = BeautifulSoup(article, \"html.parser\")\n text = soup.get_text()\n text = text.strip()\n return text", "def extract_entities(self, text):\n results = self.fetch(self.base_url, text)\n return [_ for _ in self.process_results(results)]", "def tidy_article_allow_punct( text ): \n #Get rid of new lines. Need this for the <figure> removal\n text = text.replace('\\n', ' ').replace('\\r', '')\n #<figure> tag has contents. 
Remove all this.\n text = re.sub( '<figure(.*?)/figure>', '', text)\n #<span> contains \"facebook twitter google plus bst\"\n text = re.sub( '<span(.*?)/span>', '', text)\n #<sup> is a little supplementary notice\n text = re.sub( '<sup(.*?)/sup>', '', text)\n #\"Read more\" text\n text = re.sub( '<div class=\"rich-link__read-more(.*?)/div>', '', text)\n text = re.sub( 'Read more here:', '', text)\n #Remove other html tags but keep content\n text = re.sub( '<[^>]+>', ' ', text) \n #Remove numbers\n text = ' '.join(s for s in text.split() if not any(c.isdigit() for c in s))\n # Remove contractions\n text = re.sub('\\'s', '', text)\n text = re.sub('’s', '', text) \n #Remove extra spaces\n text = re.sub('\\s+', ' ' , text) \n return text.strip()", "def remove_non_alpha_chars_and_html(text) -> str:\n text_output_trimmed = ProjectCommon.trimmer(text)\n\n text_output_no_html = ProjectCommon.remove_html(text_output_trimmed)\n\n text_output_no_html_no_non_alpha_chars = \\\n ProjectCommon.remove_non_alpha_chars(text_output_no_html)\n\n return text_output_no_html_no_non_alpha_chars", "def clean_html_encodings(text: str) -> str:\n return str(BeautifulSoup(text, 'html.parser'))", "def __html2unicode(self, s):\n # First the digits:\n ents = set(html_entity_digit_re.findall(s))\n if len(ents) > 0:\n for ent in ents:\n entnum = ent[2:-1]\n try:\n entnum = int(entnum)\n s = s.replace(ent, unichr(entnum))\n except:\n pass\n # Now the alpha versions:\n ents = set(html_entity_alpha_re.findall(s))\n ents = filter((lambda x : x != amp), ents)\n for ent in ents:\n entname = ent[1:-1]\n try: \n s = s.replace(ent, unichr(htmlentitydefs.name2codepoint[entname]))\n except:\n pass \n s = s.replace(amp, \" and \")\n return s", "def clean_text(self, text: str):\n text = text.rstrip()\n if '\"\"' in text:\n if text[0] == text[-1] == '\"':\n text = text[1:-1]\n text = text.replace('\\\\\"\"', '\"')\n text = text.replace('\"\"', '\"')\n\n text = text.replace('\\\\\"\"', '\"')\n\n text = html.unescape(text)\n\n text = ' '.join(text.split())\n return text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of the text with any XML/HTML tags removed.

>>> striptags('<span>Foo</span> bar')
'Foo bar'
>>> striptags('<span class="bar">Foo</span>')
'Foo'
>>> striptags('Foo<br />')
'Foo'
def striptags(text):
    return _STRIPTAGS_RE.sub('', text)
[ "def remove_tags(text):\n pattern = re.compile('<.*?>')\n return pattern.sub(r'', text)", "def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)", "def strip_tags(html):\n\n s = HTMLStripper()\n s.feed(html)\n stripped = s.get_data()\n # Remove extra spaces\n return ' '.join(filter(None, stripped.split(' ')))", "def removeHTMLParts(text: str, keeptags: Optional[List[str]] = None) -> str:\n # try to merge with 'removeDisabledParts()' above into one generic function\n # thanks to:\n # https://www.hellboundhackers.org/articles/read-article.php?article_id=841\n parser = _GetDataHTML()\n if keeptags is None:\n keeptags = ['tt', 'nowiki', 'small', 'sup']\n with parser:\n parser.keeptags = keeptags\n parser.feed(text)\n return parser.textdata", "def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)", "def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text", "def _strip_xml(txts):\n txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess\n txts = txts.split('\\n')\n\n for i in range(len(txts)):\n for pattern in patterns:\n txts[i] = pattern[0].sub(pattern[1], txts[i])\n\n txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']\n return '\\n'.join(txts)", "def strip_tags(value):\r\n return txcommon.rst.strip_tags(value)", "def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain", "def remove_tags(_content: str, *tags) -> str:\n content = _content\n for tag in tags:\n content = re.compile(tag).sub(\"\", content)\n return content", "def strip_tags( body ):\n var_mask_re = re.compile( u\"[%$]?\\<+([^<>]+)\\>+\" )\n return re.sub( var_mask_re, '', body )", "def remove_attributes_from_tags(text):\n if text:\n try:\n cleaner = clean.Cleaner(\n safe_attrs_only=True,\n remove_unknown_tags=False,\n )\n text = cleaner.clean_html(text)\n except lxml.etree.ParserError:\n return text\n return text", "def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text", "def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)", "def tag_untokenized_text(self, text):\n return self.tag_untokenized_sentences(self._sent_tokenize(text))", "def get_stripped_tags(self):\n stripped = []\n for (tags, attrs) in self.getHtmlExclusions():\n if not attrs:\n stripped.extend(tags)\n return stripped", "def strip_ascii_from_tags(self, text):\n player_ob = self.player_ob or self\n if 'no_ascii' in player_ob.tags.all():\n text = RE_ASCII.sub(\"\", text)\n text = RE_ALT_ASCII.sub(r\"\\1\", text)\n else:\n text = RE_ASCII.sub(r\"\\1\", text)\n text = RE_ALT_ASCII.sub(\"\", text)\n return text", "def sanitise_tags(tags: str):\n\n # hack out all kinds of whitespace, then split on ,\n # if you run into more 
illegal characters (simplenote does not want to sync them)\n # add them to the regular expression above.\n illegals_removed = tags_illegal_chars.sub('', tags)\n if len(illegals_removed) == 0:\n # special case for empty string ''\n # split turns that into [''], which is not valid\n return []\n\n else:\n return illegals_removed.split(',')", "def remove_empty_html_tags(document):\n return re.sub(r'(<\\w+\\s*/?>)', ' ', document)", "def clean_html_encodings(text: str) -> str:\n return str(BeautifulSoup(text, 'html.parser'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`.
def apply_filters(stream, filters, lexer=None):
    def _apply(filter_, stream):
        for token in filter_.filter(lexer, stream):
            yield token
    for filter_ in filters:
        stream = _apply(filter_, stream)
    return stream
[ "def apply(filters: List[Callable], q: Queryable) -> Queryable:\n for f in filters:\n q = f(q)\n return q", "def set_filters(filter_list):", "def filters(self, filters):\n\n for f in filters:\n self.filter(f[\"attribute_name\"], f[\"value\"], f[\"operator\"])", "def apply_filter(value):\n enabled_filters = get_filters()\n for filt in enabled_filters:\n value = filt(value)\n return value", "def register_filter(self, *filters):\n for f in filters:\n f.controller = self\n self._filters.append(f)", "def _run_filters(self, word):\n if len(self._filters) > 0:\n for f in self._filters:\n f.run(word)\n # print( 'running filter \\n filtername: %s \\n word: %s' % (f.__name__, word) )\n # if f.run(word) is False:\n # print( 'filter %s failed: %s' % (f.__name__, word) )\n # return False\n return True", "def applyAllFilters(categories):\n result = filterDisambiguation(categories)\n result = followRedirects(result)\n result = filterBlacklist(result)\n result = filterCountries(result)\n return result", "def apply_filters(self):\n\t\t# TODO: nahradit map-em\n\t\tfor f,is_row in self.filters:\n\t\t\tif is_row:\n\t\t\t\tself.table = list(map(f,self.table))\n\t\t\telse:\n\t\t\t\tself.table = f(self.table)", "def __call__(self, buf):\n return all(filter_(buf) for filter_ in self.filters)", "def runFilters(filters, events):\n for f in filters:\n if len(events) == 0:\n return []\n for event in events:\n event.hide = False\n events = sortEvents(events)\n events = f.process(events)\n \n events = sortEvents(events)\n return events", "def filter_cascade(filters):\n def newFilter(image):\n for f in filters:\n image = f(image)\n return image\n return newFilter", "def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)", "def setFilter(self, filters=[logging.INFO, logging.ERROR]):\n self._filters = filters", "def apply_filters(self, filters, starred):\n\n feed_count = 0\n item_count = 0\n processed_feeds = set()\n\n try:\n print u\"Retrieving subscribed feeds...\"\n subs_list = self._subscription_list()\n if starred:\n print u\"Retrieving starred items...\"\n else:\n print u\"Retrieving unread items...\"\n self._retrieve_entries(starred)\n except KeyboardInterrupt:\n exit(\"cancelled\")\n\n print u\"Applying filters...\"\n\n universal_patterns = filters.get(u\"*\", [])\n\n for tag in subs_list:\n tag_has_matching_feeds = False\n for feed in subs_list[tag]:\n # get the applicable filters\n patterns = universal_patterns[:]\n try:\n patterns.extend(filters[feed[u\"title\"]])\n except KeyError:\n pass\n\n if not feed[u\"feed_id\"] in processed_feeds:\n processed_feeds.add(feed[u\"feed_id\"])\n\n if not patterns:\n # skip to next feed\n continue\n\n # since there are applicable patterns, the current tag has at least one matching feed\n if not tag_has_matching_feeds:\n tag_has_matching_feeds = True\n print u\"\\n{}\\n{}\".format(tag, u\"=\" * len(tag))\n\n feed_count += 1\n items_found = self._apply_filter(feed, patterns)\n if items_found is not None:\n print u\"found {}.\".format(items_found)\n item_count += items_found\n\n if self.to_be_filtered:\n self._filter(starred)\n\n return feed_count, item_count", "def __iter__(self) -> Generator[ReadAlignments, None, None]:\n for 
readAlignments in self.iter():\n for filterFunc in self._filters:\n filteredReadAlignments = filterFunc(readAlignments)\n if filteredReadAlignments is False:\n break\n else:\n readAlignments = filteredReadAlignments\n else:\n yield readAlignments", "def filter(self, **kwargs):\n\n for filter_name, filter_value in kwargs.iteritems():\n self._filters[filter_name] = filter_value\n return self", "def iter_filter(fn):\r\n\r\n sentinel = object()\r\n\r\n @pipefilter\r\n @wraps(fn)\r\n def wrapped(*args, target=None, **kwargs):\r\n def generator():\r\n # This is the generator that the wrapped function will consume from\r\n while True:\r\n item = greenlet.getcurrent().parent.switch(sentinel)\r\n if isinstance(item, GeneratorExit):\r\n return\r\n else:\r\n yield item\r\n\r\n def run_target():\r\n # greenlet executing wrapped function\r\n fn(generator(), *args, **kwargs)\r\n\r\n def run_target_generator():\r\n for item in fn(generator(), *args, **kwargs):\r\n greenlet.getcurrent().parent.switch(item)\r\n\r\n if inspect.isgeneratorfunction(fn):\r\n # Wrapping a filter (consumes an iterator, is a generator)\r\n g_consume = greenlet(run_target_generator)\r\n g_consume.switch()\r\n\r\n try:\r\n while True:\r\n try:\r\n item = (yield)\r\n except Exception as e:\r\n g_consume.throw(e)\r\n else:\r\n value = g_consume.switch(item)\r\n\r\n # Feed any values the generator yields down the pipeline\r\n while value is not sentinel:\r\n if target is not None:\r\n target.send(value)\r\n value = g_consume.switch()\r\n except GeneratorExit as e:\r\n g_consume.switch(e)\r\n else:\r\n # Wrapping a sink (consumes an iterator)\r\n g_consume = greenlet(run_target)\r\n g_consume.switch()\r\n\r\n try:\r\n while True:\r\n try:\r\n item = (yield)\r\n except Exception as e:\r\n g_consume.throw(e)\r\n else:\r\n g_consume.switch(item)\r\n except GeneratorExit as e:\r\n g_consume.switch(e)\r\n\r\n return wrapped", "def filter(self, *args):\n # type: (Union[FilterList, cgtwq.Filter]) -> Tuple[FileBoxMeta, ...]\n\n if compat.api_level() == compat.API_LEVEL_5_2:\n return self._filter_v5_2(*args)\n return self._filter_v6_1(*args)", "def apply_filter(f, ms):\n for m in ms:\n res = f(m)\n if res is not None:\n yield res", "def flatland_filter(stream, context):\n return Stream(FlatlandFilter()(stream, context))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``.
def format(self, tokensource, outfile):
    if self.encoding:
        # wrap the outfile in a StreamWriter
        outfile = codecs.lookup(self.encoding)[3](outfile)
    return self.format_unencoded(tokensource, outfile)
[ "def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)", "def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> &lt; </symbol>')\n elif value == '>':\n self.output(f'<symbol> &gt; </symbol>')\n elif value == '&':\n self.output(f'<symbol> &amp; </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')", "def writeToFile(name,mode,tok_list):\n\t\n\t# open and create output file\n\tlogfile = open(name,mode)\n\t\n\t# write every token to ouput file each token to a new line\n\tfor tok in tok_list:\n\t\tlogfile.write(str(tok) + '\\n')\n\t\n\t# close file\n\tlogfile.close()", "def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n try:\n if not outfile:\n realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.formatter import Formatter\n if isinstance(formatter, type) and issubclass(formatter, Formatter):\n raise TypeError('format() argument must be a formatter instance, '\n 'not a class')\n raise", "def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()", "def tokenize(args):\n if args.profile and not Path(args.profile).exists(): # pragma: no cover\n raise ParserError('--profile must be a path for an existing file')\n _write(args, Tokenizer(profile=args.profile)(_read(args), column=args.mapping))", "def _format_code(source, preferred_quote):\n if not source:\n return source\n\n modified_tokens = []\n\n sio = io.StringIO(source)\n for (token_type,\n token_string,\n start,\n end,\n line) in tokenize.generate_tokens(sio.readline):\n\n if token_type == tokenize.STRING:\n token_string = unify_quotes(token_string,\n preferred_quote=preferred_quote)\n\n modified_tokens.append(\n (token_type, token_string, start, end, line))\n\n return untokenize.untokenize(modified_tokens)", "def save_tokens_to_file(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as fp:\n #for token in self.token2id.keys():\n for idd in range(self.size()): \n fp.write(self.id2token[idd] + '\\n')", "def _write_input(\n self, X: List[str], y: Optional[List[List[str]]], input_path: Path\n ):\n with open(input_path, \"w\") as f:\n if y is not None:\n for text, labels in zip(X, y):\n label_str = \" \".join(\n f\"__label__{FastText._escape_label(label)}\" for label in labels\n )\n f.write(f\"{label_str} {_fasttext_preprocess(text)}\\n\")\n elif y is None:\n for text in X:\n f.write(f\"{_fasttext_preprocess(text)}\\n\")", "def to_yacc(self, gp):\n d, _ = os.path.split(gp)\n tp = tempfile.mktemp('.y', dir=d)\n with open(tp, 'w') as tf:\n 
with open(gp) as gf:\n for l in gf:\n if l.startswith('%token'):\n _l = l.replace(',', '').replace(';', '')\n tf.write(_l)\n else:\n if l.startswith('%nodefault'):\n _l = l.replace('%nodefault', '%%')\n tf.write(_l)\n else:\n tf.write(l)\n\n return tp", "def write_input(codewords, input_file):\n with open(input_file, 'w') as f:\n for codeword in codewords:\n for x in codeword:\n f.write(str(x) + ' ')", "def parse_str(self, source, flags):\n file = tempfile.NamedTemporaryFile(mode = 'w+', suffix = '.cpp')\n file.write(source)\n file.seek(0)\n self.tu = self.index.parse(\n file.name,\n args=flags,\n options=cin.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD\n )\n file.close()\n for child in self.tu.cursor.get_children():\n if child.kind == cin.CursorKind.VAR_DECL or child.kind == cin.CursorKind.FUNCTION_DECL:\n self._py_nodes.append(self.transform(child))\n return self._py_nodes", "def tokens_to_string(tokens : List[Token]) -> str:\n output = \"\"\n for token in tokens: \n if any([isinstance(token, token_class) for token_class in \\\n [DiagToken, OperatorToken, ParenToken, MatIdentifierToken, VecIdentifierToken]]):\n output += token.value\n elif isinstance(token, InvToken) or isinstance(token, TransToken):\n sym = token.value\n if isinstance(sym, KernelToken):\n output += sym.value + \"(\" + sym.arg1 + \",\" + sym.arg2 + \")\"\n elif isinstance(sym, GroupToken):\n output += tokens_to_string(sym.tokens())\n else:\n output += sym.value \n \n if isinstance(token, InvToken):\n output += \".I\"\n else:\n output += \".T\"\n elif isinstance(token, GroupToken):\n output += tokens_to_string(token.tokens())\n else:\n output += token.value +\"(\" + token.arg1 + \",\"+ token.arg2 + \")\"\n return output", "def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a 
new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep", "def _write(self, template: list):\n with open(self.template_location, \"w\") as file:\n for line in template:\n file.write(line)", "def process_tokens(self, tokens):\n self._tokens = list(tokens)\n self._pos = 0\n self._ast = self._assert(self._chunk(), 'input to be a program')\n self._ast.store_token_groups(self._tokens)", "def tokeniser(tokenInfo):\n\n source_pass = tokenInfo[\"source_pass\"]\n tokensToSort = []\n tokensToClassify = []\n\n wordStartIndex = []\n wordEndIndex = []\n\n #Going through the words identified in the input and saving their start and end index in an array to use later\n if \"words\" not in tokenInfo[\"tokens\"].keys():\n tokensToSort.append({\n \"content\": source_pass,\n \"start_index\": 0,\n \"end_index\": len(source_pass) - 1\n })\n\n else:\n for word in tokenInfo[\"tokens\"][\"words\"]:\n wordStartIndex.append(word[\"start_index\"])\n wordEndIndex.append(word[\"end_index\"])\n\n tempTokenStartIndex = 0\n tempToken = ''\n for i in range(len(wordStartIndex)):\n tempToken = source_pass[tempTokenStartIndex:wordStartIndex[i]]\n if tempToken != '':\n tokensToSort.append({\n \"content\": tempToken,\n \"start_index\": tempTokenStartIndex,\n \"end_index\": wordStartIndex[i] - 1\n })\n tempTokenStartIndex = wordEndIndex[i] + 1\n\n if len(source_pass) > (wordEndIndex[len(wordEndIndex) - 1] + 1):\n tempToken = source_pass[wordEndIndex[len(wordEndIndex)-1]+1:len(source_pass)]\n tokensToSort.append({\n \"content\": tempToken,\n \"start_index\": wordEndIndex[len(wordEndIndex) - 1],\n \"end_index\": len(source_pass) - 1\n })\n\n #pp.pprint(tokensToSort)\n\n for tokenBeingSorted in tokensToSort:\n if tokenBeingSorted[\"content\"].isalpha() or tokenBeingSorted[\"content\"].isdigit() or isOnlySpecialCharacters(tokenBeingSorted[\"content\"]) or len(tokenBeingSorted[\"content\"])==1:\n tokensToClassify.append({\n \"content\": tokenBeingSorted[\"content\"],\n \"start_index\": tokenBeingSorted[\"start_index\"],\n \"end_index\": tokenBeingSorted[\"end_index\"]\n })\n else:\n tempTokenBeingSorted = tokenBeingSorted[\"content\"][0]\n tempStartIndex = tokenBeingSorted[\"start_index\"]\n\n for chars in range(len(tokenBeingSorted[\"content\"]) -1):\n if classifyCharacter(tokenBeingSorted[\"content\"][chars]) == classifyCharacter(tokenBeingSorted[\"content\"][chars+1]):\n tempTokenBeingSorted+=(tokenBeingSorted[\"content\"][chars+1])\n else:\n tokensToClassify.append({\n \"content\": tempTokenBeingSorted,\n \"start_index\": tempStartIndex,\n \"end_index\": tempStartIndex + len(tempTokenBeingSorted) - 1\n })\n tempStartIndex += len(tempTokenBeingSorted)\n tempTokenBeingSorted = tokenBeingSorted[\"content\"][chars+1]\n\n tokensToClassify.append({\n \"content\": tempTokenBeingSorted,\n \"start_index\": tempStartIndex,\n \"end_index\": tempStartIndex + len(tempTokenBeingSorted) - 1\n })\n\n for token in tokensToClassify:\n\n unclassifiedToken = token[\"content\"]\n classification = classifier(unclassifiedToken)\n if classification not in tokenInfo[\"tokens\"]:\n tokenInfo[\"tokens\"][classification] = []\n\n tokenInfo[\"tokens\"][classification].append({\n \"content\": unclassifiedToken,\n \"start_index\": token[\"start_index\"],\n \"end_index\": token[\"end_index\"]\n })\n\n return tokenInfo", "def save_tracks_to_file(track_source, output_file):\n for track in tracks_source:\n track_summary = make_track_summary(track)\n output_file.write(serialize_track(track_summary) + '\\n')", "def 
write_sources_list(self,filename=None):\n\t\tif not filename:\n\t\t\tfilename = self.filename\n\t\tf = open(filename,'w')\n\t\tf.write('\\n'.join(self.format_for_output()))\n\t\tf.close()", "def createTaggedNgramsFile(ngrams_file, tagged_ngrams_file):\n\to = open(tagged_ngrams_file, 'w')\n\t\n\tprint('Opening input n-gram counts file...')\n\tc = 0\n\tf = open(ngrams_file)\n\tfor line in f:\n\t\tc += 1\n\t\tif c % 1000000 == 0:\n\t\t\tprint(str(c) + ' n-grams processed.')\n\t\tdata = line.strip().split('\\t')\n\t\ttokens = [t.split('|||') for t in data[0].split(' ')]\n\t\tif len(tokens)==2:\n\t\t\to.write(tokens[0][0] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==3:\n\t\t\to.write(tokens[0][0] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==4:\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + '\\t' + data[1] + '\\n')\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][0] + ' ' + tokens[2][min(1, len(tokens[2])-1)] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + '\\t' + data[1] + '\\n')\n\t\telif len(tokens)==5:\n\t\t\to.write(tokens[0][min(1, len(tokens[0])-1)] + ' ' + tokens[1][min(1, len(tokens[1])-1)] + ' ' + tokens[2][0] + ' ' + tokens[3][min(1, len(tokens[3])-1)] + ' ' + tokens[4][min(1, len(tokens[4])-1)] + '\\t' + data[1] + '\\n')\n\tf.close()\n\tprint('N-grams file read!')\n\t\n\tprint('Saving model...')\n\to.close()\n\tprint('Finished!')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the css class of this token type prefixed with the classprefix option.
def _get_css_class(self, ttype): ttypeclass = _get_ttype_class(ttype) if ttypeclass: return self.classprefix + ttypeclass return ''
[ "def _get_class_string(self):\n\n classes = self.attrs.get(\"class\", None)\n\n # No classes were set in the attributes\n if not classes:\n return \" \".join(self.classes)\n\n classes = classes.value\n\n # Make room for the classes set in the tag\n if self.classes:\n classes += \" \"\n\n classes += \" \".join(self.classes)\n\n return classes", "def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)", "def clau_class(obj):\n return mark_safe(_css_class(obj))", "def get_class_name(self):\n return self.name[:-6]", "def css_class(self):\n header = self.data.header\n normalizer = getUtility(IIDNormalizer)\n return \"portlet-multilanguage-%s\" % normalizer.normalize(header)", "def add_class(self, value: str) -> HTMLNode:\n return self.add_attr(\"class\", value)", "def _css_class(obj):\n if not obj:\n cls = ''\n\n elif isinstance(obj, str):\n # Alias or e-mail\n if is_valid_email(obj):\n cls = 'claudia-email'\n else:\n cls = 'claudia-error'\n\n elif isinstance(obj, Mapping):\n cls = 'claudia-%s' % obj.type\n if not obj.active:\n cls = 'claudia-inactive %s' % cls\n\n elif isinstance(obj, SharedDrive):\n cls = 'claudia-%s' % Mapping.get_type(obj)\n\n else:\n cls = 'claudia-%s' % Mapping.get_type(obj)\n if not obj.is_active():\n cls = 'claudia-inactive %s' % cls\n\n return cls", "def class_name(self):\n return self.element_info.class_name", "def get_class_mnemonic(rrclass):\n if type(rrclass) is type and issubclass(rrclass, Class):\n return rrclass.mnemonic\n elif isinstance(rrclass, int):\n for cls in CLASSES:\n if rrclass == CLASSES[cls].value:\n return CLASSES[cls].mnemonic\n return \"CLASS{}\".format(int(rrclass))\n elif isinstance(rrclass, str):\n if rrclass.upper() in CLASSES:\n return CLASSES[rrclass.upper()].mnemonic\n else:\n match = re.search(r'^CLASS(\\d+)$', rrclass.upper())\n if match:\n return rrclass\n raise ValueError(\n \"rrclass must be a known class mnemonic (e.g. IN, CH), an integer, \"\n \"or a CLASS### text representation of an unknown class (see RFC3597) \"\n \"({!r} is a {})\".format(rrclass, type(rrclass))\n )", "def class_abbrev(type):\n ...", "def format_class(self):\n return self.format_class_loader.get_class(name=self.format_class_name)", "def get_class_name(rrclass):\n if type(rrclass) is type and issubclass(rrclass, Class):\n return rrclass.long_name\n elif isinstance(rrclass, int):\n for cls in CLASSES:\n if rrclass == CLASSES[cls].value:\n return CLASSES[cls].long_name\n elif isinstance(rrclass, str):\n if rrclass.upper() in CLASSES:\n return CLASSES[rrclass.upper()].long_name\n else:\n match = re.search(r'^CLASS(\\d+)$', rrclass.upper())\n if match:\n return rrclass\n raise ValueError(\n \"rrclass must be a known class mnemonic (e.g. 
IN, CH), an integer, \"\n \"or a CLASS### text representation of an unknown class (see RFC3597) \"\n \"({!r} is a {})\".format(rrclass, type(rrclass))\n )", "def english_class_name(self):\n return self._en_class_name", "def type_class(self):\n return type_get_class_name(type_get_class(self))", "def char_class(self):\n return CLASS_NAMES.get(self.char_class_id)", "def type_prefix(self):\n if not self._type_prefix:\n self._type_prefix = ''.join(\n (self.command_prefix[0:1].upper(), self.command_prefix[1:]))\n return self._type_prefix", "def genClassCode(self):\r\n \r\n # Generate _fields.\r\n fieldsstr = self.genFieldsStr()\r\n \r\n # Parse annotations.\r\n self.parseAnnStr()\r\n \r\n tstr = self.genTypesStr()\r\n \r\n attrstr = fieldsstr + tstr\r\n \r\n return self.classtemplate.format(self.classname, self.basename, attrstr)", "def format_class_name(self):\n if \"json\" in self.format_class_loader.class_names:\n default = \"json\"\n else:\n default = self.format_class_loader.class_names[0]\n return getattr(self, \"_format_class_name\", default)", "def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class", "def class_name(self):\n return self.source_file.rsplit('.', maxsplit=1)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return CSS style definitions for the classes produced by the current highlighting style. ``arg`` can be a string or list of selectors to insert before the token type classes.
def get_style_defs(self, arg=None): if arg is None: arg = ('cssclass' in self.options and '.'+self.cssclass or '') if isinstance(arg, str): args = [arg] else: args = list(arg) def prefix(cls): if cls: cls = '.' + cls tmp = [] for arg in args: tmp.append((arg and arg + ' ' or '') + cls) return ', '.join(tmp) styles = [(level, ttype, cls, style) for cls, (style, ttype, level) in self.class2style.items() if cls and style] styles.sort() lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:]) for (level, ttype, cls, style) in styles] if arg and not self.nobackground and \ self.style.background_color is not None: text_style = '' if Text in self.ttype2class: text_style = ' ' + self.class2style[self.ttype2class[Text]][0] lines.insert(0, '%s { background: %s;%s }' % (prefix(''), self.style.background_color, text_style)) if self.style.highlight_color is not None: lines.insert(0, '%s.hll { background-color: %s }' % (prefix(''), self.style.highlight_color)) return '\n'.join(lines)
[ "def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class", "def _colorize_re(tree, noparen=0):\n result = []\n out = result.append\n \n if len(tree) > 1 and not noparen:\n out('<span class=\"%s\">(</span>' % PAREN_TAG)\n for elt in tree:\n op = elt[0]\n args = elt[1]\n\n if op == sre_constants.LITERAL:\n c = unichr(args)\n if c == '\\t': out(r'<span class=\"%s\">\\t</span>' % ESCAPE_TAG)\n elif c == '\\n': out(r'<span class=\"%s\">\\n</span>' % ESCAPE_TAG)\n elif c == '\\r': out(r'<span class=\"%s\">\\r</span>' % ESCAPE_TAG)\n elif c == '\\f': out(r'<span class=\"%s\">\\f</span>' % ESCAPE_TAG)\n elif c == '\\v': out(r'<span class=\"%s\">\\v</span>' % ESCAPE_TAG)\n elif ord(c)<32 or ord(c)>=127:\n if c < 256: template = r'<span class=\"%s\">\\x%02x</span>'\n else: template = r'<span class=\"%s\">\\u%04x</span>'\n out(template % (ESCAPE_TAG,ord(c)))\n elif c in '.^$\\\\*+?{}[]|()':\n out(r'<span class=\"%s\">\\%c</span>' % (ESCAPE_TAG, c))\n else: out(plaintext_to_html(unichr(args)))\n continue\n \n elif op == sre_constants.ANY:\n out('<span class=\"%s\">.</span>' % ANY_TAG)\n \n elif op == sre_constants.BRANCH:\n if args[0] is not None:\n raise ValueError('Branch expected None arg but got %s'\n % args[0])\n VBAR = '<span class=\"%s\">|</span>' % BRANCH_TAG\n out(VBAR.join([_colorize_re(item,1) for item in args[1]]))\n \n elif op == sre_constants.IN:\n if (len(args) == 1 and args[0][0] == sre_constants.CATEGORY):\n out(_colorize_re(args))\n else:\n out('<span class=\"%s\">[</span>' % CHOICE_TAG)\n out(_colorize_re(args, 1))\n out('<span class=\"%s\">]</span>' % CHOICE_TAG)\n \n elif op == sre_constants.CATEGORY:\n out('<span class=\"%s\">' % CATEGORY_TAG)\n if args == sre_constants.CATEGORY_DIGIT: out(r'\\d')\n elif args == sre_constants.CATEGORY_NOT_DIGIT: out(r'\\D')\n elif args == sre_constants.CATEGORY_SPACE: out(r'\\s')\n elif args == sre_constants.CATEGORY_NOT_SPACE: out(r'\\S')\n elif args == sre_constants.CATEGORY_WORD: out(r'\\w')\n elif args == sre_constants.CATEGORY_NOT_WORD: out(r'\\W')\n else: raise ValueError('Unknown category %s' % args)\n out('</span>')\n \n elif op == sre_constants.AT:\n out('<span class=\"%s\">' % AT_TAG)\n if args == sre_constants.AT_BEGINNING_STRING: out(r'\\A')\n elif args == sre_constants.AT_BEGINNING: out(r'^')\n elif args == sre_constants.AT_END: out(r'$')\n elif args == sre_constants.AT_BOUNDARY: out(r'\\b')\n elif args == sre_constants.AT_NON_BOUNDARY: out(r'\\B')\n elif args == sre_constants.AT_END_STRING: out(r'\\Z')\n else: raise ValueError('Unknown position %s' % args)\n out('</span>')\n \n elif op == sre_constants.MAX_REPEAT:\n min = args[0]\n max = args[1]\n if max == sre_constants.MAXREPEAT:\n if min == 0:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">*</span>' % STAR_TAG)\n elif min == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">+</span>' % PLUS_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,}</span>' % (RNG_TAG, min))\n elif min == 0:\n if max == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">?</span>' % QMRK_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{,%d}</span>' % (RNG_TAG, max))\n elif min == max:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d}</span>' % (RNG_TAG, max))\n else:\n out(_colorize_re(args[2]))\n out('<span 
class=\"%s\">{%d,%d}</span>' % (RNG_TAG, min, max))\n\n elif op == sre_constants.MIN_REPEAT:\n min = args[0]\n max = args[1]\n if max == sre_constants.MAXREPEAT:\n if min == 0:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">*?</span>' % STAR_TAG)\n elif min == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">+?</span>' % PLUS_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,}?</span>' % (RNG_TAG, min))\n elif min == 0:\n if max == 1:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">??</span>' % QMRK_TAG)\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{,%d}?</span>' % (RNG_TAG, max))\n elif min == max:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d}?</span>' % (RNG_TAG, max))\n else:\n out(_colorize_re(args[2]))\n out('<span class=\"%s\">{%d,%d}?</span>'%(RNG_TAG, min, max))\n\n elif op == sre_constants.SUBPATTERN:\n if args[0] is None:\n out('<span class=\"%s\">(?:</span>' % PAREN_TAG)\n elif isinstance(args[0], (int, long)):\n # This is cheating:\n out('<span class=\"%s\">(</span>' % PAREN_TAG)\n else:\n out('<span class=\"%s\">(?P&lt;</span>' % PAREN_TAG)\n out('<span class=\"%s\">%s</span>' %\n (REF_TAG, plaintext_to_html(args[0])))\n out('<span class=\"%s\">&gt;</span>' % PAREN_TAG)\n out(_colorize_re(args[1], 1))\n out('<span class=\"%s\">)</span>' % PAREN_TAG)\n\n elif op == sre_constants.GROUPREF:\n out('<span class=\"%s\">\\\\%d</span>' % (REF_TAG, args))\n\n elif op == sre_constants.RANGE:\n start = _colorize_re( ((sre_constants.LITERAL, args[0]),) )\n end = _colorize_re( ((sre_constants.LITERAL, args[1]),) )\n out('%s<span class=\"%s\">-</span>%s' % (start, CHOICE_TAG, end))\n \n elif op == sre_constants.NEGATE:\n out('<span class=\"%s\">^</span>' % CHOICE_TAG)\n\n elif op == sre_constants.ASSERT:\n if args[0]: out('<span class=\"%s\">(?=</span>' % ASSERT_TAG)\n else: out('<span class=\"%s\">(?&lt;=</span>' % ASSERT_TAG)\n out(''.join(_colorize_re(args[1], 1)))\n out('<span class=\"%s\">)</span>' % ASSERT_TAG)\n \n elif op == sre_constants.ASSERT_NOT:\n if args[0]: out('<span class=\"%s\">(?!</span>' % ASSERT_TAG)\n else: out('<span class=\"%s\">(?&lt;!</span>' % ASSERT_TAG)\n out(''.join(_colorize_re(args[1], 1)))\n out('<span class=\"%s\">)</span>' % ASSERT_TAG)\n\n elif op == sre_constants.NOT_LITERAL:\n lit = _colorize_re( ((sre_constants.LITERAL, args),) )\n out('<span class=\"%s\">[^</span>%s<span class=\"%s\">]</span>' %\n (CHOICE_TAG, lit, CHOICE_TAG))\n else:\n log.error(\"Error colorizing regexp: unknown elt %r\" % elt)\n if len(tree) > 1 and not noparen: \n out('<span class=\"%s\">)</span>' % PAREN_TAG)\n return u''.join(result)", "def css(self, query: str) -> list[\"Node\"]:\n ...", "def clau_class(obj):\n return mark_safe(_css_class(obj))", "def parse_styleguide(element):\n assert element.tag == 'styleguide'\n styles = []\n for child in element:\n styles.append(parse_class(child))\n return styles", "def __call__(cls, *args, **kwds):\r\n if '_tokens' not in cls.__dict__:\r\n cls._all_tokens = {}\r\n cls._tmpname = 0\r\n if hasattr(cls, 'token_variants') and cls.token_variants:\r\n # don't process yet\r\n pass\r\n else:\r\n cls._tokens = cls.process_tokendef('', cls.get_tokendefs())\r\n\r\n return type.__call__(cls, *args, **kwds)", "def lazy_style(*args):\n if len(args) == 0:\n raise TypeError(\n \"When applying a style method to a color, the color instance \"\n \"cannot be mutated with the style method - the method can \"\n \"only be used to apply the color and style to a specified \"\n 
\"argument, which must be provided to the style method.\"\n )\n sty = style(code)\n return sty(self.__call__(args[0]))", "def __init__(self, sourceFileName, argType=\"file\", **kwargs):\n ## reset global state ##\n global doxygenCommentCache\n doxygenCommentCache = \"\"\n\n if (argType == \"file\"):\n self.sourceFileName = os.path.expandvars(sourceFileName)\n self.mainClass = os.path.split(self.sourceFileName)[1][:-3]\n sourceFileStr = \"\"\n elif argType == \"string\":\n self.sourceFileName = \"\"\n self.mainClass = \"???\"\n sourceFileStr = sourceFileName\n else:\n raise Exception(\"Arg type must be either file or string\")\n self.curClass = \"\"\n \n self.functions = []\n\n if (len(self.sourceFileName)):\n fd = open(self.sourceFileName)\n sourceFileStr = \"\".join(fd.readlines())\n fd.close() \n \n # Make sure supportedAccessSpecifier are sane\n \n self.braceDepth = 0\n lex.lex()\n lex.input(sourceFileStr)\n curLine = 0\n curChar = 0\n function_name=\"\"\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n self.openBraceStack = []\n self.closeBraceStack = []\n self.classstack = []\n self.openBraceStackClass = []\n self.closeBraceStackClass = []\n self.paramStack = []\n self.namespace = \"\"\n while True:\n tok = lex.token()\n if not tok: break\n curLine = tok.lineno\n curChar = tok.lexpos\n if tok.type == 'NAME':\n if tok.value in keywords:\n continue\n if len(self.openParenStack)>len(self.closeParenStack):\n continue\n self.nameStack.append(tok)\n\n elif tok.type == 'SEMI_COLON':\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n self.namespace = \"\"\n\n elif tok.type == 'OPEN_BRACE':\n if len(self.nameStack)>=2 and self.nameStack[-2].value==\"class\":\n #class named的情况下\n classname = self.nameStack[-1].value\n if len(self.classstack)>0: #如果有class,将class的大括号入栈\n self.openBraceStackClass.append(tok)\n self.classstack.append(classname)\n self.openBraceStackClass = [] #只有一个class\n self.closeBraceStackClass = []\n self.openBraceStackClass.append(tok)\n continue\n\n if len(self.nameStack)>=2 and len(self.openParenStack)==1\\\n and len(self.closeParenStack)==1: #函数的情况\n #只有函数名的情况\n function_name = self.nameStack[-1].value\n self.openBraceStack = []\n self.closeBraceStack = []\n self.openBraceStack.append(tok)\n if function_name == \"const\":\n function_name = self.nameStack[-2].value\n if self.namespace != \"\":\n function_name = self.namespace+\"::\"+function_name\n elif len(self.classstack)>0:\n function_name = self.classstack[-1]+\"::\"+function_name\n fo = FunctionObj()\n fo.name = function_name\n fo.startline = tok.lineno\n self.functions.append(fo)\n self.nameStack = []\n self.openParenStack = []\n self.closeParenStack = []\n continue\n\n self.openBraceStack.append(tok)\n\n self.nameStack = []\n self.namespace = \"\"\n\n elif tok.type == 'CLOSE_BRACE':\n self.closeBraceStack.append(tok)\n self.closeBraceStackClass.append(tok)\n if len(self.closeBraceStack) == len(self.openBraceStack):\n if function_name:\n self.functions[-1].endline = tok.lineno\n function_name = \"\"\n if len(self.closeBraceStackClass) == len(self.openBraceStackClass):\n self.classname = \"\"\n self.namespace = \"\"\n\n elif tok.type == 'OPEN_PAREN':\n self.openParenStack.append(tok) \n elif tok.type == 'CLOSE_PAREN':\n pos = 0\n if len(self.openParenStack)>0:\n pos = self.openParenStack[-1].lexpos\n temp = []\n temp.extend(self.nameStack) \n for idx in range(len(temp)):\n tt = temp[idx]\n if tt.lexpos>pos:\n self.nameStack.remove(tt)\n 
self.closeParenStack.append(tok) \n elif tok.type == 'COLONCOLON':\n if len(self.openParenStack)>len(self.closeParenStack):\n continue\n if len(self.nameStack)>0:\n self.namespace = self.nameStack[-1].value\n else:\n pass", "def make_style(self, opts=(), **kwargs):\n if len(kwargs) == 0 and len(opts) == 0:\n return lambda text: text\n return lambda text: self.colorize(text, opts, **kwargs)", "def find_style(term):\r\n\r\n styles = all_styles()\r\n found_styles = []\r\n\r\n if term =='':\r\n return found_styles\r\n else:\r\n for style in styles:\r\n if term in style:\r\n found_styles.append(style)\r\n else:\r\n continue\r\n\r\n return found_styles", "def get_color_class(self, pt, classes):\n\n view_id = self.view.buffer_id()\n if not self.color_classes[view_id] or self.view.settings().get('color_helper.refresh', True):\n util.debug(\"Clear color class stash\")\n self.view.settings().set('color_helper.refresh', False)\n self.color_classes[view_id] = util.get_settings_colors()\n\n # Check if the first point within the color matches our scope rules\n # and load up the appropriate color class\n color_class = None\n filters = []\n for item in classes:\n try:\n value = self.view.score_selector(pt, item[\"scopes\"])\n if not value:\n continue\n else:\n class_options = self.color_classes[view_id].get(item[\"class\"])\n if class_options is None:\n continue\n module = class_options.get(\"class\", \"ColorHelper.lib.coloraide.Color\")\n if isinstance(module, str):\n if module == \"ColorHelper.lib.coloraide.Color\":\n color_class = self.base\n else:\n # Initialize the color module and cache it for this view\n color_class = util.import_color(module)\n class_options[\"class\"] = color_class\n else:\n color_class = module\n filters = class_options.get(\"filters\", [])\n break\n except Exception:\n pass\n return color_class, filters", "def using_classes(order):\n\n # create a list of class instances for each valid `color`\n colors = [Color(**getColor(color)) for color in order if getColor(color) is not None]\n # print the color information for each class instance\n for color in colors:\n print(color)", "def css_tree(self) -> Tree:\n from rich.columns import Columns\n from rich.console import Group\n from rich.panel import Panel\n\n from .widget import Widget\n\n def render_info(node: DOMNode) -> Columns:\n \"\"\"Render a node for the tree.\"\"\"\n if isinstance(node, Widget):\n info = Columns(\n [\n Pretty(node),\n highlighter(f\"region={node.region!r}\"),\n highlighter(\n f\"virtual_size={node.virtual_size!r}\",\n ),\n ]\n )\n else:\n info = Columns([Pretty(node)])\n return info\n\n highlighter = ReprHighlighter()\n tree = Tree(render_info(self))\n\n def add_children(tree: Tree, node: DOMNode) -> None:\n \"\"\"Add children to the tree.\"\"\"\n for child in node.children:\n info: RenderableType = render_info(child)\n css = child.styles.css\n if css:\n info = Group(\n info,\n Panel.fit(\n Text(child.styles.css),\n border_style=\"dim\",\n title=\"css\",\n title_align=\"left\",\n ),\n )\n branch = tree.add(info)\n if tree.children:\n add_children(branch, child)\n\n add_children(tree, self)\n return tree", "def get_tokendefs(cls):\r\n tokens = {}\r\n inheritable = {}\r\n for c in itertools.chain((cls,), cls.__mro__):\r\n toks = c.__dict__.get('tokens', {})\r\n\r\n for state, items in toks.items():\r\n curitems = tokens.get(state)\r\n if curitems is None:\r\n tokens[state] = items\r\n try:\r\n inherit_ndx = items.index(inherit)\r\n except ValueError:\r\n continue\r\n inheritable[state] = inherit_ndx\r\n 
continue\r\n\r\n inherit_ndx = inheritable.pop(state, None)\r\n if inherit_ndx is None:\r\n continue\r\n\r\n # Replace the \"inherit\" value with the items\r\n curitems[inherit_ndx:inherit_ndx+1] = items\r\n try:\r\n new_inh_ndx = items.index(inherit)\r\n except ValueError:\r\n pass\r\n else:\r\n inheritable[state] = inherit_ndx + new_inh_ndx\r\n\r\n return tokens", "def annotate_conc(tokens):\n from colorama import Fore, Back, Style, init\n init(autoreset=True)\n cols = get_matching_indices(tokens)\n color = tokens[-1]\n if color in ['dim', 'normal', 'bright']:\n sty = 'Style'\n elif 'back' in tokens or 'background' in tokens:\n sty = 'Back'\n else:\n sty = 'Fore'\n #if tokens[-2].lower() in ['back', 'dim', 'fore', 'normal', 'bright', 'background', 'foreground']:\n # sty = tokens[-2].title().replace('ground', '')\n\n for line in cols:\n if not int(line) in list(objs.concordance.index):\n continue\n # if there is already info for this line number, add present info\n if objs._conc_colours[len(objs._old_concs)-1].get(line):\n objs._conc_colours[len(objs._old_concs)-1][line][sty] = color\n else:\n objs._conc_colours[len(objs._old_concs)-1][line] = {}\n objs._conc_colours[len(objs._old_concs)-1][line][sty] = color\n\n single_command_print(['concordance'] + tokens)", "def _get_annotation_class_attr(self, index, el):\n\n attr = {}\n cls = ['annotatable-span', 'highlight']\n highlight_key = 'highlight'\n color = el.get(highlight_key)\n\n if color is not None:\n if color in self.highlight_colors:\n cls.append('highlight-' + color)\n attr['_delete'] = highlight_key\n attr['value'] = ' '.join(cls)\n\n return {'class': attr}", "def _generate_header(self, resources):\n header = []\n\n # Add pygments CSS\n\n from pygments.formatters import HtmlFormatter\n formatter = HtmlFormatter(style=self.style)\n pygments_css = formatter.get_style_defs(self.highlight_class)\n header.append(pygments_css)\n\n return header", "def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")", "def add_css_classes(self, *css_classes):\n for cls in css_classes:\n self._css_classes.add(cls)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just format the tokens, without any wrapping tags. Yield individual lines.
def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator # for <span style=""> lookup only getcls = self.ttype2class.get c2s = self.class2style escape_table = _escape_html_table tagsfile = self.tagsfile lspan = '' line = '' for ttype, value in tokensource: if nocls: cclass = getcls(ttype) while cclass is None: ttype = ttype.parent cclass = getcls(ttype) cspan = cclass and '<span style="%s">' % c2s[cclass][0] or '' else: cls = self._get_css_class(ttype) cspan = cls and '<span class="%s">' % cls or '' parts = value.translate(escape_table).split('\n') if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line += (lspan and '</span>') + cspan + part + \ (cspan and '</span>') + lsep else: # both are the same line += part + (lspan and '</span>') + lsep yield 1, line line = '' elif part: yield 1, cspan + part + (cspan and '</span>') + lsep else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line += (lspan and '</span>') + cspan + parts[-1] lspan = cspan else: line += parts[-1] elif parts[-1]: line = cspan + parts[-1] lspan = cspan # else we neither have to open a new span nor set lspan if line: yield 1, line + (lspan and '</span>') + lsep
[ "def tokens(self):\n for t in self._ast.tokens:\n yield t", "def token_layout_generator(self):\n tfs_space, tfs_newline = ' ', os.linesep\n yield (Token.Prompt, self.config['IPYSH_TERMINAL_PROMPT'])\n\n layout = self.config['IPYSH_TOKEN_LAYOUT']\n if layout not in self.LAYOUT_OPTIONS:\n layout = self.defaults['IPYSH_TOKEN_LAYOUT']\n if layout == self.OFF:\n raise StopIteration\n elif layout == self.ONELINE:\n yield (Token, tfs_space)\n else:\n yield (Token, tfs_newline)\n\n token_sep = tfs_newline if layout == self.STACKED else tfs_space\n format_label = '{:{width}} : '.format\n width = max([len(label) for label in self.LABELS.itervalues()])\n\n # Current Dicrectory\n yield (Token.Prompt, ']')\n yield (Token, self._prepare_current_directory_token())\n yield (Token.Prompt, '[')\n\n # Git Branch\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, self._prepare_current_branch_token())\n #yield (Token.Prompt, '(Branch: ' )\n yield (Token.Prompt, format_label('Branch', width=width))\n yield (Token.Prompt, '(' )\n\n # Python Virtual Environment\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, self._prepare_current_venv_token())\n #yield (Token.Prompt, '(PyEnv: ' )\n yield (Token.Prompt, format_label('PyEnv', width=width))\n yield (Token.Prompt, '(' )\n\n # Application Environment\n yield (Token, token_sep)\n yield (Token.Prompt, ')')\n yield (Token, os.environ.get('APP_ENVIRONMENT', 'System'))\n yield (Token.Prompt, format_label('AppEnv', width=width))\n yield (Token.Prompt, '(' )", "def read_tokens(self, include_tag=False):\n for line in self.readlines():\n start = line.find('>') + 1 if not include_tag else 0\n line = line[start:].strip(string.whitespace)\n tokens = line.split()\n for token in tokens:\n yield token", "def _group_tokens(tokens):\n prev_vid = None\n buffer = []\n for x in tokens:\n vid = {\"chapterId\": x.pop(\"chapterId\"), \"verseNum\": x.pop(\"verseNum\")}\n if prev_vid and prev_vid != vid:\n yield {**prev_vid, **{\"tokens\": buffer}}\n buffer = []\n prev_vid = vid\n buffer.append(x)\n yield {**vid, **{\"tokens\": buffer}}", "def _render(self, tokens, options, env):\n pending_tags = []\n pending_content = [[]]\n for t, token in enumerate(tokens):\n if token.type == \"fence\": # Special case\n pending_content[-1].append(self.fence(tokens, t, options, env))\n elif token.tag != \"\":\n if not token.nesting: # Directly append to content\n c = [token.content] if token.content else []\n tag = getattr(dominate.tags, token.tag)\n tag = tag(*c) if token.attrs is None else tag(*c, **token.attrs)\n pending_content[-1].append(tag)\n elif len(pending_tags) > 0 and pending_tags[-1] == token.tag: # Closing tag\n t = pending_tags.pop()\n c = pending_content.pop()\n tag = getattr(dominate.tags, t)\n tag = tag(c) if token.attrs is None else tag(c, **token.attrs)\n pending_content[-1].append(tag)\n else: # Opening tag\n if token.tag == \"p\" and len(pending_tags) > 0 and pending_tags[-1] == \"li\":\n continue\n\n pending_tags.append(token.tag)\n pending_content.append([])\n elif token.children is not None:\n assert len(token.children) > 0\n pending_content[-1].extend(self._render(token.children, options, env))\n else:\n if not token.hidden:\n pending_content[-1].append(escapeHtml(token.content))\n\n assert len(pending_tags) == 0, pending_tags\n assert len(pending_content) == 1, pending_content\n\n return pending_content[-1]", "def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', 
'0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError(\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column//tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n if line[pos] == '#':\r\n comment_token = line[pos:].rstrip('\\r\\n')\r\n nl_pos = pos + len(comment_token)\r\n yield (COMMENT, comment_token,\r\n (lnum, pos), (lnum, pos + len(comment_token)), line)\r\n yield (NL, line[nl_pos:],\r\n (lnum, nl_pos), (lnum, len(line)), line)\r\n else:\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError(\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n newline = NEWLINE\r\n if parenlev > 0:\r\n newline = NL\r\n yield (newline, token, spos, epos, line)\r\n elif initial == '#':\r\n assert not token.endswith(\"\\n\")\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n # This yield is new; needed for better idempotency:\r\n yield (NL, token, spos, (lnum, pos), line)\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')", "def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column/tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does 
not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError, (\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n yield (parenlev > 0 and NL or NEWLINE,\r\n token, spos, epos, line)\r\n elif initial == '#':\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')", "def itercodelines(self):\r\n codeline = CodeLine(0)\r\n for token in self.itertokens():\r\n codeline.append(token)\r\n if codeline.complete:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline\r\n codeline = CodeLine(codeline.end_row + 1)\r\n if codeline.string:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline", "def _print_tokens(self, tokens) -> None:\n print(' '.join([self.get_index_token(tok.item()) for tok in tokens]))\n return", "def _annotate_first_pass(self, tokens):\n for aug_tok in tokens:\n self._first_pass_annotation(aug_tok)\n yield aug_tok", "def _annotate_tokens(self, tokens):\n # Make a preliminary pass through the document, marking likely\n # sentence breaks, abbreviations, and ellipsis tokens.\n tokens = self._annotate_first_pass(tokens)\n\n # Make a second pass through the document, using token context\n # information to change our preliminary decisions about where\n # sentence breaks, abbreviations, and ellipsis occurs.\n tokens = self._annotate_second_pass(tokens)\n\n return tokens", "def merge(self, tokens):\r\n tokens = iter(tokens)\r\n (lasttype, lastval) = tokens.next()\r\n for ttype, value in tokens:\r\n if ttype is lasttype:\r\n lastval += value\r\n else:\r\n yield(lasttype, lastval)\r\n 
(lasttype, lastval) = (ttype, value)\r\n if lastval.endswith('\\n'):\r\n lastval = lastval[:-1]\r\n if lastval:\r\n yield(lasttype, lastval)", "def _format(\n self, node: Node, level: int = 0, prefix: Text = \"\", suffix: Text = \"\"\n ) -> Iterator[_Line]:\n\n # noinspection PyTypeChecker\n yield from {\n FlatNode: self._format_flat,\n ListNode: self._format_list,\n MappingNode: self._format_mapping,\n }[node.__class__](node, level, prefix, suffix)", "def tokenize(self):", "def transform(self, actual_tokens):\n POGGER.debug(\"\\n\\n---\\n\")\n transform_state, output_html, actual_tokens_size = (\n TransformState(actual_tokens),\n \"\",\n len(actual_tokens),\n )\n for next_token in transform_state.actual_tokens:\n (\n transform_state.add_trailing_text,\n transform_state.add_leading_text,\n transform_state.next_token,\n ) = (None, None, None)\n if (transform_state.actual_token_index + 1) < actual_tokens_size:\n transform_state.next_token = actual_tokens[\n transform_state.actual_token_index + 1\n ]\n if next_token.token_name in self.start_token_handlers:\n start_handler_fn = self.start_token_handlers[next_token.token_name]\n output_html = start_handler_fn(output_html, next_token, transform_state)\n\n elif next_token.is_end_token:\n if next_token.type_name in self.end_token_handlers:\n end_handler_fn = self.end_token_handlers[next_token.type_name]\n output_html = end_handler_fn(\n output_html, next_token, transform_state\n )\n else:\n assert (\n False\n ), f\"Markdown token end type {next_token.type_name} not supported.\"\n else:\n assert False, f\"Markdown token type {type(next_token)} not supported.\"\n\n POGGER.debug(\"======\")\n POGGER.debug(\n \"add_trailing_text-->$<--\",\n transform_state.add_trailing_text,\n )\n POGGER.debug(\"add_leading_text -->$<--\", transform_state.add_leading_text)\n POGGER.debug(\"output_html -->$<--\", output_html)\n\n if transform_state.add_trailing_text:\n output_html = self.__apply_trailing_text(output_html, transform_state)\n\n if transform_state.add_leading_text:\n output_html = self.__apply_leading_text(output_html, transform_state)\n\n POGGER.debug(\"------\")\n POGGER.debug(\"next_token -->$<--\", next_token)\n POGGER.debug(\"output_html -->$<--\", output_html)\n POGGER.debug(\"transform_stack-->$<--\", transform_state.transform_stack)\n\n transform_state.last_token = next_token\n transform_state.actual_token_index += 1\n if output_html and output_html[-1] == ParserHelper.newline_character:\n output_html = output_html[:-1]\n POGGER.debug(\"output_html -->$<--\", output_html)\n return output_html", "def structure_representation(self):\n lines = []\n for token in self.tokens:\n head = token.head.id if token.head is not None else 0\n lemma = token.lemma if token.lemma is not None else '_'\n line = '{token.id}\\t{token.text}\\t{lemma}\\t{token.pos}\\t_\\t_\\t' \\\n '{head}\\t{token.dependency_relation}' \\\n '' \\\n ''\n line = line.format(token=token, lemma=lemma, head=head)\n lines.append(line)\n\n return '\\n'.join(lines)", "def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()", "def right_truncations (tokens):\n while tokens:\n yield tokens\n tokens = tokens [1 :]", "def make_nl_token(self):\n t1 = token_module.NEWLINE\n t2 = '\\n'\n t3 = (0, 0) # Not used.\n t4 = (0, 0) # Not used.\n t5 = '\\n'\n return t1, t2, t3, t4, t5" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Highlight the lines specified in the `hl_lines` option by postprocessing the token stream coming from `_format_lines`.
def _highlight_lines(self, tokensource): hls = self.hl_lines for i, (t, value) in enumerate(tokensource): if t != 1: yield t, value if i + 1 in hls: # i + 1 because Python indexes start at 0 if self.noclasses: style = '' if self.style.highlight_color is not None: style = (' style="background-color: %s"' % (self.style.highlight_color,)) yield 1, '<span%s>%s</span>' % (style, value) else: yield 1, '<span class="hll">%s</span>' % value else: yield 1, value
[ "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)", "def _highlit_line(content, offsets, markup, markdown, encoding):\n def chunks():\n try:\n # Start on the line the highlights are on:\n chars_before = content.rindex('\\n', 0, offsets[0][0]) + 1\n except ValueError:\n chars_before = None\n for start, end in offsets:\n yield cgi.escape(content[chars_before:start].decode(encoding,\n 'replace'))\n yield markup\n yield cgi.escape(content[start:end].decode(encoding, 'replace'))\n yield markdown\n chars_before = end\n # Make sure to get the rest of the line after the last highlight:\n try:\n next_newline = content.index('\\n', chars_before)\n except ValueError: # eof\n next_newline = None\n yield cgi.escape(content[chars_before:next_newline].decode(encoding,\n 'replace'))\n return ''.join(chunks()).lstrip()", "def convert_line_to_html(self, empty):\n\n line = []\n do_highlight = self.curr_row in self.hl_lines\n\n while self.end <= self.size:\n # Get text of like scope\n scope_name = self.view.scope_name(self.pt)\n while self.view.scope_name(self.end) == scope_name and self.end < self.size:\n self.end += 1\n\n color_match = self.view.style_for_scope(scope_name)\n color = color_match.get('foreground', self.fground)\n bgcolor = color_match.get('background')\n style = []\n if color_match.get('bold', False):\n style.append('bold')\n if color_match.get('italic', False):\n style.append('italic')\n if color_match.get('underline', False):\n style.append('underline')\n if color_match.get('glow', False):\n style.append('glow')\n\n if do_highlight:\n sfg = color_match.get('selection_forground', self.defaults.get('selection_forground'))\n if sfg:\n color = sfg\n bgcolor = color_match.get('selection', '#0000FF')\n\n region = sublime.Region(self.pt, self.end)\n # Normal text formatting\n tidied_text = self.html_encode(self.view.substr(region))\n self.format_text(line, tidied_text, color, bgcolor, style, empty)\n\n # Continue walking through line\n self.pt = self.end\n self.end = self.pt + 1\n\n # ~~~\n # # Get the color for the space at the end of a line\n # if self.end < self.view.size():\n # end_key = self.view.scope_name(self.pt)\n # color_match = self.view.style_for_scope(end_key)\n # self.ebground = color_match.get('background')\n # ~~~\n\n # Join line segments\n return ''.join(line)", "def highlight_line(self, line, fcolor, bcolor):\n pass", "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.document().markContentsDirty(block.position(),\r\n block.position() + block.length())\r\n self.rehighlightBlock(block)", "def apply(self, *args) -> \"void\":\n return _coin.SoLineHighlightRenderAction_apply(self, *args)", "def highlight_regions(self, replaced_lines):\n # type: (List[HunkReference]) -> None\n add_regions = [] # type: List[sublime.Region]\n add_bold_regions = []\n remove_regions = [] # type: List[sublime.Region]\n remove_bold_regions = []\n\n for section_start, section_end, hunk, line_types, raw_lines in replaced_lines:\n for line_type, lines_ in groupby(\n range(section_start, section_end),\n key=lambda line: line_types[line - section_start]\n ):\n lines = list(lines_)\n start, end = lines[0], lines[-1]\n start_line = self.view.full_line(self.view.text_point(start, 0))\n end_line = (\n self.view.full_line(self.view.text_point(end, 0))\n if start != 
end\n else start_line\n )\n region = sublime.Region(start_line.begin(), end_line.end())\n container = add_regions if line_type == \"+\" else remove_regions\n container.append(region)\n\n # For symmetric modifications show highlighting for the in-line changes\n if sum(1 if t == \"+\" else -1 for t in line_types) == 0:\n # Determine start of hunk/section.\n section_start_idx = self.view.text_point(section_start, 0)\n\n # Removed lines come first in a hunk.\n remove_start = section_start_idx\n first_added_line = line_types.index(\"+\")\n add_start = section_start_idx + len(\"\".join(raw_lines[:first_added_line]))\n\n removed_part = \"\".join(raw_lines[:first_added_line])\n added_part = \"\".join(raw_lines[first_added_line:])\n changes = util.diff_string.get_changes(removed_part, added_part)\n\n for change in changes:\n if change.type in (util.diff_string.DELETE, util.diff_string.REPLACE):\n # Display bold color in removed hunk area.\n region_start = remove_start + change.old_start\n region_end = remove_start + change.old_end\n remove_bold_regions.append(sublime.Region(region_start, region_end))\n\n if change.type in (util.diff_string.INSERT, util.diff_string.REPLACE):\n # Display bold color in added hunk area.\n region_start = add_start + change.new_start\n region_end = add_start + change.new_end\n add_bold_regions.append(sublime.Region(region_start, region_end))\n\n self.view.add_regions(\n \"git-savvy-added-lines\",\n add_regions,\n scope=\"diff.inserted.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-removed-lines\",\n remove_regions,\n scope=\"diff.deleted.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-added-bold\",\n add_bold_regions,\n scope=\"diff.inserted.char.git-savvy.inline-diff\"\n )\n self.view.add_regions(\n \"git-savvy-removed-bold\",\n remove_bold_regions,\n scope=\"diff.deleted.char.git-savvy.inline-diff\"\n )", "def rehighlight_lines(self, lines, errors=True):\r\n if errors:\r\n errors_lines = self._get_errors_lines()\r\n refresh_lines = set(lines + errors_lines)\r\n else:\r\n refresh_lines = set(lines + self.selected_word_lines)\r\n self.selected_word_lines = lines\r\n self._rehighlight_lines(refresh_lines)", "def _highlight_line_difflib(self, line, next_):\n\n if line['action'] == 'del':\n old, new = line, next_\n else:\n old, new = next_, line\n\n oldwords = self._token_re.split(old['line'])\n newwords = self._token_re.split(new['line'])\n sequence = difflib.SequenceMatcher(None, oldwords, newwords)\n\n oldfragments, newfragments = [], []\n for tag, i1, i2, j1, j2 in sequence.get_opcodes():\n oldfrag = ''.join(oldwords[i1:i2])\n newfrag = ''.join(newwords[j1:j2])\n if tag != 'equal':\n if oldfrag:\n oldfrag = '-'\n if newfrag:\n newfrag = '+'\n oldfragments.append(oldfrag)\n newfragments.append(newfrag)\n\n old['line'] = \"\".join(oldfragments)\n new['line'] = \"\".join(newfragments)", "def print_highlighted(line, hl_color=Back.WHITE):\n try:\n # Highlight positives\n colorer = re.compile(r'([^\\s]+) POSITIVES: ([1-9]) ')\n line = colorer.sub(Fore.YELLOW + r'\\1 ' + 'POSITIVES: ' + Fore.YELLOW + r'\\2 ' + Style.RESET_ALL, line)\n colorer = re.compile(r'([^\\s]+) POSITIVES: ([0-9]+) ')\n line = colorer.sub(Fore.RED + r'\\1 ' + 'POSITIVES: ' + Fore.RED + r'\\2 ' + Style.RESET_ALL, line)\n # Keyword highlight\n colorer = re.compile(r'([A-Z_]{2,}:)\\s', re.VERBOSE)\n line = colorer.sub(Fore.BLACK + hl_color + r'\\1' + Style.RESET_ALL + ' ', line)\n print line\n except Exception, e:\n pass", "def rehighlight_lines(self, lines, 
errors=True):\r\n if errors:\r\n errors_lines = self._get_errors_lines()\r\n refresh_lines = set(lines + errors_lines)\r\n else:\r\n refresh_lines = set(lines)\r\n self._rehighlight_lines(refresh_lines)", "def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep", "def detectLines(self, lines=...) 
-> lines:\n ...", "def refresh_lines_highlight(self, linenos):\n Vim.command('setlocal modifiable')\n\n sz = min(len(self.nodes), len(Vim.current.buffer))\n for i in linenos:\n if i < sz:\n self.vim_set_line(i, self.nodes[i].highlight_content)\n Vim.command('setlocal nomodifiable')", "def print_highlight(source_file, syntax, theme, print_only_matches=False):\n \n def get_colour_code(name):\n \"\"\" Returns:\n str: Colour code associated with syntax name.\n \"\"\"\n \n return \"\\033[{}m\".format(theme[name])\n \n # base colour escape code - no colour\n start_code = end_code = \"\\033[0m\"\n \n # open file containing source code\n with open(source_file, \"r\") as source:\n \n line = source.readline()\n while line:\n regex_match_in_line = False;\n \n # Special case: comments do not contain source code\n comment_line = \"\"\n \n # for each given syntax entry\n for k in syntax.keys():\n \n # get syntax attributes and get regex result\n syntax_name = syntax[k]\n start_code = get_colour_code(syntax_name)\n regex = re.compile(r'{}'.format(k)) # test for regex match in current line\n result = regex.search(line)\n \n i = 0\n # while there is a regex match in line (in case multiple of same syntax present)\n while result:\n regex_match_in_line = True\n \n # GET AND PREPARE MATCHED SLICE OF LINE\n \n # indexes of result\n span = result.span()\n # if there is no such result but still bypassed while, break\n if not result.string or span[0] == span[1] == 0:\n break\n # account for index shift if multiple results in line\n indices = (span[0] + i, span[1] + i)\n \n \n # ADD HIGHLIGHTS\n \n # does not colour encasing whitespaces\n if indices[1] != len(line) and line[indices[1] - 1] == ' ':\n line = line[:indices[1] - 1] + end_code + line[indices[1] - 1:]\n else:\n line = line[:indices[1]] + end_code + line[indices[1]:]\n if indices[0] != 0 and line[indices[0]] == ' ':\n line = line[:indices[0] + 1] + start_code + line[indices[0] + 1:]\n else:\n line = line[:indices[0]] + start_code + line[indices[0]:]\n \n \n \n # SPECIAL SYNTAX CASES\n \n # Special case 1: comments do not contain source code\n if syntax_name == \"comment\":\n comment_line = line[indices[0]:]\n line = line[:indices[0]]\n \n # Special case 2: colour each 'word' in string literals\n if syntax_name == \"string\":\n # get string literal\n string_match = line[indices[0] + len(start_code):indices[1] + len(start_code)]\n initial_string_length = len(string_match)\n # get all whitespaces in string using regex\n whitespaces_in_string = [(match.start(), match.end()) for match in re.finditer(\"\\s\", string_match)]\n \n if whitespaces_in_string:\n j = 0\n # for each whitespace, encase 'word' in highlight colour for string\n for w in whitespaces_in_string:\n string_match = string_match[:w[0] + j] + end_code + string_match[w[0] + j:w[1] + j] + start_code + string_match[w[1] + (w[1]-w[0]) + j - 1:]\n j += len(start_code) + len(end_code)\n # ensure highlight for first 'word' in string literal\n if not string_match[1 + len(end_code)].isspace():\n string_match = string_match[0] + start_code + string_match[1 + len(end_code):]\n i += len(string_match) - initial_string_length\n \n # insert modified string to line\n line = line[:indices[0] + len(start_code)] + string_match + line[indices[0] + len(start_code) + initial_string_length:]\n \n # PREPARE NEXT ITERATION\n # get new i shifted - to search rest of line for same regex\n i = indices[1] + len(end_code) + len(start_code)\n # get result from rest of line\n result = regex.search(line[i:])\n \n # Special case 1: 
comments do not contain source code\n # remove all other colour codes from comment\n if comment_line:\n # find colour escape code using regex\n colour_regex = re.compile(r'\\033\\[(.*?)m')\n i = 3\n result = colour_regex.search(comment_line[3:])\n # for each colour escape code within comment,\n # remove escape code and update comment line to print\n while result:\n comment_line = comment_line[:(result.span()[0]) + i] + comment_line[(result.span()[1] + i):]\n i = result.span()[0]\n result = colour_regex.search(comment_line[i:])\n comment_line = comment_line.rstrip() + \"\\033[0m\"\n \n # print coloured line and get next line\n if not print_only_matches or regex_match_in_line:\n print(line.strip('\\n') + comment_line.strip('\\n'))\n line = source.readline()", "def _highlight_composition(self):\n\n self._line.setUpdatesEnabled(False)\n ################# UPDATES DISABLED #################\n\n # clear any existing text colors\n self._color_clear()\n\n # the parse failed, so there will be invalid text to highlight\n if self._parser_error:\n self._color_invalid()\n\n # paint any valid tokens\n self._color_tokens()\n\n ################# UPDATES ENABLED #################\n self._line.setUpdatesEnabled(True)\n\n # done\n return", "def __call__(self, source, language=None, metadata=None):\n from pygments.formatters import HtmlFormatter\n\n if not language:\n language = self.pygments_lexer\n\n return _pygments_highlight(\n source if len(source) > 0 else \" \",\n # needed to help post processors:\n HtmlFormatter(\n cssclass=escape(f\" highlight hl-{language}\"), **self.extra_formatter_options\n ),\n language,\n metadata,\n )", "def highlight_line(self, line, fcolor, bcolor):\n self._run_commands([\n \"highlight CoverletLine{0} ctermfg={1} ctermbg={2}\".format(str(line), fcolor, bcolor),\n 'let s:coverlet_match_{0} = matchaddpos(\"CoverletLine{1}\", [[{2}, 1, 1]])'.format(str(line), str(line), str(line))\n ])", "def highlight_errored_lines(code_edit, error_line_numbers):\n extraSelections = []\n\n cursor = code_edit.textCursor()\n doc = code_edit.document()\n for lineno in error_line_numbers:\n\n selection = QtWidgets.QTextEdit.ExtraSelection()\n lineColor = QtGui.QColor.fromRgbF(0.8,\n 0.1,\n 0,\n 0.2)\n\n selection.format.setBackground(lineColor)\n selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection,\n True)\n\n block = doc.findBlockByLineNumber(lineno-1)\n cursor.setPosition(block.position())\n selection.cursor = cursor\n selection.cursor.clearSelection()\n extraSelections.append(selection)\n code_edit.setExtraSelections(extraSelections)", "def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the character size.
def get_char_size(self): return self.fonts['NORMAL'].getsize('M')
[ "def getTextLen(self):\r\n return self.TextLen", "def getTextSize(self):\n return self.textSize", "def string_length(self):\n return type_get_string_length(self)", "def getLength(self) -> \"int\":\n return _coin.SbName_getLength(self)", "def characters_count(self) -> int:\n return self.characters.count()", "def get_nbr_of_characters():\n\tchars = Character.objects()\n\treturn len(chars)", "def getTextLength(self):\n return len(self.pokemon.name)", "def width(self):\n\n\t\treturn self.fontsize / 2 * len(self.text)", "def _get_length(self):\n return self.Data.Length", "def message_size(self):\n string = self.message.encode('utf8')\n return len(string)", "def getDimensions(self):\n #return .75*self._size*(2+len(self._text)), 2.*self._size\n return _graphicsManager.addCommandToQueue((\"get text size\", self._text, self._size), True)", "def read_size(self):\n if self.data_length is None:\n return 8 - len(self.buffer)\n if self.data_length:\n return self.data_length - len(self.buffer) # control length is really part of the data.\n if self.control_length is None:\n return 4 - len(self.buffer)\n return self.control_length - len(self.buffer)", "def getcontentlength(self):\n return DAVElement.getcontentlength( str(self.st_size) )", "def byte_length(text: str) -> int:\n return len(text.encode(\"utf8\"))", "def _string_width(self, s):\r\n s = str(s)\r\n w = 0\r\n for i in s:\r\n w += self.character_widths[i]\r\n return w * self.font_size / 1000.0", "def getFontSize(self) -> \"float\":\n return _coin.SoCallbackAction_getFontSize(self)", "def char_size_px(self) -> \"tuple[int, int]\":\n px, py = self.term_size_px\n rows, cols = self.output.get_size()\n # If we can't get the pixel size, just guess wildly\n return px // cols or 10, py // rows or 22", "def max_characters(self) -> int:\n return self._max_characters", "def getBufferSize(self) -> \"size_t\":\n return _coin.SoOutput_getBufferSize(self)", "def get_file_size(self):\n return self.file_size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the font based on bold and italic flags.
def get_font(self, bold, oblique):
    if bold and oblique:
        return self.fonts['BOLDITALIC']
    elif bold:
        return self.fonts['BOLD']
    elif oblique:
        return self.fonts['ITALIC']
    else:
        return self.fonts['NORMAL']
[ "def Font(self, attr=None):\n if attr is None:\n self._font = 0\n else:\n mask = 1 << attr\n self._font ^= mask\n font = self._font & ((1 << renderer.BOLD) |\n (1 << renderer.CODE) |\n (1 << renderer.ITALIC))\n if font & (1 << renderer.CODE):\n embellishment = 'C'\n elif font == ((1 << renderer.BOLD) | (1 << renderer.ITALIC)):\n embellishment = 'Z'\n elif font == (1 << renderer.BOLD):\n embellishment = 'B'\n elif font == (1 << renderer.ITALIC):\n embellishment = 'I'\n else:\n embellishment = 'N'\n return self._csi + embellishment", "def font(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleFont(style, substyle)\n else:\n font = self.__lex.font(style)\n \n return font", "def getFont(self):\r\n return self.font", "def createFont(family=None, size=16, bold=False, italic=False):\n return {\"font_name\": family, \"font_size\": size, \"bold\": bold,\n \"italic\": italic}", "def _format_font(self, value):\n try:\n return self.root.fonts[value]\n except KeyError:\n message = \"'%s' font is not defined\" % value\n raise KeyError(message)", "def findfont(self, fontnames): ###\n\n def matchfont(fontname):\n bold = italic = False\n for i in range(0, 1):\n if fontname.lower().endswith(\" italic\"):\n italic = True\n fontname = fontname[: -len(\" italic\")]\n if fontname.lower().endswith(\" bold\"):\n bold = True\n fontname = fontname[: -len(\" bold\")]\n try:\n f = pygame.font.match_font(fontname, bold=int(bold), italic=int(italic))\n except MemoryError:\n f = (\n pygame.font.get_default_font()\n ) # works around mysterious issue on Japanese systems reported by Chad Boulay 20121115\n return f\n\n if not isinstance(fontnames, (list, tuple)):\n fontnames = [fontnames]\n fontnames = [f for f in fontnames if f != None]\n f = ([_f for _f in map(matchfont, fontnames) if _f] + [None])[0]\n if (\n f == None and sys.platform == \"darwin\"\n ): # pygame on OSX doesn't seem even to try to find fonts...\n f = (\n list(\n filter(\n os.path.isfile,\n [\n os.path.realpath(\"/Library/Fonts/%s.ttf\" % x)\n for x in fontnames\n ],\n )\n )\n + [None]\n )[0]\n return f", "def font(obj):\n return match(obj, font_matchers)", "def get_font(font_location:str):\n \n small_font = pygame.font.Font(font_location,20)\n \n medium_font = pygame.font.Font(font_location,28)\n \n large_font = pygame.font.Font(font_location,40)\n \n return (small_font,medium_font,large_font)", "def font(self, font_name):\n return self._font[font_name]", "def get_font_string(self):\n return self.font().toString()", "def __get_font(self, box):\n if box.boxstr not in self.__fonts:\n style_sheet = self.doc.get_style_sheet()\n style_name = style_sheet.get_draw_style(box.boxstr)\n style_name = style_name.get_paragraph_style()\n self.__fonts[box.boxstr] = \\\n style_sheet.get_paragraph_style(style_name).get_font()\n \n return self.__fonts[box.boxstr]", "def getFontName(self) -> \"SbString\":\n return _coin.SoFontStyle_getFontName(self)", "def getFontName(self) -> \"SbString\":\n return _coin.SoVRMLFontStyle_getFontName(self)", "def get_font_asset(self) -> BitmapFontAsset:\n return self.mc.bitmap_fonts[self.options['font_name_r']]", "def font(self):\n if self._font is not None:\n return self._font\n\n self._font = self.glyphs_module.GSFont()\n for index, ufo in enumerate(self.ufos):\n master = self.glyphs_module.GSFontMaster()\n self.to_glyphs_font_attributes(ufo, master,\n is_initial=(index == 0))\n self._font.masters.insert(len(self._font.masters), master)\n # TODO: all the other stuff!\n return self._font", "def 
get_font_info(self):\n return {\n 'antialias': self._font_antialias,\n 'background_color': self._font_background_color,\n 'color': self._font_color,\n 'name': self._font_name,\n 'selected_color': self._font_selected_color,\n 'size': self._font_size\n }", "def getFont(self):\n from pagebot.fonttoolbox.objects.font import getFont\n from pagebot.contexts.platform import getFontPaths\n fontPath = getFontPaths().get(self.font, self.font)\n return getFont(fontPath)", "def _get_font(self):\n return self._control.document().defaultFont()", "def getFonts():# list\n\treturn pygame.font.get_fonts()", "def findFont(styleNames, italic=False):\n # Any TypeNetwork TYPETR Productus or Proforma installed in the system?\n fontNames = findInstalledFonts(('Proforma', 'Productus'))\n if not forceTN or not fontNames: # Not installed, find something else that is expected to exist in OSX:\n for pattern in ('Bodoni', 'AmericanTypewriter', 'Avenir', 'Georgia'):\n fontNames = findInstalledFonts(pattern)\n if fontNames:\n break\n for styleName in styleNames:\n for fontName in fontNames:\n if styleName in fontName:\n return fontName\n return None # Nothing found." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the height of a line.
def _get_line_height(self): return self.fonth + self.line_pad
[ "def get_height(self):\n\t\treturn self.y[1] - self.y[0]", "def lineHeight(scr, lineNode):\n if lineNode is None:\n return 0\n manyLines = (len(lineNode.value)+1)//scr.getmaxyx()[1]+1\n # above solution doesn't account for tabs\n return manyLines", "def _get_height(self) -> \"int\" :\n return _core.TextCommandPalette__get_height(self)", "def manh_length(self, line: Line) -> float:\n coords = self.coords_on_line(line)\n return abs(coords[0][0] - coords[1][0]) + abs(coords[0][1] - coords[1][1])", "def height(self):\n return capi.get_band_ysize(self.ptr)", "def get_height(self):\n return self.textsurf.get_height()", "def lineByteLength(self):\n lineLength, remainder = divmod(8 + (self.pixelBitLength * self.width), 8)\n \n if remainder != 0:\n # Line breaks always happen on exact byte boundaries\n lineLength += 1\n \n return lineLength", "def getHeight( self ):\n return self.height", "def line_width(self):\n return self._data.get(b'strokeStyleLineWidth')", "def get_text_height(self, context):\n return self.get_font_extent(context)[2]", "def _get_height(self) -> \"int\" :\n return _core.Palette__get_height(self)", "def lineHeight(self, p_float=None, p_float_1=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n return 0.0", "def getHeight(self, x):\n if np.any(self.h == None):\n self.calculateHeight()\n return self.h_approx(x)", "def textLineSize(self, text):\n\t\treturn self.window.getTextSize(text, self.font[1], self.font[0])", "def border_height(self):\r\n return self.padding_height() + self.border_top_width + \\\r\n self.border_bottom_width", "def height(self):\n return max([point[1] for point in self.points])", "def get_height(self, x):\n return (x * self._xscale) + self._min_height", "def Height(self):\n return _handle.OperatorHandle_Height(self)", "def _compute_statistics_line_height(page_class: PAGE.Page, verbose: bool=False) -> Tuple[float, float, float]:\n y_lines_coords = [[c.y for c in tl.coords] for tr in page_class.text_regions for tl in tr.text_lines if tl.coords]\n line_heights = np.array([np.max(y_line_coord) - np.min(y_line_coord) for y_line_coord in y_lines_coords])\n\n # Remove outliers\n if len(line_heights) > 3:\n outliers = _is_outlier(np.array(line_heights))\n line_heights_filtered = line_heights[~outliers]\n else:\n line_heights_filtered = line_heights\n if verbose:\n print('Considering {}/{} lines to compute line height statistics'.format(len(line_heights_filtered),\n len(line_heights)))\n\n # Compute mean, std, median\n mean = np.mean(line_heights_filtered)\n median = np.median(line_heights_filtered)\n standard_deviation = np.std(line_heights_filtered)\n\n return mean, standard_deviation, median", "def get_height(self):\n try:\n return self.image.height\n except Exception:\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Y coordinate of a line number.
def _get_line_y(self, lineno): return lineno * self._get_line_height() + self.image_pad
[ "def ycoord(pt):\n return pt.y", "def get_ycoord(self, y):\n return (y - self.ylimits[0]) / self.dy", "def get_pos_y(self):\n return self._position[1]", "def getline(self, bno):\r\n return self.breakpt[bno]['line']", "def get_y(self):\n\n return math.floor(self.position.y)", "def _get_y(self) -> \"double\" :\n return _core.Point2D__get_y(self)", "def get_line_number(self):\n return self.line_number", "def getY(self):\n return self.pos.y", "def yposition(self):\n return self._yposition", "def line_number(self, line):\n ret_val = self._line_number(line)\n return ret_val", "def GetY(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetY(self)", "def find_subline_at_pos(self, y):\n subline_no = int(y / self.fontheight)\n if subline_no < len(self.sublines):\n return subline_no, self.sublines[subline_no][1]\n return subline_no, \"\"", "def get_y(self):\r\n return self.get_3d_position()[\"position\"].y", "def _get_line_end_pos(self):\n return self._get_line_end_cursor().position()", "def _y_for_x(self, x):\n if self.slope() == float('inf'):\n return None\n return self.slope() * x + self.y_intercept()", "def y(self, v):\n return self._ring_coordinates_gens['y'+str(v)]", "def OriginY(self) -> float:", "def __get_x_y(self, number):\n return number % self.map.width, number / self.map.width", "def current_y(self):\n return self._current_position[1]", "def y(self):\n return self.center[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the width of a character.
def _get_char_width(self): return self.fontw
[ "def get_width( o ):\n \"\"\"获取该字符在屏幕上的显示的长度\"\"\"\n global widths\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n if o <= chr(num):\n return wid\n return 1", "def fontwidth(word):\n return sum([lookup.ASCIIPIXELS[letter] + 1\n if letter in lookup.ASCIIPIXELS\n else 10\n for letter in word]) - 1", "def _string_width(self, s):\r\n s = str(s)\r\n w = 0\r\n for i in s:\r\n w += self.character_widths[i]\r\n return w * self.font_size / 1000.0", "def _get_width(self) -> \"int\" :\n return _core.TextCommandPalette__get_width(self)", "def width(self):\n\n\t\treturn self.fontsize / 2 * len(self.text)", "def getWidth(self) -> \"float\":\n return _coin.SoGlyph_getWidth(self)", "def get_width(self):\n return self.textsurf.get_width()", "def _get_width(self) -> \"int\" :\n return _core.Palette__get_width(self)", "def get_width(self):\n\t\treturn self.x[1] - self.x[0]", "def characters_count(self) -> int:\n return self.characters.count()", "def count_number_of_characters(text):\r\n return len(text)", "def get_nbr_of_characters():\n\tchars = Character.objects()\n\treturn len(chars)", "def ansi_len(string):\n return len(string) - wcswidth(re.compile(r'\\x1b[^m]*m').sub('', string))", "def width(self) -> int:\n return self.winfo_width()", "def Width(self):\n return _handle.OperatorHandle_Width(self)", "def get_terminal_width(self):\n width = 60 # Use this as a minimum\n try:\n size = os.get_terminal_size()\n except OSError:\n size = None\n if size and size[0] > width:\n width = size[0]\n if os.name == 'nt':\n width -= 1 # Windows needs 1 empty space for newline\n return width", "def textWidth(data):\n label = pyglet.text.Label(data,\n x=0, y=0,\n anchor_x=textAlignConst[attrib.textAlign[0]],\n anchor_y=textAlignConst[attrib.textAlign[1]],\n **attrib.font)\n return label.content_width", "def get_width(self):\n dividechars = 1\n table_size = self.hits.get_width() + self.columns[1][0] + self.columns[2][0] + dividechars * 3\n return table_size", "def width(self) -> float:\n return self._width", "def columnWidth(string):\n if app.config.strict_debug:\n assert isinstance(string, unicode)\n width = 0\n for i in string:\n width += charWidth(i, width)\n return width" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the X coordinate of a character position.
def _get_char_x(self, charno): return charno * self.fontw + self.image_pad + self.line_number_width
[ "def get_pos_x(self):\n return self._position[0]", "def get_char_position(char):\n i = CHAR_SET.index(char)\n if args.vertical:\n y = i % SHEET_HEIGHT\n x = i // SHEET_HEIGHT\n else:\n x = i % SHEET_WIDTH\n y = i // SHEET_WIDTH\n return (x, y)", "def xposition(self):\n return self._xposition", "def get_x(self):\n\n return math.floor(self.position.x)", "def get_x(self):\r\n return self.get_3d_position()[\"position\"].x", "def xcoord(pt):\n return pt.x", "def get_char_coords(x, y):\n\n x = MARGIN_X + (x * (FONT_WIDTH + CHAR_SPACING_X))\n y = MARGIN_Y + (y * (FONT_HEIGHT + CHAR_SPACING_Y))\n\n return (x, y)", "def current_x(self):\n return self._current_position[0]", "def get_x_pos(self):\n if(type(self._x_pos) != float):\n self._logger.write(\"Error! x_pos must be of type float\")\n elif(self._x_pos == None):\n self._logger.write(\"Error! x_pos contains no value\")\n else:\n try:\n return self._x_pos\n except Exception as e:\n self._logger.write(\"Error! Could not fetch the x_pos: \\n %s\" % e)", "def get_start(self) -> int:\n return self.__pos_x", "def get_x():\n pos_x = ctypes.c_int8(0)\n ret = _LIB.joystick_click_get_x(ctypes.byref(pos_x))\n if ret < 0:\n raise Exception(\"joystick click get x failed\")\n return pos_x.value", "def get_xcoord(self, x):\n return (x - self.xlimits[0]) / self.dx", "def pos_to_coord(pos):\n x, y = pos\n return \"%s%s\" % (string.letters[x], string.letters[y])", "def getoriginx(self):\n return self.origin[0]", "def x(self):\n return self.center[0]", "def xValue(self, position):\n return self.SBProfile.xValue(position)", "def x(self):\n if self.repr != 'cartesian':\n self.to_cartesian_coords()\n return self.__coord.x.value", "def get_char(self, coord):\n\t\tassert coord.x >= 0 and coord.x < self.width, \"X Coordinate out of range\"\n\t\tassert coord.y >= 0 and coord.y < self.height, \"Y Coordinate out of range\"\n\t\treturn self.content[self.y_max - coord.y][coord.x]", "def origin_x(self):\n ret = self._get_attr(\"originX\")\n return ret", "def _get_x_position_title(self, surface):\n return self._get_x_center_position_title() - surface.get_width() / 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the actual position for a character and line position.
def _get_text_pos(self, charno, lineno): return self._get_char_x(charno), self._get_line_y(lineno)
[ "def get_char_position(char):\n i = CHAR_SET.index(char)\n if args.vertical:\n y = i % SHEET_HEIGHT\n x = i // SHEET_HEIGHT\n else:\n x = i % SHEET_WIDTH\n y = i // SHEET_WIDTH\n return (x, y)", "def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)", "def get_char_coords(x, y):\n\n x = MARGIN_X + (x * (FONT_WIDTH + CHAR_SPACING_X))\n y = MARGIN_Y + (y * (FONT_HEIGHT + CHAR_SPACING_Y))\n\n return (x, y)", "def _get_line_start_pos(self):\n return self._get_line_start_cursor().position()", "def get_containing_line(self, pos):\n _, col, lino = self.contentTextCtrl.PositionToXY(pos)\n left = pos - col\n return (left, left + self.contentTextCtrl.GetLineLength(lino))", "def CurrentLineAndColumn():\n # See the comment in CurrentColumn about the calculation for the line and\n # column number\n line, column = vim.current.window.cursor\n line -= 1\n return line, column", "def _text_position(self, size, text):\n width, height = self._font.getsize(text)\n left = (size - width) / 2.0\n # I just don't know why 3 :)\n top = (size - height) / 2.0\n return left, top", "def find_char_at_pos(self, x, y):\n left = 0\n try:\n subline_no, text = self.find_subline_at_pos(y)\n except TypeError:\n return None\n i = -1\n while text:\n i += 1\n c, text = text[0], text[1:]\n if c in (FormatType.BOLD, FormatType.RESET, FormatType.UNDERLINE):\n continue\n if c == FormatType.COLOR:\n if len(text) > 0 and text[0] in \"0123456789\":\n if len(text) > 1 and text[1] in \"0123456789\":\n text = text[2:]\n i += 2\n else:\n text = text[1:]\n i += 1\n if len(text) > 1 and text[0] == \",\" and text[1] in \"0123456789\":\n if len(text) > 2 and text[2] in \"0123456789\":\n text = text[3:]\n i += 3\n else:\n text = text[2:]\n i += 2\n continue\n\n layout, (width, height) = self.get_pango_layout(c, False)\n\n if left <= x < left + width:\n return subline_no, i, c\n\n left += width\n return subline_no, i + 1, \"\"", "def offset_at_position(self):\n offset = 0\n for i, curr_line in enumerate(self.doc.iter_lines()):\n if i == self.line:\n break\n offset += len(curr_line)\n\n return offset + self.col", "def get_position(event):\n\tline, column = text.index('insert').split('.')\n\ts = \"line=%s column=%s\" % (line, column)\n\tprint \"Karthik\",\n\tprint s", "def get_pos(fai, chromosome, start, end):\n chrom = fai.records[chromosome]\n fai_entry_length = chrom.length\n fai_entry_offset = chrom.offset\n fai_entry_line_length = chrom.line_length\n fai_entry_line_length_bytes = chrom.line_length_bytes\n seq_len = end - start\n line_ratio = fai_entry_line_length * (fai_entry_line_length_bytes - fai_entry_line_length)\n newlines_total = int(fai_entry_length / line_ratio)\n newlines_before = 0\n if start > 0:\n newlines_before = int(start / line_ratio)\n newlines_to_end = int(end / line_ratio)\n byte_len_seq = newlines_to_end - newlines_before + seq_len\n byte_start = fai_entry_offset + newlines_before + start\n byte_end = fai_entry_offset + newlines_total + fai_entry_length\n return byte_start, byte_end, byte_len_seq", "def handle_cursor_pos(file, pos):\n if pos <= 0:\n return (0, None)\n\n with open(file, 'rb') as f:\n # Account for 1-indexed lines\n for line_num in range(pos-1):\n f.readline()\n cursor_byte = f.tell() # Cursor position in bytes\n\n # Decodes bytes to 
string and strip trailing chars\n prev_text = f.readline().decode(\"utf-8\").rstrip()\n return cursor_byte, prev_text", "def text_to_position(self, text):\n # Check for invalid board positions\n if text[0] not in self.board[0] or text[1] not in self.board[1]:\n print(\"You have entered an invalid chess board location\\n\"\n \"Valid locations are a1 through h8\")\n return\n\n return self.board[0].index(text[0]), self.board[1].index(text[1])", "def get_position(self):\n return self._rect.x, self._rect.y", "def get_position(position, entry):\n\n return entry.split(' ')[position]", "def _get_line_end_pos(self):\n return self._get_line_end_cursor().position()", "def getPosition(self) -> \"SbVec3f const &\":\n return _coin.SbLine_getPosition(self)", "def getPosition(self) -> \"SbVec3d const &\":\n return _coin.SbDPLine_getPosition(self)", "def char_position(char):\n for i, row in enumerate(keys_lo):\n for j, key in enumerate(row):\n if key == char or char == keys_hi[i][j]:\n return i, j,", "def coords_on_line(\n self, line: Line\n ) -> Tuple[Tuple[float, float], Tuple[float, float]]:\n start = line.interpolate(self.position)\n end = line.interpolate(self.position + self.length)\n return (start, end)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the correct font for the style.
def _get_style_font(self, style): return self.fonts.get_font(style['bold'], style['italic'])
[ "def getFont(self):\r\n return self.font", "def _get_font(self):\n return self._control.document().defaultFont()", "def font(self, font_name):\n return self._font[font_name]", "def getFont(self):\n from pagebot.fonttoolbox.objects.font import getFont\n from pagebot.contexts.platform import getFontPaths\n fontPath = getFontPaths().get(self.font, self.font)\n return getFont(fontPath)", "def get_font_string(self):\n return self.font().toString()", "def _format_font(self, value):\n try:\n return self.root.fonts[value]\n except KeyError:\n message = \"'%s' font is not defined\" % value\n raise KeyError(message)", "def font(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleFont(style, substyle)\n else:\n font = self.__lex.font(style)\n \n return font", "def getFontName(self) -> \"SbString\":\n return _coin.SoFontStyle_getFontName(self)", "def getFontName(self) -> \"SbString\":\n return _coin.SoVRMLFontStyle_getFontName(self)", "def __get_font(self, box):\n if box.boxstr not in self.__fonts:\n style_sheet = self.doc.get_style_sheet()\n style_name = style_sheet.get_draw_style(box.boxstr)\n style_name = style_name.get_paragraph_style()\n self.__fonts[box.boxstr] = \\\n style_sheet.get_paragraph_style(style_name).get_font()\n \n return self.__fonts[box.boxstr]", "def default_font(self):\n \n return self._dflt_rastfont", "def defaultFont(self, style, substyle=-1):\n if substyle >= 0:\n font = self.__lex.substyleDefaultFont(style, substyle)\n else:\n font = self.__lex.defaultFont(style)\n \n return font", "def usedFont(self, defaultFont):\r\n if self.__data.paintAttributes & self.PaintUsingTextFont:\r\n return self.__data.font\r\n return defaultFont", "def default_font(self):\n return {\"family\": \"serif\", \"color\": \"darkred\", \"weight\": \"normal\", \"size\": 16}", "def style_parse(ttFont):\n if 'fvar' in ttFont:\n dflt_instance_coords = {a.axisTag: a.defaultValue for a in ttFont['fvar'].axes}\n for instance in ttFont['fvar'].instances:\n if instance.coordinates == dflt_instance_coords:\n name = ttFont['name'].getName(instance.subfamilyNameID, 3, 1, 1033).toUnicode()\n return _style_parse(name)\n import os\n filename = os.path.basename(ttFont.reader.file.name)\n if len(filename.split(\"-\")) != 2:\n # Google Fonts policy on font file naming scheme\n # requires that only a single dash is used\n # to separate family name from style.\n return None\n else:\n style = filename.split(\"-\")[1].split(\".\")[0]\n return _style_parse(style)", "def _supply_font():\n font = \"\"\n if platform == \"linux\" or platform == \"linux2\":\n font = \"/usr/share/fonts/gnu-free/FreeSans.ttf\"\n elif platform == \"darwin\":\n font = \"/Library/Fonts/arial.ttf\"\n elif platform == \"win32\":\n font = \"c:\\\\windows\\\\font\\\\arial.ttf\"\n\n if os.path.isfile(font):\n return font\n\n return None", "def glutFont(font):\n return GLUTFONTS.get(font,GLUTFONTS['9x15'])", "def get_font(font_location:str):\n \n small_font = pygame.font.Font(font_location,20)\n \n medium_font = pygame.font.Font(font_location,28)\n \n large_font = pygame.font.Font(font_location,40)\n \n return (small_font,medium_font,large_font)", "def load_font(font: str, size: int) -> 'pygame.font.Font':\n return pygame_menu.font.get_font(font, size)", "def nametofont(name):\n try:\n return font.nametofont(name)\n except tk.TclError:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create drawables for the token content.
def _create_drawables(self, tokensource):
    lineno = charno = maxcharno = 0
    for ttype, value in tokensource:
        while ttype not in self.styles:
            ttype = ttype.parent
        style = self.styles[ttype]
        # TODO: make sure tab expansion happens earlier in the chain.  It
        # really ought to be done on the input, as to do it right here is
        # quite complex.
        value = value.expandtabs(4)
        lines = value.splitlines(True)
        #print lines
        for i, line in enumerate(lines):
            temp = line.rstrip('\n')
            if temp:
                self._draw_text(
                    self._get_text_pos(charno, lineno),
                    temp,
                    font = self._get_style_font(style),
                    fill = self._get_text_color(style)
                )
                charno += len(temp)
                maxcharno = max(maxcharno, charno)
            if line.endswith('\n'):
                # add a line for each extra line in the value
                charno = 0
                lineno += 1
    self.maxcharno = maxcharno
    self.maxlineno = lineno
[ "def DrawTokensBlue():\r\n for i in range(4):\r\n Tokens(TokenBlue,BlueChips[i][0],BlueChips[i][1])", "def DrawTokensRed():\r\n for i in range(4):\r\n Tokens(TokenRed,RedChips[i][0],RedChips[i][1])", "def make_drawable(self):\n drawable_env = []\n for i in range(self.Y):\n drawable_line = \"\"\n for j in range(self.X):\n who = '.'\n for a, b in self.atlas.items():\n if [j, i] == b:\n someone_here = True\n who = self.symbols[a]\n drawable_line += who\n drawable_env.append(drawable_line)\n return drawable_env", "def generate_image(self) -> None:", "def display_tokens(tokens, image):\n new_image = image.convert('RGBA')\n dr = ImageDraw.Draw(new_image)\n fnt = ImageFont.load_default()\n font_size = 28\n try:\n # Pretty font, pretty sure I'm the only one to have it though - Andy\n fnt = ImageFont.truetype(\"DejaVuSans.ttf\", font_size, encoding=\"unic\")\n except Exception:\n try:\n # More common font\n fnt = ImageFont.truetype(\"arial.ttf\", font_size, encoding=\"unic\")\n except Exception:\n print(\"fallback to default font\")\n pass\n\n token_colors = [\n (255, 0, 0),\n (0, 255, 0),\n (0, 255, 255),\n (255, 125, 0),\n (0, 255, 255),\n ]\n\n tokens = list(tokens)\n\n for token, color in zip(tokens, itertools.cycle(token_colors)):\n corners = token.pixel_corners\n centre = token.pixel_centre\n avg_point = np.mean(corners, axis=0)\n dr.line(corners + [corners[0]], fill=color, width=4)\n ellipse_pos = [\n (centre[0] - 5, centre[1] - 5),\n (centre[0] + 5, centre[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n for point in corners:\n ellipse_pos = [\n (point[0] - 5, point[1] - 5),\n (point[0] + 5, point[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n _draw_centered(\n (int(avg_point[0]), int(avg_point[1])),\n str(token.id),\n fnt,\n color,\n dr,\n )\n del dr\n return new_image", "def create_permanent_widgets(self):\n\n\t\timg_soccer_pitch = Image(source='./data/images/background-field.png')\n\t\timg_bottom_bar = Image(source='./data/images/interface-lowpanel-plain.png', pos=(0, -260))\n\t\timg_mid_bar = Image(source='./data/images/interface-midpanel-logo.png',pos=(0, -147))\n\t\tblack_bar = Image(source='./data/images/interface-message-bar.png',pos=(0, -77))\n\n\t\tself.add_widget(img_soccer_pitch)\n\t\tself.add_widget(img_bottom_bar)\n\t\tself.add_widget(img_mid_bar)\n\t\tself.add_widget(black_bar)\n\n\t\twith self.canvas: Rectangle(pos=(0, 120), size=(360, 3))", "def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,0)\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.dropletSprite)", "def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,200)\r\n\r\n cloud = pyglet.image.load('images/cloud.png')\r\n self.cloudSprite = pyglet.sprite.Sprite(cloud)\r\n self.cloudSprite.y = 100\r\n\r\n lightening = pyglet.image.load('images/lightening.png')\r\n self.lSprite = pyglet.sprite.Sprite(lightening)\r\n self.lSprite.y = 200\r\n\r\n car = pyglet.image.load('images/car.png')\r\n self.carSprite = pyglet.sprite.Sprite(car, -500, 0)\r\n\r\n\r\n # Add these sprites to the list of drawables\r\n 
self.drawableObjects.append(self.cloudSprite)\r\n self.drawableObjects.append(self.lSprite)\r\n self.drawableObjects.append(self.dropletSprite)\r\n self.drawableObjects.append(self.carSprite)", "def draw(self, file_format):\n import pydot\n\n graph = pydot.Dot(graph_type='graph', dpi=\"52\")\n for index, atom in enumerate(self.atoms):\n atom_type = '{0!s} {1!s} '.format(index+1, atom.label if atom.label != '' else '')\n atom_type += ','.join([at.label for at in atom.atomtype])\n atom_type = '\"' + atom_type + '\"'\n graph.add_node(pydot.Node(name=str(index + 1), label=atom_type, fontname=\"Helvetica\", fontsize=\"16\"))\n for atom1 in self.atoms:\n for atom2, bond in atom1.bonds.items():\n index1 = self.atoms.index(atom1)\n index2 = self.atoms.index(atom2)\n if index1 < index2:\n bond_type = ','.join([order for order in bond.get_order_str()])\n bond_type = '\"' + bond_type + '\"'\n graph.add_edge(pydot.Edge(src=str(index1 + 1), dst=str(index2 + 1),\n label=bond_type, fontname=\"Helvetica\", fontsize=\"16\"))\n\n img = graph.create(prog='neato', format=file_format)\n return img", "def create_graphic(self):\n x, y = self.coords\n self.graphic_id = self.world.create_arc(x - Entity.RADIUS, y - Entity.RADIUS,\n x + Entity.RADIUS, y + Entity.RADIUS,\n # A little mouth\n start=self.heading + self.mouth_angle / 2,\n extent= 360 - self.mouth_angle,\n fill=self.color, outline=self.outline)", "def create_svg_icon(symbolizers):\n svg_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/svg_1_0.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n icon_paths = []\n for symbolizer in symbolizers:\n if 'PolygonSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n styles.extend(process_fill_styling(symbolizer))\n fill_found = False\n for style in styles:\n if 'fill=' in style:\n fill_found = True\n if not fill_found:\n print('no fill found, adding it as empty style')\n styles.append('fill=\"none\"')\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': polygon_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n\n elif 'LineSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n # TODO: Add support for geometry Handling\n line_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/line.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': line_points,\n 'styles': ' '.join(styles)\n }\n content = line_template.render(**template_params)\n icon_paths.append(content)\n elif 'PointSymbolizer' in symbolizer.original_tagname_:\n # TODO: Check how to handle a Point\n if symbolizer.Graphic:\n if symbolizer.Graphic.Mark:\n styles = []\n for mark in symbolizer.Graphic.Mark:\n styles.extend(process_fill_styling(mark))\n if mark.WellKnownName == 'square':\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': square_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n elif 
symbolizer.Geometry:\n # TODO: implement geometry symbolizer\n print('point symbolizer does not support geometry for now')\n # else:\n # styles = [\n # 'stroke=\"black\"',\n # 'stroke-width=\"1\"',\n # 'fill=\"red\"'\n # ]\n # polygon_template = Template(\n # filename=AssetResolver('pyconizer').resolve(\n # 'lib/api/svg/templates/circle.xml').abspath(),\n # input_encoding='utf-8',\n # output_encoding='utf-8'\n # )\n # template_params = {\n # 'x': '2',\n # 'y': '2',\n # 'radius': '1',\n # 'styles': ' '.join(styles)\n # }\n # content = polygon_template.render(**template_params)\n # class_svg_paths.append(content)\n\n # only add a svg path if it would have content\n if len(icon_paths) > 0:\n svg_content = svg_template.render(**{\n 'geometry_tag': '\\n'.join(icon_paths)\n })\n return svg_content", "def build_door_tags(bg_fname, student_list):\n\n # confirm TMP_DIR exists and is empty\n if os.path.exists(TMP_DIR):\n shutil.rmtree(TMP_DIR)\n os.mkdir(TMP_DIR)\n\n # prepare base image, adding the opaque caption region at bottom\n original = Image.open(bg_fname)\n base_img = original.copy()\n base_img = ImageOps.fit(base_img, SIZE)\n canvas = aggdraw.Draw(base_img)\n brush = aggdraw.Brush('white', opacity=CAPTION_OPACITY)\n canvas.rectangle((0, SIZE[1] - CAPTION_BGHEIGHT, SIZE[0], SIZE[1]), brush)\n canvas.flush()\n\n # read in student list\n residents = [Student(*line)\n for line in reader(open(student_list, 'rU'))\n if not line[0].startswith('#')]\n #residents = [Student.wholeName(*line)\n # for line in reader(open(student_list, 'rU'))\n # if not line[0].startswith('#')]\n residents.sort(key=attrgetter('roomnumber'))\n\n # set fonts for drawing on base image\n font = ImageFont.truetype(FONT, FONTSIZE)\n smallfont = ImageFont.truetype(SMALLFONT, SMALLFONTSIZE)\n\n # for each resident, draw name and room no, and save in TMP_DIR\n for resident in residents:\n tag = base_img.copy()\n canvas = ImageDraw.Draw(tag)\n x, y = font.getsize(resident.first)\n fontsize = (SIZE[0] / 2 - x / 2,\n SIZE[1] - CAPTION_HEIGHT / 2 - y / 2.75)\n canvas.text(fontsize, resident.first, font=font, fill=0)\n canvas.text((12, 12), resident.roomnumber, font=smallfont, fill=0)\n fname = '-'.join([resident.roomnumber, resident.netid])\n fname += '.jpg'\n tag.save(os.path.join(TMP_DIR, fname))\n\n # arrange the images on a pdf document using tables\n doc = SimpleDocTemplate(PDF_FNAME, pagesize=landscape(LETTER))\n table_styles = [('BOTTOMPADDING', (0, 0), (-1, -1), 6),\n ('TOPPADDING', (0, 0), (-1, -1), 6)]\n elements = []\n table_data = []\n images = os.listdir(TMP_DIR)\n for image in images:\n table_data.append(RLImage(os.path.join(TMP_DIR, image),\n width=SIZE[0] * DPI / PPI,\n height=SIZE[1] * DPI / PPI))\n\n # cluster table data into groups of 2 for table cols\n if len(table_data) % 2 != 0:\n table_data.append(table_data[-1])\n table_data = zip(*[iter(table_data)] * 2)\n\n # build and save the pdf doc\n table = Table(table_data, style=table_styles)\n elements.append(table)\n doc.build(elements)", "def logo():", "def create_rects(self):\n for row in range(0, self.rows):\n self.sprite_tuples.append([])\n\n for col in range(0, self.cols):\n self.sprite_tuples[row].append((col * self.sprite_width, row * self.sprite_height, self.sprite_width, self.sprite_height))", "def create_textures(self):\n self.log.info(__name__ + ': ' + 'def ' + self.create_textures.__name__ + '(): ' + self.create_textures.__doc__)\n\n button = pygame.Surface((self.button_x, self.button_y), pygame.SRCALPHA, 32)\n button.fill((0, 0, 0, 0), None, 
pygame.BLEND_RGBA_MULT)\n pygame.draw.rect(button, Colors.DEEPSKYBLUE, (0, 0, self.button_x, self.button_y))\n pygame.draw.rect(button, Colors.BLACK, (0, 0, self.button_x, self.button_y), 2)\n self.textures['button'] = button", "def prepare_icons(self):\n icons = []\n cols = np.linspace(0, self.size[1]-1, len(self.modes)+1).astype(np.int64)\n cols = [(cols[i], cols[i+1]) for i in range(len(cols)-1)]\n \n icon_pos = {}\n mode_pos = {}\n for i, image_name in enumerate(os.listdir(self.idir)):\n img = cv2.imread(self.idir+image_name)\n img = cv2.resize(img, (cols[i][1]-cols[i][0], self.vui_part))\n icon_pos[cols[i]] = img\n mode_pos[self.modes[i]] = cols[i]\n self.cols = cols \n self.icon_position = icon_pos\n self.current_icons = icon_pos\n self.mode_pos = mode_pos", "def draw_game_state(self):\n info_0_x, info_0_y = self.coord['info_0_x'], self.coord['info_0_y']\n data = [\n [f'Game ID: {self.game_id}', 10],\n [f'Your Token: {self.access_token}', 10],\n ['-' * 55, 14],\n [f'cards to take: {self.cards_to_take}', 14],\n [f'turns to wait: {self.turns_to_wait}', 14],\n [f'requests: color: {self.requested_color}, value: {self.requested_value}', 14]\n ]\n for index, info in enumerate(data):\n info_y = info_0_y - index * 20\n label = pyglet.text.Label(text=info[0], x=info_0_x, y=info_y,\n color=self.colors['lbl_menu'], font_size=info[1])\n self.draw_objects.append(label)\n\n name = choice(['red_joker.png', 'black_joker.png'])\n if self.requested_value is not None:\n name = f'hearts_{self.requested_value}.png'\n elif self.requested_color is not None:\n name = f'{self.requested_color}_A.png'\n\n card_image = common.resize_center_card_image(self.card_images[name], self.screen.height, 4)\n info_y = info_0_y - 20 * len(data) - card_image.height / 1.9\n card = pyglet.sprite.Sprite(img=card_image, x=info_0_x + card_image.width * 1.3, y=info_y)\n self.draw_objects.append(card)", "def make_image_list(self):\n return [\n tools.get_image(48, 0, 16, 16, self.sprite_sheet),\n tools.get_image(0, 0, 22, 16, setup.GFX['sword2'])\n ]", "def make_assets(self):\n # Handle anchor\n anchor_group = LabeledColorImage(\n self.anchor,\n color=WHITE,\n label=\"Anchor\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Handle positive\n positive_group = LabeledColorImage(\n self.positive,\n color=GREEN,\n label=\"Positive\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Handle negative\n negative_group = LabeledColorImage(\n self.negative,\n color=RED,\n label=\"Negative\",\n stroke_width=self.stroke_width,\n font_size=self.font_size,\n buff=self.buff,\n )\n # Distribute the groups uniformly vertically\n assets = Group(anchor_group, positive_group, negative_group)\n assets.arrange(DOWN, buff=1.5)\n\n return assets", "def drawObjects(self):\r\n\t\tpass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Paint the line number background on the image.
def _paint_line_number_bg(self, im):
    if not self.line_numbers:
        return
    if self.line_number_fg is None:
        return
    draw = ImageDraw.Draw(im)
    recth = im.size[-1]
    rectw = self.image_pad + self.line_number_width - self.line_number_pad
    draw.rectangle([(0, 0), (rectw, recth)], fill=self.line_number_bg)
    draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
    del draw
[ "def draw_horizontal_lines(img):\n row, col = img.shape\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n interval = row / 10\n for i in range(1, 10):\n (x0, y0) = map(int, [0, i * interval])\n (x1, y1) = map(int, [col, i * interval])\n img = cv2.line(img, (x0, y0), (x1, y1), (0, 255, 0), 1)\n\n return img", "def _draw_background(self):\r\n for i in range(self._size):\r\n for j in range(self._size):\r\n self._grid.draw_entity((i, j), BACK_GROUND)", "def line(self, start, end, color, width=None, title='image', destroy=True):\n line = cv2.line(self.img, start, end, color, width)\n if destroy == False:\n cv2.imshow(title, self.img)\n if destroy == True:\n cv2.imshow(title, self.img)\n cv2.waitKey(0)\n cv2.destroyAllWindows", "def generate_lines(height, width, dpi, line_width, path, orientation='vertical', N_lines=None):\n\n ppmm = dpi / 25.4\n w = int(np.round((width * ppmm)))\n h = int(np.round((height * ppmm)))\n\n if N_lines is not None:\n if orientation == 'vertical':\n line_width = width // (2*N_lines)\n else:\n line_width = height // (2*N_lines)\n\n D = int(np.round(line_width * ppmm))\n\n im = np.full((h, w), 255, dtype=np.uint8)\n if orientation == 'vertical':\n black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, w//D, 2)] )\n if black_id[-1] + D < w:\n black_id = np.hstack([black_id, np.arange(w//D*D, w)])\n im[:, black_id] = 0\n else:\n black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, h//D, 2)] )\n if black_id[-1] + D < h:\n black_id = np.hstack([black_id, np.arange(h//D*D, h)])\n im[black_id] = 0\n\n image_comment = f'{orientation} lines\\nline width: {line_width}\\n DPI: {dpi}'\n save_image(path, im, dpi, comment=image_comment)\n print(f'Image saved to {path}.')\n return im", "def __draw_grid(self):\n for i in range(10):\n color = 'blue' if i % 3 == 0 else \"gray\"\n\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)", "def draw_lines (self, mm_img, lines, **kwargs) :\n\n r = 255\n g = 0\n b = 132\n\n if kwargs.has_key('color') :\n r = kwargs['color'][0]\n g = kwargs['color'][1]\n b = kwargs['color'][2]\n\n opacity = kwargs.get('opacity', 1)\n line_width = kwargs.get('line_width', 2)\n\n cairo_surface = self._setup_surface(mm_img, **kwargs)\n\n for coords in lines :\n points = []\n\n for c in coords :\n points.append(self._coord_to_point(c))\n\n ctx = self._draw_polyline_points(cairo_surface, points, False)\n ctx.set_source_rgba(r, g, b, opacity)\n ctx.set_line_width(line_width)\n ctx.stroke()\n\n\treturn self._return_surface(cairo_surface, **kwargs)", "def lines(self):\r\n w, h = self.width, self.height # create local shortcut for image size\r\n board = ImageDraw.Draw(self.board) # create interactive image (for drawing)\r\n for loop in range(8): # draw 8 pairs of random lines on image\r\n xa, ya, xb, yb = rr(w), rr(h), rr(w), rr(h) # select random coordinates\r\n board.line((xa, 0, xb, h), width=2, fill='#000') # line from top to bottom\r\n board.line((0, ya, w, yb), width=2, fill='#000') # line from left to right\r\n self.label['image'] = self.image = ImageTk.PhotoImage(self.board) # update\r", "def draw_line(self):\n gl.glColor4f(*self.color)\n gl.glLineWidth(self.thickness)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex2f(self.coordx[0], self.coordy[0])\n gl.glVertex2f(self.coordx[1], self.coordy[1])\n gl.glEnd()", "def Line(self, 
prePos):\n \n if self.Draw:\n pygame.draw.line(self.Parent, self.Colour, prePos, self.GetPos(), self.Width)", "def highlight_line(self, start, end):\n a, b = start, end\n startpos = ((a[0]+0.5)*CELL_SIZE, (a[1]+0.5)*CELL_SIZE)\n endpos = ((b[0]+0.5)*CELL_SIZE, (b[1]+0.5)*CELL_SIZE)\n pygame.draw.line(self.board, WINNER_LINE_COLOR, startpos, endpos, 4)\n self.screen.blit(self.board, (0, 0))\n pygame.display.flip()", "def line_draw(image):\n img = image.copy()\n \n #read in background for paper appearance\n paper = cv2.imread(\"ink-paper.jpg\", cv2.IMREAD_COLOR)\n\n paper = cv2.resize(paper, (img.shape[1], img.shape[0]))\n\n img = cv2.medianBlur(img, 5)\n edges = cv2.Canny(img, 100 , 125)\n\n c_img, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n \n #iterate through each contour found in the image\n for c in contours:\n #draw contours on image. Can vary intensity of lines\n #c_img = cv2.drawContours(c_img, c, -1, (125,125,0), 4)\n c_img = cv2.drawContours(c_img, c, -1, (255,255,255), 2) \n \n #Invert the line drawing\n c_img = 255 - c_img\n c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)\n\n c_img_blur = cv2.blur(c_img, (5,5))\n \n #convert to BGR to enable adding\n edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n \n edges = np.uint8(edges) \n c_img_blur = np.uint8(c_img_blur)\n \n #add blurred and contoured to paper to create an overlay/blend\n output = cv2.addWeighted(c_img_blur, .35, paper, .65, 0)\n output = np.uint8(output)\n \n return output", "def draw_lines(img, lines, color=[1, 0, 0], thickness=2):\n for line in lines:\n p1 = line[0]\n p2 = line[1]\n cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color, thickness)", "def paint_borders(self, color: ColorsType, width: int) -> None:", "def draw_crosshairs(self):\n center_x = self.frame.shape[1] // 2\n center_y = self.frame.shape[0] // 2\n # horizontal line\n self.frame[center_y, center_x - 10:center_x + 11] = [0, 50, 255]\n # vertical line\n self.frame[center_y - 10:center_y + 11, center_x] = [0, 50, 255]", "def draw_lines(img, lines, color=(0, 255, 0), thickness=6):\n img_blank = np.zeros_like(img)\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(img_blank, (x1, y1), (x2, y2), color, thickness)\n return img_blank", "def line(self, y = 0):\n\n if y > self.size[1]:\n return None\n\n # Every 60 lines, change starting\n color = int(y / 60) % 7\n\n r = ((color >> 0) & 1)\n g = ((color >> 1) & 1)\n b = ((color >> 2) & 1)\n\n rgb = b\"\"\n for i in range(0, self.size[0]):\n # Fill with gradient\n pos = int(256 * i / self.size[0])\n if r:\n r_grad = pos\n else:\n r_grad = 255 - pos\n if g:\n g_grad = pos\n else:\n g_grad = 255 - pos\n if b:\n b_grad = pos\n else:\n b_grad = 255 - pos\n rgb += struct.pack(\"BBB\", r_grad, g_grad, b_grad)\n pass\n \n return rgb", "def draw_line(self, index, a, b, c, d):\n for mask_index in range(min(self.mask_count + 1, self.trail_size)):\n cv2.line(self.masks[mask_index], (a,b),(c,d), \\\n self.color[index].tolist(), 2, lineType=cv2.CV_AA)", "def render_gap(img_width, line: Line, gap: Gap) -> Image:\n img = Image.new(\"L\", (img_width, img_width), color=0)\n draw = ImageDraw.Draw(img)\n\n gap_start, gap_end = gap.coords_on_line(line)\n width = line.width + 2\n draw.line([*gap_start, *gap_end], width=width, fill=255)\n\n return img", "def create_final_line(self):\n p = Image(\n c.screen_width - 100,\n 0,\n 100,\n c.screen_height,\n 'images/finish_line.png')\n self.finish_line = p\n self.objects.append(p)", "def continuous_line(klass, lane, surface, 
stripes_count, longitudinal, side):\n starts = [longitudinal + 0 * klass._stripe_spacing]\n ends = [longitudinal + stripes_count * klass._stripe_spacing + klass._stripe_length]\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
def format(self, tokensource, outfile):
    self._create_drawables(tokensource)
    self._draw_line_numbers()
    im = Image.new(
        'RGB',
        self._get_image_size(self.maxcharno, self.maxlineno),
        self.background_color
    )
    self._paint_line_number_bg(im)
    draw = ImageDraw.Draw(im)
    # Highlight
    if self.hl_lines:
        x = self.image_pad + self.line_number_width - self.line_number_pad + 1
        recth = self._get_line_height()
        rectw = im.size[0] - x
        for linenumber in self.hl_lines:
            y = self._get_line_y(linenumber - 1)
            draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color)
    for pos, value, font, kw in self.drawables:
        draw.text(pos, value, font=font, **kw)
    im.save(outfile, self.image_format.upper())
[ "def display_tokens(tokens, image):\n new_image = image.convert('RGBA')\n dr = ImageDraw.Draw(new_image)\n fnt = ImageFont.load_default()\n font_size = 28\n try:\n # Pretty font, pretty sure I'm the only one to have it though - Andy\n fnt = ImageFont.truetype(\"DejaVuSans.ttf\", font_size, encoding=\"unic\")\n except Exception:\n try:\n # More common font\n fnt = ImageFont.truetype(\"arial.ttf\", font_size, encoding=\"unic\")\n except Exception:\n print(\"fallback to default font\")\n pass\n\n token_colors = [\n (255, 0, 0),\n (0, 255, 0),\n (0, 255, 255),\n (255, 125, 0),\n (0, 255, 255),\n ]\n\n tokens = list(tokens)\n\n for token, color in zip(tokens, itertools.cycle(token_colors)):\n corners = token.pixel_corners\n centre = token.pixel_centre\n avg_point = np.mean(corners, axis=0)\n dr.line(corners + [corners[0]], fill=color, width=4)\n ellipse_pos = [\n (centre[0] - 5, centre[1] - 5),\n (centre[0] + 5, centre[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n for point in corners:\n ellipse_pos = [\n (point[0] - 5, point[1] - 5),\n (point[0] + 5, point[1] + 5),\n ]\n dr.ellipse(ellipse_pos, fill=color)\n _draw_centered(\n (int(avg_point[0]), int(avg_point[1])),\n str(token.id),\n fnt,\n color,\n dr,\n )\n del dr\n return new_image", "def _format_lines(self, tokensource):\r\n nocls = self.noclasses\r\n lsep = self.lineseparator\r\n # for <span style=\"\"> lookup only\r\n getcls = self.ttype2class.get\r\n c2s = self.class2style\r\n escape_table = _escape_html_table\r\n tagsfile = self.tagsfile\r\n\r\n lspan = ''\r\n line = ''\r\n for ttype, value in tokensource:\r\n if nocls:\r\n cclass = getcls(ttype)\r\n while cclass is None:\r\n ttype = ttype.parent\r\n cclass = getcls(ttype)\r\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\r\n else:\r\n cls = self._get_css_class(ttype)\r\n cspan = cls and '<span class=\"%s\">' % cls or ''\r\n\r\n parts = value.translate(escape_table).split('\\n')\r\n\r\n if tagsfile and ttype in Token.Name:\r\n filename, linenumber = self._lookup_ctag(value)\r\n if linenumber:\r\n base, filename = os.path.split(filename)\r\n if base:\r\n base += '/'\r\n filename, extension = os.path.splitext(filename)\r\n url = self.tagurlformat % {'path': base, 'fname': filename,\r\n 'fext': extension}\r\n parts[0] = \"<a href=\\\"%s#%s-%d\\\">%s\" % \\\r\n (url, self.lineanchors, linenumber, parts[0])\r\n parts[-1] = parts[-1] + \"</a>\"\r\n\r\n # for all but the last line\r\n for part in parts[:-1]:\r\n if line:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + part + \\\r\n (cspan and '</span>') + lsep\r\n else: # both are the same\r\n line += part + (lspan and '</span>') + lsep\r\n yield 1, line\r\n line = ''\r\n elif part:\r\n yield 1, cspan + part + (cspan and '</span>') + lsep\r\n else:\r\n yield 1, lsep\r\n # for the last line\r\n if line and parts[-1]:\r\n if lspan != cspan:\r\n line += (lspan and '</span>') + cspan + parts[-1]\r\n lspan = cspan\r\n else:\r\n line += parts[-1]\r\n elif parts[-1]:\r\n line = cspan + parts[-1]\r\n lspan = cspan\r\n # else we neither have to open a new span nor set lspan\r\n\r\n if line:\r\n yield 1, line + (lspan and '</span>') + lsep", "def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)", "def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n try:\n if not outfile:\n realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return 
realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.formatter import Formatter\n if isinstance(formatter, type) and issubclass(formatter, Formatter):\n raise TypeError('format() argument must be a formatter instance, '\n 'not a class')\n raise", "def DrawTokensRed():\r\n for i in range(4):\r\n Tokens(TokenRed,RedChips[i][0],RedChips[i][1])", "def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> &lt; </symbol>')\n elif value == '>':\n self.output(f'<symbol> &gt; </symbol>')\n elif value == '&':\n self.output(f'<symbol> &amp; </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')", "def DrawTokensBlue():\r\n for i in range(4):\r\n Tokens(TokenBlue,BlueChips[i][0],BlueChips[i][1])", "def print_tokens(source):\n if isinstance(source[0], Token):\n source = untokenize(source)\n\n for lines in get_lines(source):\n for token in lines:\n print(repr(token))\n print()", "def formatter( fname, fout = None, space_count = 2, \n *kargs, special = 0, EXCEPTION = True, DEBUG = False ):\n \n import sys\n if special == None:\n special = 0\n\n # Prevent user from accessing 16\n if special & 16:\n special ^= 16\n \n shift = 0\n shift_delay = 0 #For 4\n cond_shift = 0 #For 16\n cond_delay = 0 #For 16\n mline_shift = 0 #Future Use\n brace_start = '{'\n brace_end = '}'\n stack = [] #For 1\n space_char = ' ' #For 2\n\n #Files \n source_code = open(fname)\n fout = (fname + \"_edit.txt\") if (fout == None) else fout \n dest_code = open(fout, \"w\" )\n ###err_code = open(fname + \"_err.txt\", \"w\" )\n\n print(\"%s starting with %s. 
\\nOutput is %s.\" % \n (sys._getframe(0).f_code.co_name , fname, fout) )\n\n #SPECIAL\n if special & 2 :\n space_char = '\\t'\n\n for (count,line) in enumerate(source_code) :\n\n ###err_code.write( '%03d | %s' % (len(line.strip()), line))\n\n #Empty Line are Empty\n empty_line = 1 if line.strip() else 0\n \n line = ( ( empty_line * ( shift + cond_shift + mline_shift )* \n space_count * space_char ) \n + line.strip() )\n \n #Insert Extra Formatting here\n if special > 0:\n if special & 4 : \n if r'/*' in line:\n shift_delay +=1\n if r'*/' in line:\n shift_delay -=1\n if special & 8 :\n if (line.lstrip()).startswith('//'):\n if (line[0] == ' ' or line[0] == '\\t' ): #CHECK ME\n line = line[1:]\n if special & 16:\n if ( 'if' in line or 'else' in line \n or 'for' in line or 'while' in line ) and brace_start not in line:\n cond_shift = 1\n else:\n cond_shift = 0\n if special & 1 :\n if brace_start in line and brace_end not in line :\n temp = line.strip()[:-1] \n temp = \"\".join(temp.split('{').split('}'))\n stack.append(temp)\n elif brace_start not in line and brace_end in line :\n line = \"%s%s%s\" % (line, \" // \", stack.pop())\n \n #Write to File\n dest_code.write( \"%s%s\" % (line, '\\n') )\n\n ##Calculate Shift for next line\n if brace_start in line :\n shift += 1\n if brace_end in line :\n shift -= 1\n if shift_delay != 0 :\n shift += shift_delay\n shift_delay = 0\n \n #Check if negative shift\n if EXCEPTION and shift < 0 :\n print( \"\\n File \\\"%s\\\", line %i, in %s\" % \n ( fname, count, sys._getframe().f_code.co_name ) )\n raise UnbalancedBraces( 0 , \"Unbalanced Closing Braces in the file\" )\n \n #Check if there is extra shift at end.\n if EXCEPTION and shift != 0:\n print( \"\\n File \\\"%s\\\" , in %s\" % \n ( fname, sys._getframe().f_code.co_name ) )\n raise UnbalancedBraces( 0 , \"Unbalanced Opening Braces in the file!\" )\n\n print( \"%s compeleted!\" % sys._getframe(0).f_code.co_name )", "def visualize_activations(\n tokens,\n activations,\n darken=2,\n colors=[\"#d35f5f\", \"#00aad4\"],\n text_direction=\"ltr\",\n char_limit=60,\n font_size=20,\n filter_fn=lambda x: x,\n):\n ################################ Validation ################################\n valid_text_directions = [\"ltr\", \"rtl\"]\n text_direction = text_direction.lower()\n assert (\n text_direction in valid_text_directions\n ), f\"text_direction must be one of {valid_text_directions}\"\n\n assert len(tokens) == len(\n activations\n ), f\"Number of tokens and activations must match\"\n\n ################################ Filtering ################################\n if filter_fn == \"top_tokens\":\n\n def keep_top_tokens(acts):\n max_val = max([abs(a) for a in acts])\n new_acts = [a if abs(a) > 0.8 * max_val else 0 for a in acts]\n return new_acts\n\n filter_fn = keep_top_tokens\n activations_filtered = filter_fn(activations)\n assert len(activations) == len(activations_filtered)\n activations = activations_filtered\n\n ############################## Drawing Setup ###############################\n text = \" \".join(tokens)\n\n # Estimate individual character sizes\n char_width = font_size * 0.601 # Magic number for Courier font\n char_height = font_size * 1.25 # 1.25 is line height of rendered font\n\n # Compute number of lines\n lines = _break_lines(text, limit=char_limit)\n\n # Compute image size based on character sizes and number of lines\n image_height = len(lines) * char_height * 1.2\n image_width = (char_limit + 1) * char_width\n\n # Create drawing canvas\n dwg = svgwrite.Drawing(\"tmp.svg\", 
size=(image_width, image_height), profile=\"full\")\n dwg.viewbox(0, 0, image_width, image_height)\n group = dwg.g()\n\n ####################### Activation Rendering limits ########################\n scores = activations\n max_score = max(scores)\n min_score = abs(min(scores))\n limit = max(max_score, min_score)\n\n for _ in range(darken):\n word_idx = 0\n line_horizontal_offsets = []\n for line_idx, line in enumerate(lines):\n char_idx = 0\n words = line.split(\" \")\n if text_direction == \"rtl\":\n words = reversed(words)\n for word in words:\n score = scores[word_idx]\n if score > 0:\n color = colors[1]\n opacity = score / limit\n else:\n color = colors[0]\n opacity = abs(score) / limit\n\n # Add rectangle for every character in current word\n for _ in word:\n rect_position = (char_idx * char_width, 7 + line_idx * char_height)\n rect_size = (f\"{char_width:0.3f}px\", f\"{char_height:0.3f}px\")\n group.add(\n dwg.rect(\n insert=rect_position,\n size=rect_size,\n style=_get_rect_style(color, opacity),\n )\n )\n char_idx += 1\n\n # Add rectangle for empty space after word\n final_rect_pos = (char_idx * char_width, 7 + line_idx * char_height)\n final_rect_size = (f\"{char_width:0.3f}px\", f\"{char_height:0.3f}px\")\n group.add(\n dwg.rect(\n insert=final_rect_pos,\n size=final_rect_size,\n style=_get_rect_style(color, opacity),\n )\n )\n\n char_idx += 1\n word_idx += 1\n if text_direction == \"ltr\":\n line_horizontal_offsets.append(0)\n else:\n line_horizontal_offsets.append(char_idx * char_width)\n\n # Draw the actual text over the drawn rectangles\n for line_idx, line in enumerate(lines):\n text_insert = (\n line_horizontal_offsets[line_idx],\n font_size * 1.25 * (line_idx + 1),\n )\n text = dwg.text(\n line, insert=text_insert, fill=\"black\", style=_get_text_style(font_size)\n )\n group.add(text)\n\n dwg.add(group)\n\n return dwg", "def _drawStrGen(x=x, y=y, string=string, width=width, height=height):\n for char in string:\n if y == height:\n raise TDLError('End of console reached.')\n #batch.append(((x, y), _formatChar(char))) # ((x, y), ch)\n yield((x, y), _formatChar(char))\n x += 1 # advance cursor\n if x == width: # line break\n x = 0\n y += 1", "def tabbed_generator(self, source_path, source_vocab, target_vocab, eos=None):\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n for line_idx, line in enumerate(source_file):\n if line:\n source, target = split_graphemes_phonemes(line)\n if not (source and target):\n tf.logging.warning(\"Invalid data format in line {} in {}:\\n\"\n \"{}\\nGraphemes and phonemes should be separated by white space.\"\n .format(line_idx, source_path, line))\n continue\n source_ints = source_vocab.encode(source) + eos_list\n target_ints = target_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}", "def save_tokens_to_file(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as fp:\n #for token in self.token2id.keys():\n for idd in range(self.size()): \n fp.write(self.id2token[idd] + '\\n')", "def _write_input(\n self, X: List[str], y: Optional[List[List[str]]], input_path: Path\n ):\n with open(input_path, \"w\") as f:\n if y is not None:\n for text, labels in zip(X, y):\n label_str = \" \".join(\n f\"__label__{FastText._escape_label(label)}\" for label in labels\n )\n f.write(f\"{label_str} {_fasttext_preprocess(text)}\\n\")\n elif y is None:\n for text in X:\n f.write(f\"{_fasttext_preprocess(text)}\\n\")", "def build_edges(self, tokens, M, 
token2idx):\n window_size = self.WINDOW_SIZE\n\n for i in range(len(tokens) - window_size+1):\n span = tokens[i:i+window_size]\n center, contexts = span[0], span[1:]\n v = token2idx[center]\n for context in contexts:\n w = token2idx[context]\n insert_edge(M, v, w)", "def _get_format_from_document(self, token: Any, document: Any) -> Any:\n # Modified by EKR.\n # These lines cause unbounded recursion.\n # code, html = next(self._formatter._format_lines([(token, u'dummy')]))\n # self._document.setHtml(html)\n return QtGui.QTextCursor(self._document).charFormat()", "def _print_tokens(self, tokens) -> None:\n print(' '.join([self.get_index_token(tok.item()) for tok in tokens]))\n return", "def token_generator(source_path, target_path, token_vocab, eos=None):\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = token_vocab.encode(source) + eos_list\n target_ints = token_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()", "def generateOutput(self):\n if not hasattr(self, 'xcms'):\n self.getCenterOfMass()\n\n fh = open(self.settings['output'], 'w')\n rg = open(self.settings['output'].split('.')[0]+'.reg', 'w')\n fh.write('#X coordinate in pixels [starts from 1]\\n')\n fh.write('#Y coordinate in pixels [starts from 1]\\n')\n rg.write('#File written on {0:>s}\\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))\n for x, y in zip(self.xcms, self.ycms):\n fh.write('%10.3f %10.3f\\n' % (x + 1, y + 1))\n rg.write('circle({0:.3f},{1:.3f},5)\\n'.format(x + 1, y + 1))\n fh.close()\n rg.close()", "def plot_token_scores(\n token_probs, sentence, id2label_tok,\n plot_name=None, show=False):\n sentence_length = len(sentence.tokens)\n token_probs = token_probs[:][:sentence_length].T\n (nrows, ncols) = token_probs.shape\n color_data = []\n\n for i, [r, g, b] in enumerate(head_colours[:nrows]):\n row = []\n for j in range(ncols):\n row.append([r, g, b, token_probs[i][j]])\n color_data.append(row)\n\n plt.figure(figsize=(16, 12), dpi=100)\n row_labels = [\"O\"] + [str(id2label_tok[i + 1]) for i in range(nrows-1)]\n col_labels = [token.value for token in sentence.tokens]\n plt.imshow(color_data, vmin=0, vmax=sentence_length)\n plt.xticks(range(ncols), col_labels, rotation=45)\n plt.yticks(range(nrows), row_labels)\n plt.tight_layout()\n if plot_name is not None:\n plt.savefig(\"%s_%d.png\" % (plot_name, int(time.time())),\n format=\"png\", dpi=100, bbox_inches='tight', pad_inches=0)\n if show:\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. It also preprocesses the text, i.e. expands tabs and strips it if wanted, and applies registered filters.
def get_tokens(self, text, unfiltered=False): if not isinstance(text, str): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith('\ufeff'): text = text[len('\ufeff'):] except UnicodeDecodeError: text = text.decode('latin1') elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') # check for BOM first decoded = None for bom, encoding in _encoding_map: if text.startswith(bom): decoded = str(text[len(bom):], encoding, errors='replace') break # no BOM found, so use chardet if decoded is None: enc = chardet.detect(text[:1024]) # Guess using first 1KB decoded = str(text, enc.get('encoding') or 'utf-8', errors='replace') text = decoded else: text = text.decode(self.encoding) else: if text.startswith('\ufeff'): text = text[len('\ufeff'):] # text now *is* a unicode string text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if self.ensurenl and not text.endswith('\n'): text += '\n' def streamer(): for i, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream
[ "def get_tokens(self, text, unfiltered=False):\r\n if not isinstance(text, unicode):\r\n if self.encoding == 'guess':\r\n try:\r\n text = text.decode('utf-8')\r\n if text.startswith(u'\\ufeff'):\r\n text = text[len(u'\\ufeff'):]\r\n except UnicodeDecodeError:\r\n text = text.decode('latin1')\r\n elif self.encoding == 'chardet':\r\n try:\r\n import chardet\r\n except ImportError:\r\n raise ImportError('To enable chardet encoding guessing, '\r\n 'please install the chardet library '\r\n 'from http://chardet.feedparser.org/')\r\n # check for BOM first\r\n decoded = None\r\n for bom, encoding in _encoding_map:\r\n if text.startswith(bom):\r\n decoded = unicode(text[len(bom):], encoding,\r\n errors='replace')\r\n break\r\n # no BOM found, so use chardet\r\n if decoded is None:\r\n enc = chardet.detect(text[:1024]) # Guess using first 1KB\r\n decoded = unicode(text, enc.get('encoding') or 'utf-8',\r\n errors='replace')\r\n text = decoded\r\n else:\r\n text = text.decode(self.encoding)\r\n else:\r\n if text.startswith(u'\\ufeff'):\r\n text = text[len(u'\\ufeff'):]\r\n\r\n # text now *is* a unicode string\r\n text = text.replace('\\r\\n', '\\n')\r\n text = text.replace('\\r', '\\n')\r\n if self.stripall:\r\n text = text.strip()\r\n elif self.stripnl:\r\n text = text.strip('\\n')\r\n if self.tabsize > 0:\r\n text = text.expandtabs(self.tabsize)\r\n if self.ensurenl and not text.endswith('\\n'):\r\n text += '\\n'\r\n\r\n def streamer():\r\n for i, t, v in self.get_tokens_unprocessed(text):\r\n yield t, v\r\n stream = streamer()\r\n if not unfiltered:\r\n stream = apply_filters(stream, self.filters, self)\r\n return stream", "def analyze(text):\n\n for token in tokenize(text):\n normalized = normalize(token)\n if filter_text(normalized):\n yield normalized", "def __filter_text(self, text):\r\n analyzer_num_tag = self.analyzer_type.num\r\n analyzer_noun_tag = self.analyzer_type.noun\r\n analyzer_loc_tag = self.analyzer_type.loc\r\n surname = clean_text.get_surname(self.url)\r\n sentence = []\r\n out_text = []\r\n surname_re = re.compile(r'' + surname)\r\n for sent in text:\r\n for token in sent:\r\n if (analyzer_num_tag in token and (self.pattern.match(token[0]) is not None)) or (\r\n analyzer_loc_tag in token and analyzer_noun_tag in token and surname_re.match(\r\n str(token[0])) is None):\r\n sentence.append(token)\r\n if [tup for tup in sentence if analyzer_num_tag in tup]:\r\n if [tup for tup in sentence if analyzer_loc_tag in tup]:\r\n out_text.append(sentence)\r\n sentence = []\r\n return out_text", "def tokenize_with_preprocess(text):\n return map(__stemmer.stem, filter(lambda w: w not in stop,\n nltk.word_tokenize(re.sub(_punc_pattern, '', text.lower()))))", "def flatten_text(text: Text) -> Iterator[Union[int, str]]:\n for sent in text:\n for token in sent:\n yield token", "def filter(unfiltered_data: List) -> List:\n filtered_data = [_markdown_to_text(unfiltered_str)\n for unfiltered_str in unfiltered_data]\n return filtered_data", "def engTokenize(text):\n return [token.text for token in eng.tokenizer(text)]", "def preprocess(text):\n text = normalize_unicode(text)\n text = remove_newline(text)\n text = text.lower()\n text = decontracted(text)\n text = replace_negative(text)\n text = removePunctuations(text)\n text = remove_number(text)\n text = remove_space(text)\n text = removeArticlesAndPronouns(text)\n text = removeNLTKStopWords(text)\n #text = performStemming(text)\n return text", "def tag_untokenized_text(self, text):\n return 
self.tag_untokenized_sentences(self._sent_tokenize(text))", "def iter_from(self, fieldname, text):\n\n term_info = self.term_info\n for term in self.terms_from(fieldname, text):\n yield (term, term_info(*term))", "def prepare_for_tokenization(self, text, **kwargs):\n return text", "def tokenize(self, text: str) -> list:\n indices = self.atomize(text)\n return list(map(lambda x: self.decoder[x], indices))", "def tokenize(self, text):\n split_tokens = [] # list of `SubToken`s.\n for token, orig_token, is_good_token in self.basic_tokenizer.tokenize(text):\n if not is_good_token:\n split_tokens.append(SubToken(token, orig_token, is_good=False))\n continue\n\n # Preserve special tokens such as '[Q]' and '[SEP]'.\n if bert_tokenization.preserve_token(token, self.vocab):\n split_tokens.append(SubToken(token, orig_token, is_good=True))\n continue\n\n # For everything else, send the text-like tokens that have survived\n # whitespace and puncutation splitting through a wordpiece tokenizer.\n for sub_token in self.wordpiece_tokenizer.tokenize(\n [SubToken(token, orig_token, is_good_token)]):\n # `sub_token` has type `SubToken`.\n split_tokens.append(sub_token)\n\n return split_tokens", "def process_text(text, stem=True):\r\n #text = text.translate(None,string.punctuation)\r\n tokens = word_tokenize(text)\r\n \r\n if stem:\r\n stemmer = PorterStemmer()\r\n tokens = [stemmer.stem(t) for t in tokens]\r\n \r\n return tokens", "def FilterInput(self, text):\n return text", "def extract_phrases(self, text: str):\n\n sentence = Sentence(text)\n self.chunk_tagger.predict(sentence)\n\n token_list: List[str] = []\n token_tags: List[str] = []\n\n for token in sentence:\n token_list.append(token.text)\n\n for label_type in token.annotation_layers.keys():\n # if token.get_labels(label_type)[0].value == \"O\":\n # token_tags.append('O')\n # if token.get_labels(label_type)[0].value == \"_\":\n # token_tags.append('_')\n token_tags.append(token.get_labels(label_type)[0].value) # Append token tags for each token\n\n phrases: List[str] = self._get_flair_phrases(token_list, token_tags)\n\n return phrases", "def preprocessText(self, text):\n self.rawText = text\n self.stoppedText = self.removeStopWordsFromText(text)\n # self.vectorizedText = self.textToVector(self.stoppedText)\n # self.topic = self.detectTopic(self.vectorizedText)\n # self.POSedText = self.POSTagText(self.stoppedText)", "def process_all_text(text_string, quick=False, use_placenames=False):\r\n # print(\"Preliminary tagging...\")\r\n token_list = core.tgc(text_string)\r\n # print(\"Name Entity chunking...\")\r\n token_list = core.ne_group_extended(token_list)\r\n # for x in token_list:\r\n # print(type(x), x)\r\n if use_placenames:\r\n # print(\"Tagging Place Names...\")\r\n token_list = pn.tag_all_placenames(token_list, quick)\r\n # print(\"Tagging Geo Features...\")\r\n token_list = gn.tag_geonouns(token_list)\r\n # print(\"Tagging Spatial Grammar...\")\r\n token_list = sg.tag_all_spatial_grammar(token_list)\r\n # print(\"Done\")\r\n # print(token_list)\r\n return token_list", "def preprocess(text: str) -> List[str]:\n return __PARAGRAPH_SEP.split(\n Tokenizer.join_hyphenated_words_across_linebreaks(text)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback that yields multiple actions for each group in the match.
def bygroups(*args): def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback
[ "def _performManyAct(self, action, args, messages, D):\n readCount = 0\n i = -1\n\n for message in messages:\n if message.read:\n readCount += 1\n yield action(message, **args)\n i += 1\n D.callback((readCount, i+1-readCount))", "def group_action():\n return _controller.group_action()", "def apply(self, callback, route):\n @functools.wraps( callback)\n def inner_group_by(**kw):\n all_the_results = callback( **kw)\n grouper = bottle.request.GET.get('group')\n if not grouper:\n return all_the_results\n groups = dict( ( model.full_name.split( grouper, 1)[0], model)\n for model in all_the_results)\n grouper = grouper if grouper not in self.ignored_groupers else None\n return [ FakeModel(grouper,host,name) for name,host in groups.items() ]\n return inner_group_by", "def applyactions(self):\n \n for a in self.registration.actions:\n actions.apply(a, self.registration, self.log)", "def droplet_actions_group():\n\tpass", "def _process_matches(self, match, match_index):\n match = self._process_match_processor(match, self.pre_match_processor)\n if not match:\n return\n\n if not self._process_match(match, match_index):\n return\n\n for child in match.children:\n if not self._process_match(child, match_index, child=True):\n return\n\n match = self._process_match_processor(match, self.post_match_processor)\n if not match:\n return\n\n if (self._should_include_parent or self.private_parent) and match.name not in self.ignore_names:\n yield match\n if self._should_include_children or self.private_children:\n children = [x for x in match.children if x.name not in self.ignore_names]\n for child in children:\n yield child", "def _step_apply(self, action_tuple):\n for action in action_tuple:\n assert action in range(self.num_actions)\n\n table = self.factory.tables[self.current_agent]\n action_result = do_action(table, self.factory, Action(action))\n self.factory.add_move(self.current_agent, Action(action), action_result)", "def apply_action_block(stmt):\n return [ast.ActionBlock(stmt)]", "def process_match(match, team, augment_data=True):\n experiences = []\n\n # This section controls data agumentation of the match. Certain submissions in the draft are\n # submitted consecutively by the same team during the same phase (ie team1 pick0 -> team1 pick1).\n # Although these submissions were produced in a particular order, from a draft perspective\n # there is no difference between submissions of the form\n # team1 pick0 -> team1 pick1 vs team1 pick1 -> team0 pickA\n # provided that the two picks are from the same phase (both bans or both picks).\n # Therefore it is possible to augment the order in which these submissions are processed.\n\n # Note that we can also augment the banning phase if desired. 
Although these submissions technically\n # fall outside of the conditions listed above, in practice bans made in the same phase are\n # interchangable in order.\n\n # Build queue of actions from match reference (augmenting if desired)\n augments_list = [\n (\"blue\",\"bans\",slice(0,3)), # Blue bans 0,1,2 are augmentable\n (\"blue\",\"bans\",slice(3,5)), # Blue bans 3,4 are augmentable\n (\"red\",\"bans\",slice(0,3)),\n (\"red\",\"bans\",slice(3,5)),\n (\"blue\",\"picks\",slice(1,3)), # Blue picks 1,2 are augmentable\n (\"blue\",\"picks\",slice(3,5)), # Blue picks 3,4 are augmentable\n (\"red\",\"picks\",slice(0,2)) # Red picks 0,1 are augmentable\n ]\n if(augment_data):\n augmented_match = deepcopy(match) # Deepcopy match to avoid side effects\n for aug in augments_list:\n (k1,k2,aug_range) = aug\n count = len(augmented_match[k1][k2][aug_range])\n augmented_match[k1][k2][aug_range] = random.sample(augmented_match[k1][k2][aug_range],count)\n\n action_queue = build_action_queue(augmented_match)\n else:\n action_queue = build_action_queue(match)\n\n # Set up draft state\n draft = DraftState(team)\n\n finish_memory = False\n while action_queue:\n # Get next pick from deque\n submission = action_queue.popleft()\n (submitting_team, pick, position) = submission\n\n # There are two conditions under which we want to finalize a memory:\n # 1. Non-designated team has finished submitting picks for this phase (ie next submission belongs to the designated team)\n # 2. Draft is complete (no further picks in the draft)\n if submitting_team == team:\n if finish_memory:\n # This is case 1 to store memory\n r = get_reward(draft, match, a, a)\n s_next = deepcopy(draft)\n memory = (s, a, r, s_next)\n experiences.append(memory)\n finish_memory = False\n # Memory starts when upcoming pick belongs to designated team\n s = deepcopy(draft)\n # Store action = (champIndex, pos)\n a = (pick, position)\n finish_memory = True\n else:\n # Mask positions for pick submissions belonging to the non-designated team\n if position != -1:\n position = 0\n\n draft.update(pick, position)\n\n # Once the queue is empty, store last memory. This is case 2 above.\n # There is always an outstanding memory at the completion of the draft.\n # RED_TEAM always gets last pick. 
Therefore:\n # if team = BLUE_TEAM -> There is an outstanding memory from last RED_TEAM submission\n # if team = RED_TEAM -> Memory is open from just before our last submission\n if(draft.evaluate() == DraftState.DRAFT_COMPLETE):\n assert finish_memory == True\n r = get_reward(draft, match, a, a)\n s_next = deepcopy(draft)\n memory = (s, a, r, s_next)\n experiences.append(memory)\n else:\n print(\"Week {} match_id {} {} vs {}\".format(match[\"week\"], match[\"id\"], match[\"blue_team\"],match[\"red_team\"]))\n draft.display()\n print(\"Error code {}\".format(draft.evaluate()))\n print(\"Number of experiences {}\".format(len(experiences)))\n for experience in experiences:\n _,a,_,_ = experience\n print(a)\n print(\"\")#raise\n\n return experiences", "def choose_action(self):\n for ag in self.agents:\n ag.choose_action()\n self.next_action[ag.name] = ag.action", "def action_for_all(self, name, **kwargs):\n\n for k in self._manager.keys():\n self._manager[k].action(name, **kwargs)", "def play_actions(self, target):\n for method_name, args, kwargs in self.actions:\n method = getattr(target, method_name)\n method(*args, **kwargs)", "def trigger_action_on_multi_resource(data):\n for item in data:\n trigger_action_on_a_resource(item['resource_url'],item['action'],item['provider'][0])\n return \"\",return_code['OK']", "def expand_actions(self, actions):\n results = list()\n\n for action in actions:\n if action in self.aliased_actions:\n results.append(action)\n for item in self.expand_actions(self.aliased_actions[action]):\n results.append(item)\n else:\n results.append(action)\n\n return results", "def get_actions(self) -> List[GameAction]:\n pass", "def test_group_switch_on_all_groups(\n self,\n keymap: Keymap,\n mod_key: str,\n mod: ModifierMask,\n key: str,\n keysyms: tuple[str],\n ):\n for group, keysym in enumerate(keysyms, start=1):\n print(group, keysym)\n keymap.tap_and_check(key, keysym, group=group)\n self.switch_group(keymap, mod_key, mod, group % len(keysyms) + 1)\n # Check the group wraps\n keymap.tap_and_check(key, keysyms[0], group=1)", "def actions(self, action_dict) -> list:\n # use self.game_state\n return []", "def _join_match_group(matches):\n data = {}\n for match in matches:\n data.update(_parse_single_match(match))\n return data", "def at_multimatch_cmd(caller, matches):\r\n string = \"There were multiple matches:\"\r\n for num, match in enumerate(matches):\r\n # each match is a tuple (candidate, cmd)\r\n cmdname, arg, cmd, dum, dum = match\r\n\r\n is_channel = hasattr(cmd, \"is_channel\") and cmd.is_channel\r\n if is_channel:\r\n is_channel = _(\" (channel)\")\r\n else:\r\n is_channel = \"\"\r\n if cmd.is_exit and cmd.destination:\r\n is_exit = (\" (exit to %s)\") % cmd.destination\r\n else:\r\n is_exit = \"\"\r\n\r\n id1 = \"\"\r\n id2 = \"\"\r\n if (not (is_channel or is_exit) and\r\n (hasattr(cmd, 'obj') and cmd.obj != caller) and\r\n hasattr(cmd.obj, \"key\")):\r\n # the command is defined on some other object\r\n id1 = \"%s-%s\" % (num + 1, cmdname)\r\n id2 = \" (%s)\" % (cmd.obj.key)\r\n else:\r\n id1 = \"%s-%s\" % (num + 1, cmdname)\r\n id2 = \"\"\r\n string += \"\\n %s%s%s%s\" % (id1, id2, is_channel, is_exit)\r\n return string", "def ueach(accept, iterable, *args, **kwargs):\n kwargs[\"_unpack\"] = True\n each(accept, iterable, *args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state.
def using(_other, **kwargs): gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback
[ "def using(_other, **kwargs):\n gt_kwargs = {}\n if 'state' in kwargs:\n s = kwargs.pop('state')\n if isinstance(s, (list, tuple)):\n gt_kwargs['stack'] = s\n else:\n gt_kwargs['stack'] = ('root', s)\n\n if _other is this:\n def callback(lexer, match, ctx=None):\n # if keyword arguments are given the callback\n # function has to create a new lexer instance\n if kwargs:\n # XXX: cache that somehow\n kwargs.update(lexer.options)\n lx = lexer.__class__(**kwargs)\n else:\n lx = lexer\n s = match.start()\n for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):\n yield i + s, t, v\n if ctx:\n ctx.pos = match.end()\n else:\n def callback(lexer, match, ctx=None):\n # XXX: cache that somehow\n kwargs.update(lexer.options)\n lx = _other(**kwargs)\n\n s = match.start()\n for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):\n yield i + s, t, v\n if ctx:\n ctx.pos = match.end()\n return callback", "def some(pred):\n\n @Parser\n def _some(tokens, s):\n if s.pos >= len(tokens):\n raise NoParseError('no tokens left in the stream', s)\n else:\n t = tokens[s.pos]\n if pred(t):\n pos = s.pos + 1\n s2 = State(pos, max(pos, s.max))\n if debug:\n log.debug('*matched* \"%s\", new state = %s' % (t, s2))\n return t, s2\n else:\n if debug:\n log.debug('failed \"%s\", state = %s' % (t, s))\n raise NoParseError('got unexpected token', s)\n\n _some.name = '(some)'\n return _some", "def _process_match(self, match):\n if (\n type(match.prev_symbol) is Rule\n and type(match.next_symbol.next_symbol) is Rule\n ):\n # Reuse an existing rule.\n rule: Rule = match.prev_symbol\n self._substitute(rule)\n else:\n # Create a new rule.\n rule = Rule(0, self.bigrams)\n rule.join(rule)\n rule.prev_symbol.append(self.value)\n rule.prev_symbol.append(self.next_symbol.value)\n match._substitute(rule)\n self._substitute(rule)\n self.bigrams[rule.next_symbol._bigram()] = rule.next_symbol\n # Check for an underused rule\n if type(rule.next_symbol.value) is Rule:\n target_rule: Rule = rule.next_symbol.value\n if target_rule.value == 1:\n rule.next_symbol._expand()", "def match_state_id(self, state_id, match):\n pass", "def some(pred):\n\n @Parser\n def _some(tokens, s):\n if s.pos >= len(tokens):\n s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)\n raise NoParseError(\"got unexpected end of input\", s2)\n else:\n t = tokens[s.pos]\n if pred(t):\n pos = s.pos + 1\n s2 = State(pos, max(pos, s.max), s.parser)\n if debug:\n log.debug(\"*matched* %r, new state = %s\" % (t, s2))\n return t, s2\n else:\n s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)\n if debug:\n log.debug(\n \"failed %r, state = %s, expected = %s\" % (t, s2, s2.parser.name)\n )\n raise NoParseError(\"got unexpected token\", s2)\n\n _some.name = \"some(...)\"\n return _some", "def at_language_callback(self, lexer: Any, match: Any) -> Generator:\n from pygments.token import Name\n language = match.group(2)\n # #2484: The language is known if there is a lexer for it.\n if self.pygments_isValidLanguage(language):\n self.language = language\n yield match.start(), Name.Decorator, match.group(0)\n else:\n # Color only the @language, indicating an unknown language.\n yield match.start(), Name.Decorator, match.group(1)", "def match_any_state(self, match):\n pass", "def lex(self, string):\n # Contains the string position that needs to be fast-fowarded to.\n ff = 0\n # The token string. 
Nodes get appended to this.\n t_stream = []\n # The catch-all accumulator.\n other_acc = \"\"\n # These test the string for a token.\n tests = [\n self._test_newline,\n self._test_escape,\n self._test_bin_start,\n self._test_bin_end,\n self._test_unary_tag,\n ]\n for i in xrange(len(string)):\n # Fast forward `i` to `ff`\n if i < ff:\n continue\n # Run all the token tests at the current position\n for test in tests:\n (ff, node) = test(string, i)\n if i < ff:\n # If a test accepts the string then\n # ff will be set to where the test stopped\n # accepting.\n if other_acc:\n if other_acc[0] == \"\\n\":\n # If the first char of the OTHER token starts with a\n # newline, say that it starts on the line after the\n # last tag.\n self._acc_newline_count += 1\n # Flush the `other_acc` into the `t_stream`\n t_stream.append(lexertokens.OTHER(other_acc, self._acc_newline_count))\n self._acc_newline_count = self._newline_count\n other_acc = \"\"\n # Append the node returned by the test onto the `t_stream`\n t_stream.append(node)\n # Break, because a test has accepted the string\n break\n if i >= ff:\n # If no test accepted the string, so add the current character\n # onto the catch-all `other_acc`\n if string[i] == \"\\n\":\n # Count the newlines in the OTHER acc\n self._newline_count += 1\n other_acc += string[i]\n\n if other_acc:\n trailing_newline_count = self._count_trailing_char(other_acc, \"\\n\")\n # Flush the `other_acc` if it contains anything.\n t_stream.append(\n lexertokens.OTHER(\n other_acc,\n self._newline_count - trailing_newline_count)\n )\n\n return tokenstream.TokenStream(t_stream)", "def consume(self, inp):\n if self.state.is_end:\n raise ValueError(\"state %s is terminal\" % self.state.name)\n # Follow the first matched rule of current state.\n for predicate, target, action in self.state.rules:\n if predicate(inp, self.stack):\n if action is not None:\n action(inp, self.stack)\n self.state = target\n break\n else: # No match found, follow default.\n if self.state.default_action is not None:\n self.state.default_action(inp, self.stack)\n self.state = self.state.default_target", "def _run_rule(parser, visitor, start_rule, global_state):\n start_func = getattr(parser, start_rule)\n tree = start_func()\n if global_state:\n visitor.visit(tree)\n return visitor\n else:\n return visitor.visit(tree)", "def _match(self, pattern, input_string, context=None): # pragma: no cover", "def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)", "def test_lexer():\n generator_stream = cStringIO.StringIO()\n generator_stream.write(\"\"\"\n[[:newline:]] NEWLINE\n[[:whitespace:]] IGNORE\n'namespace'[[:whitespace:]]* NAMESPACE\n[a-z][a-z0-9_?!]* ID\n':='[[:whitespace:]]* ASSIGNMENT\n'+'[[:whitespace:]]* PLUS\n'.' 
DOT\n\"\"\")\n generator_stream.seek(0)\n\n table_1 = toe.symbol.table()\n generator = TLexerGenerator(table_1)\n\n lexer = TLexer()\n lexer.states = generator.load(generator_stream, False)\n\n #for i in range(len(lexer.states)):\n # print generator.string_transitions(i)\n\n assert(len(lexer.states) > 2) # initial, invalid\n yield (\"len(lexer.states)\", len(lexer.states))\n\n test_stream = cStringIO.StringIO()\n test_stream.write(\"\"\"namespace aaa.aaa.aaa\n\n\"\"\")\n test_stream.seek(0)\n lexer.source_stream = test_stream\n\n while not lexer.eof_p:\n yield lexer.token\n lexer.consume()", "def prepare_scan():\n\n # Start a new grammar.\n grammar = LexicalGrammar()\n\n # Regular context.\n query = grammar.add_rule('query')\n\n # Whitespace characters and comments (discarded).\n query.add_token(r'''\n SPACE: [\\s]+ | [#] [^\\0\\r\\n]*\n ''', is_junk=True)\n\n # A sequence of characters encloses in single quotes.\n query.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n query.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A number in exponential notation.\n query.add_token(r'''\n FLOAT: ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+\n ''')\n\n # A number with a decimal point.\n query.add_token(r'''\n DECIMAL:\n [0-9]+ [.] [0-9]* | [.] [0-9]+\n ''')\n\n # An unsigned integer number.\n query.add_token(r'''\n INTEGER:\n [0-9]+\n ''')\n\n # A sequence of alphanumeric characters (not starting with a digit).\n query.add_token(r'''\n NAME: [\\w]+\n ''')\n\n # Operators and punctuation characters. The token code coincides\n # with the token value.\n query.add_token(r'''\n SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] |\n [=][=] | [=] | [!][=][=] | [!][=] |\n [\\^] | [?] | [-][>] | [@] | [:][=] |\n [!] | [&] | [|] | [+] | [-] | [*] | [/] |\n [(] | [)] | [{] | [}] | [.] 
| [,] | [:] | [;] | [$]\n ''', is_symbol=True)\n\n # The `[` character starts an identity constructor.\n query.add_token(r'''\n LBRACKET:\n [\\[]\n ''', is_symbol=True, push='identity')\n\n # An unmatched `]`.\n query.add_token(r'''\n BAD_RBRACKET:\n [\\]]\n ''', error=\"cannot find a matching '['\")\n\n # The input end.\n query.add_token(r'''\n END: $\n ''', is_symbol=True, pop=1)\n\n # Identity constructor context.\n identity = grammar.add_rule('identity')\n\n # Whitespace characters (discarded).\n identity.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # Start of a nested label group.\n identity.add_token(r'''\n LBRACKET:\n [\\[] | [(]\n ''', is_symbol=True, push='identity')\n\n # End of a label group or the identity constructor.\n identity.add_token(r'''\n RBRACKET:\n [\\]] | [)]\n ''', is_symbol=True, pop=1)\n\n # Label separator.\n identity.add_token(r'''\n SYMBOL: [.]\n ''', is_symbol=True)\n\n # Unquoted sequence of alphanumeric characters and dashes.\n identity.add_token(r'''\n LABEL: [\\w-]+\n ''')\n\n # A sequence of characters encloses in single quotes.\n identity.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n identity.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A reference indicator.\n identity.add_token(r'''\n REFERENCE:\n [$]\n ''', is_symbol=True, push='name')\n\n # Unexpected end of input.\n identity.add_token(r'''\n END: $\n ''', error=\"cannot find a matching ']'\")\n\n # A context for an identifier following the `$` indicator\n # in an identity constructor. We need a separate rule because\n # `%NAME` and `%LABEL` productions intersect.\n name = grammar.add_rule('name')\n\n # Whitespace characters (discarded).\n name.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # An integer number; not expected here, but ensures that the following\n # `%NAME` production does not start with a digit.\n name.add_token(r'''\n INTEGER:\n [0-9]+\n ''', pop=1)\n\n # A sequence of alphanumeric characters (not starting with a digit).\n name.add_token(r'''\n NAME: [\\w]+\n ''', pop=1)\n\n # Anything else.\n name.add_token(r'''\n OTHER: ()\n ''', is_junk=True, pop=1)\n\n # Add a `%DIRSIG` token in front of `+` and `-` direction indicators\n # to distinguish them from addition/subtraction operators.\n grammar.add_signal('''\n DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` )\n ''')\n\n # Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from\n # being recognized as a division operator.\n grammar.add_signal('''\n PIPESIG:\n `/` `:`\n ''')\n\n # Add `%LHSSIG` in front of a left-hand side of an assignment expression.\n grammar.add_signal('''\n LHSSIG: `$`? %NAME ( `.` `$`? %NAME )*\n ( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? 
`)` )?\n `:=`\n ''')\n\n # Generate and return the scanner.\n return grammar()", "def set_lexer(self) -> Any:\n if self.language == 'patch':\n self.language = 'diff'\n key = f\"{self.language}:{id(self)}\"\n lexer = self.lexers_dict.get(key)\n if not lexer:\n lexer = self.get_lexer(self.language)\n lexer = self.patch_lexer(self.language, lexer)\n self.lexers_dict[key] = lexer\n return lexer", "def word_search_forward(self, event: Event) -> None: # pragma: no cover (interactive)\n # Set flag for show_find_options.\n self.whole_word = True\n self.show_find_options()\n # Set flag for do_find_next().\n self.request_whole_word = True\n # Go.\n self.start_state_machine(event,\n prefix='Word Search: ',\n handler=self.start_search1, # See start-search\n escape_handler=self.start_search_escape1, # See start-search\n )", "def lex(self, line):\n\n # only add line if we are in a continuation or line is not empty\n if self.continuation is True or line.strip() != '':\n self.line += line\n\n self.continuation = False\n # keep running states until out of data or we need a continuation\n while self.continuation is False and len(self.line) > 0:\n for token in self.state():\n if token.ident == Lexer.error.ident:\n yield token\n # reset state on error\n self._reset()\n return\n yield token", "def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. 
Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r", "def isearch_forward_regexp(self, event: Event) -> None: # pragma: no cover (cmd)\n self.start_incremental(event, 'isearch-forward-regexp',\n forward=True, ignoreCase=False, regexp=True)", "def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:\n return new_state" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that are defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state.
def get_tokendefs(cls): tokens = {} inheritable = {} for c in itertools.chain((cls,), cls.__mro__): toks = c.__dict__.get('tokens', {}) for state, items in toks.items(): curitems = tokens.get(state) if curitems is None: tokens[state] = items try: inherit_ndx = items.index(inherit) except ValueError: continue inheritable[state] = inherit_ndx continue inherit_ndx = inheritable.pop(state, None) if inherit_ndx is None: continue # Replace the "inherit" value with the items curitems[inherit_ndx:inherit_ndx+1] = items try: new_inh_ndx = items.index(inherit) except ValueError: pass else: inheritable[state] = inherit_ndx + new_inh_ndx return tokens
[ "def get_tokendefs(cls):\n tokens = {}\n inheritable = {}\n for c in cls.__mro__:\n toks = c.__dict__.get('tokens', {})\n\n for state, items in toks.items():\n curitems = tokens.get(state)\n if curitems is None:\n # N.b. because this is assigned by reference, sufficiently\n # deep hierarchies are processed incrementally (e.g. for\n # A(B), B(C), C(RegexLexer), B will be premodified so X(B)\n # will not see any inherits in B).\n tokens[state] = items\n try:\n inherit_ndx = items.index(inherit)\n except ValueError:\n continue\n inheritable[state] = inherit_ndx\n continue\n\n inherit_ndx = inheritable.pop(state, None)\n if inherit_ndx is None:\n continue\n\n # Replace the \"inherit\" value with the items\n curitems[inherit_ndx:inherit_ndx+1] = items\n try:\n # N.b. this is the index in items (that is, the superclass\n # copy), so offset required when storing below.\n new_inh_ndx = items.index(inherit)\n except ValueError:\n pass\n else:\n inheritable[state] = inherit_ndx + new_inh_ndx\n\n return tokens", "def read_class(is_private, tokens):\n name = tokens.pop(0)\n validate_name(name)\n superclass = None\n if tokens[0] == 'extends':\n tokens.pop(0)\n superclass = tokens.pop(0)\n validate_name(superclass)\n if tokens[0] != '{':\n raise SyntaxError('expected {')\n tokens.pop(0)\n exp = []\n while tokens and tokens[0] != '}':\n exp.append(read_statement(tokens))\n if not tokens:\n raise SyntaxError('expected }')\n else:\n tokens.pop(0)\n return {'op': 'class', \n 'name': name, \n 'body': exp,\n 'super': superclass,\n 'private': is_private}", "def _superdict(cls):\n d = OrderedDict()\n for base in reversed(cls.mro()):\n d.update(base.__dict__)\n return d", "def __call__(cls, *args, **kwds):\r\n if '_tokens' not in cls.__dict__:\r\n cls._all_tokens = {}\r\n cls._tmpname = 0\r\n if hasattr(cls, 'token_variants') and cls.token_variants:\r\n # don't process yet\r\n pass\r\n else:\r\n cls._tokens = cls.process_tokendef('', cls.get_tokendefs())\r\n\r\n return type.__call__(cls, *args, **kwds)", "def merge_inherited(self):\n # Clear the history in case someone is importing and calling this method more than once\n self.merge_history = []\n # Todo: Implement a callback system and get this merge_history appending BS out of this method\n current = {}\n if self.inherited:\n for declaration in self.inherited:\n if not current:\n self.merge_history.append(f\"{Fore.BLUE}Initial Layer {declaration.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n self.merge_history.append(yaml.dump(declaration.base))\n current = declaration.base\n else:\n self.merge_history.append(f\"{Fore.BLUE}Merge Layer {declaration.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n current_lines = yaml.safe_dump(current).split('\\n')\n current = self.merge_controller.merge(current, declaration.base)\n post_lines = yaml.safe_dump(current).split('\\n')\n for line in difflib.ndiff(current_lines, post_lines):\n self.merge_history.append(self._colorize_diff(line))\n self.merge_history.append(f\"{Fore.BLUE}Merge Layer {self.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n current_lines = yaml.safe_dump(current).split('\\n')\n post = self.merge_controller.merge(current, self.base)\n post_lines = yaml.safe_dump(post).split('\\n')\n for line in difflib.ndiff(current_lines, post_lines):\n self.merge_history.append(self._colorize_diff(line))\n self.merge_history.append(\"Final Result\")\n 
self.merge_history.append(\"====================================\")\n self.merge_history.append(yaml.safe_dump(post))\n return post", "def map_subclasses(super_cls, scope):\n sc_map = {}\n for _, cls_obj in scope.items():\n if isinstance(cls_obj, type) and \\\n issubclass(cls_obj, super_cls) and \\\n cls_obj is not super_cls:\n for name in cls_obj.names:\n sc_map[name] = cls_obj\n return sc_map", "def __getstate__(self) -> Dict:\n state = super().__getstate__() # type: ignore\n state.update(self.__dict__)\n return state", "def resolve_super (self, ast):\n for n in ast.values ():\n sups = n.super\n n.super = []\n for sup in sups:\n\tif sup == '':\n\t continue\n if sup in ast:\n n.super.append (ast[sup])\n ast[sup].derived.append (n)\n elif sup in ast_params['includes_map']:\n n.super_non_nodes.append (sup)\n n.includes[ast_params['includes_map'][sup]] = True\n else:\n raise Exception(\"Unknown super type: '%s'.\" % (sup))", "def _BuildTokens(self):\n supported_tokens, supported_sub_tokens = super()._BuildTokens()\n\n supported_tokens |= {'logging', 'owner'}\n\n supported_sub_tokens.update({'option': {'established', 'tcp-established'},\n # Warning, some of these are mapped\n # differently. See _ACTION_TABLE\n 'action': {'accept', 'deny', 'reject', 'next',\n 'reject-with-tcp-rst'}})\n return supported_tokens, supported_sub_tokens", "def construct_superclass_mapping(fine_labels, super_labels):\n pairs = set(zip(fine_labels, super_labels))\n class_mapping = [s_label for _, s_label in sorted(pairs, key=lambda x: x[0])]\n return class_mapping", "def _update_defaults(self, new, base=None):\n base = base or self.__state\n # handle objects not already in instance state\n disjoint = set(new) - set(base)\n base.update({x: new[x] for x in disjoint})\n # handle overlaps\n overlap = set(base) & set(new)\n for item in overlap:\n obj1, obj2 = base[item], new[item]\n if inspect.isfunction(obj2):\n base[item] = obj2\n elif hasattr(obj2, \"__dict__\") and hasattr(obj1, \"__dict__\"):\n if obj1 is not obj2:\n self._update_defaults(obj2.__dict__, obj1.__dict__)\n else:\n base[item] = obj2", "def __init__(self, tokens: List[Token]):\n super(GroupToken, self).__init__(tokens)", "def _parse_all_classes(self, file_contents: str) -> Dict[str, Dict]:\n parsed_content = ast.parse(file_contents)\n ast_classes = [node for node in parsed_content.body if isinstance(node, ast.ClassDef)]\n ast_imports = [node for node in parsed_content.body if isinstance(node, ast.ImportFrom)]\n\n # Filter the list of classes to only include confirmed Operator classes\n operator_classes = self._filter_operator_classes(ast_classes, ast_imports)\n\n return {\n operator.name: {\n \"init_function\": self._get_class_init_function_def(operator),\n \"docstring\": self._get_class_docstring(operator) or \"\",\n }\n for operator in operator_classes\n }", "def __getstate__(self):\n return {k: self.__dict__[k] if k != \"feature_functions_\" else {} for k in self.__dict__}", "def merge(self, tokens):\r\n tokens = iter(tokens)\r\n (lasttype, lastval) = tokens.next()\r\n for ttype, value in tokens:\r\n if ttype is lasttype:\r\n lastval += value\r\n else:\r\n yield(lasttype, lastval)\r\n (lasttype, lastval) = (ttype, value)\r\n if lastval.endswith('\\n'):\r\n lastval = lastval[:-1]\r\n if lastval:\r\n yield(lasttype, lastval)", "def _fill_genotype_class(alleles, genotype_info_to_fill):\n genotype_class = \"homozygous\"\n genotype_subclass = \"reference\"\n alt_subclass_name = \"alt\"\n\n if alleles[0] != alleles[1]:\n genotype_class = \"heterozygous\"\n 
alt_subclass_name = \"compound\"\n\n if \"0\" not in alleles:\n genotype_subclass = alt_subclass_name\n\n result = {genotype_class: genotype_subclass}\n genotype_info_to_fill.genotype_subclass_by_class = result\n return result", "def process_class_def(self, node, state, *_):\n grfn = {\"name\": \"\", \"type\": \"type\", \"attributes\": []}\n namespace = self._get_namespace(self.fortran_file)\n type_name = f\"@type::{namespace}::@global::{node.name}\"\n grfn[\"name\"] = type_name\n\n # Keep a track of declared user-defined types\n self.derived_types.append(node.name.lower())\n self.derived_types_attributes[node.name] = []\n\n attributes = node.body[0].body\n # Populate class member variables into attributes array.\n for attrib in attributes:\n attrib_is_array = False\n attrib_ast = attrib.__repr__().split()[0][2:]\n if attrib_ast == \"ast.AnnAssign\":\n attrib_name = attrib.target.attr\n if attrib.annotation.id in self.annotate_map:\n attrib_type = self.annotate_map[attrib.annotation.id]\n elif attrib.annotation.id in self.derived_types:\n attrib_type = attrib.annotation.id\n elif attrib_ast == \"ast.Assign\":\n attrib_name = attrib.targets[0].attr\n attrib_type = attrib.value.func.id\n assert (\n attrib_type in self.derived_types\n or attrib_type in self.library_types\n ), f\"User-defined type [{attrib_type}] does not exist.\"\n\n if attrib_type == \"Array\":\n attrib_is_array = True\n\n if attrib_is_array:\n elem_type = attrib.value.args[0].id\n # TODO: Currently, derived type array attributes are assumed\n # to be a single dimensional array with integer type. It maybe\n # appropriate to handle a multi-dimensional with variable used\n # as a dimension size.\n dimension_info = attrib.value.args[1]\n is_literal = False\n is_name = False\n\n single_dimension = False\n dimension_list = []\n if isinstance(dimension_info.elts[0], ast.Tuple):\n lower_bound = int(dimension_info.elts[0].elts[0].n)\n single_dimension = True\n\n # Retrieve upper bound of an array.\n if isinstance(dimension_info.elts[0].elts[1], ast.Num):\n upper_bound = int(dimension_info.elts[0].elts[1].n)\n is_literal = True\n elif isinstance(dimension_info.elts[0].elts[1], ast.Name):\n upper_bound = dimension_info.elts[0].elts[1].id\n is_name = True\n else:\n assert False, (\n f\"Currently, ast type \"\n f\"[{type(dimension_info.elts[0].elts[1])}] is not \"\n f\"supported.\"\n )\n\n if is_literal:\n dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n dimension = upper_bound\n else:\n pass\n\n dimension_list.append(dimension)\n\n elif isinstance(dimension_info.elts[0], ast.Call):\n lower_bound = int(dimension_info.elts[0].func.elts[0].n)\n if isinstance(\n dimension_info.elts[0].func.elts[1], ast.Num\n ):\n upper_bound = int(\n dimension_info.elts[0].func.elts[1].n\n )\n is_literal = True\n elif isinstance(\n dimension_info.elts[0].func.elts[1], ast.Name\n ):\n upper_bound = dimension_info.elts[0].func.elts[1].id\n is_name = True\n\n if is_literal:\n first_dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n first_dimension = upper_bound\n\n dimension_list.append(first_dimension)\n\n lower_bound = int(dimension_info.elts[0].args[0].n)\n\n if isinstance(dimension_info.elts[0].args[1], ast.Num):\n upper_bound = int(dimension_info.elts[0].args[1].n)\n is_literal = True\n elif isinstance(dimension_info.elts[0].args[1], ast.Name):\n upper_bound = dimension_info.elts[0].args[1].id\n is_name = True\n\n if is_literal:\n second_dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n second_dimension = 
upper_bound\n\n dimension_list.append(second_dimension)\n\n dimensions = dimension_list\n\n grfn[\"attributes\"].append(\n {\n \"name\": attrib_name,\n \"type\": attrib_type,\n \"elem_type\": elem_type,\n \"dimensions\": dimensions,\n }\n )\n # Here index is not needed for derived type attributes,\n # but simply adding it as a placeholder to make a constant\n # structure with other arrays.\n self.arrays[attrib_name] = {\n \"index\": 0,\n \"dimensions\": dimensions,\n \"elem_type\": elem_type,\n \"mutable\": True,\n }\n else:\n grfn[\"attributes\"].append(\n {\"name\": attrib_name, \"type\": attrib_type}\n )\n pass\n self.derived_types_attributes[node.name].append(attrib_name)\n\n state.variable_types[attrib_name] = attrib_type\n\n return [grfn]", "def add_new_token(self, *vargs, token_subclass=None, **kwargs):\n if token_subclass == None:\n token_subclass = self.default_token_class\n\n class new_token_class(token_subclass):\n pass\n\n assert(issubclass(new_token_class, Token))\n\n new_token_class.init(*vargs, **kwargs)\n\n # XXX: what is new_token_class.name\n if new_token_class.name in self.__table:\n raise KeyError(\"Class named '{}' was already in the token table.\".format(new_token_class.name))\n\n new_token_class.__name__ = token_subclass.__name__ + \"-\" + new_token_class.name\n\n self.add_token(new_token_class)", "def get_token_types(self):\r\n \r\n # With help from: https://deplinenoise.wordpress.com/2012/01/04/python-tip-regex-based-tokenizer/\r\n SCANNER = re.compile(r'''\r\n (\\s+) | # whitespace\r\n (//)[^\\n]* | # comments\r\n 0[xX]([0-9A-Fa-f]+) | # hexadecimal integer literals\r\n (\\d+) | # integer literals\r\n (<<|>>) | # multi-char punctuation\r\n ([][(){}<>=,;:*+-/|&~]) | # punctuation \r\n ([A-Za-z_][A-Za-z0-9_]*) | # identifiers\r\n \"\"\"(.*?)\"\"\" | # multi-line string literal\r\n \"((?:[^\"\\n\\\\]|\\\\.)*)\" | # regular string literal\r\n (.) 
| # an error!\r\n ''', re.DOTALL | re.VERBOSE)\r\n \r\n for match in re.finditer(SCANNER, self.scanner.modified_source_text): \r\n \r\n (space, comment, hexint, integer, mpunct, \r\n punct, word, mstringlit, stringlit, badchar) = match.groups()\r\n \r\n if word: \r\n #-------------------------------------------------------------------\r\n # check if word is an keyword\r\n #-------------------------------------------------------------------\r\n if word in self.symbols.keyword: \r\n keyword_token = Token(word, \"keyword\") \r\n self.token_list.append(keyword_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an identifier\r\n #-------------------------------------------------------------------\r\n else:\r\n identifier_token = Token(word, \"identifier\") \r\n self.token_list.append(identifier_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an integerConstant\r\n #-------------------------------------------------------------------\r\n if integer:\r\n Int_token = Token(integer, \"integerConstant\") \r\n self.token_list.append(Int_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an symbol \r\n #-------------------------------------------------------------------\r\n if punct: \r\n symbol_token = Token(punct, \"symbol\") \r\n self.token_list.append(symbol_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an stringConstant\r\n #------------------------------------------------------------------- \r\n if stringlit: \r\n string_token = Token(stringlit, \"stringConstant\") \r\n self.token_list.append(string_token) \r\n #-------------------------------------------------------------------\r\n # append EOF token\r\n #------------------------------------------------------------------- \r\n EOF_token = Token(self.endmark, \"EOF\") \r\n self.token_list.append(EOF_token) \r\n \r\n return self.token_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate cls after preprocessing its token definitions.
def __call__(cls, *args, **kwds):
    if '_tokens' not in cls.__dict__:
        cls._all_tokens = {}
        cls._tmpname = 0
        if hasattr(cls, 'token_variants') and cls.token_variants:
            # don't process yet
            pass
        else:
            cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

    return type.__call__(cls, *args, **kwds)
[ "def __init__(self):\n self.__parser = SpaCyParser()\n self.__word_substitutor = WordSubstitutor()", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")", "def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }", "def __init__(self, toktype, line, lexeme):\n self.__toktype, self.__line, self.__lexeme = toktype, line, lexeme", "def __init__(self, text):\n\n self.text = text\n\n self.tokenize()", "def __init__(self, parser_instance):\n self.parser_instance = parser_instance\n # Dict of sorted lists of constructs by [head_or_tail][trigger_token_label]\n self.construct_lookup_dict = {}\n self.construct_lookup_dict[HEAD] = {}\n self.construct_lookup_dict[TAIL] = {}", "def __init__(self, tokens: List[Token]):\n super(GroupToken, self).__init__(tokens)", "def add_new_token(self, *vargs, token_subclass=None, **kwargs):\n if token_subclass == None:\n token_subclass = self.default_token_class\n\n class new_token_class(token_subclass):\n pass\n\n assert(issubclass(new_token_class, Token))\n\n new_token_class.init(*vargs, **kwargs)\n\n # XXX: what is new_token_class.name\n if new_token_class.name in self.__table:\n raise KeyError(\"Class named '{}' was already in the token table.\".format(new_token_class.name))\n\n new_token_class.__name__ = token_subclass.__name__ + \"-\" + new_token_class.name\n\n self.add_token(new_token_class)", "def __init__(self, corpus, name, tokens):\n\n self.corpus = corpus\n\n self.name = name\n self.tokens = asarray(tokens)", "def __init__(self, token, state, extra):\n self.state = state\n self.token = token\n self.extra = extra\n pass", "def __init__(self, depth, tokens=None):\n self.depth = depth\n self._tokens = tokens or []\n self.disable = False\n\n if self._tokens:\n # Set up a doubly linked list.\n for index, tok in enumerate(self._tokens[1:]):\n # Note, 'index' is the index to the previous token.\n tok.previous_token = self._tokens[index]\n self._tokens[index].next_token = tok", "def Parse(self, lex):\n\n # The next two variables store a stack of commands the user wants\n # to manually add to the list of stackable instance_commands.\n # (Allowing the users to directly manipulate the transformation stack\n # is an experimental feature as of 2015- Most users don't need this.)\n user_push_left_commands = []\n user_push_right_commands = []\n\n #sys.stdout.write(' -- Parse() invoked --\\n')\n\n # Keep track of the location in the users' input files where this\n # class object is first defined. (Keep in mind that the user might\n # augment their original class definition, adding new content to an\n # existing class. In that case self.srcloc_begin will have already\n # been assigned. 
We don't want to overwrite it in that case.)\n if self.srcloc_begin is None: # <-- not defined yet?\n self.srcloc_begin = lex.GetSrcLoc()\n\n while True:\n\n cmd_token = lex.get_token()\n\n #print('Parse(): token = \\\"'+cmd_token+'\\\", '+lex.error_leader())\n\n if cmd_token == lex.eof:\n #print('Parse(): EOF encountered\\n')\n break\n\n if (cmd_token in ('write',\n 'write_once',\n 'create_var',\n 'create_static_var',\n 'replace')):\n\n open_paren = lex.get_token()\n\n #print('Parse(): open_paren=\\\"'+open_paren+'\\\"')\n if open_paren == '{':\n # ..then the user neglected to specify the \"dest\" file-name\n # argument. In that case, supply the default, ''.\n # (which is shorthand for the standard out in this case)\n open_curly = open_paren[0]\n open_paren = ''\n close_paren = ''\n tmpl_filename = ''\n srcloc = lex.GetSrcLoc()\n else:\n tmpl_filename = lex.get_token()\n if tmpl_filename == ')':\n tmpl_filename = ''\n close_paren = ')'\n else:\n close_paren = lex.get_token()\n open_curly = lex.get_token()\n srcloc = lex.GetSrcLoc()\n\n if ((cmd_token == 'create_var') or\n (cmd_token == 'create_static_var')):\n tmpl_filename = None\n # This means: define the template without attaching\n # a file name to it. (IE., don't write the contents\n # of what's enclosed in the curly brackets { } to a file.\n # Why?\n # \"create_var\" commands are implemented as \"write() {...}\"\n # commands (containing one or more variables) which\n # never get written to a file or the terminal. Parsing\n # the contents of the curly brackets defines the variables \n # inside in the same way as parsing the text inside an\n # ordinary \"write() {...}\" command.\n\n if (cmd_token == 'replace'):\n tmpl_filename = \"ttree_replacements.txt\"\n\n if ((open_curly != '{') or\n ((open_paren == '') and (close_paren != '')) or\n ((open_paren == '(') and (close_paren != ')'))):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in ' + lex.error_leader() + '\\n\\n'\n 'Syntax error at the beginning of the \\\"' + cmd_token + '\\\" command.')\n if tmpl_filename != None:\n tmpl_filename = RemoveOuterQuotes(\n tmpl_filename, lex.quotes)\n # ( The previous line is similar to:\n # tmpl_filename = tmpl_filename.strip(lex.quotes) )\n\n tmpl_contents = lex.ReadTemplate()\n StaticObj.CleanupReadTemplate(tmpl_contents, lex)\n\n #sys.stdout.write(' Parse() after ReadTemplate, tokens:\\n\\n')\n # print(tmpl_contents)\n # sys.stdout.write('\\n----------------\\n')\n\n if (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n\n # Check for a particular bug:\n # Ordinary instance variables (preceded by a '$')\n # should never appear in a write_once() statement.\n for entry in tmpl_contents:\n if (isinstance(entry, VarRef) and\n (entry.prefix[0] == '$')):\n err_msg = ('Error(' + g_module_name + '.StaticObj.Parse()):\\n' +\n ' Error near ' + ErrorLeader(entry.srcloc.infile,\n entry.srcloc.lineno) + '\\n' +\n ' Illegal variable: \\\"' + entry.prefix + entry.descr_str + entry.suffix + '\\\"\\n' +\n ' All variables in a \\\"' + cmd_token + '\\\" statement must be statically\\n' +\n ' defined, and hence they must begin with a \\'@\\' prefix character.\\n' +\n ' (not a \\'$\\' character).\\n')\n if (cmd_token == 'write_once'):\n err_msg += ' Suggestion: Use the \\\"write()\\\" command instead.\\n'\n raise InputError(err_msg)\n\n if cmd_token == 'write':\n commands = self.instance_commands\n elif (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 
'create_static_var'):\n commands = self.commands\n elif (cmd_token == 'create_var'):\n commands = self.instance_commands\n else:\n assert(False)\n\n command = WriteFileCommand(tmpl_filename,\n tmpl_contents,\n srcloc)\n commands.append(command)\n\n # end of \"if (cmd_token == 'write') or (cmd_token ==\n # 'write_once'):\"\n\n elif cmd_token == 'delete':\n\n instobj_descr_str = lex.get_token()\n instobj_srcloc = lex.GetSrcLoc()\n delete_command = DeleteCommand(instobj_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n self.instance_commands.append(mod_command)\n\n elif cmd_token == 'using':\n\n namespacecom_str = lex.get_token()\n if namespacecom_str != 'namespace':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The \\\"' + cmd_token + '\\\" command must be followed by the \\\"namespace\\\" keyword.')\n namespace_str = lex.get_token()\n\n stnode = StrToNode(namespace_str,\n self,\n lex.GetSrcLoc())\n\n self.namespaces.append(stnode)\n\n elif cmd_token == 'category':\n cat_name = lex.get_token()\n\n cat_count_start = 1\n cat_count_incr = 1\n backup_wordterminators = lex.wordterminators\n lex.wordterminators += ','\n #sys.stderr.write('DEBUG: wordterminators=\"'+str(lex.wordterminators)+'\"\\n')\n\n open_paren = lex.get_token()\n if (open_paren == '('):\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer, float, or string\n try:\n cat_count_start = int(token)\n except ValueError:\n try:\n cat_count_start = float(token)\n except ValueError:\n cat_count_start = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer,float,or string\n try:\n cat_count_incr = int(token)\n except ValueError:\n try:\n cat_count_incr = float(token)\n except ValueError:\n cat_count_incr = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token != ')':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '...\\\" has too many arguments,\\n'\n ' or lacks a close-paren \\')\\'.\\n')\n\n else:\n lex.push_token(open_paren)\n\n if (isinstance(cat_count_start, basestring) or\n isinstance(cat_count_incr, basestring)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '(' +\n str(cat_count_start) + ',' +\n str(cat_count_incr) + ')\\\"\\n'\n ' Only numeric counters are currently supported.\\n')\n\n # check for really stupid and unlikely errors:\n if type(cat_count_start) is not type(cat_count_incr):\n if ((isinstance(cat_count_start, int) or\n isinstance(cat_count_start, float))\n and\n (isinstance(cat_count_incr, int) or\n isinstance(cat_count_incr, float))):\n cat_count_start = float(cat_count_start)\n cat_count_incr = float(cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Problem with \\\"' + cmd_token + '\\\" command.\\n')\n\n prefix = cat_name[0]\n cat_name = cat_name[1:]\n # Add this category to the list.\n if prefix == '@':\n self.categories[cat_name] = Category(cat_name)\n self.categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n elif prefix == '$':\n self.instance_categories[cat_name] = 
Category(cat_name)\n self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' category name = \\\"' + cat_name + '\\\" lacks a \\'$\\' or \\'&\\' prefix.\\n'\n ' This one-character prefix indicates whether the variables in this\\n'\n ' new category will be static or dynamics variables\\n')\n\n\n lex.wordterminators = backup_wordterminators\n\n\n elif (cmd_token == '}') or (cmd_token == ''):\n # a '}' character means we have reached the end of our scope.\n # Stop parsing and let the caller deal with the remaining text.\n # (And a '' means we reached the end of the file... I think.)\n break\n\n # elif (cmd_token == 'include'):\n # \"include filename\" loads a file (adds it to the file stack)\n # The \"TtreeShlex\" class (from which \"lex\" inherits) handles\n # \"include\" statements (ie. \"source\" statements) automatically.\n\n elif ((cmd_token == 'push') or\n (cmd_token == 'push_left') or\n (cmd_token == 'push_right')):\n\n push_cmd_src_loc = lex.GetSrcLoc()\n push_cmd_text = lex.GetParenExpr()\n if ((len(push_cmd_text) < 2) or\n (push_cmd_text[0] != '(') or\n (push_cmd_text[-1] != ')')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Bad \\\"push\\\" command. Expected an expression in parenthesis.\\n')\n push_cmd_text = push_cmd_text[1:-1]\n\n if (cmd_token == 'push_right'):\n push_command = PushRightCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_right_commands.append(push_command)\n else:\n push_command = PushLeftCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_left_commands.append(push_command)\n self.instance_commands.append(push_command)\n\n elif ((cmd_token == 'pop') or\n (cmd_token == 'pop_left') or\n (cmd_token == 'pop_right')):\n\n pop_cmd_text = lex.GetParenExpr()\n pop_cmd_src_loc = lex.GetSrcLoc()\n if (cmd_token == 'pop_right'):\n if len(user_push_right_commands) > 0:\n push_command = user_push_right_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many \\\"pop_right\\\" commands.\\n')\n pop_command = PopRightCommand(push_command,\n pop_cmd_src_loc)\n else:\n if len(user_push_left_commands) > 0:\n push_command = user_push_left_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many pop, (or pop_left) commands.\\n')\n pop_command = PopLeftCommand(push_command,\n pop_cmd_src_loc)\n self.instance_commands.append(pop_command)\n\n else:\n\n # Otherwise, 'cmd_token' is not a command at all.\n # Instead it's the name of an object which needs to be\n # defined or instantiated.\n # First, let's figure out which.\n\n # (small detail: The \"class\" keyword is optional\n # and can be skipped.)\n if cmd_token == 'class':\n object_name = lex.get_token()\n else:\n object_name = cmd_token\n\n next_symbol = lex.get_token()\n #print('Parse(): next_token=\\\"'+next_symbol+'\\\"')\n\n class_parents = []\n\n if next_symbol == 'inherits':\n\n # Then read in the list of classes which are parents of\n # of this class. (Multiple inheritance is allowed.)\n # (We don't yet check to insure that these are valid class\n # names. 
We'll do this later in LookupStaticRefs().)\n\n syntax_err_inherits = False\n\n while True:\n next_symbol = lex.get_token()\n if ((next_symbol == '{') or\n (next_symbol == lex.eof)):\n break\n elif (next_symbol == '='):\n syntax_err_inherits = True\n break\n else:\n class_parents.append(StrToNode(next_symbol,\n self,\n lex.GetSrcLoc()))\n if len(class_parents) == 0:\n syntax_err_inherits = True\n\n if syntax_err_inherits:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"inherits\\\" should be followed by one or more class names.\\n')\n\n if next_symbol == '{':\n child_name = object_name\n\n # Check to see if this class has already been defined.\n # (IE. check if it present in the list of children.)\n # If the name (child_name) matches another class (child),\n # then the contents of the new class will be appended to\n # the old. This way, class definitions can be augmented\n # later. (This is the way \"namespaces\" work in C++.)\n child = self.children.get(child_name)\n # If found, we refer to it as \"child\".\n # If not, then we create a new StaticObj named \"child\".\n if child is None:\n child = StaticObj(child_name, self)\n self.children[child_name] = child\n assert(child.name == child_name)\n\n # Either way we invoke child.Parse(), to\n # add contents (class commands) to child.\n child.Parse(lex)\n child.class_parents += class_parents\n\n elif next_symbol == '=':\n next_symbol = lex.get_token()\n if next_symbol == 'new':\n base_name = object_name\n base_srcloc = lex.GetSrcLoc()\n array_slice_str = ''\n if base_name.find('/') != -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n'\n ' (You can not instantiate some other object\\'s members.)\\n'\n ' Invalid instance name: \\\"' + base_name + '\\\"\\n')\n\n elif base_name in self.instname_refs:\n ref_srcloc = self.instname_refs[base_name]\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Duplicate class/array \\\"' + base_name + '\\\"\\n'\n ' This occurs near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n'\n ' and also near:\\n'\n ' ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n')\n else:\n self.instname_refs[base_name] = base_srcloc\n\n # Check for syntax allowing the user to instantiate\n # PART of an array. For example, check for this syntax:\n # \"monomers[20-29] = new ...\". 
This only fills in a\n # portion of the array from: monomers[20]...monomers[29]\n #\n # We also have to deal with multidimensional syntax\n # like this: \"cells[3][2-3][1][4-7] = new...\"\n # Split the \"cells[3][2-3][2][4-7][2]\" string into\n # \"cells[3][\", \"][1][\", and \"]\".\n # Later, we will instantiate InstanceObjs with names:\n # \"cells[3][2][1][4]\"\n # \"cells[3][2][1][5]\"\n # \"cells[3][2][1][6]\"\n # \"cells[3][2][1][7]\"\n # \"cells[3][3][1][4]\"\n # \"cells[3][3][1][5]\"\n # \"cells[3][3][1][6]\"\n # \"cells[3][3][1][7]\"\n\n p1 = base_name.find('[')\n if p1 == -1:\n p1 = len(base_name)\n else:\n p1 += 1\n array_name_tkns = [base_name[0:p1]]\n array_name_offsets = []\n\n p2 = -1\n p4 = p1\n while p4 < len(base_name):\n p3 = base_name.find(']', p1)\n\n if p3 == -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\']\\' character following:\\n'\n ' \\\"' +\n base_name[0:p1] +\n '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n # Search for a '-', ':', or '*' character between []\n # For example \"monomers[20-29] = \"\n # If present, the user wants us to fill a range\n # inside an array. This could be a multi-dimensional\n # array, (eg \"cells[3][2-6][4-11] = \"), so we must\n # figure out which entries in the array the user\n # wants us to fill (in this case, \"[2-6][4-11]\")\n p2 = base_name.find('-', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find(':', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find('*', p1)\n if p2 == -1:\n p2 = len(base_name)\n\n p4 = p3 + 1\n if p4 < len(base_name):\n if base_name[p4] == '[':\n p4 += 1 # skip over it\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\'[\\' character forllowing a \\']\\' character in\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n if p2 > p3:\n # Then no '-', ':', or '*' character was found\n # between '[' and the subsequent ']' character\n # In that case, ignore this token\n\n token = base_name[p1:p4]\n # append all this text to the previous token\n if len(array_name_tkns) == 0:\n array_name_tkns.append(token)\n else:\n array_name_tkns[-1] = array_name_tkns[-1] + token\n array_slice_str = 'slice '\n else:\n\n assert((p1 < p2) and (p2 < p3))\n index_offset_str = base_name[p1:p2]\n if len(index_offset_str) == 0:\n index_offset = 0\n\n elif (not str.isdigit(index_offset_str)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a nonnegative integer preceding the \\'' +\n base_name[\n p2] + '\\' character in:\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n else:\n index_offset = int(index_offset_str)\n token = base_name[p3:p4]\n array_name_tkns.append(token)\n array_name_offsets.append(index_offset)\n\n p1 = p4\n\n # If the statobj_str token contains a ']' character\n # then this means the user wants us to make multiple\n # copies of this template. The number of copies\n # to instantiate is enclosed in the [] characters\n # (Example wat = new Water[3000] creates\n # 3000 instantiations of the Water template\n # named wat[1], wat[2], wat[3], ... wat[3000]).\n\n # Note: Here '[' and ']' have a special meaning.\n # So lex.get_token() should not treat them as\n # ordinary word characters. 
To prevent this:\n orig_wordterminators = lex.wordterminators\n lex.wordterminators += '[],'\n\n class_name_str = lex.get_token()\n if ((class_name_str == lex.eof) or\n (class_name_str == '}')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n 'Class ends prematurely. (Incomplete \\\"new\\\" statement.)')\n\n assert(len(class_name_str) > 0)\n\n if (class_name_str[0] == '['):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' new ' + class_name_str + '\\n'\n 'Bracketed number should be preceeded by a class name.')\n class_names = []\n weights = []\n num_by_type = []\n if class_name_str == 'random':\n class_names, weights, num_by_type = self._ParseRandom(\n lex)\n tmp_token = lex.get_token()\n if len(tmp_token) > 0:\n if tmp_token[0] == '.':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + tmp_token + '\\\" should not follow random()\\n'\n '\\n'\n ' Coordinate transformations and other commands (such as \\\"' +\n tmp_token + '\\\")\\n'\n ' should appear after each class name inside the random() statement,\\n'\n ' not after it. For example, do not use:\\n'\n ' \\\"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\\\"\\n'\n ' Use this instead:\\n'\n ' \\\"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\\\"\\n')\n lex.push_token(tmp_token)\n else:\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n\n array_size = []\n array_suffixes = []\n array_srclocs = []\n\n # A general \"new\" statement could look like this:\n # \"m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)\n # [3].trans(0,0,4.5)\"\n # So far we have processed \"m = new Mol.scale(3)\".\n # Now, we need to deal with:\n # \"[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)\"\n while True:\n new_token = lex.get_token()\n # if ((new_token == '') or (new_token == lex.eof)):\n # break\n if new_token == '[':\n number_str = lex.get_token()\n close_bracket = lex.get_token()\n if ((not str.isdigit(number_str)) or\n (close_bracket != ']')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near ' + lex.error_leader() + '\\n'\n ' A \\'[\\' character should be followed by a number and a \\']\\' character.')\n array_size.append(int(number_str))\n suffix = lex.get_token()\n\n if ((suffix == '') or (suffix == lex.eof)):\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n break\n if suffix[0] == '.':\n lex.push_token(suffix[1:])\n suffix_func = lex.GetParenExpr()\n suffix = '.' 
+ suffix_func\n array_suffixes.append(suffix)\n array_srclocs.append(lex.GetSrcLoc())\n else:\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n lex.push_token(suffix)\n if suffix != '[':\n break\n else:\n lex.push_token(new_token)\n break\n srcloc_final = lex.GetSrcLoc()\n\n lex.wordterminators = orig_wordterminators\n\n assert(len(array_size) == len(array_suffixes))\n\n if len(array_size) > 0:\n if len(array_name_offsets) == 0:\n assert(len(array_name_tkns) == 1)\n array_name_offsets = [0] * len(array_size)\n array_name_tkns[0] = array_name_tkns[0] + '['\n for d in range(0, len(array_size) - 1):\n array_name_tkns.append('][')\n array_name_tkns.append(']')\n\n if len(array_name_offsets) != len(array_size):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near/before ' + lex.error_leader() + '\\n'\n ' Array ' + array_slice_str +\n 'dimensionality on the left side of the \\'=\\' character (' + str(\n len(array_name_offsets)) + ')\\n'\n ' does not match the array dimensionality on the right side (' + str(len(array_size)) + ').\\n')\n\n # If the user wants us to instantiate a\n # multidimensional array of class instances\n # then we must loop through this multidimensional\n # array and create a new instance for each entry.\n # For example fill a 3 dimensional volume\n # with 1000 water molecules\n # Example 1:\n # solvent = new Water [10][10][10]\n # (The coordinates must be read separately.)\n # In this example array_size = [10,10,10]\n # array_suffixes = ['','','']\n # Example 2:\n # solvent = new Water.transcm(0,0,0)\n # [10].trans(0,0,4)\n # [10].trans(0,4,0).rot(45,0,0,1)\n # [10].trans(4,0,0)\n # (This command generates a 10x10x10 lattice\n # simple cubic lattice of regularly spaced\n # water molecules pointing the same direction.)\n # In this example array_size = [10,10,10]\n # and\n # class_suffix = 'transcm(0,0,0)'\n # and\n # array_suffixes = ['trans(0,0,4)',\n # 'trans(0,4,0).rot(45,0,0,1)',\n # 'trans(4,0,0)']\n # Note that tree ignores the \"trans()\"\n # commands, it stores them so that inherited\n # classes can attempt to process them.\n\n D = len(array_size)\n if D > 0:\n\n i_elem = 0 # (used to look up selection_list[])\n if len(num_by_type) > 0:\n selection_list = []\n for i in range(0, len(num_by_type)):\n selection_list += [i] * num_by_type[i]\n random.shuffle(selection_list)\n\n num_elements = 1\n for d in range(0, D):\n num_elements *= array_size[d]\n err_msg_str = str(array_size[0])\n for d in range(1, D):\n err_msg_str += '*' + str(array_size[d])\n if num_elements != len(selection_list):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near or before ' + lex.error_leader() + '\\n'\n ' The sum of the numbers in the \\\"new random([],[])\\\" command (' + str(\n len(selection_list)) + ')\\n'\n ' does not equal the number of elements in the array (' + err_msg_str + ')\\n')\n\n digits = [0 for d in range(0, D)]\n table_filled = False\n pushed_commands = []\n while (not table_filled):\n instance_name = array_name_tkns[0]\n for d in range(0, D):\n i = digits[d]\n instance_name += str(i +\n array_name_offsets[d]) +\\\n array_name_tkns[d + 1]\n\n # Does the user want us to select\n # a class at random?\n if len(class_names) > 0:\n\n if len(num_by_type) > 0:\n class_name_str = class_names[\n selection_list[i_elem]]\n else:\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(\n class_name_str, 
lex)\n\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(instance_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n # Now go to the next entry in the table.\n # The indices of this table are similar to\n # a D-digit integer. We increment this d-digit\n # number now.\n d_carry = D - 1\n while True:\n digits[d_carry] += 1\n if digits[d_carry] >= array_size[d_carry]:\n digits[d_carry] = 0\n if array_suffixes[d_carry] != '':\n for i in range(0, array_size[d_carry] - 1):\n partner = pushed_commands.pop()\n command = PopRightCommand(partner,\n srcloc_final)\n self.instance_commands.append(\n command)\n d_carry -= 1\n else:\n if array_suffixes[d_carry] != '':\n command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),\n array_srclocs[d_carry])\n pushed_commands.append(command)\n self.instance_commands.append(\n command)\n break\n if d_carry < 0:\n table_filled = True\n break\n\n # (used to look up selection_list[])\n i_elem += 1\n pass\n\n else:\n if len(class_names) > 0:\n assert(len(num_by_type) == 0)\n # if len(num_by_type) > 0:\n # class_name_str = class_names[selection_list[i_elem]]\n # else:\n # class_name_str = RandomSelect(class_names,\n # weights)\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(base_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n else:\n\n # Now check for commands using this syntax:\n #\n # \"MolNew = MolOld.rot(45,1,0,0).scale(100.0)\"\n # /|\\ /|\\ `-----------.------------'\n # | | |\n # child_name parent_name optional suffix\n\n child_name = object_name\n parent_name_str = next_symbol\n\n child = StaticObj(child_name, self)\n\n parent_name, suffix, suffix_srcloc = \\\n self._ProcessClassName(parent_name_str, lex)\n\n child.class_parents.append(StrToNode(parent_name,\n self,\n lex.GetSrcLoc()))\n\n if suffix != '':\n # Assume the command is a StackableCommand. 
(This\n # way it will enclose the commands of the parents.)\n # Stackable commands come in (Push...Pop) pairs.\n push_command = PushLeftCommand(suffix,\n suffix_srcloc)\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc)\n push_mod_command = ModCommand(push_command, './')\n pop_mod_command = ModCommand(pop_command, './')\n child.instance_commands_push.append(\n push_mod_command)\n child.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n #sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\\n')\n\n #sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\\n')\n\n # Check to see if this class has already been defined.\n if self.children.get(child_name) is not None:\n if self.children[i].IsDeleted():\n del self.children[child_name]\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The name \\\"' + child_name + '\\\" is already in use.')\n\n self.children[child_name] = child\n\n else:\n\n # Otherwise hopefully this is a post-instance command\n # (a command applied to a class which has been instantiated)\n # In that case, the object_name would be followed by\n # a dot and a function-call containing a '(' paren (which\n # would have ended up stored in the next_symbol variable).\n\n open_paren_encountered = False\n if (next_symbol == '('):\n open_paren_encountered = True\n # put '(' back in the stream\n lex.push_token(next_symbol)\n\n i_dot = object_name.rfind('.')\n i_slash = object_name.rfind('/')\n dot_encountered = ((i_dot != -1) and\n ((i_slash == -1) or (i_slash < i_dot)))\n\n if (open_paren_encountered and dot_encountered and\n (object_name[:1] != '[')):\n\n obj_descr_str, suffix, suffix_srcloc = \\\n self._ExtractSuffix(object_name, lex)\n\n path_tokens = obj_descr_str.split('/')\n\n i_last_ptkn, staticobj = FollowPath(path_tokens,\n self,\n lex.GetSrcLoc())\n instobj_descr_str = './' + \\\n '/'.join(path_tokens[i_last_ptkn:])\n\n # I still support the \"object_name.delete()\" syntax for\n # backwards compatibility. (However newer input files\n # use this equivalent syntax: \"delete object_name\")\n if suffix == 'delete()':\n delete_command = DeleteCommand(suffix_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n staticobj.instance_commands.append(mod_command)\n else:\n push_command = PushLeftCommand(suffix,\n suffix_srcloc,\n '.')\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc,\n '.')\n push_mod_command = ModCommand(push_command,\n instobj_descr_str)\n pop_mod_command = ModCommand(pop_command,\n instobj_descr_str)\n if instobj_descr_str != './':\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands\\n')\n staticobj.instance_commands.append(\n push_mod_command)\n staticobj.instance_commands.append(\n pop_mod_command)\n else:\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands_push\\n')\n # Question: Should I make these PushRight commands and\n # append them in the opposite order?\n # If so I also have to worry about the case\n # above.\n staticobj.instance_commands_push.append(\n push_mod_command)\n staticobj.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n else:\n # Otherwise, the cmd_token is not any of these:\n # \"write\", \"write_once\", \"replace\",\n # \"create_var\", \"create_static_var\",\n # \"delete\", or \"category\".\n # ... 
and it is ALSO not any of these:\n # the name of a class (StaticObj), or\n # the name of an instance (InstanceObj)\n # followed by either a '.' or \"= new\"\n #\n # In that case, it is a syntax error:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Syntax error at or before ' + lex.error_leader() + '\\n'\n ' \\\"' + object_name + ' ' + next_symbol + '\\\".')\n\n # Keep track of the location in the user's input files\n # where the definition of this object ends.\n self.srcloc_end = lex.GetSrcLoc()\n\n # Finally, if there are any remaining user_push_left_commands or\n # user_push_right_commands, deal with them (by popping them).\n for push_command in user_push_left_commands:\n push_command = user_push_left_commands.pop()\n pop_command = PopLeftCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)\n\n for push_command in user_push_right_commands:\n push_command = user_push_right_commands.pop()\n pop_command = PopRightCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)", "def __init__(self):\n self.commandList = []\n self.tagStack = []\n self.symbolLocationTable = {}\n self.macroMap = {}\n self.endTagSymbol = 1\n\n self.commandHandler = {\n TAL_DEFINE: self.compileCmdDefine,\n TAL_CONDITION: self.compileCmdCondition,\n TAL_REPEAT: self.compileCmdRepeat,\n TAL_CONTENT: self.compileCmdContent,\n TAL_REPLACE: self.compileCmdReplace,\n TAL_ATTRIBUTES: self.compileCmdAttributes,\n TAL_OMITTAG: self.compileCmdOmitTag,\n # Metal commands\n METAL_USE_MACRO: self.compileMetalUseMacro,\n METAL_DEFINE_SLOT: self.compileMetalDefineSlot,\n METAL_FILL_SLOT: self.compileMetalFillSlot,\n METAL_DEFINE_MACRO: self.compileMetalDefineMacro\n }\n\n # Default namespaces\n self.setTALPrefix('tal')\n self.tal_namespace_prefix_stack = []\n self.metal_namespace_prefix_stack = []\n self.tal_namespace_prefix_stack.append('tal')\n self.setMETALPrefix('metal')\n self.metal_namespace_prefix_stack.append('metal')\n\n self.log = logging.getLogger(\"simpleTAL.TemplateCompiler\")", "def __init__(self, lexeme, token_type, line_num):\n self.type = token_type\n self.lexeme = lexeme\n self.lineNum = line_num\n self.lexicalError = None", "def instantiate_classes(self) -> None:\n self.config_init = self.parser.instantiate_classes(self.config)\n self.datamodule = self.config_init.get(\"data\")\n self.model = self.config_init[\"model\"]\n self.instantiate_trainer()", "def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = 
Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok", "def __init__(self, tokeniser, lStopwords):\n\n self.tokeniser = tokeniser\n self.lStopwords = lStopwords", "def __init__(self, tokenization_src={}, tokenization_tgt={}):\n self.tokenization_src = tokenization_src\n self.tokenization_tgt = tokenization_tgt\n self.tokenization_src_lut = {self.tokenization_src[key]: key for key in self.tokenization_src}\n self.tokenization_tgt_lut = {self.tokenization_tgt[key]: key for key in self.tokenization_tgt}\n self.rules = {} # dict used to look up rules faster (get_rules function)\n self.all_rules = [] # list of all rules", "def test_constructor(self):\n\n t = TokenKind(5, \"foo\")\n\n self.assertEqual(t.value, 5)\n self.assertEqual(t.name, \"foo\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream.
def do_insertions(insertions, tokens):
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        for item in tokens:
            yield item
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the postition of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            yield realpos, t, tmpval
            realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        yield realpos, t, v[oldi:]
        realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
[ "def applyToTokens(self, tokens: List[str]) -> None:\n\n if self.__tokenIndex > len(tokens):\n raise IndexError(\"InsertOperation: cannot insert at index {} (out of bounds)\".format(self.__tokenIndex))\n\n tokens.insert(self.__tokenIndex, self.__newToken)", "def Insert(iterable, index, items):\n items = items if is_iterable(items) else itt.repeat(items)\n for elem, item in zip(iterable, items):\n elem = list(elem)\n head, tail = elem[:index], elem[index:]\n yield tuple(head + as_list(item) + tail)", "def insert(self, target, insertions):\r\n for string in insertions:\r\n index = random.randint(0, len(target)-1)\r\n target = target[0:index] + string + target[index:]\r\n return target", "def insert(self, index, elements):\n i = index\n for element in elements:\n self.list.insert(i, element)\n i += 1", "def insert_sampling(self, token_list: List[str or Tuple[str, str]],\n sample_idx: List[int]) -> List[str or Tuple[str, str]]:\n result = []\n for i, token in enumerate(token_list):\n if i in sample_idx:\n result.append(token)\n result.append(token)\n return result", "def insert_at_index(self, index: int, items: list) -> None:\n for i in range(len(items)):\n self.entries.insert(index + i, items[i])\n self.list_size += len(items)", "def process_tokenized_input(self, tokens):\n # Tokenize input\n bert_tokens, orig_to_bert_tok_map = self._wordpiece_tokenization(tokens)\n\n # Pad the sequences\n max_sent = len(max(bert_tokens, key=len))\n bert_tokens = [sent + [PAD] * (max_sent - len(sent)) for sent in bert_tokens]\n\n # Convert token to vocabulary indices\n indexed_tokens = [self.tokenizer.convert_tokens_to_ids(sent) for sent in bert_tokens]\n indexed_tokens = torch.tensor(indexed_tokens).to(self.device)\n\n # Generate attention masks for pad values\n attention_masks = [[float(idx > 0) for idx in sent] for sent in indexed_tokens]\n attention_masks = torch.tensor(attention_masks).to(self.device)\n\n return indexed_tokens, orig_to_bert_tok_map, attention_masks", "def extract_insertions(\n fusions, # type: Iterable[Fusion]\n gtf_path, # type: pathlib.Path\n features_path, # type: pathlib.Path\n chromosomes=None, # type: List[str]\n assembled_gtf_path=None, # type: pathlib.Path\n ffpm_fastq_path=None # type: pathlib.Path\n): # type: (...) 
-> Iterable[Insertion]\n\n # Annotate for genes.\n gtf_reference = TranscriptReference.from_gtf(\n gtf_path, chromosomes=chromosomes)\n\n annotated = annotate_fusions_for_genes(fusions, gtf_reference)\n\n # Annotate for assembly (if given).\n if assembled_gtf_path is not None:\n assem_reference = TranscriptReference.from_gtf(\n assembled_gtf_path, chromosomes=chromosomes)\n\n annotated = annotate_fusions_for_assembly(annotated, gtf_reference,\n assem_reference)\n\n # Annotate for transposon.\n annotated = annotate_fusions_for_transposon(annotated, features_path)\n\n # Drop any fusions without a transposon feature.\n annotated = (fusion for fusion in annotated\n if 'feature_name' in fusion.metadata)\n\n # Calculate FFPM scores.\n if ffpm_fastq_path is not None:\n annotated = annotate_ffpm(annotated, fastq_path=ffpm_fastq_path)\n\n # Convert to insertions.\n insertions = Insertion.from_transposon_fusions(\n annotated, id_fmt_str='INS_{}')\n\n for insertion in insertions:\n yield insertion", "def get_insertions(self, aln, minlength, maxlength, extension):\n return self.__get_insdel(aln, _modeller.mod_alignment_next_insert,\n minlength, maxlength, extension)", "def insert(self, index, command, chained_by=None):\n command = self._normalize_command(command)\n chained_by = self._normalize_chained_by(chained_by)\n\n self.commands.insert(index, command)\n self._strings.insert(index, str(command))\n self._operators.insert(index, chained_by)", "def call(self, insertions):\n\n raise NotImplementedError()", "def insert(self, index, s):\n raise NotImplementedError", "def applyToTokens(self, tokens: List[str]) -> None:\n\n if self.__tokenIndex >= len(tokens):\n raise IndexError(\"ReplaceOperation: cannot replace at index {} (out of bounds)\".format(self.__tokenIndex))\n\n tokens[self.__tokenIndex] = self.__newToken", "def insert(self, index=None, *nodes):\n # If no index is provided we assume we are appending\n index = len(self.nodes) if index is None else index\n for node in nodes:\n if self.is_descendant(node) or node == self:\n # You cannot add a node to its descendant/ child or itself\n continue\n node.remove() # Remove node from whatever parent it belongs to\n self.nodes.insert(index, node)\n index += 1\n node.parent_node = self\n node.depth = self.depth + 1\n node.lift(self.body)\n if self._expanded:\n self.collapse()\n self.expand()\n if len(self.nodes) > 0:\n self._set_expander(self.COLLAPSED_ICON)\n self.expand()", "def insert_sequence(dna1,dna2,index):\r\n return dna1[:index]+dna2+dna1[index:]", "def bulk_insert(self, keys):\n for k in keys:\n self.insert(k)", "def insert(list_it, insert_data, n):\n # Reset iterators\n list_it.reset()\n\n # n = int(n)\n\n # Get to the nth line in the list\n for _ in range(n):\n list_it.next()\n\n # Add elements\n for lines in insert_data:\n list_it.add_here(lines)", "def insert(self, i, x):", "def append_tokens(self, tokens):\n if type(tokens) != list:\n raise FatalRuntimeError(\"Tokens type error\")\n self.tokens += tokens\n self.length += len(tokens)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a subclass of baselexer that accepts the ObjectiveC syntax extensions.
def objective(baselexer):
    # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
    # since that's quite common in ordinary C/C++ files.  It's OK to match
    # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
    #
    # The upshot of this is that we CANNOT match @class or @interface
    _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')

    # Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
    # (note the identifier is *optional* when there is a ':'!)
    _oc_message = re.compile(r'\[\s*[a-zA-Z_][a-zA-Z0-9_]*\s+'
                             r'(?:[a-zA-Z_][a-zA-Z0-9_]*\s*\]|'
                             r'(?:[a-zA-Z_][a-zA-Z0-9_]*)?:)')

    class GeneratedObjectiveCVariant(baselexer):
        """
        Implements Objective-C syntax on top of an existing C family lexer.
        """

        tokens = {
            'statements': [
                (r'@"', String, 'string'),
                (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
                 String.Char),
                (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
                (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
                (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
                (r'@0[0-7]+[Ll]?', Number.Oct),
                (r'@\d+[Ll]?', Number.Integer),
                (r'(in|@selector|@private|@protected|@public|@encode|'
                 r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
                 r'@synthesize|@dynamic|@optional)\b', Keyword),
                (r'(id|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
                 Keyword.Type),
                (r'@(true|false|YES|NO)\n', Name.Builtin),
                (r'(YES|NO|nil)\b', Name.Builtin),
                (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_classname')),
                (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_forward_classname')),
                inherit,
            ],
            'oc_classname' : [
                # interface definition that inherits
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
                 bygroups(Name.Class, Text, Name.Class), '#pop'),
                # interface definition for a category
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
                 bygroups(Name.Class, Text, Name.Label), '#pop'),
                # simple interface / implementation
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
            ],
            'oc_forward_classname' : [
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
                 bygroups(Name.Class, Text), 'oc_forward_classname'),
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
                 bygroups(Name.Class, Text), '#pop')
            ],
            'root': [
                # methods
                (r'^([-+])(\s*)'                         # method marker
                 r'(\(.*?\))?(\s*)'                      # return type
                 r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)',        # begin of method name
                 bygroups(Keyword, Text, using(this), Text, Name.Function),
                 'method'),
                inherit,
            ],
            'method': [
                include('whitespace'),
                # TODO unsure if ellipses are allowed elsewhere, see
                # discussion in Issue 789
                (r',', Punctuation),
                (r'\.\.\.', Punctuation),
                (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)',
                 bygroups(using(this), Name.Variable)),
                (r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
                (';', Punctuation, '#pop'),
                ('{', Punctuation, 'function'),
                ('', Text, '#pop'),
            ],
        }

        def analyse_text(text):
            if _oc_keywords.search(text):
                return 1.0
            elif '@"' in text:  # strings
                return 0.8
            elif _oc_message.search(text):
                return 0.8
            return 0

    return GeneratedObjectiveCVariant
[ "def create_lexer(self):\n raise NotImplementedError()", "def test_lexer():\n generator_stream = cStringIO.StringIO()\n generator_stream.write(\"\"\"\n[[:newline:]] NEWLINE\n[[:whitespace:]] IGNORE\n'namespace'[[:whitespace:]]* NAMESPACE\n[a-z][a-z0-9_?!]* ID\n':='[[:whitespace:]]* ASSIGNMENT\n'+'[[:whitespace:]]* PLUS\n'.' DOT\n\"\"\")\n generator_stream.seek(0)\n\n table_1 = toe.symbol.table()\n generator = TLexerGenerator(table_1)\n\n lexer = TLexer()\n lexer.states = generator.load(generator_stream, False)\n\n #for i in range(len(lexer.states)):\n # print generator.string_transitions(i)\n\n assert(len(lexer.states) > 2) # initial, invalid\n yield (\"len(lexer.states)\", len(lexer.states))\n\n test_stream = cStringIO.StringIO()\n test_stream.write(\"\"\"namespace aaa.aaa.aaa\n\n\"\"\")\n test_stream.seek(0)\n lexer.source_stream = test_stream\n\n while not lexer.eof_p:\n yield lexer.token\n lexer.consume()", "def make_lex(symbols):\n ...", "def grammar(cls):\n name = pp.Word(pp.alphanums + '_-')\n return (\n pp.Suppress(pp.Literal('@keyframes')) +\n name +\n pp.Suppress(pp.Literal('{')) +\n pp.ZeroOrMore(KeyframeProperties.parser()) +\n pp.Suppress(pp.Literal('}'))\n )", "def grammar(cls):\n # Todo: Handle keyframe properties where there are more than one\n # keyframe selectors\n keyframe_selector = (\n (\n pp.Word(pp.nums + '.') +\n pp.Suppress(pp.Literal('%')).leaveWhitespace()\n ) |\n pp.Literal('from') |\n pp.Literal('to')\n )\n return (\n keyframe_selector +\n pp.Suppress(pp.Literal('{')) +\n pp.ZeroOrMore(Property.parser()) +\n pp.Suppress(pp.Literal('}'))\n )", "def generate_syntax(core_grammar_path, extension_grammar_paths):\n\n def load_keywords(core_grammar_path, extension_grammar_paths, keywords):\n \"\"\"Load JSON arammers.\"\"\"\n\n def instruction_exists(instruction):\n \"\"\"Returns True if instruction does, False otherwise.\"\"\"\n for names in keywords.itervalues():\n for inst in names:\n if inst == instruction:\n return True\n return False\n\n with open(core_grammar_path) as grammar_file:\n grammar = json.loads(grammar_file.read())\n\n if 'instructions' in grammar:\n for instruction in grammar['instructions']:\n opname = instruction['opname']\n if not instruction_exists(opname):\n keywords['Instruction'].append(opname)\n\n if 'operand_kinds' in grammar:\n for operand_kind in grammar['operand_kinds']:\n if 'enumerants' in operand_kind:\n for enumerant in operand_kind['enumerants']:\n enumname = enumerant['enumerant']\n if enumname not in keywords['Enumerant']:\n keywords['Enumerant'].append(enumname)\n\n extinst_group_names = []\n for grammar_path in extension_grammar_paths:\n with open(grammar_path) as grammar_file:\n grammar = json.loads(grammar_file.read())\n grammar_name = ''.join(\n word.capitalize()\n for word in os.path.basename(grammar_path).lstrip(\n 'extinst.').rstrip('.grammer.json').split('.'))\n\n if 'instructions' in grammar:\n keywords[grammar_name] = []\n for instruction in grammar['instructions']:\n opname = instruction['opname']\n if not instruction_exists(opname):\n keywords[grammar_name].append(opname)\n\n extinst_group_names.append('Spirv{0}'.format(grammar_name))\n\n return keywords, extinst_group_names\n\n def write(string):\n \"\"\"Append to the content string.\"\"\"\n write.content += string\n\n write.content = ''\n\n keywords, extinst_group_names = load_keywords(\n core_grammar_path, extension_grammar_paths, KEYWORDS)\n\n write('''\" File: spirv.vim\n\" Author: Kenneth Benzie (Benie) <k.benzie83@gmail.com>\n\" Description: Vim syntax file for 
the Khronos Group's SPIR-V standard.\n\" Last Modified: {0}\n\n\" Don't load the sytnax multiple times\nif exists('b:current_syntax')\n finish\nendif\n\n'''.format(datetime.datetime.now().strftime('%B %d, %Y')))\n\n write(r'''\" Generic matches\nsyn match SpirvSpecialComment contained\n\\ \"\\(SPIR-V\\|\\(Version\\|Generator\\|Bound\\|Schema\\):\\)\"\nsyn match SpirvComment \";.*$\" contains=SpirvSpecialComment\nsyn match SpirvError \"\\w\\+\"\nsyn match SpirvID \"%\\w\\+\"\nsyn region SpirvString start=+\"+ end=+\"+\nsyn match SpirvNumber \"\\s\\zs\\d\\+\"\nsyn match SpirvFloat \"\\s\\zs\\d\\+\\.\\d\\+\"\n''')\n\n for group, group_keywords in keywords.iteritems():\n write('\\n\" %s keywords\\n' % group)\n syn_keyword = 'syn keyword Spirv%s' % group\n write(syn_keyword)\n\n length = len(syn_keyword)\n for keyword in group_keywords:\n opname = ' ' + keyword\n keyword_length = len(opname)\n\n if length + keyword_length > 80:\n write('\\n\\\\')\n length = 1\n\n write(opname)\n length += keyword_length\n write('\\n')\n\n write('\\n\" Define highlight groups\\n')\n for group_name in GROUP_NAMES:\n write('hi default link {0} {1}\\n'.format(group_name[0], group_name[1]))\n\n write('''\n\" Define current ID highlight group\nif exists('g:spirv_enable_current_id') && g:spirv_enable_current_id\n execute 'hi SpirvCurrentID '.g:spirv_current_id_highlight\nendif\n''')\n\n if len(extinst_group_names):\n groups = ([], [])\n for group in extinst_group_names:\n groups[0].append(' hi default link {0} SpirvError'.format(group))\n groups[1].append(' hi default link {0} SpirvInstruction'.format(\n group))\n\n write('''\n\" Define extended instruction highlight groups\nif exists('g:spirv_enable_extinst_error') && g:spirv_enable_extinst_error\n{0}\nelse\n{1}\nendif\n'''.format('\\n'.join(groups[0]), '\\n'.join(groups[1])))\n\n return write.content", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")", "def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield 
self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok", "def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\r\n\r\n # A concrete compiler class that does not override compile()\r\n # should implement _compile().\r\n pass", "def Parse(self, lex):\n\n # The next two variables store a stack of commands the user wants\n # to manually add to the list of stackable instance_commands.\n # (Allowing the users to directly manipulate the transformation stack\n # is an experimental feature as of 2015- Most users don't need this.)\n user_push_left_commands = []\n user_push_right_commands = []\n\n #sys.stdout.write(' -- Parse() invoked --\\n')\n\n # Keep track of the location in the users' input files where this\n # class object is first defined. (Keep in mind that the user might\n # augment their original class definition, adding new content to an\n # existing class. In that case self.srcloc_begin will have already\n # been assigned. We don't want to overwrite it in that case.)\n if self.srcloc_begin is None: # <-- not defined yet?\n self.srcloc_begin = lex.GetSrcLoc()\n\n while True:\n\n cmd_token = lex.get_token()\n\n #print('Parse(): token = \\\"'+cmd_token+'\\\", '+lex.error_leader())\n\n if cmd_token == lex.eof:\n #print('Parse(): EOF encountered\\n')\n break\n\n if (cmd_token in ('write',\n 'write_once',\n 'create_var',\n 'create_static_var',\n 'replace')):\n\n open_paren = lex.get_token()\n\n #print('Parse(): open_paren=\\\"'+open_paren+'\\\"')\n if open_paren == '{':\n # ..then the user neglected to specify the \"dest\" file-name\n # argument. In that case, supply the default, ''.\n # (which is shorthand for the standard out in this case)\n open_curly = open_paren[0]\n open_paren = ''\n close_paren = ''\n tmpl_filename = ''\n srcloc = lex.GetSrcLoc()\n else:\n tmpl_filename = lex.get_token()\n if tmpl_filename == ')':\n tmpl_filename = ''\n close_paren = ')'\n else:\n close_paren = lex.get_token()\n open_curly = lex.get_token()\n srcloc = lex.GetSrcLoc()\n\n if ((cmd_token == 'create_var') or\n (cmd_token == 'create_static_var')):\n tmpl_filename = None\n # This means: define the template without attaching\n # a file name to it. (IE., don't write the contents\n # of what's enclosed in the curly brackets { } to a file.\n # Why?\n # \"create_var\" commands are implemented as \"write() {...}\"\n # commands (containing one or more variables) which\n # never get written to a file or the terminal. 
Parsing\n # the contents of the curly brackets defines the variables \n # inside in the same way as parsing the text inside an\n # ordinary \"write() {...}\" command.\n\n if (cmd_token == 'replace'):\n tmpl_filename = \"ttree_replacements.txt\"\n\n if ((open_curly != '{') or\n ((open_paren == '') and (close_paren != '')) or\n ((open_paren == '(') and (close_paren != ')'))):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in ' + lex.error_leader() + '\\n\\n'\n 'Syntax error at the beginning of the \\\"' + cmd_token + '\\\" command.')\n if tmpl_filename != None:\n tmpl_filename = RemoveOuterQuotes(\n tmpl_filename, lex.quotes)\n # ( The previous line is similar to:\n # tmpl_filename = tmpl_filename.strip(lex.quotes) )\n\n tmpl_contents = lex.ReadTemplate()\n StaticObj.CleanupReadTemplate(tmpl_contents, lex)\n\n #sys.stdout.write(' Parse() after ReadTemplate, tokens:\\n\\n')\n # print(tmpl_contents)\n # sys.stdout.write('\\n----------------\\n')\n\n if (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n\n # Check for a particular bug:\n # Ordinary instance variables (preceded by a '$')\n # should never appear in a write_once() statement.\n for entry in tmpl_contents:\n if (isinstance(entry, VarRef) and\n (entry.prefix[0] == '$')):\n err_msg = ('Error(' + g_module_name + '.StaticObj.Parse()):\\n' +\n ' Error near ' + ErrorLeader(entry.srcloc.infile,\n entry.srcloc.lineno) + '\\n' +\n ' Illegal variable: \\\"' + entry.prefix + entry.descr_str + entry.suffix + '\\\"\\n' +\n ' All variables in a \\\"' + cmd_token + '\\\" statement must be statically\\n' +\n ' defined, and hence they must begin with a \\'@\\' prefix character.\\n' +\n ' (not a \\'$\\' character).\\n')\n if (cmd_token == 'write_once'):\n err_msg += ' Suggestion: Use the \\\"write()\\\" command instead.\\n'\n raise InputError(err_msg)\n\n if cmd_token == 'write':\n commands = self.instance_commands\n elif (cmd_token == 'write_once' or\n cmd_token == 'replace' or\n cmd_token == 'create_static_var'):\n commands = self.commands\n elif (cmd_token == 'create_var'):\n commands = self.instance_commands\n else:\n assert(False)\n\n command = WriteFileCommand(tmpl_filename,\n tmpl_contents,\n srcloc)\n commands.append(command)\n\n # end of \"if (cmd_token == 'write') or (cmd_token ==\n # 'write_once'):\"\n\n elif cmd_token == 'delete':\n\n instobj_descr_str = lex.get_token()\n instobj_srcloc = lex.GetSrcLoc()\n delete_command = DeleteCommand(instobj_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n self.instance_commands.append(mod_command)\n\n elif cmd_token == 'using':\n\n namespacecom_str = lex.get_token()\n if namespacecom_str != 'namespace':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The \\\"' + cmd_token + '\\\" command must be followed by the \\\"namespace\\\" keyword.')\n namespace_str = lex.get_token()\n\n stnode = StrToNode(namespace_str,\n self,\n lex.GetSrcLoc())\n\n self.namespaces.append(stnode)\n\n elif cmd_token == 'category':\n cat_name = lex.get_token()\n\n cat_count_start = 1\n cat_count_incr = 1\n backup_wordterminators = lex.wordterminators\n lex.wordterminators += ','\n #sys.stderr.write('DEBUG: wordterminators=\"'+str(lex.wordterminators)+'\"\\n')\n\n open_paren = lex.get_token()\n if (open_paren == '('):\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer, float, or 
string\n try:\n cat_count_start = int(token)\n except ValueError:\n try:\n cat_count_start = float(token)\n except ValueError:\n cat_count_start = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token == ',':\n token = lex.get_token()\n if token != ')':\n # Interpret token as an integer,float,or string\n try:\n cat_count_incr = int(token)\n except ValueError:\n try:\n cat_count_incr = float(token)\n except ValueError:\n cat_count_incr = RemoveOuterQuotes(\n token, '\\'\\\"')\n token = lex.get_token()\n if token != ')':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '...\\\" has too many arguments,\\n'\n ' or lacks a close-paren \\')\\'.\\n')\n\n else:\n lex.push_token(open_paren)\n\n if (isinstance(cat_count_start, basestring) or\n isinstance(cat_count_incr, basestring)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + cmd_token + ' ' + cat_name + '(' +\n str(cat_count_start) + ',' +\n str(cat_count_incr) + ')\\\"\\n'\n ' Only numeric counters are currently supported.\\n')\n\n # check for really stupid and unlikely errors:\n if type(cat_count_start) is not type(cat_count_incr):\n if ((isinstance(cat_count_start, int) or\n isinstance(cat_count_start, float))\n and\n (isinstance(cat_count_incr, int) or\n isinstance(cat_count_incr, float))):\n cat_count_start = float(cat_count_start)\n cat_count_incr = float(cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Problem with \\\"' + cmd_token + '\\\" command.\\n')\n\n prefix = cat_name[0]\n cat_name = cat_name[1:]\n # Add this category to the list.\n if prefix == '@':\n self.categories[cat_name] = Category(cat_name)\n self.categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n elif prefix == '$':\n self.instance_categories[cat_name] = Category(cat_name)\n self.instance_categories[cat_name].counter = SimpleCounter(cat_count_start,\n cat_count_incr)\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' category name = \\\"' + cat_name + '\\\" lacks a \\'$\\' or \\'&\\' prefix.\\n'\n ' This one-character prefix indicates whether the variables in this\\n'\n ' new category will be static or dynamics variables\\n')\n\n\n lex.wordterminators = backup_wordterminators\n\n\n elif (cmd_token == '}') or (cmd_token == ''):\n # a '}' character means we have reached the end of our scope.\n # Stop parsing and let the caller deal with the remaining text.\n # (And a '' means we reached the end of the file... I think.)\n break\n\n # elif (cmd_token == 'include'):\n # \"include filename\" loads a file (adds it to the file stack)\n # The \"TtreeShlex\" class (from which \"lex\" inherits) handles\n # \"include\" statements (ie. \"source\" statements) automatically.\n\n elif ((cmd_token == 'push') or\n (cmd_token == 'push_left') or\n (cmd_token == 'push_right')):\n\n push_cmd_src_loc = lex.GetSrcLoc()\n push_cmd_text = lex.GetParenExpr()\n if ((len(push_cmd_text) < 2) or\n (push_cmd_text[0] != '(') or\n (push_cmd_text[-1] != ')')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Bad \\\"push\\\" command. 
Expected an expression in parenthesis.\\n')\n push_cmd_text = push_cmd_text[1:-1]\n\n if (cmd_token == 'push_right'):\n push_command = PushRightCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_right_commands.append(push_command)\n else:\n push_command = PushLeftCommand(push_cmd_text,\n push_cmd_src_loc)\n user_push_left_commands.append(push_command)\n self.instance_commands.append(push_command)\n\n elif ((cmd_token == 'pop') or\n (cmd_token == 'pop_left') or\n (cmd_token == 'pop_right')):\n\n pop_cmd_text = lex.GetParenExpr()\n pop_cmd_src_loc = lex.GetSrcLoc()\n if (cmd_token == 'pop_right'):\n if len(user_push_right_commands) > 0:\n push_command = user_push_right_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many \\\"pop_right\\\" commands.\\n')\n pop_command = PopRightCommand(push_command,\n pop_cmd_src_loc)\n else:\n if len(user_push_left_commands) > 0:\n push_command = user_push_left_commands.pop()\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' Too many pop, (or pop_left) commands.\\n')\n pop_command = PopLeftCommand(push_command,\n pop_cmd_src_loc)\n self.instance_commands.append(pop_command)\n\n else:\n\n # Otherwise, 'cmd_token' is not a command at all.\n # Instead it's the name of an object which needs to be\n # defined or instantiated.\n # First, let's figure out which.\n\n # (small detail: The \"class\" keyword is optional\n # and can be skipped.)\n if cmd_token == 'class':\n object_name = lex.get_token()\n else:\n object_name = cmd_token\n\n next_symbol = lex.get_token()\n #print('Parse(): next_token=\\\"'+next_symbol+'\\\"')\n\n class_parents = []\n\n if next_symbol == 'inherits':\n\n # Then read in the list of classes which are parents of\n # of this class. (Multiple inheritance is allowed.)\n # (We don't yet check to insure that these are valid class\n # names. We'll do this later in LookupStaticRefs().)\n\n syntax_err_inherits = False\n\n while True:\n next_symbol = lex.get_token()\n if ((next_symbol == '{') or\n (next_symbol == lex.eof)):\n break\n elif (next_symbol == '='):\n syntax_err_inherits = True\n break\n else:\n class_parents.append(StrToNode(next_symbol,\n self,\n lex.GetSrcLoc()))\n if len(class_parents) == 0:\n syntax_err_inherits = True\n\n if syntax_err_inherits:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"inherits\\\" should be followed by one or more class names.\\n')\n\n if next_symbol == '{':\n child_name = object_name\n\n # Check to see if this class has already been defined.\n # (IE. check if it present in the list of children.)\n # If the name (child_name) matches another class (child),\n # then the contents of the new class will be appended to\n # the old. This way, class definitions can be augmented\n # later. 
(This is the way \"namespaces\" work in C++.)\n child = self.children.get(child_name)\n # If found, we refer to it as \"child\".\n # If not, then we create a new StaticObj named \"child\".\n if child is None:\n child = StaticObj(child_name, self)\n self.children[child_name] = child\n assert(child.name == child_name)\n\n # Either way we invoke child.Parse(), to\n # add contents (class commands) to child.\n child.Parse(lex)\n child.class_parents += class_parents\n\n elif next_symbol == '=':\n next_symbol = lex.get_token()\n if next_symbol == 'new':\n base_name = object_name\n base_srcloc = lex.GetSrcLoc()\n array_slice_str = ''\n if base_name.find('/') != -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n'\n ' (You can not instantiate some other object\\'s members.)\\n'\n ' Invalid instance name: \\\"' + base_name + '\\\"\\n')\n\n elif base_name in self.instname_refs:\n ref_srcloc = self.instname_refs[base_name]\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Duplicate class/array \\\"' + base_name + '\\\"\\n'\n ' This occurs near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n'\n ' and also near:\\n'\n ' ' + ErrorLeader(base_srcloc.infile,\n base_srcloc.lineno) + '\\n')\n else:\n self.instname_refs[base_name] = base_srcloc\n\n # Check for syntax allowing the user to instantiate\n # PART of an array. For example, check for this syntax:\n # \"monomers[20-29] = new ...\". This only fills in a\n # portion of the array from: monomers[20]...monomers[29]\n #\n # We also have to deal with multidimensional syntax\n # like this: \"cells[3][2-3][1][4-7] = new...\"\n # Split the \"cells[3][2-3][2][4-7][2]\" string into\n # \"cells[3][\", \"][1][\", and \"]\".\n # Later, we will instantiate InstanceObjs with names:\n # \"cells[3][2][1][4]\"\n # \"cells[3][2][1][5]\"\n # \"cells[3][2][1][6]\"\n # \"cells[3][2][1][7]\"\n # \"cells[3][3][1][4]\"\n # \"cells[3][3][1][5]\"\n # \"cells[3][3][1][6]\"\n # \"cells[3][3][1][7]\"\n\n p1 = base_name.find('[')\n if p1 == -1:\n p1 = len(base_name)\n else:\n p1 += 1\n array_name_tkns = [base_name[0:p1]]\n array_name_offsets = []\n\n p2 = -1\n p4 = p1\n while p4 < len(base_name):\n p3 = base_name.find(']', p1)\n\n if p3 == -1:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\']\\' character following:\\n'\n ' \\\"' +\n base_name[0:p1] +\n '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n # Search for a '-', ':', or '*' character between []\n # For example \"monomers[20-29] = \"\n # If present, the user wants us to fill a range\n # inside an array. 
This could be a multi-dimensional\n # array, (eg \"cells[3][2-6][4-11] = \"), so we must\n # figure out which entries in the array the user\n # wants us to fill (in this case, \"[2-6][4-11]\")\n p2 = base_name.find('-', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find(':', p1)\n if p2 == -1:\n p2 = len(base_name)\n if p2 > p3:\n p2 = base_name.find('*', p1)\n if p2 == -1:\n p2 = len(base_name)\n\n p4 = p3 + 1\n if p4 < len(base_name):\n if base_name[p4] == '[':\n p4 += 1 # skip over it\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a \\'[\\' character forllowing a \\']\\' character in\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n\n if p2 > p3:\n # Then no '-', ':', or '*' character was found\n # between '[' and the subsequent ']' character\n # In that case, ignore this token\n\n token = base_name[p1:p4]\n # append all this text to the previous token\n if len(array_name_tkns) == 0:\n array_name_tkns.append(token)\n else:\n array_name_tkns[-1] = array_name_tkns[-1] + token\n array_slice_str = 'slice '\n else:\n\n assert((p1 < p2) and (p2 < p3))\n index_offset_str = base_name[p1:p2]\n if len(index_offset_str) == 0:\n index_offset = 0\n\n elif (not str.isdigit(index_offset_str)):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Expected a nonnegative integer preceding the \\'' +\n base_name[\n p2] + '\\' character in:\\n'\n ' \\\"' +\n base_name[\n 0:p2 + 1] + '\\\", located near:\\n'\n ' ' + ErrorLeader(ref_srcloc.infile,\n ref_srcloc.lineno) + '\\n')\n else:\n index_offset = int(index_offset_str)\n token = base_name[p3:p4]\n array_name_tkns.append(token)\n array_name_offsets.append(index_offset)\n\n p1 = p4\n\n # If the statobj_str token contains a ']' character\n # then this means the user wants us to make multiple\n # copies of this template. The number of copies\n # to instantiate is enclosed in the [] characters\n # (Example wat = new Water[3000] creates\n # 3000 instantiations of the Water template\n # named wat[1], wat[2], wat[3], ... wat[3000]).\n\n # Note: Here '[' and ']' have a special meaning.\n # So lex.get_token() should not treat them as\n # ordinary word characters. To prevent this:\n orig_wordterminators = lex.wordterminators\n lex.wordterminators += '[],'\n\n class_name_str = lex.get_token()\n if ((class_name_str == lex.eof) or\n (class_name_str == '}')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n 'Class ends prematurely. 
(Incomplete \\\"new\\\" statement.)')\n\n assert(len(class_name_str) > 0)\n\n if (class_name_str[0] == '['):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' new ' + class_name_str + '\\n'\n 'Bracketed number should be preceeded by a class name.')\n class_names = []\n weights = []\n num_by_type = []\n if class_name_str == 'random':\n class_names, weights, num_by_type = self._ParseRandom(\n lex)\n tmp_token = lex.get_token()\n if len(tmp_token) > 0:\n if tmp_token[0] == '.':\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' \\\"' + tmp_token + '\\\" should not follow random()\\n'\n '\\n'\n ' Coordinate transformations and other commands (such as \\\"' +\n tmp_token + '\\\")\\n'\n ' should appear after each class name inside the random() statement,\\n'\n ' not after it. For example, do not use:\\n'\n ' \\\"lipids=new random([DPPC,DLPC],[0.5,0.5]).move(0,0,23.6)\\\"\\n'\n ' Use this instead:\\n'\n ' \\\"lipids=new random([DPPC.move(0,0,23.6),DLPC.move(0,0,23.6)],[0.5,0.5])\\\"\\n')\n lex.push_token(tmp_token)\n else:\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n\n array_size = []\n array_suffixes = []\n array_srclocs = []\n\n # A general \"new\" statement could look like this:\n # \"m = new Mol.scale(3) [2].trans(0,4.5,0).rotate(30,0,0,1)\n # [3].trans(0,0,4.5)\"\n # So far we have processed \"m = new Mol.scale(3)\".\n # Now, we need to deal with:\n # \"[2].trans(0,4.5,0).rotate(30,0,0,1) [3].trans(0,0,4.5)\"\n while True:\n new_token = lex.get_token()\n # if ((new_token == '') or (new_token == lex.eof)):\n # break\n if new_token == '[':\n number_str = lex.get_token()\n close_bracket = lex.get_token()\n if ((not str.isdigit(number_str)) or\n (close_bracket != ']')):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near ' + lex.error_leader() + '\\n'\n ' A \\'[\\' character should be followed by a number and a \\']\\' character.')\n array_size.append(int(number_str))\n suffix = lex.get_token()\n\n if ((suffix == '') or (suffix == lex.eof)):\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n break\n if suffix[0] == '.':\n lex.push_token(suffix[1:])\n suffix_func = lex.GetParenExpr()\n suffix = '.' 
+ suffix_func\n array_suffixes.append(suffix)\n array_srclocs.append(lex.GetSrcLoc())\n else:\n array_suffixes.append('')\n array_srclocs.append(base_srcloc)\n lex.push_token(suffix)\n if suffix != '[':\n break\n else:\n lex.push_token(new_token)\n break\n srcloc_final = lex.GetSrcLoc()\n\n lex.wordterminators = orig_wordterminators\n\n assert(len(array_size) == len(array_suffixes))\n\n if len(array_size) > 0:\n if len(array_name_offsets) == 0:\n assert(len(array_name_tkns) == 1)\n array_name_offsets = [0] * len(array_size)\n array_name_tkns[0] = array_name_tkns[0] + '['\n for d in range(0, len(array_size) - 1):\n array_name_tkns.append('][')\n array_name_tkns.append(']')\n\n if len(array_name_offsets) != len(array_size):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error in \\\"new\\\" statement near/before ' + lex.error_leader() + '\\n'\n ' Array ' + array_slice_str +\n 'dimensionality on the left side of the \\'=\\' character (' + str(\n len(array_name_offsets)) + ')\\n'\n ' does not match the array dimensionality on the right side (' + str(len(array_size)) + ').\\n')\n\n # If the user wants us to instantiate a\n # multidimensional array of class instances\n # then we must loop through this multidimensional\n # array and create a new instance for each entry.\n # For example fill a 3 dimensional volume\n # with 1000 water molecules\n # Example 1:\n # solvent = new Water [10][10][10]\n # (The coordinates must be read separately.)\n # In this example array_size = [10,10,10]\n # array_suffixes = ['','','']\n # Example 2:\n # solvent = new Water.transcm(0,0,0)\n # [10].trans(0,0,4)\n # [10].trans(0,4,0).rot(45,0,0,1)\n # [10].trans(4,0,0)\n # (This command generates a 10x10x10 lattice\n # simple cubic lattice of regularly spaced\n # water molecules pointing the same direction.)\n # In this example array_size = [10,10,10]\n # and\n # class_suffix = 'transcm(0,0,0)'\n # and\n # array_suffixes = ['trans(0,0,4)',\n # 'trans(0,4,0).rot(45,0,0,1)',\n # 'trans(4,0,0)']\n # Note that tree ignores the \"trans()\"\n # commands, it stores them so that inherited\n # classes can attempt to process them.\n\n D = len(array_size)\n if D > 0:\n\n i_elem = 0 # (used to look up selection_list[])\n if len(num_by_type) > 0:\n selection_list = []\n for i in range(0, len(num_by_type)):\n selection_list += [i] * num_by_type[i]\n random.shuffle(selection_list)\n\n num_elements = 1\n for d in range(0, D):\n num_elements *= array_size[d]\n err_msg_str = str(array_size[0])\n for d in range(1, D):\n err_msg_str += '*' + str(array_size[d])\n if num_elements != len(selection_list):\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near or before ' + lex.error_leader() + '\\n'\n ' The sum of the numbers in the \\\"new random([],[])\\\" command (' + str(\n len(selection_list)) + ')\\n'\n ' does not equal the number of elements in the array (' + err_msg_str + ')\\n')\n\n digits = [0 for d in range(0, D)]\n table_filled = False\n pushed_commands = []\n while (not table_filled):\n instance_name = array_name_tkns[0]\n for d in range(0, D):\n i = digits[d]\n instance_name += str(i +\n array_name_offsets[d]) +\\\n array_name_tkns[d + 1]\n\n # Does the user want us to select\n # a class at random?\n if len(class_names) > 0:\n\n if len(num_by_type) > 0:\n class_name_str = class_names[\n selection_list[i_elem]]\n else:\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(\n class_name_str, 
lex)\n\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(instance_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n # Now go to the next entry in the table.\n # The indices of this table are similar to\n # a D-digit integer. We increment this d-digit\n # number now.\n d_carry = D - 1\n while True:\n digits[d_carry] += 1\n if digits[d_carry] >= array_size[d_carry]:\n digits[d_carry] = 0\n if array_suffixes[d_carry] != '':\n for i in range(0, array_size[d_carry] - 1):\n partner = pushed_commands.pop()\n command = PopRightCommand(partner,\n srcloc_final)\n self.instance_commands.append(\n command)\n d_carry -= 1\n else:\n if array_suffixes[d_carry] != '':\n command = PushRightCommand(array_suffixes[d_carry].lstrip('.'),\n array_srclocs[d_carry])\n pushed_commands.append(command)\n self.instance_commands.append(\n command)\n break\n if d_carry < 0:\n table_filled = True\n break\n\n # (used to look up selection_list[])\n i_elem += 1\n pass\n\n else:\n if len(class_names) > 0:\n assert(len(num_by_type) == 0)\n # if len(num_by_type) > 0:\n # class_name_str = class_names[selection_list[i_elem]]\n # else:\n # class_name_str = RandomSelect(class_names,\n # weights)\n class_name_str = RandomSelect(class_names,\n weights)\n class_name, class_suffix, class_suffix_srcloc = \\\n self._ProcessClassName(class_name_str, lex)\n if class_suffix != '':\n class_suffix_command = \\\n PushRightCommand(class_suffix.lstrip('.'),\n class_suffix_srcloc)\n self.instance_commands.append(\n class_suffix_command)\n command = \\\n InstantiateCommand(base_name,\n ClassReference(class_name,\n base_srcloc),\n base_srcloc)\n self.instance_commands.append(command)\n\n if class_suffix != '':\n command = \\\n PopRightCommand(class_suffix_command,\n srcloc_final)\n self.instance_commands.append(command)\n\n else:\n\n # Now check for commands using this syntax:\n #\n # \"MolNew = MolOld.rot(45,1,0,0).scale(100.0)\"\n # /|\\ /|\\ `-----------.------------'\n # | | |\n # child_name parent_name optional suffix\n\n child_name = object_name\n parent_name_str = next_symbol\n\n child = StaticObj(child_name, self)\n\n parent_name, suffix, suffix_srcloc = \\\n self._ProcessClassName(parent_name_str, lex)\n\n child.class_parents.append(StrToNode(parent_name,\n self,\n lex.GetSrcLoc()))\n\n if suffix != '':\n # Assume the command is a StackableCommand. 
(This\n # way it will enclose the commands of the parents.)\n # Stackable commands come in (Push...Pop) pairs.\n push_command = PushLeftCommand(suffix,\n suffix_srcloc)\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc)\n push_mod_command = ModCommand(push_command, './')\n pop_mod_command = ModCommand(pop_command, './')\n child.instance_commands_push.append(\n push_mod_command)\n child.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n #sys.stderr.write('child.instance_commands_push = '+str(child.instance_commands_push)+'\\n')\n\n #sys.stderr.write('child.instance_commands_pop = '+str(child.instance_commands_pop)+'\\n')\n\n # Check to see if this class has already been defined.\n if self.children.get(child_name) is not None:\n if self.children[i].IsDeleted():\n del self.children[child_name]\n else:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Error near ' + lex.error_leader() + '\\n'\n ' The name \\\"' + child_name + '\\\" is already in use.')\n\n self.children[child_name] = child\n\n else:\n\n # Otherwise hopefully this is a post-instance command\n # (a command applied to a class which has been instantiated)\n # In that case, the object_name would be followed by\n # a dot and a function-call containing a '(' paren (which\n # would have ended up stored in the next_symbol variable).\n\n open_paren_encountered = False\n if (next_symbol == '('):\n open_paren_encountered = True\n # put '(' back in the stream\n lex.push_token(next_symbol)\n\n i_dot = object_name.rfind('.')\n i_slash = object_name.rfind('/')\n dot_encountered = ((i_dot != -1) and\n ((i_slash == -1) or (i_slash < i_dot)))\n\n if (open_paren_encountered and dot_encountered and\n (object_name[:1] != '[')):\n\n obj_descr_str, suffix, suffix_srcloc = \\\n self._ExtractSuffix(object_name, lex)\n\n path_tokens = obj_descr_str.split('/')\n\n i_last_ptkn, staticobj = FollowPath(path_tokens,\n self,\n lex.GetSrcLoc())\n instobj_descr_str = './' + \\\n '/'.join(path_tokens[i_last_ptkn:])\n\n # I still support the \"object_name.delete()\" syntax for\n # backwards compatibility. (However newer input files\n # use this equivalent syntax: \"delete object_name\")\n if suffix == 'delete()':\n delete_command = DeleteCommand(suffix_srcloc)\n mod_command = ModCommand(delete_command,\n instobj_descr_str)\n staticobj.instance_commands.append(mod_command)\n else:\n push_command = PushLeftCommand(suffix,\n suffix_srcloc,\n '.')\n pop_command = PopLeftCommand(push_command,\n suffix_srcloc,\n '.')\n push_mod_command = ModCommand(push_command,\n instobj_descr_str)\n pop_mod_command = ModCommand(pop_command,\n instobj_descr_str)\n if instobj_descr_str != './':\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands\\n')\n staticobj.instance_commands.append(\n push_mod_command)\n staticobj.instance_commands.append(\n pop_mod_command)\n else:\n # sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+\n # staticobj.name+'.instance_commands_push\\n')\n # Question: Should I make these PushRight commands and\n # append them in the opposite order?\n # If so I also have to worry about the case\n # above.\n staticobj.instance_commands_push.append(\n push_mod_command)\n staticobj.instance_commands_pop.insert(\n 0, pop_mod_command)\n\n else:\n # Otherwise, the cmd_token is not any of these:\n # \"write\", \"write_once\", \"replace\",\n # \"create_var\", \"create_static_var\",\n # \"delete\", or \"category\".\n # ... 
and it is ALSO not any of these:\n # the name of a class (StaticObj), or\n # the name of an instance (InstanceObj)\n # followed by either a '.' or \"= new\"\n #\n # In that case, it is a syntax error:\n raise InputError('Error(' + g_module_name + '.StaticObj.Parse()):\\n'\n ' Syntax error at or before ' + lex.error_leader() + '\\n'\n ' \\\"' + object_name + ' ' + next_symbol + '\\\".')\n\n # Keep track of the location in the user's input files\n # where the definition of this object ends.\n self.srcloc_end = lex.GetSrcLoc()\n\n # Finally, if there are any remaining user_push_left_commands or\n # user_push_right_commands, deal with them (by popping them).\n for push_command in user_push_left_commands:\n push_command = user_push_left_commands.pop()\n pop_command = PopLeftCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)\n\n for push_command in user_push_right_commands:\n push_command = user_push_right_commands.pop()\n pop_command = PopRightCommand(push_command,\n self.srcloc_end)\n self.instance_commands.append(pop_command)", "def generate(env):\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n env[\"LEXFLAGS\"] = SCons.Util.CLVar(\"\")\n\n if sys.platform == 'win32':\n # ignore the return - we do not need the full path here\n _ = get_lex_path(env, append_paths=True)\n env[\"LEX\"] = env.Detect(BINS)\n if not env.get(\"LEXUNISTD\"):\n env[\"LEXUNISTD\"] = SCons.Util.CLVar(\"\")\n env[\"LEXCOM\"] = \"$LEX $LEXUNISTD $LEXFLAGS -t $SOURCES > $TARGET\"\n else:\n env[\"LEX\"] = env.Detect(BINS)\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS -t $SOURCES > $TARGET\"", "def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n return [chr(ord(start) + i) + str(n)\n for i in range(0, 3)\n for n in range(1, 10)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n (FOR_ALL_SYMBOL, 2),\n (EXISTS_SYMBOL, 2),\n (RELATION_SYMBOL.format(1), 2), # unary-relation\n (RELATION_SYMBOL.format(2), 3), # binary-relation\n ]),\n predicates=make_symbols('p'),\n constants=make_symbols('a'),\n variables=make_symbols('x'),\n )", "def prepare_context(grammar=None, lexer=None, lkt_file=None,\n warning_set=default_warning_set,\n symbol_canonicalizer=None, show_property_logging=False,\n types_from_lkt=False, lkt_semantic_checks=False,\n case_insensitive: bool = False,\n version: Optional[str] = None,\n build_date: Optional[str] = None,\n standalone: bool = False,\n property_exceptions: Set[str] = set()):\n\n # Have a clean build directory\n if P.exists('build'):\n shutil.rmtree('build')\n os.mkdir('build')\n\n # Try to emit code\n ctx = CompileCtx(lang_name='Foo', short_name='foo', lexer=lexer,\n grammar=grammar,\n symbol_canonicalizer=symbol_canonicalizer,\n show_property_logging=show_property_logging,\n lkt_file=lkt_file,\n types_from_lkt=types_from_lkt,\n lkt_semantic_checks=lkt_semantic_checks,\n case_insensitive=case_insensitive,\n version=version,\n build_date=build_date,\n standalone=standalone,\n 
property_exceptions=property_exceptions)\n ctx.warnings = warning_set\n ctx.pretty_print = pretty_print\n\n return ctx", "def parse(self, tokens):\n self.logger.debug(\"Parsing some nice C code!\")\n self.init_lexer(tokens)\n self.typedefs = set()\n cu = self.parse_translation_unit()\n self.logger.info(\"Parsing finished\")\n return cu", "def op_compiler(src: str, style: dict=wsstyle.STL, strict=False):\n ins = []\n ins_buff = None\n for token in Lexer(Reader(src, style), strict):\n if hasattr(token, 'ARGS'):\n if token.ARGS == 0:\n # dealing operators\n ins.append(token())\n else:\n ins_buff = token\n if isinstance(token, WSLiteral):\n ins.append(ins_buff(token))\n ins_buff = None\n return ins", "def minimal_grammar():\n return _make_grammar('target = letter', {})", "def generate_tokens(readline):\r\n lnum = parenlev = continued = 0\r\n namechars, numchars = string.ascii_letters + '_', '0123456789'\r\n contstr, needcont = '', 0\r\n contline = None\r\n indents = [0]\r\n\r\n while 1: # loop over lines in stream\r\n try:\r\n line = readline()\r\n except StopIteration:\r\n line = ''\r\n lnum = lnum + 1\r\n pos, max = 0, len(line)\r\n\r\n if contstr: # continued string\r\n if not line:\r\n raise TokenError(\"EOF in multi-line string\", strstart)\r\n endmatch = endprog.match(line)\r\n if endmatch:\r\n pos = end = endmatch.end(0)\r\n yield (STRING, contstr + line[:end],\r\n strstart, (lnum, end), contline + line)\r\n contstr, needcont = '', 0\r\n contline = None\r\n elif needcont and line[-2:] != '\\\\\\n' and line[-3:] != '\\\\\\r\\n':\r\n yield (ERRORTOKEN, contstr + line,\r\n strstart, (lnum, len(line)), contline)\r\n contstr = ''\r\n contline = None\r\n continue\r\n else:\r\n contstr = contstr + line\r\n contline = contline + line\r\n continue\r\n\r\n elif parenlev == 0 and not continued: # new statement\r\n if not line: break\r\n column = 0\r\n while pos < max: # measure leading whitespace\r\n if line[pos] == ' ': column = column + 1\r\n elif line[pos] == '\\t': column = (column//tabsize + 1)*tabsize\r\n elif line[pos] == '\\f': column = 0\r\n else: break\r\n pos = pos + 1\r\n if pos == max: break\r\n\r\n if line[pos] in '#\\r\\n': # skip comments or blank lines\r\n if line[pos] == '#':\r\n comment_token = line[pos:].rstrip('\\r\\n')\r\n nl_pos = pos + len(comment_token)\r\n yield (COMMENT, comment_token,\r\n (lnum, pos), (lnum, pos + len(comment_token)), line)\r\n yield (NL, line[nl_pos:],\r\n (lnum, nl_pos), (lnum, len(line)), line)\r\n else:\r\n yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],\r\n (lnum, pos), (lnum, len(line)), line)\r\n continue\r\n\r\n if column > indents[-1]: # count indents or dedents\r\n indents.append(column)\r\n yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)\r\n while column < indents[-1]:\r\n if column not in indents:\r\n raise IndentationError(\r\n \"unindent does not match any outer indentation level\",\r\n (\"<tokenize>\", lnum, pos, line))\r\n indents = indents[:-1]\r\n yield (DEDENT, '', (lnum, pos), (lnum, pos), line)\r\n\r\n else: # continued statement\r\n if not line:\r\n raise TokenError(\"EOF in multi-line statement\", (lnum, 0))\r\n continued = 0\r\n\r\n while pos < max:\r\n pseudomatch = pseudoprog.match(line, pos)\r\n if pseudomatch: # scan for tokens\r\n start, end = pseudomatch.span(1)\r\n spos, epos, pos = (lnum, start), (lnum, end), end\r\n token, initial = line[start:end], line[start]\r\n\r\n if initial in numchars or \\\r\n (initial == '.' 
and token != '.'): # ordinary number\r\n yield (NUMBER, token, spos, epos, line)\r\n elif initial in '\\r\\n':\r\n newline = NEWLINE\r\n if parenlev > 0:\r\n newline = NL\r\n yield (newline, token, spos, epos, line)\r\n elif initial == '#':\r\n assert not token.endswith(\"\\n\")\r\n yield (COMMENT, token, spos, epos, line)\r\n elif token in triple_quoted:\r\n endprog = endprogs[token]\r\n endmatch = endprog.match(line, pos)\r\n if endmatch: # all on one line\r\n pos = endmatch.end(0)\r\n token = line[start:pos]\r\n yield (STRING, token, spos, (lnum, pos), line)\r\n else:\r\n strstart = (lnum, start) # multiple lines\r\n contstr = line[start:]\r\n contline = line\r\n break\r\n elif initial in single_quoted or \\\r\n token[:2] in single_quoted or \\\r\n token[:3] in single_quoted:\r\n if token[-1] == '\\n': # continued string\r\n strstart = (lnum, start)\r\n endprog = (endprogs[initial] or endprogs[token[1]] or\r\n endprogs[token[2]])\r\n contstr, needcont = line[start:], 1\r\n contline = line\r\n break\r\n else: # ordinary string\r\n yield (STRING, token, spos, epos, line)\r\n elif initial in namechars: # ordinary name\r\n yield (NAME, token, spos, epos, line)\r\n elif initial == '\\\\': # continued stmt\r\n # This yield is new; needed for better idempotency:\r\n yield (NL, token, spos, (lnum, pos), line)\r\n continued = 1\r\n else:\r\n if initial in '([{': parenlev = parenlev + 1\r\n elif initial in ')]}': parenlev = parenlev - 1\r\n yield (OP, token, spos, epos, line)\r\n else:\r\n yield (ERRORTOKEN, line[pos],\r\n (lnum, pos), (lnum, pos+1), line)\r\n pos = pos + 1\r\n\r\n for indent in indents[1:]: # pop remaining indent levels\r\n yield (DEDENT, '', (lnum, 0), (lnum, 0), '')\r\n yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')", "def augment_grammar(g):\n new_start = g.start + \"'\"\n old_start = g.start\n g.start = new_start\n g.nonterm.append(new_start)\n new_rule = grammar.Rule([new_start, [old_start]])\n g.rules.append(new_rule)", "def compileTerm(self):\n self.current_compile = \"compileTerm\"\n # integerConstant\n if self.currentTokenTagEquals(\"integerConstant\"):\n self.vm_writer.writePush(\"constant\", self.eatTag(\"integerConstant\"))\n # stringConstant\n elif self.currentTokenTagEquals(\"stringConstant\"):\n string = self.eatTag(\"stringConstant\")\n self.vm_writer.writePush(\"constant\", len(string)) \n self.vm_writer.writeCall(\"String.new\", 1)\n for char in string:\n self.vm_writer.writePush(\"constant\", ord(char))\n self.vm_writer.writeCall(\"String.appendChar\", 2)\n # This, True, False, Null\n elif self.currentTokenTagEquals(\"keyword\"):\n keyword = self.eatTag(\"keyword\")\n if keyword in \"this\":\n self.vm_writer.writePush(\"pointer\", 0)\n elif keyword in \"true\":\n self.vm_writer.writePush(\"constant\", 0)\n self.vm_writer.writeArithmetic(\"not\")\n elif keyword in [\"false\", \"null\"]:\n self.vm_writer.writePush(\"constant\", 0)\n else:\n print(f\"\\\"{keyword}\\\" keyword not handled\")\n sys.exit(1)\n # ( expression )\n elif self.currentTokenEquals(\"(\"):\n self.eat(\"(\")\n self.compileExpression()\n self.eat(\")\")\n # unaryOp term\n elif self.currentTokenEquals([\"~\", \"-\"]):\n unary_op = self.eat([\"~\", \"-\"])\n self.compileTerm()\n if unary_op in \"~\":\n self.vm_writer.writeArithmetic(\"not\")\n else:\n self.vm_writer.writeArithmetic(\"neg\")\n else:\n identifier = self.eatTag(\"identifier\")\n\n # varName [ expression ]\n if self.currentTokenEquals(\"[\"):\n self.vm_writer.writePush(self.symbol_table.kindOf(identifier), 
self.symbol_table.indexOf(identifier))\n self.eat(\"[\")\n self.compileExpression()\n self.eat(\"]\")\n self.vm_writer.writeArithmetic(\"add\")\n self.vm_writer.writePop(\"pointer\", 1)\n self.vm_writer.writePush(\"that\", 0)\n # function call\n elif self.currentTokenEquals(\"(\"):\n self.eat(\"(\")\n arguments = self.compileExpressionList()\n self.eat(\")\")\n self.vm_writer.writePush(\"pointer\", 0)\n self.writeCall(f\"{self.class_name}.{identifier}\", arguments + 1)\n # method call\n elif self.currentTokenEquals(\".\"):\n arguments = 0\n self.eat(\".\")\n method_name = self.eatTag(\"identifier\")\n if self.symbol_table.exists(identifier):\n symbol_segment = self.symbol_table.kindOf(identifier)\n symbol_index = self.symbol_table.indexOf(identifier)\n identifier = self.symbol_table.typeOf(identifier)\n self.vm_writer.writePush(symbol_segment, symbol_index)\n arguments = 1\n self.eat(\"(\")\n arguments = self.compileExpressionList() + arguments\n self.eat(\")\")\n self.vm_writer.writeCall(f\"{identifier}.{method_name}\", arguments)\n # var\n elif self.symbol_table.exists(identifier):\n self.vm_writer.writePush(self.symbol_table.kindOf(identifier), self.symbol_table.indexOf(identifier))\n # oops\n else:\n print(self.symbol_table.class_table)\n print(self.symbol_table.sub_table)\n print(f\"\\\"{identifier}\\\" identifier not handled\")\n sys.exit(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set an explicit indentation level for a block scalar.
def set_block_scalar_indent(token_class):
    def callback(lexer, match, context):
        text = match.group()
        context.block_scalar_indent = None
        if not text:
            return
        increment = match.group(1)
        if increment:
            current_indent = max(context.indent, 0)
            increment = int(increment)
            context.block_scalar_indent = current_indent + increment
        if text:
            yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def indent_level(self, indent_level):\n\n self.container['indent_level'] = indent_level", "def indent(self, lvl=1):\n self.current_level += lvl\n assert self.current_level >= 0, \"Level of indentation cannot become negative\"\"\"", "def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent", "def indentTo(level: int):\n OPTIONS.indent = max(0, level)", "def set_level(self, elem):\n tag_level = int(elem.tag[-1])\n if not self.is_base_level_adjusted:\n self.base_level = self.base_level + 1 - tag_level\n self.is_base_level_adjusted = True\n level = tag_level + self.base_level\n if level > 6:\n level = 6\n elem.tag = \"h%d\" % level", "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)", "def _indent(self, level: int) -> Text:\n\n return self.indent * level", "def indent(self, levels=1):\n self._indentation_level += levels\n return self", "def setIndent(self, *args):\r\n return _osgDB.Output_setIndent(self, *args)", "def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3", "def SetLevel(self, level):\n self.level = level", "def incrementIndent(self, levels: 'int const'=1) -> \"void\":\n return _coin.SoOutput_incrementIndent(self, levels)", "def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"", "def elementtree_indent(elem, level=...):\n ...", "def _indent(self):\n if self._debug:\n self._debug += 1", "def incrementIndent(self, levels = 1):\n return _coin.SoOutput_incrementIndent(self, levels)", "def indentation():\n try:\n indent()\n yield\n finally:\n unindent()", "def indent_level(self):\n return self.container['indent_level']", "def incIndent():\n if _rootLogger != None:\n curLevel = _rootLogger._logIndentLevel\n _rootLogger.incIndent()\n return curLevel\n else:\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process indentation spaces in a block scalar.
def parse_block_scalar_indent(token_class):
    def callback(lexer, match, context):
        text = match.group()
        if context.block_scalar_indent is None:
            if len(text) <= max(context.indent, 0):
                context.stack.pop()
                context.stack.pop()
                return
            context.block_scalar_indent = len(text)
        else:
            if len(text) < context.block_scalar_indent:
                context.stack.pop()
                context.stack.pop()
                return
        if text:
            yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))", "def __calculate_for_container_blocks(\n parser_state: ParserState,\n grab_bag: ContainerGrabBag,\n ) -> None:\n grab_bag.current_container_blocks = [\n ind for ind in parser_state.token_stack if ind.is_list\n ]\n grab_bag.block_quote_data = BlockQuoteData(\n 0\n if grab_bag.initial_block_quote_count is None\n else grab_bag.initial_block_quote_count,\n parser_state.count_of_block_quotes_on_stack(),\n )\n\n ContainerBlockProcessor.__calculate_adjusted_whitespace(\n parser_state,\n grab_bag,\n )", "def indentation():\n try:\n indent()\n yield\n finally:\n unindent()", "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def __calculate_adjusted_whitespace(\n parser_state: ParserState,\n grab_bag: ContainerGrabBag,\n ) -> None:\n\n grab_bag.adj_ws = grab_bag.extracted_whitespace\n assert grab_bag.adj_ws is not None\n\n last_block_stack_index = parser_state.find_last_list_block_on_stack()\n if last_block_stack_index <= 0:\n assert not grab_bag.current_container_blocks\n POGGER.debug(\"PLFCB>>No Started lists\")\n if grab_bag.adjusted_block_index is None:\n POGGER.debug(\"PLFCB>>No Started Block Quote\")\n else:\n POGGER.debug(\"PLFCB>>Started Block Quote\")\n assert grab_bag.extracted_whitespace is not None\n grab_bag.adj_ws = grab_bag.extracted_whitespace[\n grab_bag.adjusted_block_index :\n ]\n else:\n assert grab_bag.current_container_blocks\n POGGER.debug(\n \"PLFCB>>Started list-last stack>>$\",\n parser_state.token_stack,\n )\n POGGER.debug(\n \"PLFCB>>Started list-last stack>>$\",\n parser_state.token_stack[last_block_stack_index],\n )\n\n (\n token_index,\n found_block_quote_token,\n ) = ContainerBlockProcessor.__look_for_any_list_start(parser_state)\n\n assert grab_bag.adj_ws is not None\n ContainerBlockProcessor.__calculate_adjusted_whitespace_kludge(\n parser_state,\n token_index,\n found_block_quote_token,\n grab_bag,\n )\n assert grab_bag.adj_ws is not None", "def _indent(self):\n if self._debug:\n self._debug += 1", "def _compute_chunk_indentation(self, i1, i2, j1, j2):\n # We'll be going through all the opcodes in this equals chunk and\n # grouping with adjacent opcodes based on whether they have\n # indentation changes or not. 
This allows us to keep the lines with\n # indentation changes from being collapsed in the diff viewer.\n indentation_changes = {}\n prev_has_indent = False\n prev_start_i = i1\n prev_start_j = j1\n\n a = self.differ.a\n b = self.differ.b\n\n for i, j in zip(range(i1, i2), range(j1, j2)):\n old_line = a[i]\n new_line = b[j]\n new_indentation_changes = {}\n\n indent_info = self._compute_line_indentation(old_line, new_line)\n has_indent = indent_info is not None\n\n if has_indent:\n key = '%d-%d' % (i + 1, j + 1)\n new_indentation_changes[key] = indent_info\n\n if has_indent != prev_has_indent:\n if prev_start_i != i or prev_start_j != j:\n # Yield the previous group.\n yield prev_start_i, i, prev_start_j, j, indentation_changes\n\n # We have a new group. Set it up, starting with the current\n # calculated state.\n prev_start_i = i\n prev_start_j = j\n prev_has_indent = has_indent\n indentation_changes = new_indentation_changes\n elif has_indent:\n indentation_changes.update(new_indentation_changes)\n\n # Yield the last group, if we haven't already yielded it.\n if prev_start_i != i2 or prev_start_j != j2:\n yield prev_start_i, i2, prev_start_j, j2, indentation_changes", "def get_block_indent(text):\n lines = text.split('\\n')\n cnt = []\n for i in lines:\n if i != '' and not i.isspace():\n cnt.append(get_indent(i))\n return min(cnt)", "def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)", "def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)", "def test_multipleIndents(self):\n self.parser.push(\"js/\")\n\n def push():\n indent = self.parser._indent\n badLine = \"{indent}{indent}x: a\".format(indent=indent)\n self.parser.push(badLine)\n\n self.assertRaises(config.IndentationError, push)", "def format_indentation(string):\n return string.replace(\" \", \"&nbsp;&nbsp;&nbsp;&nbsp;\")", "def getIndentation(self, line):\n\t\n\t\tnonSpace = re.search('\\S', line)\n\t\n\t\tif nonSpace is None:\n\t\t\treturn 0\n\t\t\t\n\t\telse:\n\t\t\tif re.match('^\\t*\\S', line):\n\t\t\t\treturn nonSpace.start()\n\t\t\t\t\n\t\t\telif re.match('^\\ *\\S', line):\n\t\t\t\treturn nonSpace.start() / 4", "def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3", "def _check_valid_indentation(self, lineno: int, line: str, left_stripped: str) -> None:\n if linelen := len(line):\n indent = linelen - len(left_stripped)\n expected_ind = 0 if line.startswith(('.', '+', '-', '$')) else self.indent\n if indent != expected_ind:\n diag = self.diags.indentation\n loc = self.make_source_range(' ' * indent, line, lineno)\n mess = f'Invalid indentation ({indent}), all regular (non-empty, non-parameter, non-seealso) text must be indented to {self.indent} columns'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, loc, patch=Patch(loc, ' ' * expected_ind)\n )\n return", "def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n 
token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)", "def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree", "def test_block_comment_whitespace_signature(self):\n \n inp = '2_3_block_comment.txt'\n self.run_single_file_case(inp)", "def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process indentation spaces in a plain scalar.
def parse_plain_scalar_indent(token_class):
    def callback(lexer, match, context):
        text = match.group()
        if len(text) <= context.indent:
            context.stack.pop()
            context.stack.pop()
            return
        if text:
            yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def get_spaces(self):\n pass", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))", "def remove_indent(self) -> None:\n w = abs(self.tab_width)\n if self.result:\n s = self.result[-1]\n if s.isspace():\n self.result.pop()\n s = s.replace('\\t', ' ' * w)\n if s.startswith('\\n'):\n s2 = s[1:]\n self.result.append('\\n' + s2[: -w])\n else:\n self.result.append(s[: -w])", "def _indent(self, level: int) -> Text:\n\n return self.indent * level", "def get_trim_whitespace(current_value, new_value):\n return get_convert_paragraphs(current_value, new_value)", "def _indent(self):\n if self._debug:\n self._debug += 1", "def test_Indent_zero():\n assert fmt.Indent(0, \"abc\\nd\") == \"abc\\nd\"", "def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)", "def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)", "def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)", "def indent(self):\r\n return _osgDB.Output_indent(self)", "def dumped (text, level, indent=2):\n return indented (\"{\\n%s\\n}\" % indented (text, level+1, indent) or \"None\", level, indent) + \"\\n\"", "def _consume_whitespace(self):\n try:\n while self.s[self.idx] in \" \\t\\n\\r\":\n self.idx += 1\n except IndexError:\n pass", "def getIndentation(self, line):\n\t\n\t\tnonSpace = re.search('\\S', line)\n\t\n\t\tif nonSpace is None:\n\t\t\treturn 0\n\t\t\t\n\t\telse:\n\t\t\tif re.match('^\\t*\\S', line):\n\t\t\t\treturn nonSpace.start()\n\t\t\t\t\n\t\t\telif re.match('^\\ *\\S', line):\n\t\t\t\treturn nonSpace.start() / 4", "def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0):\n if style.Get('USE_TABS'):\n if newlines_before > 0:\n indent_before = '\\t' * indent_level + _TabbedContinuationAlignPadding(\n spaces, style.Get('CONTINUATION_ALIGN_STYLE'),\n style.Get('INDENT_WIDTH'))\n else:\n indent_before = '\\t' * indent_level + ' ' * spaces\n 
else:\n indent_before = (' ' * indent_level * style.Get('INDENT_WIDTH') +\n ' ' * spaces)\n\n if self.is_comment:\n comment_lines = [s.lstrip() for s in self.value.splitlines()]\n self.value = ('\\n' + indent_before).join(comment_lines)\n\n # Update our own value since we are changing node value\n self.value = self.value\n\n if not self.whitespace_prefix:\n self.whitespace_prefix = ('\\n' * (self.newlines or newlines_before) +\n indent_before)\n else:\n self.whitespace_prefix += indent_before", "def test_Indent_two():\n assert fmt.Indent(2, \"abc\\nd\") == \" abc\\n d\"", "def word_spacing(computer, name, value):\r\n if value == 'normal':\r\n return 0\r\n else:\r\n return length(computer, name, value, pixels_only=True)", "def leveled_indent(lvl: int = 0, spaces_per_indent: int = 3) -> str:\n return (\" \" * spaces_per_indent) * lvl", "def prologue(_indent):\n return \"\"", "def whitespace(self, value: Whitespace) -> 'Tailwind':\n self.element.classes('whitespace-' + value)\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates whether the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position.
def scan(self, pattern):
    if self.eos:
        raise EndOfText()
    if pattern not in self._re_cache:
        self._re_cache[pattern] = re.compile(pattern, self.flags)
    self.last = self.match
    m = self._re_cache[pattern].match(self.data, self.pos)
    if m is None:
        return False
    self.start_pos = m.start()
    self.pos = m.end()
    self.match = m.group()
    return True
[ "def match(self, text, pos=0):\n error = ParseError(text)\n node = self.match_core(text, pos, defaultdict(dict), error)\n if node is None:\n raise error\n return node", "def update_matches(self, begin, end):\n if self.entry != None:\n self.__get_matches(self.entry.get_text(), begin, end)", "def find_changed(self, find_text):\n # Make the search string global. \n self.find_text = find_text\n \n if len(find_text) < 2:\n return\n \n self.match_total = 0\n self.match_position = 0\n \n # Move cursor to Start. - Test with KeepAnchor\n self.textedit.moveCursor(QtGui.QTextCursor.MoveOperation.Start, \n QtGui.QTextCursor.MoveMode.MoveAnchor) \n\n # Count the total number of matches.\n #position_list = [] # For testing the matching. \n while True:\n\n if self.perform_search(find_text, self.case_sensitive):\n self.match_total += 1\n #position_list.append(self.textedit.textCursor().position()) \n else:\n # Reached last match at bottom of the text\n break\n\n # Return to start / top of text.\n self.textedit.moveCursor(QtGui.QTextCursor.MoveOperation.Start, \n QtGui.QTextCursor.MoveMode.MoveAnchor) \n\n # Update the matches label. match_position is 0\n self.sb_label.setText(\"{} of {} matches\".format(self.match_position, \n self.match_total)) \n #print(\"Position list:\", position_list) # Position list: [160, 294]\n\n # Find first match, going forward from start / top of text. \n if self.perform_search(find_text, self.case_sensitive):\n self.match_position += 1 \n \n # Update the matches label. match_position should be 1\n self.sb_label.setText(\"{} of {} matches\".format(self.match_position, \n self.match_total))", "def re_search(self, pattern): # noqa\n # Converting pattern to regex\n pattern = re.compile(pattern)\n if pattern.search(self.data):\n return True\n else:\n return False", "def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)", "def _cfa_find_next_match(self, p: Position) -> bool:\n # Called only from unit tests.\n table = []\n if self.search_headline:\n table.append(p.h)\n if self.search_body:\n table.append(p.b)\n for s in table:\n self.reverse = False\n pos, newpos = self.inner_search_helper(s, 0, len(s), self.find_text)\n if pos != -1:\n return True\n return False", "def indexOf(self, data, pattern, caseSensitive, from_pos, to_pos):\n # type: (bytearray, bytearray, bool, int, int) -> int\n # Original signature had \"from\" and \"to\", from is a reserved keyword in python", "def getMatch(self, text, pattern):\n\n return re.search(pattern,text,re.MULTILINE + re.DOTALL)", "def searchLine(self, pattern, count=1, start_from=0, match=0):\n count = self.total_lines if count == 'max' else count\n if start_from > 0:\n self.goToLine(start_from)\n else:\n self.goToStart()\n isdone = False\n eof = False\n output = []\n match_pat = re.compile(pattern)\n lineno = self.line_no\n while not eof and not isdone:\n prev = self.file_obj.tell()\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n lineno += 1\n self.offset = self.file_obj.tell()\n if match:\n ismatched = match_pat.match(line)\n else:\n ismatched = match_pat.search(line)\n if ismatched:\n count -= 1\n output.append((line, 
lineno - 1, prev))\n if count == 0:\n isdone = True\n\n return output", "def is_match_pattern(self, pattern):\n if len(pattern.pattern) != len(self.pattern):\n return False\n else:\n return self.is_match(range(len(pattern.pattern)), pattern.pattern)", "def matchPos(self):\n\t\tif self.expressionStateNode: \n\t\t\tself.expressionStateNode.setPos(self.pos)", "def get_pattern_position(pattern,in_text):\n if in_text.find(pattern) == -1 :\n return in_text.find(pattern)\n else:\n return in_text.find(pattern)+1", "def patMatch(seq, pat, notDegPos=None):\n assert(len(seq)==len(pat))\n for x in range(0, len(pat)):\n patChar = pat[x]\n nuc = seq[x]\n\n assert(patChar in \"MKYRACTGNWSDVB\")\n assert(nuc in \"MKYRACTGNWSDX\")\n\n if notDegPos!=None and x==notDegPos and patChar!=nuc:\n #print x, seq, pat, notDegPos, patChar, nuc, \"<br>\"\n return False\n\n if nuc==\"X\":\n return False\n if patChar==\"N\":\n continue\n if patChar==\"D\" and nuc in [\"AGT\"]:\n continue\n if patChar==\"B\" and nuc in [\"CGT\"]:\n continue\n if patChar==\"V\" and nuc in [\"ACG\"]:\n continue\n if patChar==\"W\" and nuc in [\"A\", \"T\"]:\n continue\n if patChar==\"S\" and nuc in [\"G\", \"C\"]:\n continue\n if patChar==\"M\" and nuc in [\"A\", \"C\"]:\n continue\n if patChar==\"K\" and nuc in [\"T\", \"G\"]:\n continue\n if patChar==\"R\" and nuc in [\"A\", \"G\"]:\n continue\n if patChar==\"Y\" and nuc in [\"C\", \"T\"]:\n continue\n if patChar!=nuc:\n return False\n return True", "def matches_seq(self, seq):\n # check matches to start of sequence\n for start_len in range(1, min(len(seq) + 1, self.motif_len)):\n try:\n start_pat, start_mod_pos = self._partial_pats[\n 'start'][start_len]\n except KeyError:\n continue\n if start_pat.match(seq[:start_len]):\n return True\n\n # check central sequence overlaps\n if len(seq) < self.motif_len:\n for short_pat, mod_pos in self._partial_pats['short'][len(seq)]:\n if short_pat.match(seq):\n return True\n else:\n if self.motif_pat.search(seq):\n return True\n\n # check end of seq matches\n for end_len in range(1, min(len(seq) + 1, self.motif_len)):\n try:\n end_pat, end_mod_pos = self._partial_pats['end'][end_len]\n except KeyError:\n continue\n if end_pat.match(seq[-end_len:]):\n return True\n\n return False", "def search_cpp (self, pattern, body=None,\r\n headers=None, include_dirs=None, lang=\"c\"):\r\n\r\n self._check_compiler()\r\n (src, out) = self._preprocess(body, headers, include_dirs, lang)\r\n\r\n if type(pattern) is StringType:\r\n pattern = re.compile(pattern)\r\n\r\n file = open(out)\r\n match = 0\r\n while 1:\r\n line = file.readline()\r\n if line == '':\r\n break\r\n if pattern.search(line):\r\n match = 1\r\n break\r\n\r\n file.close()\r\n self._clean()\r\n return match", "def search(self):\n self.search_text.tag_remove(\"match\", \"0.0\", tk.END)\n\n regex = self.regex_entry.get()\n search_text = self.search_text.get(\"0.0\", tk.END)\n\n matches = re.finditer(regex, search_text)\n for match in matches:\n index1 = self.search_text.index(\"0.0 + \" + str(match.span()[0]) + \" chars\")\n index2 = self.search_text.index(\"0.0 + \" + str(match.span()[1]) + \" chars\")\n self.search_text.tag_add(\"match\", index1, index2)", "def find_next_match(self, p: Position) -> tuple[Position, int, int]:\n if not self.search_headline and not self.search_body: # pragma: no cover\n return None, None, None\n if not self.find_text: # pragma: no cover\n return None, None, None\n attempts = 0\n u = self.c.undoer\n if self.pattern_match:\n ok = self.compile_pattern()\n if not ok:\n 
return None, None, None\n while p:\n pos, newpos = self._fnm_search(p)\n if pos is not None:\n # Success.\n if self.mark_finds and not p.isMarked(): # pragma: no cover\n undoType = 'Mark Finds'\n bunch = u.beforeMark(p, undoType)\n p.setMarked()\n p.setDirty()\n u.afterMark(p, undoType, bunch)\n return p, pos, newpos\n # Searching the pane failed: switch to another pane or node.\n if self._fnm_should_stay_in_node(p):\n # Switching panes is possible. Do so.\n self.in_headline = not self.in_headline\n s = p.h if self.in_headline else p.b\n ins = len(s) if self.reverse else 0\n self.work_s = s\n self.work_sel = (ins, ins, ins)\n else:\n # Switch to the next/prev node, if possible.\n attempts += 1\n p = self._fnm_next_after_fail(p)\n if p: # Found another node: select the proper pane.\n self.in_headline = self._fnm_first_search_pane()\n s = p.h if self.in_headline else p.b\n ins = len(s) if self.reverse else 0\n self.work_s = s\n self.work_sel = (ins, ins, ins)\n return None, None, None", "def matchPositionConn():\n with ar_qui.ar_undoChunkOpen('Match Position'):\n ar_gen.ar_matchPosition()", "def GetMatch(self, start):\n raise NotImplementedError", "def findPat(seq, pat):\n seq = seq.upper()\n pat = pat.upper()\n for i in range(0, len(seq)-len(pat)+1):\n #print \"new pos\", i, seq[i:i+len(pat)],\"<br>\"\n found = True\n for x in range(0, len(pat)):\n #print \"new step\", x, \"<br>\"\n if pat[x]==\"N\":\n #print \"N\",\"<br>\"\n continue\n seqPos = i+x\n if seqPos == len(seq):\n found = False\n break\n if not matchNuc(pat[x], seq[seqPos]):\n #if not patMatch(seq[seqPos], pat[x]):\n #print i, x, pat[x], seq[seqPos], \"no match<br>\"\n found = False\n break\n #print \"match\", i, x, found, \"<br>\"\n if found:\n #print \"yielding\", i, \"<br>\"\n yield i" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
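This row's document appears to be the `scan` method of Pygments' `pygments.scanner.Scanner` helper. Assuming that module is installed and exposes the constructor and `eos` property the code implies, a minimal sketch of driving it as a tiny hand-rolled tokenizer could look like this (the input string and token labels are made up):

    from pygments.scanner import Scanner

    s = Scanner("width = 42")
    tokens = []
    while not s.eos:
        s.scan(r"\s+")                     # skip whitespace; returns False if nothing matched
        if s.scan(r"[A-Za-z_]\w*"):
            tokens.append(("name", s.match))
        elif s.scan(r"\d+"):
            tokens.append(("number", s.match))
        elif s.scan(r"="):
            tokens.append(("op", s.match))

    print(tokens)   # expected: [('name', 'width'), ('op', '='), ('number', '42')]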
Return True if ``ttype`` is a subtype of ``other``. Exists for backwards compatibility; use ``ttype in other`` now.
def is_token_subtype(ttype, other): return ttype in other
[ "def is_subtype_of(self, other):\n # pylint: disable=protected-access\n if type(self) is not type(\n other) or self._callable_params != other._callable_params:\n return False\n\n try:\n tf.nest.assert_same_structure(self._comparable[:-1],\n other._comparable[:-1])\n except (TypeError, ValueError):\n return False\n\n self_elements = tf.nest.flatten(self._comparable[:-1])\n other_elements = tf.nest.flatten(other._comparable[:-1])\n\n def is_subtype_or_equal(a, b):\n try:\n return a.is_subtype_of(b)\n except AttributeError:\n return a == b\n\n return all(\n is_subtype_or_equal(self_element, other_element)\n for (self_element, other_element) in zip(self_elements, other_elements))", "def is_equivalent_to(self, other: 'Type') -> bool:\n return self.is_assignable_from(other) and other.is_assignable_from(self)", "def comparable_with(self, other: 'FieldType') -> bool:\n # https://cloud.google.com/spanner/docs/reference/standard-sql/data-types#comparable_data_types\n return type(self) == type(other)", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, self.__class__)", "def check_equivalent_to(self, other: 'Type') -> None:\n if not self.is_equivalent_to(other):\n raise TypesNotEquivalentError(self, other)", "def __le__(self, other):\n return type(self) == type(other) or type(other) == TAny", "def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:\n left = mypy.types.get_proper_type(left)\n right = mypy.types.get_proper_type(right)\n if (\n isinstance(left, mypy.types.LiteralType)\n and isinstance(left.value, int)\n and left.value in (0, 1)\n and isinstance(right, mypy.types.Instance)\n and right.type.fullname == \"builtins.bool\"\n ):\n # Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.\n return True\n with mypy.state.strict_optional_set(True):\n return mypy.subtypes.is_subtype(left, right)", "def isSubtype(firstType: str, secondType: str) -> bool:\n if secondType == firstType:\n return True\n\n if firstType == 'Element':\n return secondType in ('Element', 'Image', 'Feature',\n 'Collection', 'ImageCollection', 'FeatureCollection')\n elif firstType in ('FeatureCollection', 'Collection'):\n return secondType in ('Collection', 'ImageCollection', 'FeatureCollection')\n elif firstType == object:\n return True\n else:\n return False", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoField_isOfType(self, type)", "def __eq__(self, other):\n return (isvariadic(other) and\n set(self.variadic_type) == set(other.variadic_type))", "def check_identical_to(self, other: 'Type') -> None:\n if not self.is_identical_to(other):\n raise TypesNotIdenticalError(self, other)", "def __eq__(self, other):\r\n if not isinstance(self, type(other)):\r\n return False\r\n\r\n for ftr_type in FeatureType:\r\n if not deep_eq(getattr(self, ftr_type.value), getattr(other, ftr_type.value)):\r\n return False\r\n\r\n return self.bbox == other.bbox and self.timestamp == other.timestamp", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoBase_isOfType(self, type)", "def _issubclass(a, b):\n try:\n return issubclass(a, b)\n except:\n pass\n\n return False", "def isinstance(self, cls):\n return self.cls.issubclass(cls)", "def __eq__(self, other):\n return (isinstance(other, self.__class__)\\\n and (self._ontosers == other._ontosers) )", "def subtype(self, type1_uri, type2_uri):\n # log.info(\"FieldComparison.subtype(%s, %s)\"%(type1_uri, type2_uri))\n if not type2_uri or (type1_uri == type2_uri):\n return True\n if not type1_uri:\n return 
False\n type1_info = self.get_uri_type_info(type1_uri)\n type1_supertype_uris = (type1_info and type1_info.get_all_type_uris()) or []\n # log.info(\"FieldComparison.subtype: type1_uris (supertypes) %r\"%(type1_uris,))\n return type2_uri in type1_supertype_uris", "def type_equals(a: drgn.Type, b: drgn.Type) -> bool:\n return type_canonical_name(a) == type_canonical_name(b)", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.ScXMLObject_isOfType(self, type)", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoError_isOfType(self, type)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
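The one-liner above matches `pygments.token.is_token_subtype`, kept for backwards compatibility: token types are tuple-backed singletons whose `in` operator tests the subtype relation. A brief illustration, assuming Pygments is importable (the token names are just examples):

    from pygments.token import Token, is_token_subtype

    print(is_token_subtype(Token.Name.Builtin, Token.Name))   # True  - Builtin sits below Name
    print(is_token_subtype(Token.Name, Token.Name.Builtin))   # False - not the other way round
    print(Token.Name.Builtin in Token.Name)                   # True  - the recommended spelling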
Return a static text analysation function that returns float values.
def make_analysator(f): def text_analyse(text): try: rv = f(text) except Exception: return 0.0 if not rv: return 0.0 try: return min(1.0, max(0.0, float(rv))) except (ValueError, TypeError): return 0.0 text_analyse.__doc__ = f.__doc__ return staticmethod(text_analyse)
[ "def eval_texts_calc_wrapper(line):\n # These signals are needed to run the functions as written in eval texts.\n signals = {\n 'signal0': np.array([1, 1, 1, 0, 0]),\n 'signal1': np.array([1, 1, 1, 0, 0]),\n 'time': np.array([0, 1, 2, 3, 4])}\n\n # These variable names are needed to run the functions as written in\n # eval texts.\n extra_globals = {\n 'value': 1,\n 'shift_value': 1,\n 'window_size': 3,\n 'min_length': 1,\n 'max_length': 2,\n 'from_value': 1,\n 'to_value': 0,\n 'reset_array': np.array([False, True, False, True, False])}\n\n return calc_wrapper(line, signals, extra_globals)", "def evaluate(self, text):\n normalized = normalize_text(text)\n\n tokens = text_to_ngram(normalized, self.n, self.generate_char_model)\n\n likelihood = 0.0\n\n for ngram in tokens:\n prob = self.smooth(self.delimiter.join(ngram))\n likelihood += math.log(prob,self.log_base)\n return -math.inf if likelihood == 0.0 else likelihood", "def float_func(self, fl, meta):\n fl = fl[0]\n constAddr = self.compiler.addConst(fl.value, fl.type)\n self.compiler.pushOperando(constAddr)\n self.compiler.pushTipo(fl.type)\n return fl", "def make_function(text):\n\n try:\n exec 'f = lambda x: ' + text\n 1+f(2.0) ## test to see if there are any errors in the definition\n except ZeroDivisionError: ## ignore zero division errors\n pass\n except:\n raise FunctionError()\n return f", "def calculation(self):\n\n screen_value = str(self.screen.text()).split(' ')\n screen_text = str(self.screen.text())\n #x = screen_value.split(' ')\n x = (eval(str(screen_text)))\n # val1 = float(screen_value[0])\n # operator = screen_value[1]\n # val2 = float(screen_value[2])\n # result = self.maths(val1, val2, operator)\n self.screen.setText(str(x))", "def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text", "def getFloatFromTC(objTextControl, default = None):\n try:\n return float(objTextControl.GetValue())\n except:\n return default", "def _search_float(self, pattern, arg=None):\n string = self._search(pattern)\n if string:\n try:\n return float(string)\n except: pass\n raise WeatherParseError(text=self.text, arg=arg)", "def test__get_value_types_float(self):\n value, m_type = formatters._get_value_types(1.1)\n assert value == 1.1\n assert m_type == 'float'", "def parse_function(self, text):\n m = re.match('(triangulo|trapecio): (.*)', text)\n if not m:\n raise Exception('Error parseando la funcion: %s' % text)\n\n func_type = m.group(1)\n points_raw = m.group(2)\n\n points = []\n\n pattern = re.compile(r'\\(([\\d\\.]+),\\s*([\\d\\.]+)\\)')\n m = pattern.search(points_raw)\n\n while m:\n points.append(Point(float(m.group(1)), float(m.group(2))))\n m = pattern.search(points_raw, m.end())\n\n if func_type == 'triangulo':\n cls = TriangularFunction\n else:\n cls = TrapezoidalFunction\n return cls(*points)", "def parse_text(text):\n\n def tofloats(lst):\n return (float(t) for t in lst)\n\n try:\n text = text.replace(\",\", \"\") # 25,350.10 MB\n if \"--\" in text:\n return None\n if \"/\" in text: # \"6.19/0.88\" total/avg\n return tuple(tofloats(text.split(\"/\")))\n if \":\" in text: # 11:14 hr:mn\n hour, mins = tofloats(text.split(\":\"))\n return timedelta(hours=hour, minutes=mins)\n return float(text)\n except ValueError:\n _LOGGER.error(\"Error parsing traffic meter stats: %s\", text)\n return None", "def parseDouble(text):\n return float(text or 0)", "def cal_f1(self,base,comp):\n\n if type(base)==type(\"string\"):\n base=word_tokenize(base)\n base = [w.lower() for w in base]\n else:\n base = 
[w.lower() for w in base]\n if type(comp)==type(\"string\"):\n comp=word_tokenize(comp)\n comp = [w.lower() for w in comp]\n else:\n comp = [w.lower() for w in comp]\n precision=0\n for item in comp:\n if item in base:\n precision=precision+1\n precision=precision/len(comp)\n\n recall=0\n for item in base:\n if item in comp:\n recall=recall+1\n recall=recall/len(base)\n\n try:\n F1=2 * (precision * recall) / (precision + recall)\n except ZeroDivisionError:\n F1=0\n\n return F1,precision,recall", "def floats(draw):\n number = draw(st.floats(allow_nan=False, allow_infinity=False))\n fmt = draw(\n st.sampled_from(\n [\"{:.20f}\", \"{:.20e}\", \"{:.20E}\", \"{:+.20f}\", \"{:+.20e}\", \"{:+.20E}\"]\n )\n )\n return Strategy(fmt.format(number), number)", "def Evaluate(self, , *float):\n ...", "def test__get_value_types_float_str(self):\n value, m_type = formatters._get_value_types('1.1')\n assert value == 1.1\n assert m_type == 'float'", "def check_text_recognition(text_rec: str, text_check: str) -> float:\n if len(text_rec) == len(text_check):\n match = re.search(text_check, text_rec)\n if match:\n return 1.0\n else:\n return 0.0\n else:\n match = re.search(r'^[a-zA-Zа-яА-Я]\\d*$', text_check)\n if match:\n text_match = re.search(r'^[a-zA-Zа-яА-Я]\\d*$', text_rec)\n if text_match and text_match[0] == text_check:\n return float(len(text_check) / len(text_rec))\n else:\n return 0.0\n else:\n text_match = re.search(r'\\d*$', text_rec)\n if text_match and text_match[0] == text_check:\n return float(len(text_check) / len(text_rec))\n else:\n return 0.0", "def _parse_float(self, float_element):\n if float_element.text is not None:\n return float(float_element.text)\n else:\n return float()", "def transform_floating_literal(self, node):\n try:\n value = next(node.get_tokens()).spelling\n except (StopIteration, ValueError):\n # No tokens\n value = node.literal\n return float(value)", "def testFloat(self):\n idx = self.d.GetHeaderNames().index('Float')\n \n query = 'Float == 0.10'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('0.1', result[0][idx])\n \n query = 'Float == 1.0'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('1.0', result[0][idx])\n \n query = 'Float < 0'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('-1.5', result[0][idx])\n \n query = 'Float >= 4.3'\n result, ind = self.d.RunQuery(query)\n floats = []\n for i in range(len(result)):\n floats.append(result[i][idx])\n self.assertEqual(['4.3','7.1'], floats)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
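The wrapper in this row resembles Pygments' `make_analysator` (assumed importable from `pygments.util`); it clamps whatever a raw `analyse_text` heuristic returns into the 0.0-1.0 range and turns exceptions and falsy values into 0.0. A hedged usage sketch with a made-up heuristic:

    from pygments.util import make_analysator

    class ShellProbe:
        @make_analysator
        def analyse_text(text):
            # deliberately unbounded / sloppy heuristic
            return 7 if text.startswith("#!") else None

    print(ShellProbe.analyse_text("#!/bin/sh"))   # 1.0 - score clamped into [0.0, 1.0]
    print(ShellProbe.analyse_text("plain text"))  # 0.0 - falsy (or failing) results map to 0.0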
Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/pythonruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/startsomethingwith python', ... r'python(2\.\d)?') True
def shebang_matches(text, regex): index = text.find('\n') if index >= 0: first_line = text[:index].lower() else: first_line = text.lower() if first_line.startswith('#!'): try: found = [x for x in split_path_re.split(first_line[2:].strip()) if x and not x.startswith('-')][-1] except IndexError: return False regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE) if regex.search(found) is not None: return True return False
[ "def rewrite_shebang(data, target, prefix):\n shebang_match = re.match(SHEBANG_REGEX, data, re.MULTILINE)\n prefix_b = prefix.encode('utf-8')\n\n if shebang_match:\n if data.count(prefix_b) > 1:\n # More than one occurrence of prefix, can't fully cleanup.\n return data, False\n\n shebang, executable, options = shebang_match.groups()\n\n if executable.startswith(prefix_b):\n # shebang points inside environment, rewrite\n executable_name = executable.decode(\"utf-8\").split(\"/\")[-1]\n new_shebang = \"#!/usr/bin/env {}{}\".format(\n executable_name, options.decode(\"utf-8\")\n )\n data = data.replace(shebang, new_shebang.encode(\"utf-8\"))\n\n return data, True\n\n return data, False", "def _ExtractInterpFromShebang(data):\n firstline = data.splitlines()[:1]\n if not firstline:\n return None\n\n # The format here can be tricky.\n shebang = firstline[0].strip()\n m = re.match(r\"^#!\\s*([^\\s]+)(?:\\s+([^\\s]+))?\", shebang)\n if not m:\n return None\n\n # If the using `env`, find the target program.\n interp = m.group(1)\n if os.path.basename(interp) == \"env\":\n interp = m.group(2)\n\n return interp", "def test_shebang_test(self):\n with open(\"tests/test_models/test_engine/test_file_storage.py\\\n\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')", "def path_is_partial_match(regex, path):\n for i in range(1, len(regex) + 1):\n partial_regex = regex[:i].rstrip('$') + '$'\n try:\n if re.match(partial_regex, path):\n return True\n except re.error:\n pass\n return False", "def guess_language_by_shebang(line: str) -> int:\n pattern = re.compile(r\"#!/(?:\\S+/)+(\\S+)\")\n matched = pattern.match(line)\n if not matched: return 0\n\n language = matched.group(1).lower()\n for supp_language in SUPPORTED_LANGUAGES.keys():\n if language == supp_language:\n return list(SUPPORTED_LANGUAGES.keys()).index(language)\n return 0", "def resolve_shebang(path, ignoreshell=False):\n try:\n f = file(path)\n try:\n # At most 80 characters in the first line\n header = f.read(80).splitlines()[0]\n finally:\n f.close()\n \n m = _RE_SHEBANG.search(header)\n if not m:\n return []\n cmd, arg = m.group(1,2)\n if os.path.isfile(cmd):\n # Keep this one, the hg script for instance contains a weird windows\n # shebang referencing the current python install.\n cmdfile = os.path.basename(cmd).lower()\n if cmdfile == 'python.exe':\n cmd = 'python'\n pass\n elif cmd not in _SHEBANG_CMDS:\n raise CommandNotFound('Unknown interpreter \"%s\" referenced in '\\\n 'shebang' % header)\n cmd = _SHEBANG_CMDS.get(cmd)\n if cmd is None or (ignoreshell and cmd == 'pysh'):\n return []\n if arg is None:\n return [cmd, win32_to_unix_path(path)]\n return [cmd, arg, win32_to_unix_path(path)]\n except IOError, e:\n if e.errno!=errno.ENOENT and \\\n (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM\n raise\n return []", "def shebang(self):\n try:\n first_line = self.stripped_lines()[0]\n if first_line.startswith(\"#!\"):\n return first_line[2:].strip()\n except IndexError:\n pass\n return \"\"", "def shebang(path):\n return get(path)", "def test_shebang(self):\n with open(\"models/engine/file_storage.py\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')", "def command_line_regex(pattern):\n def _pred(process):\n cmdline = process.properties.get('CmdLine', None)\n return cmdline is not None and re.match(pattern, cmdline)\n return 
_pred", "def expr_match( expr, text):\n\tif expr[0] == '/':\n\t\tif re.match( expr[1:], text):\n\t\t\treturn True\n\telse:\n\t\tif expr[0:2] == '\\/':\n\t\t\treturn text == expr[1:]\n\t\telse:\n\t\t\treturn text == expr\n\treturn False", "def _is_python_file(filename):\n if filename.endswith('.py'):\n return True\n else:\n with open(filename, 'r') as file_handle:\n first_line = file_handle.readline()\n return 'python' in first_line and '#!' in first_line", "def _is_end_comment(line):\n return bool((line.endswith(\"'''\") or line.endswith('\"\"\"')))", "def test_ends_at(line):\n return TEST_END_RE.match(line)", "def contains_fragment(path):\r\n return path.count('#') != 0", "def regex_is_found(response, regex_str):\n if not regex_str:\n return None\n return bool(re.search(regex_str, response.text))", "def is_regex(self):\n return True", "def is_regex(self):\n return False", "def contains_any_py_chars(input_str):\n # return any(c in PYTHON for c in list(input_str.lower()))\n return re.search(r'[python]', input_str.lower()) # good example of search()", "def detect(source):\r\n source = source.replace(' ', '')\r\n if re.search(r'eval\\(function\\(h,u,n,t,e,r', source):\r\n return True\r\n else:\r\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
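Beyond the doctests embedded in the query, two extra calls show that only the last path component of the interpreter line is compared against the regex (assuming the helper is importable from `pygments.util`; the paths and pattern are illustrative):

    from pygments.util import shebang_matches

    py3 = r'pythonw?(3(\.\d)?)?'
    print(shebang_matches('#!/usr/bin/env python3\nprint("hi")', py3))  # True  - last part is "python3"
    print(shebang_matches('#!/opt/python3/bin/gunicorn\n', py3))        # False - last part is "gunicorn"
    print(shebang_matches('#!/bin/bash\necho hi', py3))                 # False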
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE.
def doctype_matches(text, regex): m = doctype_lookup_re.match(text) if m is None: return False doctype = m.group(2) return re.compile(regex).match(doctype.strip()) is not None
[ "def parse_doctype(self):\n if self.seen_doctype == 1:\n xmlproc.XMLProcessor.parse_doctype(self)\n else:\n arizonareport.send_out(4, str(\"Ignoring DOCTYPE (%s,%d)\" % (self.get_current_sysid(), self.get_line())) )\n self.scan_to(\"]>\")\n self.seen_doctype = 1", "def doctype(self, irc, msg, args, url):\n size = conf.supybot.protocols.http.peekSize()\n s = utils.web.getUrl(url, size=size)\n m = self._doctypeRe.search(s)\n if m:\n s = utils.str.normalizeWhitespace(m.group(0))\n irc.reply(s)\n else:\n irc.reply('That URL has no specified doctype.')", "def _document_is_type_1(text):\n type_1 = re.compile('Document ')\n for line in text:\n if type_1.match(line):\n return True\n return False", "def return_doctype(self, document_id):\n if not isinstance(document_id, str):\n raise Exception(f\"document_id not a string\")\n for doctype in self.doctypes.values():\n if doctype.is_valid(document_id):\n return doctype\n raise Exception(\"No associated doctype\")", "def _end_of_type_1_document(text):\n end_of_document = re.compile('Document ')\n if end_of_document.match(text):\n return True\n else:\n return False", "def is_mimetype(v):\n return rx_mimetype.match(v) is not None", "def is_stylesheet(self):\n\n if self.typ == 'stylesheet':\n return True\n elif self.typ == 'generic':\n if self.validfilename:\n extn = ((os.path.splitext(self.validfilename))[1]).lower()\n if extn in self.stylesheet_extns:\n return True\n \n return False", "def validate_mime_type(mimetype):\n valid_prefixes = [\n 'application', 'audio', 'font', 'example', 'image',\n 'message', 'model', 'mulitpart', 'text', 'video'\n ]\n validated = False\n for prefix in valid_prefixes:\n if prefix + '/' == mimetype[:len(prefix)+1]:\n validated = True\n return validated", "def _test_re(string):\n try:\n x = re.compile(string)\n return True\n except re.error:\n return False", "def is_assessor_good_type(assessor_obj, types_list, full_regex=False):\n atype = assessor_obj.attrs.get('xsiType')\n proctype = assessor_obj.attrs.get('%s/proctype' % atype)\n for exp in types_list:\n regex = extract_exp(exp, full_regex)\n if regex.match(proctype):\n return True\n return False", "def is_valid_pdf(fname):\n try:\n pdfrw.PdfReader(fname)\n except pdfrw.PdfParseError:\n return False\n except Exception:\n return True\n return True", "def is_re(v):\n try:\n re.compile(v)\n return True\n except Exception:\n return False", "def is_regex(self):\n return True", "def match(string):\n # Avoid circular dependencies by importing here.\n # pylint: disable=import-outside-toplevel\n from fparser.two.Fortran2008 import Component_Attr_Spec_List\n\n return Type_Declaration_StmtBase.match(\n Declaration_Type_Spec, Component_Attr_Spec_List, Component_Decl_List, string\n )", "def check_regex(self,regexp) :\n return re.compile(regexp).match(self.name)", "def isRegularDirective(self):\n return self._isRegularDirective", "def is_valid_xml(medline_xml, parser=None, tree=None):\n if parser is None:\n parser = etree.XMLParser(load_dtd=True, no_network=False)\n if tree is None:\n tree = etree.parse(medline_xml, parser)\n dtd = tree.docinfo.externalDTD\n return dtd.validate(tree)", "def accepts_html(accept_header):\r\n return (accept_header is not None\r\n and _accepts_html_re.search(accept_header) is not None)", "def is_regex(self):\n return False", "def validate(self):\n\n for line in self.htmlstring.split(\"\\n\"):\n if re.search(r\"<(\\\"[^\\\"]*\\\"|'[^']*'|[^'\\\">])*>\",line):\n print(\"Valid html string!\")\n else:\n print(\"Invalid html string - {}\".format(line))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
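A hedged usage sketch for this row: the `doctype_lookup_re` the function relies on is defined elsewhere in the same module, so the exact captured text is an assumption, but with a typical XHTML doctype the calls should behave as commented (again assuming `pygments.util` is importable):

    from pygments.util import doctype_matches

    xhtml = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
             '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n<html></html>')

    print(doctype_matches(xhtml, r'html\s+PUBLIC'))        # True  - doctype starts with 'html PUBLIC'
    print(doctype_matches(xhtml, r'svg'))                  # False - different root element
    print(doctype_matches('<p>no doctype</p>', r'html'))   # False - nothing to match against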
Parse block attributes. >>> t = Textile() >>> t.pba(r'\3') '' >>> t.pba(r'\\3', element='td') ' colspan="3"' >>> t.pba(r'/4', element='td') ' rowspan="4"' >>> t.pba(r'\\3/4', element='td') ' colspan="3" rowspan="4"' >>> t.pba('^', element='td')
def pba(self, block_attributes, element=None): style = [] aclass = '' lang = '' colspan = '' rowspan = '' block_id = '' if not block_attributes: return '' matched = block_attributes if element == 'td': m = re.search(r'\\(\d+)', matched) if m: colspan = m.group(1) m = re.search(r'/(\d+)', matched) if m: rowspan = m.group(1) if element == 'td' or element == 'tr': m = re.search(r'(%s)' % self.vertical_align_re, matched) if m: style.append("vertical-align:%s;" % self.vAlign[m.group(1)]) m = re.search(r'\{([^}]*)\}', matched) if m: style.append(m.group(1).rstrip(';') + ';') matched = matched.replace(m.group(0), '') m = re.search(r'\[([^\]]+)\]', matched, re.U) if m: lang = m.group(1) matched = matched.replace(m.group(0), '') m = re.search(r'\(([^()]+)\)', matched, re.U) if m: aclass = m.group(1) matched = matched.replace(m.group(0), '') m = re.search(r'([(]+)', matched) if m: style.append("padding-left:%sem;" % len(m.group(1))) matched = matched.replace(m.group(0), '') m = re.search(r'([)]+)', matched) if m: style.append("padding-right:%sem;" % len(m.group(1))) matched = matched.replace(m.group(0), '') m = re.search(r'(%s)' % self.horizontal_align_re, matched) if m: style.append("text-align:%s;" % self.hAlign[m.group(1)]) m = re.search(r'^(.*)#(.*)$', aclass) if m: block_id = m.group(2) aclass = m.group(1) if self.restricted: if lang: return ' lang="%s"' % lang else: return '' result = [] if style: result.append(' style="%s"' % "".join(style)) if aclass: result.append(' class="%s"' % aclass) if lang: result.append(' lang="%s"' % lang) if block_id: result.append(' id="%s"' % block_id) if colspan: result.append(' colspan="%s"' % colspan) if rowspan: result.append(' rowspan="%s"' % rowspan) return ''.join(result)
[ "def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encoding) == 'b':\n text = b64decode(raw_text)\n elif str.lower(encoding) == 'q':\n text = quopri.decodestring(raw_text)\n else:\n raise ValueError(f\"Unknown encoding '{encoding}'\") from None\n exit(1)\n\n decoded = text.decode(charset)\n return decoded", "def test_pi_with_non_attribute_data(self):\n pi_data = u\"\"\" \\t keyword att1=\"value1\" \"\"\"\n data = parse_pi_data(pi_data)\n self.assertEqual(data, {u\"keyword\": None, u\"att1\": u\"value1\"})", "def get_attributes(html):\n\n for i, c in enumerate(html):\n if c == '>':\n if USE_BUFFER:\n html = buffer(html, 0, i)\n else:\n html = html[:i]\n break\n return dict((name.lower().strip(), value.strip('\\'\" ')) for (name, value) in attributes_regex.findall(html))", "def get_attribute(self):\n data = self.data\n # Step 1 (skip chars)\n c = data.skip(skip1)\n assert c is None or len(c) == 1\n # Step 2\n if c in (b\">\", None):\n return None\n # Step 3\n attr_name = []\n attr_value = []\n # Step 4 attribute name\n while True:\n if c == b\"=\" and attr_name:\n break\n elif c in space_chars_bytes:\n # Step 6!\n c = data.skip()\n break\n elif c in (b\"/\", b\">\"):\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_name.append(c)\n # Step 5\n c = next(data)\n # Step 7\n if c != b\"=\":\n data.previous()\n return b\"\".join(attr_name), b\"\"\n # Step 8\n next(data)\n # Step 9\n c = data.skip()\n # Step 10\n if c in (b\"'\", b'\"'):\n # 10.1\n quote_char = c\n while True:\n # 10.2\n c = next(data)\n # 10.3\n if c == quote_char:\n next(data)\n return b\"\".join(attr_name), b\"\".join(attr_value)\n # 10.4\n else:\n attr_value.append(c)\n elif c == b\">\":\n return b\"\".join(attr_name), b\"\"\n elif c is None:\n return None\n else:\n attr_value.append(c)\n # Step 11\n while True:\n c = next(data)\n if c in spaces_angle_brackets:\n return b\"\".join(attr_name), b\"\".join(attr_value)\n elif c is None:\n return None\n else:\n attr_value.append(c)", "def parse_attrs(buf):\n attrs = []\n while buf:\n t = ord(buf[0])\n l = ord(buf[1])\n if l < 2:\n break\n d, buf = buf[2:l], buf[l:]\n attrs.append((t, d))\n return attrs", "def handleBlock(block):\n mlines = filter(lambda line : line.startswith('-'), block)\n plines = filter(lambda line : line.startswith('+'), block)\n mcount = len(mlines)\n pcount = len(plines)\n if mcount > pcount:\n plines.extend([''] * (mcount - pcount))\n elif pcount > mcount:\n mlines.extend([''] * (pcount - mcount))\n count = max(mcount, pcount)\n return [(mlines[i],plines[i]) for i in range(count)]", "def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) 
\n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element", "def findAttributes(self, tagName, tagLine):\n\t\n\t\tattributes = {}\n\t\t\n\t\t# Try to find links\n\t\tlinkMatch = re.search('till\\s(BILD|SIDA)?\\s?\\'([a-zåäö\\.\\/\\-\\_]+)\\'', tagLine)\n\t\n\t\t# If link is found\n\t\tif linkMatch:\n\t\t\t\n\t\t\t# Modifier can be BILD or SIDA\n\t\t\tmodifier = linkMatch.group(1)\n\t\t\thref = linkMatch.group(2)\n\t\t\n\t\t\t# If image\n\t\t\tif modifier == 'BILD':\n\t\t\t\thref = 'bilder/'+href\n\t\t\t\t\n\t\t\telif modifier == 'SIDA':\n\t\t\t\thref = 'sidor/'+href\n\t\t\t\t\n\t\t\t\t# If .html is missing\n\t\t\t\tif href[-5:] != '.html':\n\t\t\t\t\thref += '.html'\n\t\t\t\t\t\n\t\t\t# Find www. 
links\n\t\t\telif href[:4] == 'www':\n\t\t\t\thref = 'http://'+href\n\t\t\t\t\n\t\t\t\n\t\t\t# Remove href part from tagName\n\t\t\ttagLine = tagLine[:linkMatch.start()] + tagLine[linkMatch.end():]\n\t\t\t\n\t\t\t# Set link attribute\n\t\t\tattributes['href'] = href\n\t\t\n\t\t# If image, set src\n\t\tif tagName == 'img':\n\t\t\tsrcMatch = re.search('\\'([a-zåäö\\.\\/\\-\\_]+)\\'', tagLine)\n\t\t\n\t\t\tif srcMatch:\n\t\t\t\tsrc = 'bilder/'+srcMatch.group(1)\n\t\t\t\n\t\t\t\t# Remove href part from tagName\n\t\t\t\ttagLine = tagLine[:srcMatch.start()] + tagLine[srcMatch.end():]\n\t\t\t\n\t\t\tattributes['src'] = src\n\t\t\n\t\t# Find classes\n\t\tclassMatch = re.findall('\\.([a-zåäö\\-]+)', tagLine)\n\t\n\t\tif classMatch:\n\t\n\t\t\tattributes['class'] = ' '.join(classMatch)\n\t\n\t\treturn attributes", "def _parse_bead(line):\n\n bead_index = int(line.split(\":\")[0].strip())\n bead_name = line.split(\":\")[1].strip()\n mapping_indices = eval(line.split(\":\")[2])\n bead_mapping = BeadMapping(bead_name, mapping_indices)\n return (bead_index, bead_mapping)", "def _process_attributes(self, attributes_element):\n for element in list(attributes_element):\n if element.tag != \"attribute\":\n raise AglyphError(\n \"unexpected element: attributes/%s\" % element.tag)\n name = element.get(\"name\")\n if not name:\n raise AglyphError(\n \"attribute/@name is required and cannot be empty\")\n value = self._unserialize_element_value(element)\n yield (name, value)", "def glyphparse(self, addr):\n s = struct.Struct('<bbbbL')\n width, height, bytesperline, somea, pixptr = s.unpack_from(self.mem, addr - self.addr)\n # somea seems to be padding; always 0 in v2.32\n\n # sys.stdout.write('glyphparse: address 0x%x wid=%d height=%d bpl=%d somea=%d pixptr=0x%x\\n' % (addr, width, height, bytesperline, somea, pixptr) )\n if height <= 8:\n height *= 2 # CN characters are right height, latin are reported 1/2 height\n\n img = {'address': addr, 'width': width, 'height': height, 'palette': None, 'pixels': []}\n\n for y in range(height):\n linebits = self.readbits(pixptr + y * bytesperline, width)\n line = [int(color) for color in linebits]\n img['pixels'].append(line)\n\n img['checksum'] = self.gfxchecksum(img)\n return img", "def parse_PAUP_log(branch_lengths):\n BL_table = get_BL_table(branch_lengths)\n BL_dict = {}\n for line in BL_table:\n info = find_fields(line)\n parent = info[\"parent\"]\n bl = float(info[\"bl\"])\n taxa = parse_taxa(info[\"taxa\"])\n\n BL_dict[taxa] = (parent, bl)\n\n return BL_dict", "def extract_dynamic_tag_attributes(line, source, syntax, inside_parentheses=False):\r\n if not line.startswith(DYNAMIC_ATTRIBUTES_PREFIX):\r\n return None\r\n line = line[len(DYNAMIC_ATTRIBUTES_PREFIX):]\r\n\r\n terminators = {\r\n WHITESPACE,\r\n NEWLINE,\r\n LITERAL_CONTENT_PREFIX,\r\n LITERAL_CONTENT_SPACE_PREFIX,\r\n # we want to terminate extract_identifier() by DYNAMIC_ATTRIBUTES_PREFIX,\r\n # but it contains two characters, whereas the function checks only one character.\r\n # Therefore, we use a single asterisk terminator here instead of DYNAMIC_ATTRIBUTES_PREFIX.\r\n '*',\r\n INLINE_TAG_SEPARATOR,\r\n LINE_BREAK\r\n }\r\n if inside_parentheses:\r\n terminators.add(CLOSE_BRACE)\r\n\r\n result = extract_identifier(line, source, '', terminators)\r\n if result is None:\r\n return None\r\n\r\n expr, tail, source = result\r\n attributes = u(\r\n '\\n%for __plim_key__, __plim_value__ in {expr}.items():\\n'\r\n '{var_start}__plim_key__{var_end}=\"{var_start}__plim_value__{var_end}\"\\n'\r\n '%endfor\\n'\r\n 
).format(\r\n expr=expr,\r\n var_start=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,\r\n var_end=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE\r\n )\r\n return attributes, tail, source", "def buildBlock(self, b):\n \"\"\"\n s = self.style\n colClass = self.getColClass(s.colWidth)\n b.block(self)\n b.div(class_=colClass, marginright=s.columnMarginRight, width=s.colWidth,\n marginleft=s.columnMarginLeft, margintop=s.columnMarginTop,\n paddingleft=s.columnPaddingLeft, float=s.columnFloat,\n display=s.columnDisplay,\n media=(\n \tMedia(width=s.columnWidthMobile,\n\t\t\t\tdisplay=s.columnDisplayMobile,\n float=s.columnFloatMobile,\n marginleft=s.columnMarginLeftMobile,\n marginright=s.columnMarginRightMobile,\n paddingleft=s.columnPaddingLeftMobile,\n paddingright=s.columnPaddingRightMobile,),\n ))\n \"\"\"\n self.buildColumn(b)\n \"\"\"\n b._div(comment=colClass)\n b._block(self)\n \"\"\"", "def readBpseq(bpseq_fn):\n content = open(bpseq_fn).readlines()\n seq = [-1] * len(content)\n struct = [-1] * len(content)\n for i, entry in enumerate(content):\n pos, base, pair = entry.strip().split()\n seq[i] = base\n p = int(pair)\n struct[i] = [1, p][p == 0]\n return \"\".join(seq), struct", "def compute_bb_properties(md):\n image_widths = md.apply(lambda row: row['bbx2'] - row['bbx1'], axis=1)\n image_heights = md.apply(lambda row: row['bby2'] - row['bby1'], axis=1)\n image_area = image_widths * image_heights\n image_properties = pd.concat([image_widths, image_heights, image_area], axis = 1)\n image_properties.columns = ['Width', 'Height', 'Area']\n return image_properties", "def test_placement(self):\n p = url_block_pattern\n \n self.assertEqual(_re_match(p, '[text](page)'), '[text](page)')\n self.assertEqual(_re_match(p, 'Lorem [text](page) ipsum.'),\n '[text](page)')\n self.assertEqual(_re_match(p, 'Lorem ipsum [text.](page)'), \n '[text.](page)')\n self.assertEqual(_re_match(p, '[Text](page) lorem ipsum.'), \n '[Text](page)')\n \n self.assertEqual(_re_match(p, 'Lorem\\n[text](page)\\nipsum'),\n '[text](page)')", "def test_padding(self):\n for pad in [\"pad_first\", \"pad_before_eq\", \"pad_after_eq\"]:\n node = Attribute(wraptext(\"id\"), wraptext(\"foo\"), **{pad: \"\\n\"})\n self.assertEqual(\"\\n\", getattr(node, pad))\n setattr(node, pad, \" \")\n self.assertEqual(\" \", getattr(node, pad))\n setattr(node, pad, None)\n self.assertEqual(\"\", getattr(node, pad))\n self.assertRaises(ValueError, setattr, node, pad, True)", "def parse_cell_parameters(txt):\n # Define re for the card block.\n cell_parameters_block_re = re.compile(r\"\"\"\n ^ [ \\t]*\n CELL_PARAMETERS [ \\t]*\n [{(]? \\s* (?P<units>[a-z]*) \\s* [)}]? \\s* [\\n]\n (?P<block>\n (\n (\n \\s* # White space in front of the element spec is ok\n (\n # First number\n (\n [-|+]? # Plus or minus in front of the number (optional)\n (\\d* # optional decimal in the beginning .0001 is ok, for example\n [\\.] # There has to be a dot followed by\n \\d+) # at least one decimal\n | # OR\n (\\d+ # at least one decimal, followed by\n [\\.]? # an optional dot\n \\d*) # followed by optional decimals\n ([E|e|d|D][+|-]?\\d+)? # optional exponents E+03, e-05, d0, D0\n \n (\n \\s+ # White space between numbers\n [-|+]? # Plus or minus in front of the number (optional)\n (\\d* # optional decimal in the beginning .0001 is ok, for example\n [\\.] # There has to be a dot followed by\n \\d+) # at least one decimal\n | # OR\n (\\d+ # at least one decimal, followed by\n [\\.]? # an optional dot\n \\d*) # followed by optional decimals\n ([E|e|d|D][+|-]?\\d+)? 
# optional exponents E+03, e-05, d0, D0\n ){2} # I expect three float values\n )\n |\n \\#\n |\n ! # If a line is commented out, that is also ok\n )\n .* # I do not care what is after the comment or the vector\n | # OR\n \\s* # A line only containing white space\n )\n [\\n] # line break at the end\n ){3} # I need exactly 3 vectors\n )\n \"\"\", RE_FLAGS)\n \n \n cell_vector_regex = re.compile(r\"\"\"\n ^ # Linestart\n [ \\t]* # Optional white space\n (?P<x> # Get x\n [\\-|\\+]? ( \\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<y> # Get y\n [\\-|\\+]? (\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<z> # Get z\n [\\-|\\+]? (\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n \"\"\", re.X | re.M) \n #~ cell_parameters_block_re = re.compile(r\"\"\"\n #~ ^ [ \\t]* CELL_PARAMETERS [ \\t]*\n #~ [{(]? [ \\t]* (?P<units>\\S+?)? [ \\t]* [)}]? [ \\t]* $\\n\n #~ (?P<block>\n #~ (?:\n #~ ^ [ \\t]* \\S+ [ \\t]+ \\S+ [ \\t]+ \\S+ [ \\t]* $\\n?\n #~ ){3}\n #~ )\n #~ \"\"\", RE_FLAGS)\n # Define re for the info contained in the block.\n #~ atomic_species_re = re.compile(r\"\"\"\n #~ ^ [ \\t]* (\\S+) [ \\t]+ (\\S+) [ \\t]+ (\\S+) [ \\t]* $\\n?\n #~ \"\"\", RE_FLAGS)\n # Find the card block and extract units and the lines of the block.\n match = cell_parameters_block_re.search(txt)\n if not match:\n return None\n # Use specified units or None if not specified.\n units = match.group('units')\n if units is not None:\n units = units.lower()\n # Get the string containing the lines of the block.\n if match.group('block') is None:\n raise ParsingError(\n 'The CELL_PARAMETER card block was parsed as empty in\\n' + txt\n )\n else:\n blockstr = match.group('block')\n # Define a small helper function to convert strings of fortran-type floats.\n fortfloat = lambda s: float(s.replace('d', 'e').replace('D', 'E'))\n # Now, extract the lattice vectors.\n lattice_vectors = []\n for match in cell_vector_regex.finditer(blockstr):\n lattice_vectors.append(map(fortfloat, (match.group('x'),match.group('y'),match.group('z'))))\n info_dict = dict(units=units, cell=lattice_vectors)\n return info_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
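The `pba` method above is mostly a pipeline of small regexes. As a self-contained illustration that needs no Textile install, three of those patterns can be exercised directly on a made-up attribute string to recover the style, class and id pieces:

    import re

    attrs = '(myclass#myid){color:red}'
    style = re.search(r'\{([^}]*)\}', attrs).group(1)      # 'color:red'
    paren = re.search(r'\(([^()]+)\)', attrs).group(1)     # 'myclass#myid'
    aclass, block_id = re.match(r'^(.*)#(.*)$', paren).groups()
    print(style, aclass, block_id)   # color:red myclass myid
    # pba() would render these as: style="color:red;" class="myclass" id="myid"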
Checks whether the text has text not already enclosed by a block tag. >>> t = Textile() >>> t.hasRawText('<p>foo bar biz baz</p>') False >>> t.hasRawText(' why yes, yes it does') True
def hasRawText(self, text): r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip() r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r) return '' != r
[ "def has_text(self):", "def is_html_like(text):\n if isinstance(text, str):\n text = text.strip()\n if text.startswith(\"<\"):\n return True\n return False\n return False", "def has_text_content(element):\n return element.string is not None", "def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False", "def is_unstructured_text(self):\r\n\r\n return not self.label.isupper()", "def has_richtext_widget(self):\n return self.has_field([self.rich_text_heading, strip_tags(self.rich_text)])", "def has_text(self, *args, **kwargs):\n\n return self.assert_text(*args, **kwargs)", "def mightRender(self, text):\r\n return True", "def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False", "def isnewblock(self):\r\n # First get read of the leading empty lines:\r\n string = re.sub(r\"\\A([\\t ]*\\n)*\", \"\", self.string)\r\n if re.match(r\"elif|else|finally|except| |\\t\", string):\r\n return False\r\n else:\r\n return True", "def wait_for_text(self, text):\n self.wait_for(lambda: text in self.currentFrame().toPlainText(),\n 'Can\\'t find \"%s\" in current frame' % text)\n return True", "def has_dirty_blocktrans(path):\n with io.open(path, encoding='utf-8') as infile:\n for line in infile:\n if '{% blocktrans' in line and '{% endblocktrans' not in line:\n if 'trimmed' not in line:\n return True\n return False", "def verify(self, plain_text):", "def _isTextValid(self, strText):\n clusterLanguageId = self.getLanguageId()\n\n #Some regex\n for regex, regexLanguageId in self.document.regex_filter_list:\n regexLanguageId = int(regexLanguageId)\n #Does it match the text language\n if regexLanguageId != clusterLanguageId and \\\n regexLanguageId != 0:\n continue\n #Ignore case available\n #if re.search(regex, strText, re.IGNORECASE) != None:\n if re.search(regex, strText, flags=re.UNICODE) != None:\n TextCluster.logger.info(\"Discard:%s\\n%s\" % (regex.encode(\"utf-8\"), strText.encode(\"utf-8\")))\n return False\n\n return True", "def any_text_contains(\n self, text: str, deep: bool = True, separator: str = \"\", strip: bool = False\n ) -> bool:\n ...", "def _isTextValid(self, strText):\n clusterLanguageId = self.getLanguageId()\n\n # Some regex\n for regex, regexLanguageId in self.document.regex_filter_list:\n regexLanguageId = int(regexLanguageId)\n # Does it match the text language\n if regexLanguageId != clusterLanguageId and \\\n regexLanguageId != 0:\n continue\n # Ignore case available\n # if re.search(regex, strText, re.IGNORECASE) != None:\n if re.search(regex, strText, flags=re.UNICODE) != None:\n TextCluster.logger.info(\"Discard:%s\\n%s\" % (\n regex, strText))\n return False\n\n return True", "def paragraph_is_text_like(p):\n return not isinstance(p, pyth.document.Image)", "def has_raw(self):\n return self.__has_volume(\"/volumes/raw\")", "def check_text_slots(response_dict : Dict) -> bool:\n if re.findall(\"(?<=\\{)(.*?)(?=\\})\", response_dict['text']):\n return True\n else:\n False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
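To make this row's doctest verifiable without instantiating the surrounding Textile class, the same two substitutions can be restated as a stand-alone function; this is a re-statement of the document's logic, not an import from the textile package:

    import re

    def has_raw_text(text):
        # identical substitutions to the method above, outside its class
        r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>',
                       re.S).sub('', text.strip()).strip()
        r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
        return '' != r

    print(has_raw_text('<p>foo bar biz baz</p>'))            # False - everything sits inside a block tag
    print(has_raw_text(' why yes, yes it does'))             # True  - bare text remains
    print(has_raw_text('<p>wrapped</p> and a stray tail'))   # True  - the tail survives both substitutions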